# verifier/lib/__init__.py
from pathlib import Path
from sys import platform
from cffi import FFI
from platform import architecture, machine
_ffi_def = """
extern char* ffiverify(char* proofQREncoded, char* configpath);
extern void freeCString(char* s);
"""
def _libpath():
return Path(__file__).parent.absolute()
def listlibs():
return [x.name for x in list(_libpath().glob('*.so')) + list(_libpath().glob('*.dll'))]
def loadlib(lib='auto'):
if lib == 'auto':
lib = _autodetect()
lib = _libpath() / lib
if not lib.is_file():
        raise ValueError(f'Could not find verifier lib {lib.name}; choose one of: {", ".join(listlibs())}')
ffi = FFI()
ffi.cdef(_ffi_def)
verifier = ffi.dlopen(str(lib.absolute()))
return verifier, ffi
def _autodetect():
libos = None
libext = '.so'
if platform.startswith("linux"):
libos = 'linux'
elif platform.startswith("darwin"):
libos = 'darwin'
elif platform.startswith("win32"):
libos = "windows"
libext = '.dll'
if libos is None:
        raise ValueError(f'Auto-detect failed, OS unknown: {platform}')
libarch = None
arch = architecture()
mach = machine().lower()
if 'arm' in mach:
if 'v7' in mach:
libarch = 'armv7'
elif 'v6' in mach:
libarch = 'armv6'
# detect rpi
if libos == 'linux':
chips = ('BCM2835', )
with open('/proc/cpuinfo', 'r') as cpuinfo:
for line in cpuinfo:
if line.startswith('Hardware') and any([chip in line for chip in chips]):
libarch = 'armv6l'
elif 'v5' in mach:
libarch = 'armv5'
elif '64' in mach:
libarch = 'arm64'
if 'aarch64' in mach:
libarch = 'arm64'
if 'x86' in mach:
if '64' in mach:
libarch = 'amd64'
else:
libarch = '386'
    if 'amd64' in mach:
        libarch = 'amd64'
    if mach in ('i686', 'i386', 'i486'):
        libarch = '386'
if libarch is None:
        raise ValueError(f'Auto-detect failed, CPU architecture unknown: {arch} {mach}')
return f"verifier-{libos}-{libarch}{libext}" | verifier/lib/__init__.py | from pathlib import Path
from sys import platform
from cffi import FFI
from platform import architecture, machine
_ffi_def = """
extern char* ffiverify(char* proofQREncoded, char* configpath);
extern void freeCString(char* s);
"""
def _libpath():
return Path(__file__).parent.absolute()
def listlibs():
return [x.name for x in list(_libpath().glob('*.so')) + list(_libpath().glob('*.dll'))]
def loadlib(lib='auto'):
if lib == 'auto':
lib = _autodetect()
lib = _libpath() / lib
if not lib.is_file():
raise ValueError(f'Could not find verifier lib {lib.name} choose one of: {", ".join(listlibs())}')
ffi = FFI()
ffi.cdef(_ffi_def)
verifier = ffi.dlopen(str(lib.absolute()))
return verifier, ffi
def _autodetect():
libos = None
libext = '.so'
if platform.startswith("linux"):
libos = 'linux'
elif platform.startswith("darwin"):
libos = 'darwin'
elif platform.startswith("win32"):
libos = "windows"
libext = '.dll'
if libos is None:
raise ValueError(f'Auto detect failed OS unknown: {platform}')
libarch = None
arch = architecture()
mach = machine().lower()
if 'arm' in mach:
if 'v7' in mach:
libarch = 'armv7'
elif 'v6' in mach:
libarch = 'armv6'
# detect rpi
if libos == 'linux':
chips = ('BCM2835', )
with open('/proc/cpuinfo', 'r') as cpuinfo:
for line in cpuinfo:
if line.startswith('Hardware') and any([chip in line for chip in chips]):
libarch = 'armv6l'
elif 'v5' in mach:
libarch = 'armv5'
elif '64' in mach:
libarch = 'arm64'
if 'aarch64' in mach:
libarch = 'arm64'
if 'x86' in mach:
if '64' in mach:
libarch = 'amd64'
else:
libarch = '386'
if 'amd64' in mach:
libarch = 'amd64'
b32arches = ['i686', 'i386', 'i486']
if mach.lower() in b32arches:
if '64' in mach:
libarch = 'amd64'
else:
libarch = '386'
if libarch is None:
raise ValueError(f'Auto detect failed CPU Architecture unknown: {arch} {mach}')
return f"verifier-{libos}-{libarch}{libext}" | 0.264263 | 0.104432 |
from pathlib import Path
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
)
import attr
import pytest
from .constants import EXIT_STATUS_FAIL_UNUSED
from .data import SnapshotFossils
from .report import SnapshotReport
if TYPE_CHECKING:
from .assertion import SnapshotAssertion
from .extensions.base import AbstractSyrupyExtension
@attr.s
class SnapshotSession:
base_dir: str = attr.ib()
update_snapshots: bool = attr.ib()
warn_unused_snapshots: bool = attr.ib()
_invocation_args: Tuple[str, ...] = attr.ib(factory=tuple)
report: Optional["SnapshotReport"] = attr.ib(default=None)
# All the collected test items
_collected_items: Set["pytest.Item"] = attr.ib(factory=set)
    # All the selected test items. Each maps to False until the test item has been run.
_selected_items: Dict[str, bool] = attr.ib(factory=dict)
_assertions: List["SnapshotAssertion"] = attr.ib(factory=list)
_extensions: Dict[str, "AbstractSyrupyExtension"] = attr.ib(factory=dict)
def collect_items(self, items: List["pytest.Item"]) -> None:
self._collected_items.update(self.filter_valid_items(items))
def select_items(self, items: List["pytest.Item"]) -> None:
for item in self.filter_valid_items(items):
self._selected_items[getattr(item, "nodeid", None)] = False
def start(self) -> None:
self.report = None
self._collected_items = set()
self._selected_items = {}
self._assertions = []
self._extensions = {}
def ran_item(self, nodeid: str) -> None:
self._selected_items[nodeid] = True
def finish(self) -> int:
exitstatus = 0
self.report = SnapshotReport(
base_dir=self.base_dir,
collected_items=self._collected_items,
selected_items=self._selected_items,
assertions=self._assertions,
update_snapshots=self.update_snapshots,
warn_unused_snapshots=self.warn_unused_snapshots,
invocation_args=self._invocation_args,
)
if self.report.num_unused:
if self.update_snapshots:
self.remove_unused_snapshots(
unused_snapshot_fossils=self.report.unused,
used_snapshot_fossils=self.report.used,
)
elif not self.warn_unused_snapshots:
exitstatus |= EXIT_STATUS_FAIL_UNUSED
return exitstatus
def register_request(self, assertion: "SnapshotAssertion") -> None:
self._assertions.append(assertion)
discovered_extensions = {
discovered.location: assertion.extension
for discovered in assertion.extension.discover_snapshots()
if discovered.has_snapshots
}
self._extensions.update(discovered_extensions)
def remove_unused_snapshots(
self,
unused_snapshot_fossils: "SnapshotFossils",
used_snapshot_fossils: "SnapshotFossils",
) -> None:
"""
        Remove all unused snapshots using the registered extension for the fossil file.
        If there is no registered extension and the location is unused, delete the file.
"""
for unused_snapshot_fossil in unused_snapshot_fossils:
snapshot_location = unused_snapshot_fossil.location
extension = self._extensions.get(snapshot_location)
if extension:
extension.delete_snapshots(
snapshot_location=snapshot_location,
snapshot_names={
snapshot.name for snapshot in unused_snapshot_fossil
},
)
elif snapshot_location not in used_snapshot_fossils:
Path(snapshot_location).unlink()
@staticmethod
def filter_valid_items(items: List["pytest.Item"]) -> Iterable["pytest.Item"]:
        return (item for item in items if isinstance(item, pytest.Function))
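

# Sketch of the call order a pytest plugin might use to drive SnapshotSession.
# The hook names below are standard pytest hooks; how syrupy actually wires
# them up may differ, so treat this as an illustration of the intended order only:
#   session = SnapshotSession(base_dir=..., update_snapshots=..., warn_unused_snapshots=...)
#   session.start()                      # reset per-run state at session start
#   session.collect_items(items)         # from pytest_collection_modifyitems
#   session.select_items(items)          # items actually selected to run
#   session.ran_item(item.nodeid)        # after each test item finishes
#   exitstatus |= session.finish()       # fold unused-snapshot status into the exit code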

# mmdet/datasets/ochuman.py
from pycocotools import mask as maskUtils
import mmcv
import numpy as np
from .coco import CocoDataset
from .builder import DATASETS
@DATASETS.register_module()
class OCHumanDataset(CocoDataset):
    CLASSES = ('person', )
def _ochuman_segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
                    # encode the binary mask as COCO RLE and make 'counts' JSON-serializable
                    maskencode = maskUtils.encode(np.asfortranarray(segms[i]))
                    maskencode['counts'] = maskencode['counts'].decode('ascii')
                    data['segmentation'] = maskencode
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._ochuman_segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
        return result_files
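

# Minimal illustration of the mask -> COCO RLE round trip used in
# _ochuman_segm2json above. The mask here is synthetic; only the pycocotools
# calls mirror what the dataset class does.
if __name__ == "__main__":
    binary_mask = np.zeros((4, 6), dtype=np.uint8)
    binary_mask[1:3, 2:5] = 1
    rle = maskUtils.encode(np.asfortranarray(binary_mask))
    rle['counts'] = rle['counts'].decode('ascii')  # bytes -> str, JSON-serializable
    # pycocotools expects bytes counts when decoding, so convert back first
    rle['counts'] = rle['counts'].encode('ascii')
    restored = maskUtils.decode(rle)
    assert (restored == binary_mask).all()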

# wdbibtex/latex.py
import codecs
import locale
import pathlib
import os
import re
class Cite:
"""Citation package emurating contents and commands.
Parameters
----------
citeleft : str, default '['
Left delimiter of list.
citeright : str, default ']'
Right delimiter of list.
use_cite_package : bool, default False
        If False, emulate LaTeX's default citation handling.
        If True, emulate the cite package's behavior (sorted, range-compressed numbers).
"""
def __init__(
self,
citeleft='[',
citeright=']',
targetbasename='wdbib',
use_cite_package=False,
workdir='.tmp',
):
"""Costructor of Cite.
"""
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self._targetbasename = targetbasename
self._replacer = None
self._citation = []
self._bibstyle = None
self._bibdata = None
self._bibcite = {}
self._conversion_dict = {}
self._citation_labels = dict()
self._citeleft = citeleft
self._citeright = citeright
self._use_cite_package = use_cite_package
self._citation_keys_in_context = []
@property
def citeleft(self):
r"""Left delimiter of list. Default '['.
Returns
-------
str
Left delimiter of list.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.citeleft
'['
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> tx.citeleft = '('
>>> tx.citeleft
'('
>>> tx.cite('\\cite{key1}')
'(1]'
>>> tx.cite('\\cite{key2,key3}')
'(2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'(3,2,1]'
"""
return self._citeleft
@citeleft.setter
def citeleft(self, s):
        if not isinstance(s, str):
            raise TypeError(
                'expected string object but '
                '%s object given.' % type(s))
        self._citeleft = s
@property
def citeright(self):
r"""Right delimiter of list. Default ']'.
Returns
-------
str
Right delimiter of list.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.citeright
']'
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> tx.citeright = ')'
>>> tx.citeright
')'
>>> tx.cite('\\cite{key1}')
'[1)'
>>> tx.cite('\\cite{key2,key3}')
'[2,3)'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1)'
"""
return self._citeright
@citeright.setter
def citeright(self, s):
        if not isinstance(s, str):
            raise TypeError(
                'expected string object but '
                '%s object given.' % type(s))
        self._citeright = s
@property
def citation_labels(self):
"""Key to number map of citations.
Returns
-------
dict
Citation key to citation number map.
"""
return self._citation_labels
@citation_labels.setter
def citation_labels(self, d):
        if not isinstance(d, dict):
            raise TypeError(
                'expected dictionary object but '
                '%s object given.' % type(d))
        self._citation_labels = d
def _parse_context(self, c):
r"""Find all citation keys from context written to .tex file.
Find all citation keys from context written to .tex file.
        Found keys are stored in the _citation_keys_in_context attribute.
Parameters
----------
c : str
Parsed texts.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx._parse_context(
... 'Some citation \\cite{key}. Some example \\cite{key1,key2}'
... )
>>> tx._citation_keys_in_context
['key', 'key1,key2']
"""
found_keys = re.findall(r'\\+cite\{(.*?)\}', c)
for k in found_keys:
self._citation_keys_in_context.append(k)
def read_aux(self):
r"""Read .aux file.
Aux file will be read line-by-line.
Following four types of the line will be
interpreted and stored to the LaTeX attributes.
- \\citation{keys}
Appended to the citation attribute
(list object) key as string.
- \\bibstyle{s}
Stored as bibstyle string attribute.
- \\bibdata{d}
Stored as bibdata string attribute.
- \\bibcite{k}{n}
Added to bibcite attribute
(dictionary) as {k: n}.
"""
fn = self.workdir / (self._targetbasename + '.aux')
with codecs.open(fn, 'r', 'utf-8') as f:
self._auxdata = f.readlines()
for line in self._auxdata:
self._parse_line(line)
self._build_conversion_dict()
self._citation_labels.update(self._bibcite)
self._get_replacer()
def _parse_line(self, line):
r"""Parse one line of .aux
Parameters
----------
line : str
One line of .aux file to parse.
"""
if line.startswith('\\citation'):
self._citation.append(line[len('\\citation{'): -len('}\n')])
elif line.startswith('\\bibstyle'):
self._bibstyle = line[len('\\bibstyle{'): -len('}\n')]
elif line.startswith('\\bibdata'):
self._bibdata = line[len('\\bibdata{'): -len('}\n')]
elif line.startswith('\\bibcite'):
key, value = line[len('\\bibcite{'): -len('}\n')].split('}{')
value = int(value)
self._bibcite.update({key: value})
def _get_replacer(self):
"""Get key and value for replace word document.
"""
replacer = dict()
for k, v in self._conversion_dict.items():
replacer.update({'\\\\cite\\{%s\\}' % k: '[%s]' % v})
self._replacer = replacer
def _build_conversion_dict(self):
r"""Prepare replaing citation keys with dashed range strings.
Generate dictionary of such as {'refa,refb,refc,refe,refg': '1-3,5,7'}.
"""
for cite in self._citation:
cite_nums = [self._bibcite[c] for c in cite.split(',')]
self._conversion_dict.update(
{cite: self._compress(cite_nums)}
)
for cite in self._citation_keys_in_context:
cite_nums = [self._bibcite[c] for c in cite.split(',')]
if self._use_cite_package:
self._conversion_dict.update(
{cite: self._compress(sorted(cite_nums))}
)
else:
self._conversion_dict.update(
{cite: ','.join(str(c) for c in cite_nums)}
)
def cite(self, s):
r"""Do \cite command formatting.
Returns formated text from citation commands such as
\cite{key1} and \cite{key1,key2,key3}, etc.
By default, if there are three or more consecutive numbers,
they are compressed into a range using an en-dash.
Citation numbers are also sorted in the default condition.
Parameters
----------
s : str
Raw string to be formatted.
For example, \\cite{key1} or \\cite{key2,key3}.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[1\u20133]'
Note \\u2013 is en-dash.
"""
p = re.compile(r'\\+cite\{(.*)\}')
if p.match(s):
keys = p.match(s).group(1).split(',')
if len(keys) == 1:
key = keys[0]
return (
self._citeleft
+ str(self._citation_labels[key])
+ self._citeright
)
if len(keys) > 1:
if self._use_cite_package:
nums = sorted(
[self._citation_labels[key] for key in keys]
)
return (
self._citeleft
+ self._compress(nums)
+ self._citeright
)
else:
nums = [str(self._citation_labels[key]) for key in keys]
return (
self._citeleft
+ ','.join(nums)
+ self._citeright
)
else:
            raise ValueError(
                'no citation pattern matched.'
            )
def _compress(self, nums, sep=u'\u2013'):
r"""Compress groups of three or more consecutive numbers into a range.
        Compress a list of positive integers so that runs of three or more
        consecutive numbers become a range joined by a separating character.
For example, a list ``[1,2,3,6]`` will be converted into ``[1-3,6]``.
Parameters
----------
nums : list of positive integers
            Integers to convert into a dashed-range string.
            A single-element list is also allowed.
        sep : str, default en-dash (U+2013)
            A character inserted between the start and end of a range.
"""
seq = []
final = []
last = 0
for index, val in enumerate(nums):
if last + 1 == val or index == 0:
seq.append(val)
last = val
else:
if len(seq) > 2:
final.append(str(seq[0]) + sep + str(seq[len(seq)-1]))
elif len(seq) == 2:
final.append(str(seq[0]) + ',' + str(seq[len(seq)-1]))
else:
final.append(str(seq[0]))
seq = []
seq.append(val)
last = val
if index == len(nums) - 1:
if len(seq) > 2:
final.append(str(seq[0]) + sep + str(seq[len(seq)-1]))
elif len(seq) == 2:
final.append(str(seq[0]) + ',' + str(seq[len(seq)-1]))
else:
final.append(str(seq[0]))
final_str = ','.join(map(str, final))
return final_str
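    # Illustration of the compression above (using the default en-dash separator):
    #   _compress([1, 2, 3, 6]) -> '1–3,6'
    #   _compress([1, 2, 5])    -> '1,2,5'
    #   _compress([4])          -> '4'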
class Bibliography:
"""LaTeX bbl file related contents and commands.
Parameters
----------
targetbasename : str, default 'wdbib'
Base name of LaTeX related files.
workdir : str or path object, default '.tmp'
Temporal working directory to store LaTeX contents.
Examples
--------
>>> import wdbibtex
>>> bb = wdbibtex.Bibliography()
>>> bb.read_bbl() # doctest: +SKIP
"""
def __init__(
self,
targetbasename='wdbib',
workdir='.tmp',
):
"""Cunstructor of Bibliography
"""
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self._targetbasename = targetbasename
@property
def thebibliography(self):
r"""Plain text to replace \\thebibliography in word file.
        Plain text of the LaTeX-processed bibliography list.
        A tab character is inserted between each citation number and its citation string.
Example in IEEE format follows:
.. code-block:: text
[1]\\tF. Author, S. Author, "Paper Title," Journal Name, vol. 1, no. 1, p. 1, march 2022.
[2]\\tG. Name, F. Name, "Title," Journal, vol. 2, no. 2, pp. 1-10, 2020.
Returns
-------
str
Plain text of the thebibliography.
Raises
------
ValueError
If thebibliography text is not set.
""" # noqa E501
        if getattr(self, '_thebibtext', None) is None:
raise ValueError(
'Thebibliography text is not set yet.'
)
return self._thebibtext
def read_bbl(self):
"""Read .bbl file.
Read .bbl file to extract formatted thebibliography text.
Examples
--------
>>> import wdbibtex
>>> bb = wdbibtex.Bibliography()
>>> bb.read_bbl() # doctest: +SKIP
"""
fn = self.workdir / (self._targetbasename + '.bbl')
with codecs.open(fn, 'r', 'utf-8') as f:
self._bbldata = f.readlines()
self._make_thebibliography_text()
def _make_thebibliography_text(self):
"""Generate thebibliography plain text to incert word file.
"""
replacer = {}
replacer.update({
r'\n ': ' ',
r'\{\\em (.*?)\}': r'\1',
r'\\emph\{(?!\\)(.*?)\}': r'\1',
r'\\BIBforeignlanguage\{(.*?)\}\{(.*?)\}': r'\2',
r'\\BIBforeignlanguage\{(.*?)\{(.*?)\}\}': r'\2',
r'~': ' ',
r'--': u'\u2014',
r'``': '“',
r"''": '”',
r'\n\n': '\n',
r'\\BIBentryALTinterwordspacing\n': '',
r'\\BIBentrySTDinterwordspacing\n': '',
r'\\url\{(.*?)\}': r'\1',
})
thebib_begin = None
for i, line in enumerate(self._bbldata):
if line.startswith('\\bibitem') and thebib_begin is None:
thebib_begin = i
if line.startswith('\\end{thebibliography}'):
thebib_end = i
thebibtext = ''.join(self._bbldata[thebib_begin: thebib_end])
# Replace thebibliography text
found = True
while found:
found = False
for k, v in replacer.items():
thebibold = thebibtext
thebibtext = re.sub(k, v, thebibtext)
if thebibold != thebibtext:
found = True
for c, m in enumerate(re.findall('\\\\bibitem{(.*)}\n', thebibtext)):
thebibtext = re.sub(
'\\\\bibitem{%s}\n' % m, '[%s]\t' % (c+1), thebibtext
)
self._thebibtext = thebibtext
class LaTeX(Cite, Bibliography):
"""LaTeX related contents and commands.
Run LaTeX and BibTeX commands. Write .tex files.
Read and parse .aux and .bbl files.
    Prepare conversion of LaTeX keys in the Word file into BibTeX-processed texts.
Parameters
----------
bibtexcmd : str or None, default None
BibTeX command.
        If None, automatically selected according to system locale.
bibtexopts : str or None, default None
BibTeX command options.
If None, automatically selected according to system locale.
preamble : str or None, default None
Preamble of .tex file.
If None, automatically selected.
targetbasename : str, default 'wdbib'
Base name of LaTeX related files.
texcmd : str or None, default None
LaTeX command.
If None, automatically selected according to system locale.
texopts : str or None, default None
LaTeX command options.
        If None, automatically selected according to system locale.
workdir : str or path object, default '.tmp'
Temporal working directory to store LaTeX contents.
"""
def __init__(
self,
bibtexcmd=None,
bibtexopts=None,
preamble=None,
targetbasename='wdbib',
texcmd=None,
texopts=None,
workdir='.tmp',
):
        super(LaTeX, self).__init__(
            targetbasename=targetbasename, workdir=workdir)
self.__locale = self.__default_locale()
# Set automatically selected values
if texcmd is None:
if self.__locale == 'en':
texcmd = 'latex'
elif self.__locale == 'ja':
texcmd = 'uplatex'
if texopts is None:
texopts = '-interaction=nonstopmode -file-line-error'
if bibtexcmd is None:
if self.__locale == 'en':
bibtexcmd = 'bibtex'
elif self.__locale == 'ja':
bibtexcmd = 'upbibtex'
if bibtexopts is None:
bibtexopts = ''
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self.__targetbasename = targetbasename
self.__texcmd = texcmd
self.__texopts = texopts
self.__bibtexcmd = bibtexcmd
self.__bibtexopts = bibtexopts
self.__packages = None
self.__bibliographystyle = None
self.__formatted_bibliographystyle = None
self.__documentclass = None
self.__package_list = []
self.preamble = preamble
# Makedir working directory if not exist.
self.workdir.mkdir(exist_ok=True)
@property
def documentclass(self):
"""LaTeX documentclass string."""
return self.__documentclass
@documentclass.setter
def documentclass(self, documentclass):
if not documentclass.startswith('\\'):
raise ValueError(
'Invalid documentclass.'
)
self.__documentclass = documentclass
# Update preamble
self.__update_preamble()
def set_documentclass(self, documentclass, *options):
"""Documentclass setter.
Parameters
----------
documentclass
Documentclass
*options
Documentclass options.
"""
        if documentclass.startswith('\\'):
            self.__documentclass = documentclass
        elif bool(options):
            opts = '[%s]' % ','.join(options)
            self.__documentclass = \
                '\\documentclass%s{%s}' % (opts, documentclass)
        else:
            self.__documentclass = '\\documentclass{%s}' % documentclass
# Update preamble
self.__update_preamble()
@property
def formatted_bibliographystyle(self):
r"""[Read only] Formatted bibliographystyle, e.g. \bibliographystyle{IEEEtran}
Formatted bibliography string to be written in preamble.
In the case ``bibliographystyle`` is ``SomeBST``,
``formatted_bibliographystyle`` is ``\bibliographystyle{SomeBST}``.
See Also
--------
bibliographystyle : bare bibliographystyle to be used
"""
return self.__formatted_bibliographystyle
@property
def bibliographystyle(self):
r"""Bibliographystyle string.
Bibliography string. If None is set, a .bst is automatically selected.
        The ``bibliographystyle`` string is, for example,
``SomeBST`` of ``\bibliographystyle{SomeBST}``.
While the ``formatted_bibliographystyle``
is ``\bibliographystyle{SomeBST}``.
See Also
--------
formatted_bibliographystyle : formatted line to be written in preamble
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.bibliographystyle = 'IEEEtran'
>>> tx.bibliographystyle
'IEEEtran'
>>> tx.formatted_bibliographystyle
'\\bibliographystyle{IEEEtran}'
In the case of None and no .bst file is found, raise ValueError.
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.bibliographystyle = None
Traceback (most recent call last):
...
ValueError: No .bst files found in working directory.
In the case of None and some .bst file is in the working directory,
the .bst file is automatically selected.
>>> import wdbibtex
>>> import pathlib
>>> import shutil
>>> shutil.rmtree('.tmp', ignore_errors=True)
>>> tx = wdbibtex.LaTeX(workdir='.tmp')
>>> pathlib.Path('.tmp/testbst.bst').touch()
>>> tx.bibliographystyle = None
>>> tx.bibliographystyle
'testbst'
>>> tx.formatted_bibliographystyle
'\\bibliographystyle{testbst}'
Raises
------
ValueError
If bst is None and there is no or multiple .bst files in cwd.
"""
return self.__bibliographystyle
@bibliographystyle.setter
def bibliographystyle(self, bibliographystyle):
import glob
if bibliographystyle:
self.set_bibliographystyle(bibliographystyle)
else:
bibliographystyle = glob.glob(str(self.workdir) + '/*.bst')
if len(bibliographystyle) > 1:
raise ValueError(
'More than two .bst files found in working directory.'
)
elif len(bibliographystyle) == 0:
raise ValueError(
'No .bst files found in working directory.'
)
else:
bstfile = os.path.basename(bibliographystyle[0])
bibliographystyle = os.path.splitext(bstfile)[0]
self.set_bibliographystyle(bibliographystyle)
def set_bibliographystyle(self, bst):
"""Bibliographystyle setter.
Parameters
----------
bst : str
Bibliography style such as IEEEtran or ieeetr.
"""
if re.search(r'[^a-zA-Z]', bst):
raise ValueError(
'Invalid bibliographystyle. Only plain alphabets are allowed.'
)
else:
self.__bibliographystyle = bst
self.__formatted_bibliographystyle = \
'\\bibliographystyle{%s}' % bst
# Update preamble
self.__update_preamble()
@property
def packages(self):
r"""Returns used LaTeX packages.
Returns
-------
str
Multi-line LaTeX \\usepackage[options]{package} string.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> print(tx.packages)
\usepackage{cite}
>>> tx.add_package('graphicx', 'dvipdfmx')
>>> print(tx.packages)
\usepackage{cite}
\usepackage[dvipdfmx]{graphicx}
"""
return self.__packages
def __update_packages(self):
pkgs = []
is_cite_package_found = False
for pkg, *opts in self.__package_list:
if bool(opts):
pkgs.append('\\usepackage[%s]{%s}' % (','.join(opts), pkg))
else:
pkgs.append('\\usepackage{%s}' % pkg)
if pkg == 'cite':
is_cite_package_found = True
self.__packages = '\n'.join(pkgs)
self._use_cite_package = is_cite_package_found
def add_package(self, package, *options):
"""Add a package to the package list
Add a package to the package list of package_list.
The package can have option.
The package will used in the preamble attribute.
Parameters
----------
package : str
Package name.
*options
Options of the package.
"""
# Overwrite duplicated package
for i, (p, *o) in enumerate(self.__package_list):
if p == package:
self.__package_list.pop(i)
break
self.__package_list.append(
[package, *options]
)
# Update package string.
self.__update_packages()
# Update preamble
self.__update_preamble()
def is_package_used(self, p):
r"""Returns if the package is used.
Returns False if the package is not used
while True if the package is used without option.
If the package is used with option(s), returns List of option(s).
Parameters
----------
p : str
Package name to find.
Returns
-------
bool or list
False if the package is not used.
True if the package is used without option.
List of option(s) if the package is used with option(s).
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> tx.is_package_used('cite')
True
>>> tx.add_package('graphicx', 'dvipdfmx')
>>> tx.is_package_used('graphicx')
['dvipdfmx']
>>> tx.is_package_used('xcolor')
False
>>> print(tx.packages)
\usepackage{cite}
\usepackage[dvipdfmx]{graphicx}
"""
        for package in self.__package_list:
            if package[0] == p:
                if len(package) == 1:
                    return True
                return package[1:]
        return False
def write(self, c, bib=None):
r"""Write .tex file.
Write minimal .tex file into workdir.
TeX file contains only citation contents,
pre-defined (at constructor of LaTeX object) preamble,
\\bibliography, and \\bibliographystyle.
Parameters
----------
c : str
String data to be written in .tex file.
bib : str or None, default None
Bibliography library file(s). If None, use all .bib files in cwd.
"""
import glob
        if bib is None:
            # Use root names (file names without extension) of all .bib files in cwd.
            bib = ','.join(
                os.path.splitext(b)[0] for b in glob.glob('*.bib')
            )
fn = self.workdir / (self.__targetbasename + '.tex')
with codecs.open(fn, 'w', 'utf-8') as f:
f.writelines(
'\n'.join([
self.preamble,
'\\begin{document}',
c,
'\\bibliography{%s}' % bib,
'\\end{document}',
'',
])
)
self._parse_context(c)
def build(self):
"""Build LaTeX related files.
Build LaTeX files in old-style four steps (without PDF generation).
1. latex: to generate .aux from .tex
2. bibtex: to generate .bbl and update .aux from .aux and .bst.
3. latex: to update .aux.
4. latex: to complete .aux.
Firstly the current directory is switched to the working directory.
Secondly the above four steps are invoked.
Thirdly read .bbl and .aux files are parsed.
Finally, the current directory is switched
to the original working directory.
"""
import subprocess
cwd = os.getcwd() # Save original working directory.
os.chdir(self.workdir)
latexcmd = ' '.join(filter(None, [
self.__texcmd,
self.__texopts,
self.__targetbasename + '.tex'
]))
bibtexcmd = ' '.join(filter(None, [
self.__bibtexcmd,
self.__bibtexopts,
self.__targetbasename,
]))
# Four steps to complete build LaTeX project.
subprocess.call(latexcmd, shell=True)
subprocess.call(bibtexcmd, shell=True)
subprocess.call(latexcmd, shell=True)
subprocess.call(latexcmd, shell=True)
os.chdir(cwd) # Back to original working directory.
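    # The four build steps above are roughly equivalent to this shell sequence
    # (command names depend on the detected locale; 'wdbib' is the default
    # targetbasename):
    #   latex -interaction=nonstopmode -file-line-error wdbib.tex
    #   bibtex wdbib
    #   latex -interaction=nonstopmode -file-line-error wdbib.tex
    #   latex -interaction=nonstopmode -file-line-error wdbib.tex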
@property
def preamble(self):
r"""Returns latex preamble text.
        A text to be used as the LaTeX preamble. Note that not every
        LaTeX-compatible preamble is supported by WdBibTeX. The LaTeX class accepts
        None for the preamble attribute. In that case, one of the following default
        preamble texts is used, selected by system locale.
        Note that BST is replaced by a bibliography style file
        placed in the project directory.
.. code-block:: text
\documentclass[latex]{article}
\bibliographystyle{BST}
.. code-block:: text
\documentclass[uplatex]{jsarticle}
\bibliographystyle{BST}
Returns
-------
str
Preamble text.
"""
return self.__preamble
@preamble.setter
def preamble(self, s):
if s is None:
if self.__locale == 'en':
self.set_documentclass('article')
elif self.__locale == 'ja':
self.set_documentclass('jsarticle', 'uplatex')
elif isinstance(s, str):
self.__parse_preamble(s)
else:
raise ValueError(
'Invalid preamble. '
'Only None or str is allowed.'
)
def __update_preamble(self):
contents = [
self.documentclass,
self.packages,
self.formatted_bibliographystyle,
]
self.__preamble = '\n'.join(
[c for c in contents if c is not None]
)
def __parse_preamble(self, preamble):
detect_documentclass = False
for ln in preamble.split('\n'):
if ln.startswith('%') and not detect_documentclass:
pass
elif re.match(r'.*documentclass.*', ln):
detect_documentclass = True
m = re.match(r'.*documentclass(\[(.*)\])*\{(.*)\}', ln)
documentclass_opt = []
if m.group(1) is not None:
documentclass_opt = m.group(2).replace(' ', '').split(',')
                documentclass = m.group(3)
                self.set_documentclass(documentclass, *documentclass_opt)
elif re.match(r'.*usepackage.*', ln):
m = re.match(r'.*usepackage(\[(.*)\])*\{(.*)\}', ln)
package_opt = []
if m.group(1) is not None:
package_opt = m.group(2).replace(' ', '').split(',')
package = m.group(3)
self.add_package(package, *package_opt)
elif re.match(r'.*bibliographystyle.*', ln):
m = re.match(r'.*bibliographystyle\{(.*)\}', ln)
bibliographystyle = m.group(1)
self.set_bibliographystyle(bibliographystyle)
elif re.match(r'.*renewcommand\\citeleft.*', ln):
m = re.match(r'.*renewcommand\\citeleft\{(.*)\}', ln)
self.citeleft = m.group(1)
elif re.match(r'.*renewcommand\\citeright.*', ln):
m = re.match(r'.*renewcommand\\citeright\{(.*)\}', ln)
self.citeright = m.group(1)
else:
pass
@property
def locale(self):
"""Returns system locale
Locale string to decide which latex commands used.
Currently english(en) and japanese(ja) are supported.
If locale is manually set, returns the local as is.
Else, determined using locale.getlocale().
Returns
-------
str
Locale text in two characters for example 'en' or 'ja'.
"""
return self.__locale
@locale.setter
def locale(self, s):
if isinstance(s, str) and len(s) == 2:
self.__locale = s
else:
raise ValueError(
'Invalid locale string. '
'Only 2-characters string is allowed.'
)
def __default_locale(self):
        loca, locb = [x or '' for x in locale.getlocale()]
        if 'en' in loca or 'en' in locb:
            return 'en'
        elif 'English' in loca or 'English' in locb:
            return 'en'
        elif 'ja' in loca or 'ja' in locb:
            return 'ja'
        elif 'Japanese' in loca or 'Japanese' in locb:
            return 'ja'
        else:
            raise ValueError('Unhandled locale %s' % str(locale.getlocale()))
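

# End-to-end usage sketch for the classes above. This is illustrative only:
# it assumes a .bib file and a .bst file are available in the current
# directory, that a LaTeX/BibTeX toolchain is installed, and that 'key1' and
# 'key2' are placeholder citation keys present in that .bib file.
if __name__ == "__main__":
    tx = LaTeX(workdir='.tmp')
    tx.write('Some text \\cite{key1}. More text \\cite{key1,key2}.')
    tx.build()        # latex -> bibtex -> latex -> latex
    tx.read_aux()     # builds the citation-key -> number mapping
    tx.read_bbl()     # extracts the formatted bibliography text
    print(tx.cite('\\cite{key1,key2}'))   # e.g. '[1,2]'
    print(tx.thebibliography)             # formatted reference list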
import locale
import pathlib
import os
import re
class Cite:
"""Citation package emurating contents and commands.
Parameters
----------
citeleft : str, default '['
Left delimiter of list.
citeright : str, default ']'
Right delimiter of list.
use_cite_package : bool, default False
If False, emulate LaTeX's use_cite_package citation handling.
If True, emulate cite package's behavior.
"""
def __init__(
self,
citeleft='[',
citeright=']',
targetbasename='wdbib',
use_cite_package=False,
workdir='.tmp',
):
"""Costructor of Cite.
"""
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self._targetbasename = targetbasename
self._replacer = None
self._citation = []
self._bibstyle = None
self._bibdata = None
self._bibcite = {}
self._conversion_dict = {}
self._citation_labels = dict()
self._citeleft = citeleft
self._citeright = citeright
self._use_cite_package = use_cite_package
self._citation_keys_in_context = []
@property
def citeleft(self):
r"""Left delimiter of list. Default '['.
Returns
-------
str
Left delimiter of list.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.citeleft
'['
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> tx.citeleft = '('
>>> tx.citeleft
'('
>>> tx.cite('\\cite{key1}')
'(1]'
>>> tx.cite('\\cite{key2,key3}')
'(2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'(3,2,1]'
"""
return self._citeleft
@citeleft.setter
def citeleft(self, s):
if not isinstance(s, str):
TypeError(
'expected string object but '
'%s object given.' % type(s))
self._citeleft = s
@property
def citeright(self):
r"""Right delimiter of list. Default ']'.
Returns
-------
str
Right delimiter of list.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.citeright
']'
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> tx.citeright = ')'
>>> tx.citeright
')'
>>> tx.cite('\\cite{key1}')
'[1)'
>>> tx.cite('\\cite{key2,key3}')
'[2,3)'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1)'
"""
return self._citeright
@citeright.setter
def citeright(self, s):
if not isinstance(s, str):
TypeError(
'expected string object but '
'%s object given.' % type(s))
self._citeright = s
@property
def citation_labels(self):
"""Key to number map of citations.
Returns
-------
dict
Citation key to citation number map.
"""
return self._citation_labels
@citation_labels.setter
def citation_labels(self, d):
if not isinstance(d, str):
TypeError(
'expected dictionary object but '
'%s object given.' % type(d))
self._citation_labels = d
def _parse_context(self, c):
r"""Find all citation keys from context written to .tex file.
Find all citation keys from context written to .tex file.
Found keys are stores to citation_keys_in_context attribute.
Parameters
----------
c : str
Parsed texts.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx._parse_context(
... 'Some citation \\cite{key}. Some example \\cite{key1,key2}'
... )
>>> tx._citation_keys_in_context
['key', 'key1,key2']
"""
found_keys = re.findall(r'\\+cite\{(.*?)\}', c)
for k in found_keys:
self._citation_keys_in_context.append(k)
def read_aux(self):
r"""Read .aux file.
Aux file will be read line-by-line.
Following four types of the line will be
interpreted and stored to the LaTeX attributes.
- \\citation{keys}
Appended to the citation attribute
(list object) key as string.
- \\bibstyle{s}
Stored as bibstyle string attribute.
- \\bibdata{d}
Stored as bibdata string attribute.
- \\bibcite{k}{n}
Added to bibcite attribute
(dictionary) as {k: n}.
"""
fn = self.workdir / (self._targetbasename + '.aux')
with codecs.open(fn, 'r', 'utf-8') as f:
self._auxdata = f.readlines()
for line in self._auxdata:
self._parse_line(line)
self._build_conversion_dict()
self._citation_labels.update(self._bibcite)
self._get_replacer()
def _parse_line(self, line):
r"""Parse one line of .aux
Parameters
----------
line : str
One line of .aux file to parse.
"""
if line.startswith('\\citation'):
self._citation.append(line[len('\\citation{'): -len('}\n')])
elif line.startswith('\\bibstyle'):
self._bibstyle = line[len('\\bibstyle{'): -len('}\n')]
elif line.startswith('\\bibdata'):
self._bibdata = line[len('\\bibdata{'): -len('}\n')]
elif line.startswith('\\bibcite'):
key, value = line[len('\\bibcite{'): -len('}\n')].split('}{')
value = int(value)
self._bibcite.update({key: value})
def _get_replacer(self):
"""Get key and value for replace word document.
"""
replacer = dict()
for k, v in self._conversion_dict.items():
replacer.update({'\\\\cite\\{%s\\}' % k: '[%s]' % v})
self._replacer = replacer
def _build_conversion_dict(self):
r"""Prepare replaing citation keys with dashed range strings.
Generate dictionary of such as {'refa,refb,refc,refe,refg': '1-3,5,7'}.
"""
for cite in self._citation:
cite_nums = [self._bibcite[c] for c in cite.split(',')]
self._conversion_dict.update(
{cite: self._compress(cite_nums)}
)
for cite in self._citation_keys_in_context:
cite_nums = [self._bibcite[c] for c in cite.split(',')]
if self._use_cite_package:
self._conversion_dict.update(
{cite: self._compress(sorted(cite_nums))}
)
else:
self._conversion_dict.update(
{cite: ','.join(str(c) for c in cite_nums)}
)
def cite(self, s):
r"""Do \cite command formatting.
Returns formated text from citation commands such as
\cite{key1} and \cite{key1,key2,key3}, etc.
By default, if there are three or more consecutive numbers,
they are compressed into a range using an en-dash.
Citation numbers are also sorted in the default condition.
Parameters
----------
s : str
Raw string to be formatted.
For example, \\cite{key1} or \\cite{key2,key3}.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[3,2,1]'
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> tx.citation_labels = {'key1': 1, 'key2': 2, 'key3': 3}
>>> tx.cite('\\cite{key1}')
'[1]'
>>> tx.cite('\\cite{key2,key3}')
'[2,3]'
>>> tx.cite('\\cite{key3,key2,key1}')
'[1\u20133]'
Note \\u2013 is en-dash.
"""
p = re.compile(r'\\+cite\{(.*)\}')
if p.match(s):
keys = p.match(s).group(1).split(',')
if len(keys) == 1:
key = keys[0]
return (
self._citeleft
+ str(self._citation_labels[key])
+ self._citeright
)
if len(keys) > 1:
if self._use_cite_package:
nums = sorted(
[self._citation_labels[key] for key in keys]
)
return (
self._citeleft
+ self._compress(nums)
+ self._citeright
)
else:
nums = [str(self._citation_labels[key]) for key in keys]
return (
self._citeleft
+ ','.join(nums)
+ self._citeright
)
else:
ValueError(
'no citation pattern matched.'
)
def _compress(self, nums, sep=u'\u2013'):
r"""Compress groups of three or more consecutive numbers into a range.
Compress poor list of positive integers with three or more
consecutive numbers into a range using a separating character.
For example, a list ``[1,2,3,6]`` will be converted into ``[1-3,6]``.
Parameters
----------
nums : list of positive integers
Multiple integers to convert dashed range string.
A list of single element integer is also allowd.
sep : str, default en-dash(U+2013)
A character inserted betwen start and end of range.
"""
seq = []
final = []
last = 0
for index, val in enumerate(nums):
if last + 1 == val or index == 0:
seq.append(val)
last = val
else:
if len(seq) > 2:
final.append(str(seq[0]) + sep + str(seq[len(seq)-1]))
elif len(seq) == 2:
final.append(str(seq[0]) + ',' + str(seq[len(seq)-1]))
else:
final.append(str(seq[0]))
seq = []
seq.append(val)
last = val
if index == len(nums) - 1:
if len(seq) > 2:
final.append(str(seq[0]) + sep + str(seq[len(seq)-1]))
elif len(seq) == 2:
final.append(str(seq[0]) + ',' + str(seq[len(seq)-1]))
else:
final.append(str(seq[0]))
final_str = ','.join(map(str, final))
return final_str
class Bibliography:
"""LaTeX bbl file related contents and commands.
Parameters
----------
targetbasename : str, default 'wdbib'
Base name of LaTeX related files.
workdir : str or path object, default '.tmp'
Temporal working directory to store LaTeX contents.
Examples
--------
>>> import wdbibtex
>>> bb = wdbibtex.Bibliography()
>>> bb.read_bbl() # doctest: +SKIP
"""
def __init__(
self,
targetbasename='wdbib',
workdir='.tmp',
):
"""Cunstructor of Bibliography
"""
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self._targetbasename = targetbasename
@property
def thebibliography(self):
r"""Plain text to replace \\thebibliography in word file.
A plain text of LaTeX-processed bibliography list.
An tab string is inserted between each citenum and citation string.
Example in IEEE format follows:
.. code-block:: text
[1]\\tF. Author, S. Author, "Paper Title," Journal Name, vol. 1, no. 1, p. 1, march 2022.
[2]\\tG. Name, F. Name, "Title," Journal, vol. 2, no. 2, pp. 1-10, 2020.
Returns
-------
str
Plain text of the thebibliography.
Raises
------
ValueError
If thebibliography text is not set.
""" # noqa E501
if self._thebibtext is None:
raise ValueError(
'Thebibliography text is not set yet.'
)
return self._thebibtext
def read_bbl(self):
"""Read .bbl file.
Read .bbl file to extract formatted thebibliography text.
Examples
--------
>>> import wdbibtex
>>> bb = wdbibtex.Bibliography()
>>> bb.read_bbl() # doctest: +SKIP
"""
fn = self.workdir / (self._targetbasename + '.bbl')
with codecs.open(fn, 'r', 'utf-8') as f:
self._bbldata = f.readlines()
self._make_thebibliography_text()
def _make_thebibliography_text(self):
"""Generate thebibliography plain text to incert word file.
"""
replacer = {}
replacer.update({
r'\n ': ' ',
r'\{\\em (.*?)\}': r'\1',
r'\\emph\{(?!\\)(.*?)\}': r'\1',
r'\\BIBforeignlanguage\{(.*?)\}\{(.*?)\}': r'\2',
r'\\BIBforeignlanguage\{(.*?)\{(.*?)\}\}': r'\2',
r'~': ' ',
r'--': u'\u2014',
r'``': '“',
r"''": '”',
r'\n\n': '\n',
r'\\BIBentryALTinterwordspacing\n': '',
r'\\BIBentrySTDinterwordspacing\n': '',
r'\\url\{(.*?)\}': r'\1',
})
thebib_begin = None
for i, line in enumerate(self._bbldata):
if line.startswith('\\bibitem') and thebib_begin is None:
thebib_begin = i
if line.startswith('\\end{thebibliography}'):
thebib_end = i
thebibtext = ''.join(self._bbldata[thebib_begin: thebib_end])
# Replace thebibliography text
found = True
while found:
found = False
for k, v in replacer.items():
thebibold = thebibtext
thebibtext = re.sub(k, v, thebibtext)
if thebibold != thebibtext:
found = True
for c, m in enumerate(re.findall('\\\\bibitem{(.*)}\n', thebibtext)):
thebibtext = re.sub(
'\\\\bibitem{%s}\n' % m, '[%s]\t' % (c+1), thebibtext
)
self._thebibtext = thebibtext
class LaTeX(Cite, Bibliography):
"""LaTeX related contents and commands.
Run LaTeX and BibTeX commands. Write .tex files.
Read and parse .aux and .bbl files.
Prepare conversion LaTeX keys in Word file into BibTeX processed texts.
Parameters
----------
bibtexcmd : str or None, default None
BibTeX command.
If None, automatically selected accorting to system locale.
bibtexopts : str or None, default None
BibTeX command options.
If None, automatically selected according to system locale.
preamble : str or None, default None
Preamble of .tex file.
If None, automatically selected.
targetbasename : str, default 'wdbib'
Base name of LaTeX related files.
texcmd : str or None, default None
LaTeX command.
If None, automatically selected according to system locale.
texopts : str or None, default None
LaTeX command options.
If None, automatically selected accorgin to system locale.
workdir : str or path object, default '.tmp'
Temporal working directory to store LaTeX contents.
"""
def __init__(
self,
bibtexcmd=None,
bibtexopts=None,
preamble=None,
targetbasename='wdbib',
texcmd=None,
texopts=None,
workdir='.tmp',
):
super(LaTeX, self).__init__()
self.__locale = self.__default_locale()
# Set automatically selected values
if texcmd is None:
if self.__locale == 'en':
texcmd = 'latex'
elif self.__locale == 'ja':
texcmd = 'uplatex'
if texopts is None:
texopts = '-interaction=nonstopmode -file-line-error'
if bibtexcmd is None:
if self.__locale == 'en':
bibtexcmd = 'bibtex'
elif self.__locale == 'ja':
bibtexcmd = 'upbibtex'
if bibtexopts is None:
bibtexopts = ''
# Store settings in internal attributes.
if os.path.isabs(workdir):
self.workdir = pathlib.Path(workdir)
else:
self.workdir = (
pathlib.Path(os.getcwd()) / workdir
).resolve()
self.__targetbasename = targetbasename
self.__texcmd = texcmd
self.__texopts = texopts
self.__bibtexcmd = bibtexcmd
self.__bibtexopts = bibtexopts
self.__packages = None
self.__bibliographystyle = None
self.__formatted_bibliographystyle = None
self.__documentclass = None
self.__package_list = []
self.preamble = preamble
# Makedir working directory if not exist.
self.workdir.mkdir(exist_ok=True)
@property
def documentclass(self):
"""LaTeX documentclass string."""
return self.__documentclass
@documentclass.setter
def documentclass(self, documentclass):
if not documentclass.startswith('\\'):
raise ValueError(
'Invalid documentclass.'
)
self.__documentclass = documentclass
# Update preamble
self.__update_preamble()
def set_documentclass(self, documentclass, *options):
"""Documentclass setter.
Parameters
----------
documentclass
Documentclass
*options
Documentclass options.
"""
if documentclass.startswith('\\'):
self.__documentclass = documentclass
else:
if bool(options):
opts = '[%s]' % ','.join(options)
self.__documentclass = \
'\\documentclass%s{%s}' % (opts, documentclass)
# Update preamble
self.__update_preamble()
@property
def formatted_bibliographystyle(self):
r"""[Read only] Formatted bibliographystyle, e.g. \bibliographystyle{IEEEtran}
Formatted bibliography string to be written in preamble.
In the case ``bibliographystyle`` is ``SomeBST``,
``formatted_bibliographystyle`` is ``\bibliographystyle{SomeBST}``.
See Also
--------
bibliographystyle : bare bibliographystyle to be used
"""
return self.__formatted_bibliographystyle
@property
def bibliographystyle(self):
r"""Bibliographystyle string.
Bibliography string. If None is set, a .bst is automatically selected.
The ``bibliography`` string is, for example,
``SomeBST`` of ``\bibliographystyle{SomeBST}``.
While the ``formatted_bibliographystyle``
is ``\bibliographystyle{SomeBST}``.
See Also
--------
formatted_bibliographystyle : formatted line to be written in preamble
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.bibliographystyle = 'IEEEtran'
>>> tx.bibliographystyle
'IEEEtran'
>>> tx.formatted_bibliographystyle
'\\bibliographystyle{IEEEtran}'
In the case of None and no .bst file is found, raise ValueError.
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.bibliographystyle = None
Traceback (most recent call last):
...
ValueError: No .bst files found in working directory.
In the case of None and some .bst file is in the working directory,
the .bst file is automatically selected.
>>> import wdbibtex
>>> import pathlib
>>> import shutil
>>> shutil.rmtree('.tmp', ignore_errors=True)
>>> tx = wdbibtex.LaTeX(workdir='.tmp')
>>> pathlib.Path('.tmp/testbst.bst').touch()
>>> tx.bibliographystyle = None
>>> tx.bibliographystyle
'testbst'
>>> tx.formatted_bibliographystyle
'\\bibliographystyle{testbst}'
Raises
------
ValueError
If bst is None and there is no or multiple .bst files in cwd.
"""
return self.__bibliographystyle
@bibliographystyle.setter
def bibliographystyle(self, bibliographystyle):
import glob
if bibliographystyle:
self.set_bibliographystyle(bibliographystyle)
else:
bibliographystyle = glob.glob(str(self.workdir) + '/*.bst')
if len(bibliographystyle) > 1:
raise ValueError(
'More than two .bst files found in working directory.'
)
elif len(bibliographystyle) == 0:
raise ValueError(
'No .bst files found in working directory.'
)
else:
bstfile = os.path.basename(bibliographystyle[0])
bibliographystyle = os.path.splitext(bstfile)[0]
self.set_bibliographystyle(bibliographystyle)
def set_bibliographystyle(self, bst):
"""Bibliographystyle setter.
Parameters
----------
bst : str
Bibliography style such as IEEEtran or ieeetr.
"""
if re.search(r'[^a-zA-Z]', bst):
raise ValueError(
'Invalid bibliographystyle. Only plain alphabets are allowed.'
)
else:
self.__bibliographystyle = bst
self.__formatted_bibliographystyle = \
'\\bibliographystyle{%s}' % bst
# Update preamble
self.__update_preamble()
@property
def packages(self):
r"""Returns used LaTeX packages.
Returns
-------
str
Multi-line LaTeX \\usepackage[options]{package} string.
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> print(tx.packages)
\usepackage{cite}
>>> tx.add_package('graphicx', 'dvipdfmx')
>>> print(tx.packages)
\usepackage{cite}
\usepackage[dvipdfmx]{graphicx}
"""
return self.__packages
def __update_packages(self):
pkgs = []
is_cite_package_found = False
for pkg, *opts in self.__package_list:
if bool(opts):
pkgs.append('\\usepackage[%s]{%s}' % (','.join(opts), pkg))
else:
pkgs.append('\\usepackage{%s}' % pkg)
if pkg == 'cite':
is_cite_package_found = True
self.__packages = '\n'.join(pkgs)
self._use_cite_package = is_cite_package_found
def add_package(self, package, *options):
"""Add a package to the package list
        Add a package to the package list stored in package_list.
        The package can have options.
        The package will be used in the preamble attribute.
Parameters
----------
package : str
Package name.
*options
Options of the package.
"""
# Overwrite duplicated package
for i, (p, *o) in enumerate(self.__package_list):
if p == package:
self.__package_list.pop(i)
break
self.__package_list.append(
[package, *options]
)
# Update package string.
self.__update_packages()
# Update preamble
self.__update_preamble()
def is_package_used(self, p):
r"""Returns if the package is used.
        Returns False if the package is not used,
        True if the package is used without options,
        and a list of option(s) if the package is used with option(s).
Parameters
----------
p : str
Package name to find.
Returns
-------
bool or list
False if the package is not used.
True if the package is used without option.
List of option(s) if the package is used with option(s).
Examples
--------
>>> import wdbibtex
>>> tx = wdbibtex.LaTeX()
>>> tx.add_package('cite')
>>> tx.is_package_used('cite')
True
>>> tx.add_package('graphicx', 'dvipdfmx')
>>> tx.is_package_used('graphicx')
['dvipdfmx']
>>> tx.is_package_used('xcolor')
False
>>> print(tx.packages)
\usepackage{cite}
\usepackage[dvipdfmx]{graphicx}
"""
        for package in self.__package_list:
            if package[0] == p:
                if len(package) == 1:
                    return True
                else:
                    return package[1:]
        return False
def write(self, c, bib=None):
r"""Write .tex file.
Write minimal .tex file into workdir.
TeX file contains only citation contents,
pre-defined (at constructor of LaTeX object) preamble,
\\bibliography, and \\bibliographystyle.
Parameters
----------
c : str
String data to be written in .tex file.
bib : str or None, default None
Bibliography library file(s). If None, use all .bib files in cwd.
"""
import glob
if bib is None:
# Use only root name (file name without extension).
bib = ''.join(
[os.path.splitext(b)[0] for b in glob.glob('*.bib')]
)
fn = self.workdir / (self.__targetbasename + '.tex')
with codecs.open(fn, 'w', 'utf-8') as f:
f.writelines(
'\n'.join([
self.preamble,
'\\begin{document}',
c,
'\\bibliography{%s}' % bib,
'\\end{document}',
'',
])
)
self._parse_context(c)
def build(self):
"""Build LaTeX related files.
Build LaTeX files in old-style four steps (without PDF generation).
1. latex: to generate .aux from .tex
2. bibtex: to generate .bbl and update .aux from .aux and .bst.
3. latex: to update .aux.
4. latex: to complete .aux.
Firstly the current directory is switched to the working directory.
Secondly the above four steps are invoked.
        Thirdly, the .bbl and .aux files are read and parsed.
Finally, the current directory is switched
to the original working directory.
"""
import subprocess
cwd = os.getcwd() # Save original working directory.
os.chdir(self.workdir)
latexcmd = ' '.join(filter(None, [
self.__texcmd,
self.__texopts,
self.__targetbasename + '.tex'
]))
bibtexcmd = ' '.join(filter(None, [
self.__bibtexcmd,
self.__bibtexopts,
self.__targetbasename,
]))
# Four steps to complete build LaTeX project.
subprocess.call(latexcmd, shell=True)
subprocess.call(bibtexcmd, shell=True)
subprocess.call(latexcmd, shell=True)
subprocess.call(latexcmd, shell=True)
os.chdir(cwd) # Back to original working directory.
@property
def preamble(self):
r"""Returns latex preamble text.
        A text to be used as the LaTeX preamble. Note that not every LaTeX-compatible
        preamble is supported by the WdBibTeX package. The LaTeX class accepts None
        for the preamble attribute; in this case, one of the following default preamble
        texts is used, selected according to the system locale.
        Note that BST is replaced with a bibliography style file
        placed in the project directory.
.. code-block:: text
            \documentclass{article}
\bibliographystyle{BST}
.. code-block:: text
\documentclass[uplatex]{jsarticle}
\bibliographystyle{BST}
Returns
-------
str
Preamble text.
"""
return self.__preamble
@preamble.setter
def preamble(self, s):
if s is None:
if self.__locale == 'en':
self.set_documentclass('article')
elif self.__locale == 'ja':
self.set_documentclass('jsarticle', 'uplatex')
elif isinstance(s, str):
self.__parse_preamble(s)
else:
raise ValueError(
'Invalid preamble. '
'Only None or str is allowed.'
)
def __update_preamble(self):
contents = [
self.documentclass,
self.packages,
self.formatted_bibliographystyle,
]
self.__preamble = '\n'.join(
[c for c in contents if c is not None]
)
def __parse_preamble(self, preamble):
detect_documentclass = False
for ln in preamble.split('\n'):
if ln.startswith('%') and not detect_documentclass:
pass
elif re.match(r'.*documentclass.*', ln):
detect_documentclass = True
m = re.match(r'.*documentclass(\[(.*)\])*\{(.*)\}', ln)
documentclass_opt = []
if m.group(1) is not None:
documentclass_opt = m.group(2).replace(' ', '').split(',')
                documentclass = m.group(3)
                self.set_documentclass(documentclass, *documentclass_opt)
elif re.match(r'.*usepackage.*', ln):
m = re.match(r'.*usepackage(\[(.*)\])*\{(.*)\}', ln)
package_opt = []
if m.group(1) is not None:
package_opt = m.group(2).replace(' ', '').split(',')
package = m.group(3)
self.add_package(package, *package_opt)
elif re.match(r'.*bibliographystyle.*', ln):
m = re.match(r'.*bibliographystyle\{(.*)\}', ln)
bibliographystyle = m.group(1)
self.set_bibliographystyle(bibliographystyle)
elif re.match(r'.*renewcommand\\citeleft.*', ln):
m = re.match(r'.*renewcommand\\citeleft\{(.*)\}', ln)
self.citeleft = m.group(1)
elif re.match(r'.*renewcommand\\citeright.*', ln):
m = re.match(r'.*renewcommand\\citeright\{(.*)\}', ln)
self.citeright = m.group(1)
else:
pass
@property
def locale(self):
"""Returns system locale
        Locale string used to decide which latex commands are used.
        Currently English (en) and Japanese (ja) are supported.
        If the locale is manually set, it is returned as is.
        Otherwise, it is determined using locale.getlocale().
Returns
-------
str
Locale text in two characters for example 'en' or 'ja'.
"""
return self.__locale
@locale.setter
def locale(self, s):
if isinstance(s, str) and len(s) == 2:
self.__locale = s
else:
raise ValueError(
'Invalid locale string. '
'Only 2-characters string is allowed.'
)
def __default_locale(self):
        loca, locb = [s or '' for s in locale.getlocale()]
if 'en' in loca or 'en' in locb:
return 'en'
elif 'English' in loca or 'English' in locb:
return 'en'
elif 'ja' in loca or 'ja' in locb:
return 'ja'
elif 'Japanese' in loca or 'Japanese' in locb:
return 'ja'
else:
raise ValueError('Unhandled locale %s' % locale.getlocale()) | 0.714628 | 0.132767 |
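# --- Illustrative sketch (not part of the dataset row above) ---
# The __parse_preamble method of the LaTeX class recognizes \documentclass,
# \usepackage, and \bibliographystyle lines with regular expressions. A minimal,
# standalone sketch of that regex-based extraction on a made-up preamble string:
import re

demo_preamble = '\n'.join([
    r'\documentclass[dvipdfmx]{article}',
    r'\usepackage[dvipdfmx]{graphicx}',
    r'\bibliographystyle{IEEEtran}',
])
for line in demo_preamble.splitlines():
    m = re.match(r'.*documentclass(\[(.*)\])?\{(.*)\}', line)
    if m:
        options = m.group(2).replace(' ', '').split(',') if m.group(2) else []
        print('documentclass:', m.group(3), options)
    m = re.match(r'.*usepackage(\[(.*)\])?\{(.*)\}', line)
    if m:
        options = m.group(2).replace(' ', '').split(',') if m.group(2) else []
        print('usepackage:', m.group(3), options)
    m = re.match(r'.*bibliographystyle\{(.*)\}', line)
    if m:
        print('bibliographystyle:', m.group(1))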
import os
import torch
from settings import constants, arg
from game import card_tools, TerminalEquity
from logs import logger
import numpy as np
import random
from scipy import stats
class TreeMatch():
def __init__(self):
self.match_nums = 1000000
self.terminal_equity_cache = {}
def match(self, root):
my_pos, opp_pos = [constants.players.P1, constants.players.P2]
results = []
for i in range(self.match_nums):
cards = [i for i in range(constants.card_count)]
random.shuffle(cards)
my_card, opp_card, public_card = cards[:3]
result = self.run_match(root, my_pos, opp_pos, my_card, opp_card, public_card)
results.append(result)
if (i + 1) % 1000 == 0:
self.save_result(results)
results = []
my_pos, opp_pos = opp_pos, my_pos
def match_using_AIVAT(self, root):
random.seed(0)
my_pos, opp_pos = [constants.players.P1, constants.players.P2]
aivat_results, direct_results = [], []
for i in range(self.match_nums):
cards = [i for i in range(constants.card_count)]
random.shuffle(cards)
my_card, opp_card, public_card = cards[:3]
aivat_result, direct_result = self.run_match_using_AIVAT(root, my_pos, opp_pos, my_card, opp_card, public_card)
aivat_results.append(aivat_result)
direct_results.append(direct_result)
if (i + 1) % 1000 == 0:
self.save_aivat_result(aivat_results, direct_results)
aivat_results, direct_results = [], []
my_pos, opp_pos = opp_pos, my_pos
def save_aivat_result(self, aivat_results, direct_results):
path = "./data/result/"
name1 = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + "_aivat" + ".npy"
name2 = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + "_direct" + ".npy"
filename1 = path + name1
filename2 = path + name2
if os.path.exists(filename1):
pre_aivat_results = np.load(filename1)
else:
pre_aivat_results = np.array([])
if os.path.exists(filename2):
pre_direct_results = np.load(filename2)
else:
pre_direct_results = np.array([])
aivat_total = np.append(pre_aivat_results, aivat_results)
direct_total = np.append(pre_direct_results, direct_results)
np.save(filename1, aivat_total)
np.save(filename2, direct_total)
mean, sigma = np.mean(aivat_total), np.std(aivat_total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(aivat_total)))
dis = conf_int[1] - mean
logger.debug("match = {}, aivat_result = {:.6f} ± {:.6f}, std = {:.6f}", len(aivat_total), mean, dis, sigma)
mean, sigma = np.mean(direct_total), np.std(direct_total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(direct_total)))
dis = conf_int[1] - mean
logger.debug("match = {}, direct_result = {:.6f} ± {:.6f}, std = {:.6f}", len(direct_total), mean, dis, sigma)
def save_result(self, results):
path = "./data/result/"
name = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + ".npy"
filename = path + name
if os.path.exists(filename):
pre_results = np.load(filename)
else:
pre_results = np.array([])
total = np.append(pre_results, results)
np.save(filename, total)
mean, sigma = np.mean(total), np.std(total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(total)))
dis = conf_int[1] - mean
logger.debug("match = {}, result = {:.6f} ± {:.6f}", len(total), mean, dis)
def run_match(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
while not node.terminal:
if node.current_player == my_pos:
strategy = node.strategy[:, my_card]
action = self.choose_action(strategy)
node = node.children[action]
elif node.current_player == opp_pos:
strategy = node.strategy[:, opp_card]
action = self.choose_action(strategy)
node = node.children[action]
else:
for child in node.children:
if child.board[0].item() == public_card:
node = child
break
result = self.compute_utility(node, my_pos, opp_pos, my_card, opp_card, public_card)
return result
def compute_utility(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
if node.node_type == constants.node_types.terminal_fold:
if node.current_player == my_pos:
result = node.pot
else:
result = -node.pot
elif node.node_type == constants.node_types.terminal_call:
strength = card_tools.get_hand_strength(node.board)
if strength[my_card] > strength[opp_card]:
result = node.pot
elif strength[my_card] < strength[opp_card]:
result = -node.pot
else:
result = 0
return result
def choose_action(self, strategy):
prop = random.random()
cnt = 0
for i, s in enumerate(strategy):
cnt += s
if prop < cnt:
return i
return len(strategy) - 1
def compute_correction_item(self, node, action):
range_children = torch.zeros_like(node.estimate_value)
for i, child in enumerate(node.children):
range_children[i] = child.range[node.current_player]
correction_item = torch.sum(node.estimate_value * range_children / torch.sum(range_children))
correction_item -= torch.sum(node.estimate_value[action, :] * range_children[action, :] /
torch.sum(range_children[action, :]))
return correction_item
def run_match_using_AIVAT(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
reach_prop = card_tools.get_uniform_range(node.board)
correction_items = 0
while not node.terminal:
if node.current_player == my_pos:
strategy = node.strategy[:, my_card]
action = self.choose_action(strategy)
reach_prop *= node.strategy[action, :]
correction_items += self.compute_correction_item(node, action)
node = node.children[action]
elif node.current_player == opp_pos:
strategy = node.strategy[:, opp_card]
action = self.choose_action(strategy)
node = node.children[action]
else:
for i, child in enumerate(node.children):
if child.board[0].item() == public_card:
reach_prop *= node.strategy[i, :]
correction_items += self.compute_correction_item(node, i)
node = child
break
terminal_equity = self.get_terminal_equity(node)
if node.node_type == constants.node_types.terminal_call:
equity_matrix = terminal_equity.call_matrix
elif node.node_type == constants.node_types.terminal_fold:
equity_matrix = terminal_equity.fold_matrix
        # Subtract the opponent's own card probability to remove the conflict between the two players' hands
base_value = torch.sum(equity_matrix[:, opp_card] * reach_prop[:] / (torch.sum(reach_prop[:]) - reach_prop[opp_card])) * node.pot
if node.node_type == constants.node_types.terminal_fold and node.current_player == opp_pos:
base_value = -base_value
result = base_value + correction_items
result2 = self.compute_utility(node, my_pos, opp_pos, my_card, opp_card, public_card)
return result, result2
def get_terminal_equity(self, node):
if node.board not in self.terminal_equity_cache:
self.terminal_equity_cache[node.board] = TerminalEquity()
self.terminal_equity_cache[node.board].set_board(node.board)
return self.terminal_equity_cache[node.board] | src/tree/tree_match.py | import os
import torch
from settings import constants, arg
from game import card_tools, TerminalEquity
from logs import logger
import numpy as np
import random
from scipy import stats
class TreeMatch():
def __init__(self):
self.match_nums = 1000000
self.terminal_equity_cache = {}
def match(self, root):
my_pos, opp_pos = [constants.players.P1, constants.players.P2]
results = []
for i in range(self.match_nums):
cards = [i for i in range(constants.card_count)]
random.shuffle(cards)
my_card, opp_card, public_card = cards[:3]
result = self.run_match(root, my_pos, opp_pos, my_card, opp_card, public_card)
results.append(result)
if (i + 1) % 1000 == 0:
self.save_result(results)
results = []
my_pos, opp_pos = opp_pos, my_pos
def match_using_AIVAT(self, root):
random.seed(0)
my_pos, opp_pos = [constants.players.P1, constants.players.P2]
aivat_results, direct_results = [], []
for i in range(self.match_nums):
cards = [i for i in range(constants.card_count)]
random.shuffle(cards)
my_card, opp_card, public_card = cards[:3]
aivat_result, direct_result = self.run_match_using_AIVAT(root, my_pos, opp_pos, my_card, opp_card, public_card)
aivat_results.append(aivat_result)
direct_results.append(direct_result)
if (i + 1) % 1000 == 0:
self.save_aivat_result(aivat_results, direct_results)
aivat_results, direct_results = [], []
my_pos, opp_pos = opp_pos, my_pos
def save_aivat_result(self, aivat_results, direct_results):
path = "./data/result/"
name1 = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + "_aivat" + ".npy"
name2 = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + "_direct" + ".npy"
filename1 = path + name1
filename2 = path + name2
if os.path.exists(filename1):
pre_aivat_results = np.load(filename1)
else:
pre_aivat_results = np.array([])
if os.path.exists(filename2):
pre_direct_results = np.load(filename2)
else:
pre_direct_results = np.array([])
aivat_total = np.append(pre_aivat_results, aivat_results)
direct_total = np.append(pre_direct_results, direct_results)
np.save(filename1, aivat_total)
np.save(filename2, direct_total)
mean, sigma = np.mean(aivat_total), np.std(aivat_total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(aivat_total)))
dis = conf_int[1] - mean
logger.debug("match = {}, aivat_result = {:.6f} ± {:.6f}, std = {:.6f}", len(aivat_total), mean, dis, sigma)
mean, sigma = np.mean(direct_total), np.std(direct_total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(direct_total)))
dis = conf_int[1] - mean
logger.debug("match = {}, direct_result = {:.6f} ± {:.6f}, std = {:.6f}", len(direct_total), mean, dis, sigma)
def save_result(self, results):
path = "./data/result/"
name = str(arg.cfr_iters) + "_vs_" + str(arg.cfr_iters) + ".npy"
filename = path + name
if os.path.exists(filename):
pre_results = np.load(filename)
else:
pre_results = np.array([])
total = np.append(pre_results, results)
np.save(filename, total)
mean, sigma = np.mean(total), np.std(total)
conf_int = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(total)))
dis = conf_int[1] - mean
logger.debug("match = {}, result = {:.6f} ± {:.6f}", len(total), mean, dis)
def run_match(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
while not node.terminal:
if node.current_player == my_pos:
strategy = node.strategy[:, my_card]
action = self.choose_action(strategy)
node = node.children[action]
elif node.current_player == opp_pos:
strategy = node.strategy[:, opp_card]
action = self.choose_action(strategy)
node = node.children[action]
else:
for child in node.children:
if child.board[0].item() == public_card:
node = child
break
result = self.compute_utility(node, my_pos, opp_pos, my_card, opp_card, public_card)
return result
def compute_utility(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
if node.node_type == constants.node_types.terminal_fold:
if node.current_player == my_pos:
result = node.pot
else:
result = -node.pot
elif node.node_type == constants.node_types.terminal_call:
strength = card_tools.get_hand_strength(node.board)
if strength[my_card] > strength[opp_card]:
result = node.pot
elif strength[my_card] < strength[opp_card]:
result = -node.pot
else:
result = 0
return result
def choose_action(self, strategy):
prop = random.random()
cnt = 0
for i, s in enumerate(strategy):
cnt += s
if prop < cnt:
return i
return len(strategy) - 1
def compute_correction_item(self, node, action):
range_children = torch.zeros_like(node.estimate_value)
for i, child in enumerate(node.children):
range_children[i] = child.range[node.current_player]
correction_item = torch.sum(node.estimate_value * range_children / torch.sum(range_children))
correction_item -= torch.sum(node.estimate_value[action, :] * range_children[action, :] /
torch.sum(range_children[action, :]))
return correction_item
def run_match_using_AIVAT(self, node, my_pos, opp_pos, my_card, opp_card, public_card):
reach_prop = card_tools.get_uniform_range(node.board)
correction_items = 0
while not node.terminal:
if node.current_player == my_pos:
strategy = node.strategy[:, my_card]
action = self.choose_action(strategy)
reach_prop *= node.strategy[action, :]
correction_items += self.compute_correction_item(node, action)
node = node.children[action]
elif node.current_player == opp_pos:
strategy = node.strategy[:, opp_card]
action = self.choose_action(strategy)
node = node.children[action]
else:
for i, child in enumerate(node.children):
if child.board[0].item() == public_card:
reach_prop *= node.strategy[i, :]
correction_items += self.compute_correction_item(node, i)
node = child
break
terminal_equity = self.get_terminal_equity(node)
if node.node_type == constants.node_types.terminal_call:
equity_matrix = terminal_equity.call_matrix
elif node.node_type == constants.node_types.terminal_fold:
equity_matrix = terminal_equity.fold_matrix
        # Subtract the opponent's own card probability to remove the conflict between the two players' hands
base_value = torch.sum(equity_matrix[:, opp_card] * reach_prop[:] / (torch.sum(reach_prop[:]) - reach_prop[opp_card])) * node.pot
if node.node_type == constants.node_types.terminal_fold and node.current_player == opp_pos:
base_value = -base_value
result = base_value + correction_items
result2 = self.compute_utility(node, my_pos, opp_pos, my_card, opp_card, public_card)
return result, result2
def get_terminal_equity(self, node):
if node.board not in self.terminal_equity_cache:
self.terminal_equity_cache[node.board] = TerminalEquity()
self.terminal_equity_cache[node.board].set_board(node.board)
return self.terminal_equity_cache[node.board] | 0.325413 | 0.278609 |
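# --- Illustrative sketch (not part of the dataset row above) ---
# save_result and save_aivat_result report the running mean of the match results
# together with a 95% normal-approximation confidence interval. The same
# calculation on a few made-up per-match results:
import numpy as np
from scipy import stats

demo_results = np.array([1.0, -0.5, 2.0, 0.0, -1.0, 1.5])
mean, sigma = np.mean(demo_results), np.std(demo_results)
# Confidence interval for the mean, using the standard error sigma / sqrt(n).
low, high = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(demo_results)))
print("result = {:.6f} ± {:.6f}".format(mean, high - mean))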
import os
import hashlib
from termcolor import colored
import pickle
from . import init as user_init
from . import utility as user_utility
from . import drive as user_drive
class Folder:
def __init__(self, name, root, parent_id, folder_id):
self.name = name
self.root = root
self.parent_id = parent_id
self.folder_id = folder_id
def __str__(self):
return f"Folder data : {self.name} : {self.root} : {self.parent_id} : {self.folder_id}"
class File:
def __init__(self, name, root, parent_id, file_id, file_hash):
self.name = name
self.root = root
self.parent_id = parent_id
self.file_id = file_id
self.hash = file_hash
def __str__(self):
return f"File data : {self.name} : {self.root} : {self.parent_id} : {self.file_id} : {self.hash}"
def hashing_function(filename):
""" Takes in complete file path as input and returns MD5 hash of contents """
md5_hash = hashlib.md5()
with open(filename, "rb") as f:
content = f.read()
md5_hash.update(content)
return md5_hash.hexdigest()
def ignore_list(curr_dir, mode = 0):
"""
Returns a list of only filenames (without root and with extensions) to be ignored
Mode = 0 -> returns files
Mode = 1 -> returns directories
"""
path = os.path.join(curr_dir, '.sink', 'ignore.txt')
# path = curr_dir + "/.sink/ignore.txt"
with open(path, "r") as ignorefile:
ignore_files = ignorefile.read().split("\n")
ignore_directories = []
i = 0
while i < len(ignore_files):
entry = ignore_files[i]
if not entry:
i += 1
continue
if entry[0] == '!':
ignore_directories.append(entry[1:])
ignore_files.pop(i)
else:
i += 1
if mode == 0:
return ignore_files
else:
return ignore_directories
def write_metadata(metadata, mode = 0):
"""
Writes the filedict data to filesdata in metadata. Can take dict as input or default
Mode : 0 -> filesdata.pickle
1 -> foldersdata.pickle
"""
curr_dir = user_init.read_config_file("general", "root")
if mode == 0:
path = os.path.join(curr_dir, '.sink', 'meta', 'filesdata.pickle')
# path = curr_dir + "/.sink/meta/filesdata.pickle"
else:
path = os.path.join(curr_dir, '.sink', 'meta', 'foldersdata.pickle')
# path = curr_dir + "/.sink/meta/foldersdata.pickle"
with open(path , "wb") as file:
pickle.dump(metadata, file)
user_utility.log(f"Metadata written to file mode : {mode}")
def read_metadata(mode = 0):
"""
Loads the datafile from .sink/meta/filesdata.pickle and returns dict
Mode : 0 -> filesdata.pickle
1 -> foldersdata.pickle
"""
curr_dir = user_init.read_config_file("general", "root")
if mode == 0:
path = os.path.join(curr_dir, '.sink', 'meta', 'filesdata.pickle')
# path = curr_dir + "/.sink/meta/filesdata.pickle"
else:
path = os.path.join(curr_dir, '.sink', 'meta', 'foldersdata.pickle')
# path = curr_dir + "/.sink/meta/foldersdata.pickle"
with open(path, "rb") as file:
prefiledict = pickle.load(file)
return prefiledict
def make_folder_changes():
""" Take data from scan_folder_changes and then commit them to drive """
mydrive = user_drive.MyDrive()
folder_data = read_metadata(1)
new_folders , deleted_folders = scan_folder_changes()
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
# Adding folders
for k , v in new_folders.items():
# Check if folder root is drive root v[0] = root and v[1] = dir
if v[0] == curr_dir:
new_folder_id = mydrive.create_folder(v[1], curr_dir_id)
folder_data[k] = Folder(v[1], v[0], curr_dir_id, new_folder_id)
else:
try:
new_folder_id = mydrive.create_folder(v[1], folder_data[v[0]].folder_id)
folder_data[k] = Folder(v[1], v[0], folder_data[v[0]].folder_id, new_folder_id)
except:
pass
write_metadata(folder_data, 1)
## Deletion logic
for k, v in deleted_folders.items():
mydrive.delete_file(v.folder_id)
folder_data.pop(k)
write_metadata(folder_data, 1)
user_utility.log(f"{len(new_folders)} folders added , {len(deleted_folders)} folders deleted")
write_metadata(folder_data, 1)
def if_ignored(root, dir, ignore_list):
""" Input -> Complete root path
        Output -> True if any folder on the path is in the ignore list, else False
"""
curr_dir = user_utility.read_config_file()
root = root.replace(curr_dir, '')
if root != '':
root = root[1:].split(root[0])
for folder in root:
if folder in ignore_list:
return True
return False
def scan_folder_changes():
"""
Scans for any new folders created or deleted and
returns a tuple of dicts having new folders and deleted folders
"""
try:
print("Folders : ")
curr_dir = user_init.read_config_file()
ignored = ignore_list(curr_dir , 1 )
folder_data = read_metadata(1)
# Entries in the form of 'root + dir : (root, dir)'
new_folders = dict()
for root, dirs, files in os.walk(curr_dir):
for dir in dirs:
if dir in ignored or if_ignored(root, dir, ignored):
continue
else:
if os.path.join(root, dir) not in folder_data.keys():
new_folders[os.path.join(root, dir)] = (root, dir)
# Deleted
deleted_folders = dict()
for folder, data in folder_data.items():
if not (os.path.exists(folder)):
deleted_folders[folder] = data
# Logging
if len(new_folders) == 0:
print("No new folders added!")
else:
print(f"{len(new_folders)} new folder/folders added : ")
for key in new_folders.keys():
print(colored("\t" + key, 'green'))
if len(deleted_folders) == 0:
print("No folders were deleted!")
else:
print(f"{len(deleted_folders)} folder/folders were deleted : ")
for key in deleted_folders.keys():
print(colored("\t" + key, 'red'))
return (new_folders, deleted_folders)
except:
user_utility.print_error("There is some problem with the installation! Reinstall to continue")
exit(1)
def scan_file_changes():
"""
Returns the data of changed files
(added, deleted, updated)
added -> key : (root, dir)
deleted -> path : object
updated -> path : object
"""
print("Files : ")
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
file_data = read_metadata(0)
folder_data = read_metadata(1)
ignored = ignore_list(curr_dir)
newfiles = dict()
for root, dirs, files in os.walk(curr_dir):
for file in files:
if file in ignored:
continue
else:
if os.path.join(root, file) not in file_data.keys():
if root in folder_data:
newfiles[os.path.join(root, file)] = (root, file)
if root == curr_dir:
newfiles[os.path.join(root, file)] = (root, file)
# print(newfiles)
deleted_files = dict()
for file, data in file_data.items():
if not (os.path.exists(file)):
deleted_files[file] = data
# print(deleted_files)
updated_files = dict()
for file, data in file_data.items():
if os.path.exists(file):
if hashing_function(file) != data.hash:
updated_files[file] = data
updated_files[file].hash = hashing_function(file)
# print(updated_files)
if len(newfiles) == 0:
print("No new files added!")
else:
print(f"{len(newfiles)} new file/files added : ")
for key in newfiles.keys():
print(colored("\t" + key, 'green'))
if len(deleted_files) == 0:
print("No files were deleted!")
else:
print(f"{len(deleted_files)} file/files were deleted : ")
for key in deleted_files.keys():
print(colored("\t" + key, 'red'))
if len(updated_files) == 0:
print("No files were updated!")
else:
print(f"{len(updated_files)} file/files were updated: ")
for key in updated_files.keys():
print(colored("\t" + key, 'green'))
return (newfiles, deleted_files, updated_files)
def make_file_changes():
""" Commit the scanned changes to the drive and local machines """
mydrive = user_drive.MyDrive()
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
new_files , deleted_files , updated_files = scan_file_changes()
file_data = read_metadata(0)
folder_data = read_metadata(1)
# Addition
for file, value in new_files.items():
if value[0] == curr_dir:
new_file_id = mydrive.upload_file(value[1], value[0], curr_dir_id)
new_file_hash = hashing_function(file)
file_data[file] = File(value[1], value[0], curr_dir_id,new_file_id, new_file_hash)
else:
parent_id = folder_data[value[0]].folder_id
new_file_id = mydrive.upload_file(value[1], value[0], parent_id)
new_file_hash = hashing_function(file)
file_data[file] = File(value[1], value[0], parent_id, new_file_id, new_file_hash)
write_metadata(file_data, 0)
# print(file_data[file])
# Deletion
for file, data in deleted_files.items():
try:
mydrive.delete_file(data.file_id)
except:
print("File Not Found on the Drive")
file_data.pop(file)
write_metadata(file_data, 0)
# Updation
for file, data in updated_files.items():
mydrive.update_file(data.name, data.root, data.file_id)
print(f"{file} : Updated!")
file_data[file] = data
write_metadata(file_data, 0)
user_utility.log(f"{len(new_files)} files added , {len(deleted_files)} files deleted and {len(updated_files)} files were updated")
write_metadata(file_data, 0)
def init_folder_structure():
""" Initializes the folder structure and generates the folder data
Folder data format :
name, root, parent_id, folder_id
"""
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
ignored = ignore_list(curr_dir , 1 )
print(ignored)
mydrive = user_drive.MyDrive()
folders = dict()
for root, dirs, files in os.walk(curr_dir):
for dir in dirs:
if dir in ignored:
continue
else:
if root == curr_dir:
new_folder_id = mydrive.create_folder(dir, curr_dir_id)
folders[os.path.join(root, dir)] = Folder(dir, root, curr_dir_id, new_folder_id)
else:
if root in folders:
new_folder_id = mydrive.create_folder(dir, folders[root].folder_id)
folders[os.path.join(root,dir)] = Folder(dir, root, folders[root].folder_id, new_folder_id)
write_metadata(folders, 1)
user_utility.log("Folder structure initialised properly!")
init_file_structure()
def init_file_structure():
""" Initializes the files inside the folders
File data format:
        name, root, parent_id, file_id, hash
"""
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
folder_data = read_metadata(1)
ignored = ignore_list(curr_dir)
mydrive = user_drive.MyDrive()
filesdict = dict()
for root, dirs, files in os.walk(curr_dir):
for file in files:
if file in ignored:
continue
else:
if root == curr_dir:
new_file_id = mydrive.upload_file(file, root, curr_dir_id)
new_file_hash = hashing_function(os.path.join(root, file))
filesdict[os.path.join(root, file)] = File(file, root, curr_dir_id, new_file_id , new_file_hash)
else:
if root in folder_data:
parent_id = folder_data[root].folder_id
new_file_id = mydrive.upload_file(file, root, parent_id)
new_file_hash = hashing_function(os.path.join(root, file))
filesdict[os.path.join(root, file)] = File(file, root, parent_id, new_file_id, new_file_hash)
write_metadata(filesdict, 0)
user_utility.edit_config_file("general", "populated", "True")
user_utility.log("File structure initialised properly!")
print("File structure initialised properly!") | src/scan.py | import os
import hashlib
from termcolor import colored
import pickle
from . import init as user_init
from . import utility as user_utility
from . import drive as user_drive
class Folder:
def __init__(self, name, root, parent_id, folder_id):
self.name = name
self.root = root
self.parent_id = parent_id
self.folder_id = folder_id
def __str__(self):
return f"Folder data : {self.name} : {self.root} : {self.parent_id} : {self.folder_id}"
class File:
def __init__(self, name, root, parent_id, file_id, file_hash):
self.name = name
self.root = root
self.parent_id = parent_id
self.file_id = file_id
self.hash = file_hash
def __str__(self):
return f"File data : {self.name} : {self.root} : {self.parent_id} : {self.file_id} : {self.hash}"
def hashing_function(filename):
""" Takes in complete file path as input and returns MD5 hash of contents """
md5_hash = hashlib.md5()
with open(filename, "rb") as f:
content = f.read()
md5_hash.update(content)
return md5_hash.hexdigest()
def ignore_list(curr_dir, mode = 0):
"""
Returns a list of only filenames (without root and with extensions) to be ignored
Mode = 0 -> returns files
Mode = 1 -> returns directories
"""
path = os.path.join(curr_dir, '.sink', 'ignore.txt')
# path = curr_dir + "/.sink/ignore.txt"
with open(path, "r") as ignorefile:
ignore_files = ignorefile.read().split("\n")
ignore_directories = []
i = 0
while i < len(ignore_files):
entry = ignore_files[i]
if not entry:
i += 1
continue
if entry[0] == '!':
ignore_directories.append(entry[1:])
ignore_files.pop(i)
else:
i += 1
if mode == 0:
return ignore_files
else:
return ignore_directories
def write_metadata(metadata, mode = 0):
"""
Writes the filedict data to filesdata in metadata. Can take dict as input or default
Mode : 0 -> filesdata.pickle
1 -> foldersdata.pickle
"""
curr_dir = user_init.read_config_file("general", "root")
if mode == 0:
path = os.path.join(curr_dir, '.sink', 'meta', 'filesdata.pickle')
# path = curr_dir + "/.sink/meta/filesdata.pickle"
else:
path = os.path.join(curr_dir, '.sink', 'meta', 'foldersdata.pickle')
# path = curr_dir + "/.sink/meta/foldersdata.pickle"
with open(path , "wb") as file:
pickle.dump(metadata, file)
user_utility.log(f"Metadata written to file mode : {mode}")
def read_metadata(mode = 0):
"""
Loads the datafile from .sink/meta/filesdata.pickle and returns dict
Mode : 0 -> filesdata.pickle
1 -> foldersdata.pickle
"""
curr_dir = user_init.read_config_file("general", "root")
if mode == 0:
path = os.path.join(curr_dir, '.sink', 'meta', 'filesdata.pickle')
# path = curr_dir + "/.sink/meta/filesdata.pickle"
else:
path = os.path.join(curr_dir, '.sink', 'meta', 'foldersdata.pickle')
# path = curr_dir + "/.sink/meta/foldersdata.pickle"
with open(path, "rb") as file:
prefiledict = pickle.load(file)
return prefiledict
def make_folder_changes():
""" Take data from scan_folder_changes and then commit them to drive """
mydrive = user_drive.MyDrive()
folder_data = read_metadata(1)
new_folders , deleted_folders = scan_folder_changes()
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
# Adding folders
for k , v in new_folders.items():
# Check if folder root is drive root v[0] = root and v[1] = dir
if v[0] == curr_dir:
new_folder_id = mydrive.create_folder(v[1], curr_dir_id)
folder_data[k] = Folder(v[1], v[0], curr_dir_id, new_folder_id)
else:
try:
new_folder_id = mydrive.create_folder(v[1], folder_data[v[0]].folder_id)
folder_data[k] = Folder(v[1], v[0], folder_data[v[0]].folder_id, new_folder_id)
except:
pass
write_metadata(folder_data, 1)
## Deletion logic
for k, v in deleted_folders.items():
mydrive.delete_file(v.folder_id)
folder_data.pop(k)
write_metadata(folder_data, 1)
user_utility.log(f"{len(new_folders)} folders added , {len(deleted_folders)} folders deleted")
write_metadata(folder_data, 1)
def if_ignored(root, dir, ignore_list):
""" Input -> Complete root path
        Output -> True if any folder on the path is in the ignore list, else False
"""
curr_dir = user_utility.read_config_file()
root = root.replace(curr_dir, '')
if root != '':
root = root[1:].split(root[0])
for folder in root:
if folder in ignore_list:
return True
return False
def scan_folder_changes():
"""
Scans for any new folders created or deleted and
returns a tuple of dicts having new folders and deleted folders
"""
try:
print("Folders : ")
curr_dir = user_init.read_config_file()
ignored = ignore_list(curr_dir , 1 )
folder_data = read_metadata(1)
# Entries in the form of 'root + dir : (root, dir)'
new_folders = dict()
for root, dirs, files in os.walk(curr_dir):
for dir in dirs:
if dir in ignored or if_ignored(root, dir, ignored):
continue
else:
if os.path.join(root, dir) not in folder_data.keys():
new_folders[os.path.join(root, dir)] = (root, dir)
# Deleted
deleted_folders = dict()
for folder, data in folder_data.items():
if not (os.path.exists(folder)):
deleted_folders[folder] = data
# Logging
if len(new_folders) == 0:
print("No new folders added!")
else:
print(f"{len(new_folders)} new folder/folders added : ")
for key in new_folders.keys():
print(colored("\t" + key, 'green'))
if len(deleted_folders) == 0:
print("No folders were deleted!")
else:
print(f"{len(deleted_folders)} folder/folders were deleted : ")
for key in deleted_folders.keys():
print(colored("\t" + key, 'red'))
return (new_folders, deleted_folders)
except:
user_utility.print_error("There is some problem with the installation! Reinstall to continue")
exit(1)
def scan_file_changes():
"""
Returns the data of changed files
(added, deleted, updated)
added -> key : (root, dir)
deleted -> path : object
updated -> path : object
"""
print("Files : ")
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
file_data = read_metadata(0)
folder_data = read_metadata(1)
ignored = ignore_list(curr_dir)
newfiles = dict()
for root, dirs, files in os.walk(curr_dir):
for file in files:
if file in ignored:
continue
else:
if os.path.join(root, file) not in file_data.keys():
if root in folder_data:
newfiles[os.path.join(root, file)] = (root, file)
if root == curr_dir:
newfiles[os.path.join(root, file)] = (root, file)
# print(newfiles)
deleted_files = dict()
for file, data in file_data.items():
if not (os.path.exists(file)):
deleted_files[file] = data
# print(deleted_files)
updated_files = dict()
for file, data in file_data.items():
if os.path.exists(file):
if hashing_function(file) != data.hash:
updated_files[file] = data
updated_files[file].hash = hashing_function(file)
# print(updated_files)
if len(newfiles) == 0:
print("No new files added!")
else:
print(f"{len(newfiles)} new file/files added : ")
for key in newfiles.keys():
print(colored("\t" + key, 'green'))
if len(deleted_files) == 0:
print("No files were deleted!")
else:
print(f"{len(deleted_files)} file/files were deleted : ")
for key in deleted_files.keys():
print(colored("\t" + key, 'red'))
if len(updated_files) == 0:
print("No files were updated!")
else:
print(f"{len(updated_files)} file/files were updated: ")
for key in updated_files.keys():
print(colored("\t" + key, 'green'))
return (newfiles, deleted_files, updated_files)
def make_file_changes():
""" Commit the scanned changes to the drive and local machines """
mydrive = user_drive.MyDrive()
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
new_files , deleted_files , updated_files = scan_file_changes()
file_data = read_metadata(0)
folder_data = read_metadata(1)
# Addition
for file, value in new_files.items():
if value[0] == curr_dir:
new_file_id = mydrive.upload_file(value[1], value[0], curr_dir_id)
new_file_hash = hashing_function(file)
file_data[file] = File(value[1], value[0], curr_dir_id,new_file_id, new_file_hash)
else:
parent_id = folder_data[value[0]].folder_id
new_file_id = mydrive.upload_file(value[1], value[0], parent_id)
new_file_hash = hashing_function(file)
file_data[file] = File(value[1], value[0], parent_id, new_file_id, new_file_hash)
write_metadata(file_data, 0)
# print(file_data[file])
# Deletion
for file, data in deleted_files.items():
try:
mydrive.delete_file(data.file_id)
except:
print("File Not Found on the Drive")
file_data.pop(file)
write_metadata(file_data, 0)
# Updation
for file, data in updated_files.items():
mydrive.update_file(data.name, data.root, data.file_id)
print(f"{file} : Updated!")
file_data[file] = data
write_metadata(file_data, 0)
user_utility.log(f"{len(new_files)} files added , {len(deleted_files)} files deleted and {len(updated_files)} files were updated")
write_metadata(file_data, 0)
def init_folder_structure():
""" Initializes the folder structure and generates the folder data
Folder data format :
name, root, parent_id, folder_id
"""
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
ignored = ignore_list(curr_dir , 1 )
print(ignored)
mydrive = user_drive.MyDrive()
folders = dict()
for root, dirs, files in os.walk(curr_dir):
for dir in dirs:
if dir in ignored:
continue
else:
if root == curr_dir:
new_folder_id = mydrive.create_folder(dir, curr_dir_id)
folders[os.path.join(root, dir)] = Folder(dir, root, curr_dir_id, new_folder_id)
else:
if root in folders:
new_folder_id = mydrive.create_folder(dir, folders[root].folder_id)
folders[os.path.join(root,dir)] = Folder(dir, root, folders[root].folder_id, new_folder_id)
write_metadata(folders, 1)
user_utility.log("Folder structure initialised properly!")
init_file_structure()
def init_file_structure():
""" Initializes the files inside the folders
File data format:
        name, root, parent_id, file_id, hash
"""
curr_dir = user_init.read_config_file()
curr_dir_id = user_init.read_config_file("user", "folder_id")
folder_data = read_metadata(1)
ignored = ignore_list(curr_dir)
mydrive = user_drive.MyDrive()
filesdict = dict()
for root, dirs, files in os.walk(curr_dir):
for file in files:
if file in ignored:
continue
else:
if root == curr_dir:
new_file_id = mydrive.upload_file(file, root, curr_dir_id)
new_file_hash = hashing_function(os.path.join(root, file))
filesdict[os.path.join(root, file)] = File(file, root, curr_dir_id, new_file_id , new_file_hash)
else:
if root in folder_data:
parent_id = folder_data[root].folder_id
new_file_id = mydrive.upload_file(file, root, parent_id)
new_file_hash = hashing_function(os.path.join(root, file))
filesdict[os.path.join(root, file)] = File(file, root, parent_id, new_file_id, new_file_hash)
write_metadata(filesdict, 0)
user_utility.edit_config_file("general", "populated", "True")
user_utility.log("File structure initialised properly!")
print("File structure initialised properly!") | 0.318591 | 0.133981 |
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class CreateApprovalInstanceReq(object):
approval_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "approval_code"}
    ) # Approval definition code
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
    ) # User who initiates the approval
tenant_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "tenant_id"}
    ) # Platform tenant ID
open_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "open_id"}
    ) # Open id of the user who initiates the approval; if user_id is passed, user_id takes precedence
department_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "department_id"}
    ) # Department id of the initiating user. Optional if the user belongs to only one department; if the user belongs to multiple departments, the first department in the list is selected by default
form: lark_type_approval.ApprovalWidgetList = attr.ib(
factory=lambda: lark_type_approval.ApprovalWidgetList(),
metadata={"req_type": "json", "key": "form"},
    ) # JSON array, **widget values**
node_approver_user_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_approver_user_id_list"}
    ) # If the approval has nodes where the initiator picks the approvers, the approvers for those nodes must be provided<br>key: node id or custom node id, obtained via [View approval definition](https://open.feishu.cn/document/ukTMukTMukTM/uADNyUjLwQjM14CM0ITN)<br> value: list of approvers
node_approver_open_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_approver_open_id_list"}
    ) # Open ids of initiator-selected approvers
node_cc_user_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_cc_user_id_list"}
    ) # If the approval has nodes where the initiator picks the CC recipients, the CC recipients for those nodes may be provided<br>key: node id or custom node id, obtained via [View approval definition](https://open.feishu.cn/document/ukTMukTMukTM/uADNyUjLwQjM14CM0ITN)<br> value: list of CC recipients<br>at most 20 CC recipients may be selected per node
node_cc_open_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_cc_open_id_list"}
    ) # Open ids of initiator-selected CC recipients<br>at most 20 CC recipients may be selected per node
uuid: str = attr.ib(
default="", metadata={"req_type": "json", "key": "uuid"}
    ) # Approval instance uuid, used for idempotency; a given uuid can only be used to create one approval instance, and a conflict returns error code 60012. The format must be XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, case-insensitive
@attr.s
class CreateApprovalInstanceResp(object):
instance_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "instance_code"}
    ) # Approval instance Code
def _gen_create_approval_instance_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=CreateApprovalInstanceResp,
scope="Approval",
api="CreateApprovalInstance",
method="POST",
url="https://www.feishu.cn/approval/openapi/v2/instance/create",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
) | pylark/api_service_approval_instance_create.py |
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class CreateApprovalInstanceReq(object):
approval_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "approval_code"}
    ) # Approval definition code
user_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "user_id"}
    ) # User who initiates the approval
tenant_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "tenant_id"}
    ) # Platform tenant ID
open_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "open_id"}
    ) # Open id of the user who initiates the approval; if user_id is passed, user_id takes precedence
department_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "department_id"}
    ) # Department id of the initiating user. Optional if the user belongs to only one department; if the user belongs to multiple departments, the first department in the list is selected by default
form: lark_type_approval.ApprovalWidgetList = attr.ib(
factory=lambda: lark_type_approval.ApprovalWidgetList(),
metadata={"req_type": "json", "key": "form"},
    ) # JSON array, **widget values**
node_approver_user_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_approver_user_id_list"}
    ) # If the approval has nodes where the initiator picks the approvers, the approvers for those nodes must be provided<br>key: node id or custom node id, obtained via [View approval definition](https://open.feishu.cn/document/ukTMukTMukTM/uADNyUjLwQjM14CM0ITN)<br> value: list of approvers
node_approver_open_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_approver_open_id_list"}
    ) # Open ids of initiator-selected approvers
node_cc_user_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_cc_user_id_list"}
    ) # If the approval has nodes where the initiator picks the CC recipients, the CC recipients for those nodes may be provided<br>key: node id or custom node id, obtained via [View approval definition](https://open.feishu.cn/document/ukTMukTMukTM/uADNyUjLwQjM14CM0ITN)<br> value: list of CC recipients<br>at most 20 CC recipients may be selected per node
node_cc_open_id_list: typing.Dict = attr.ib(
default=None, metadata={"req_type": "json", "key": "node_cc_open_id_list"}
    ) # Open ids of initiator-selected CC recipients<br>at most 20 CC recipients may be selected per node
uuid: str = attr.ib(
default="", metadata={"req_type": "json", "key": "uuid"}
    ) # Approval instance uuid, used for idempotency; a given uuid can only be used to create one approval instance, and a conflict returns error code 60012. The format must be XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, case-insensitive
@attr.s
class CreateApprovalInstanceResp(object):
instance_code: str = attr.ib(
default="", metadata={"req_type": "json", "key": "instance_code"}
    ) # Approval instance Code
def _gen_create_approval_instance_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=CreateApprovalInstanceResp,
scope="Approval",
api="CreateApprovalInstance",
method="POST",
url="https://www.feishu.cn/approval/openapi/v2/instance/create",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
) | 0.354098 | 0.191082 |
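# --- Illustrative sketch (not part of the dataset row above) ---
# CreateApprovalInstanceReq attaches each field's JSON wire name via attr metadata
# ("req_type" and "key"). A sketch of how such metadata could drive serialization;
# DemoReq and to_json_body are hypothetical helpers, not part of pylark:
import attr

@attr.s
class DemoReq(object):
    approval_code: str = attr.ib(default="", metadata={"req_type": "json", "key": "approval_code"})
    user_id: str = attr.ib(default="", metadata={"req_type": "json", "key": "user_id"})

def to_json_body(req):
    body = {}
    for field in attr.fields(type(req)):
        # Keep only fields marked as JSON and skip empty values.
        if field.metadata.get("req_type") != "json":
            continue
        value = getattr(req, field.name)
        if value not in ("", None):
            body[field.metadata["key"]] = value
    return body

print(to_json_body(DemoReq(approval_code="ABC123", user_id="u-1")))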
import os
from time import sleep,time
from rm_dir import remover
#Function to scan for empty directories and collect them in a list
def dir_scanner(path):
emptys=[]
for paths,dirs,files in os.walk(path):
if len(files)<=0 and len(dirs)<=0:
emptys.append(paths)
print("\n***Empty directory scan complete***\n")
return emptys
#Deleting empty folders
def removeing(empty_list):
for dirs_path in empty_list:
k=dirs_path.split("\\")
remover(dirs_path)
item=k.pop()
print(f"Removed \"{item}\" folder")
print("\nDeletion Complete")
#Execution starts here
if __name__=="__main__":
print("""\n
^___________________________________________________________________^
| Welcome to empty directory cleaner! |
| Run me and I will clean all the blank folders from your device |
| **Please don't mess up with the code before running or |
| I'm not responsible if it malfunctioned** |
| |
| Checkout my github http://www.github.com/reshavcodes |
| |
|___________________________________________________________________|
""")
print("#____Enter the directory path to start scanning___#\n")
path=input()
scan=True
#Checking if given path exists
if os.path.exists(path):
print("Scanning.......")
t1=time()
#Getting the empty directories
try:
emptys=dir_scanner(path)
except:
print("Got error while Scanning please scan again")
scan=False
        #Checking if the scanner found any empty directories; stops execution if none were found
if len(emptys)>=1 and scan:
print(f"Found {len(emptys)} empty directories/folders")
sleep(2)
print("***Starting to delete empty folders***\n")
#Deleting those empty directories
removeing(emptys)
print(f"Process complete in {round((time()-t1),2)}s time")
else:
print("Found 0 empty folders\nStopping program")
else:
print("Wrong Path Entered, Please try entering correct path!!!") | main.py | import os
from time import sleep,time
from rm_dir import remover
#Function to scan for empty directories and collect them in a list
def dir_scanner(path):
emptys=[]
for paths,dirs,files in os.walk(path):
if len(files)<=0 and len(dirs)<=0:
emptys.append(paths)
print("\n***Empty directory scan complete***\n")
return emptys
#Deleting empty folders
def removeing(empty_list):
for dirs_path in empty_list:
k=dirs_path.split("\\")
remover(dirs_path)
item=k.pop()
print(f"Removed \"{item}\" folder")
print("\nDeletion Complete")
#Execution starts here
if __name__=="__main__":
print("""\n
^___________________________________________________________________^
| Welcome to empty directory cleaner! |
| Run me and I will clean all the blank folders from your device |
| **Please don't mess up with the code before running or |
| I'm not responsible if it malfunctioned** |
| |
| Checkout my github http://www.github.com/reshavcodes |
| |
|___________________________________________________________________|
""")
print("#____Enter the directory path to start scanning___#\n")
path=input()
scan=True
#Checking if given path exists
if os.path.exists(path):
print("Scanning.......")
t1=time()
#Getting the empty directories
try:
emptys=dir_scanner(path)
except:
print("Got error while Scanning please scan again")
scan=False
        #Checking if the scanner found any empty directories; stops execution if none were found
if len(emptys)>=1 and scan:
print(f"Found {len(emptys)} empty directories/folders")
sleep(2)
print("***Starting to delete empty folders***\n")
#Deleting those empty directories
removeing(emptys)
print(f"Process complete in {round((time()-t1),2)}s time")
else:
print("Found 0 empty folders\nStopping program")
else:
print("Wrong Path Entered, Please try entering correct path!!!") | 0.078539 | 0.101991 |
import resource
import signal
import time
import pytest
from bitmath import MiB
from pji.control.model import ProcessResult
_DEMO_RUSAGE = resource.struct_rusage((2.0, 1.0, 131072, 0, 0, 0, 2216, 0, 0, 0, 0, 0, 0, 0, 246, 129))
_TIME_0_0 = time.time()
_TIME_1_0 = _TIME_0_0 + 1.0
_TIME_1_5 = _TIME_0_0 + 1.5
_TIME_3_0 = _TIME_0_0 + 3.0
_TIME_5_5 = _TIME_0_0 + 5.5
@pytest.mark.unittest
class TestControlModelProcessNormal:
def test_properties(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_1_5,
resource_usage=_DEMO_RUSAGE,
)
assert pr.exitcode == 0
assert pr.signal_code == 0
assert pr.signal is None
assert pr.ok
assert pr.start_time == _TIME_0_0
assert pr.end_time == _TIME_1_5
assert pr.real_time == 1.5
assert pr.resource_usage == _DEMO_RUSAGE
assert pr.cpu_time == 2.0
assert pr.system_time == 1.0
assert pr.max_memory == MiB(128).bytes
def test_repr(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_5_5,
resource_usage=_DEMO_RUSAGE,
)
assert repr(pr) == '<ProcessResult exitcode: 0, real time: 5.500s, cpu time: 2.000s, max memory: 128.0 MiB>'
def test_json(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_1_5,
resource_usage=_DEMO_RUSAGE,
)
assert pr.json == {
'cpu_time': 2.0,
'exitcode': 0,
'max_memory': 134217728.0,
'real_time': 1.5,
'signal': None
}
@pytest.mark.unittest
class TestControlModelProcessKilled:
def test_properties(self):
pr = ProcessResult(
status=9,
start_time=_TIME_0_0,
end_time=_TIME_3_0,
resource_usage=_DEMO_RUSAGE,
)
assert pr.exitcode == 0
assert pr.signal_code == 9
assert pr.signal == signal.SIGKILL
assert not pr.ok
assert pr.start_time == _TIME_0_0
assert pr.end_time == _TIME_3_0
assert pr.real_time == 3.0
assert pr.resource_usage == _DEMO_RUSAGE
assert pr.cpu_time == 2.0
assert pr.system_time == 1.0
assert pr.max_memory == MiB(128).bytes
def test_repr(self):
pr = ProcessResult(
status=9,
start_time=_TIME_0_0,
end_time=_TIME_1_0,
resource_usage=_DEMO_RUSAGE,
)
assert repr(pr) == '<ProcessResult exitcode: 0, signal: SIGKILL, real time: 1.000s, ' \
'cpu time: 2.000s, max memory: 128.0 MiB>' | test/control/model/test_process.py | import resource
import signal
import time
import pytest
from bitmath import MiB
from pji.control.model import ProcessResult
_DEMO_RUSAGE = resource.struct_rusage((2.0, 1.0, 131072, 0, 0, 0, 2216, 0, 0, 0, 0, 0, 0, 0, 246, 129))
_TIME_0_0 = time.time()
_TIME_1_0 = _TIME_0_0 + 1.0
_TIME_1_5 = _TIME_0_0 + 1.5
_TIME_3_0 = _TIME_0_0 + 3.0
_TIME_5_5 = _TIME_0_0 + 5.5
@pytest.mark.unittest
class TestControlModelProcessNormal:
def test_properties(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_1_5,
resource_usage=_DEMO_RUSAGE,
)
assert pr.exitcode == 0
assert pr.signal_code == 0
assert pr.signal is None
assert pr.ok
assert pr.start_time == _TIME_0_0
assert pr.end_time == _TIME_1_5
assert pr.real_time == 1.5
assert pr.resource_usage == _DEMO_RUSAGE
assert pr.cpu_time == 2.0
assert pr.system_time == 1.0
assert pr.max_memory == MiB(128).bytes
def test_repr(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_5_5,
resource_usage=_DEMO_RUSAGE,
)
assert repr(pr) == '<ProcessResult exitcode: 0, real time: 5.500s, cpu time: 2.000s, max memory: 128.0 MiB>'
def test_json(self):
pr = ProcessResult(
status=0,
start_time=_TIME_0_0,
end_time=_TIME_1_5,
resource_usage=_DEMO_RUSAGE,
)
assert pr.json == {
'cpu_time': 2.0,
'exitcode': 0,
'max_memory': 134217728.0,
'real_time': 1.5,
'signal': None
}
@pytest.mark.unittest
class TestControlModelProcessKilled:
def test_properties(self):
pr = ProcessResult(
status=9,
start_time=_TIME_0_0,
end_time=_TIME_3_0,
resource_usage=_DEMO_RUSAGE,
)
assert pr.exitcode == 0
assert pr.signal_code == 9
assert pr.signal == signal.SIGKILL
assert not pr.ok
assert pr.start_time == _TIME_0_0
assert pr.end_time == _TIME_3_0
assert pr.real_time == 3.0
assert pr.resource_usage == _DEMO_RUSAGE
assert pr.cpu_time == 2.0
assert pr.system_time == 1.0
assert pr.max_memory == MiB(128).bytes
def test_repr(self):
pr = ProcessResult(
status=9,
start_time=_TIME_0_0,
end_time=_TIME_1_0,
resource_usage=_DEMO_RUSAGE,
)
assert repr(pr) == '<ProcessResult exitcode: 0, signal: SIGKILL, real time: 1.000s, ' \
'cpu time: 2.000s, max memory: 128.0 MiB>' | 0.488283 | 0.458894 |
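# --- Illustrative sketch (not part of the dataset row above) ---
# The assertions above imply that ProcessResult splits a raw os.wait()-style
# status word into an exit code and a terminating signal (status 9 -> SIGKILL
# with exit code 0). A Unix-only sketch of that decoding, independent of pji;
# decode_wait_status is a hypothetical helper:
import os
import signal

def decode_wait_status(status):
    if os.WIFSIGNALED(status):
        # Low bits carry the terminating signal; the exit code is reported as 0.
        return 0, signal.Signals(os.WTERMSIG(status))
    return os.WEXITSTATUS(status), None

print(decode_wait_status(0))    # (0, None)
print(decode_wait_status(9))    # (0, <Signals.SIGKILL: 9>)
print(decode_wait_status(256))  # (1, None) -> normal exit with code 1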
from unittest.mock import AsyncMock
import pytest
from pytest_mock.plugin import MockerFixture
from app.core.exceptions import TakeSnapshotError
from app.services.browser import Browser
from app.services.browsers.httpx import HttpxBrowser
from app.services.browsers.playwright import PlaywrightBrowser
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot():
browser = Browser()
result = await browser.take_snapshot("http://example.com")
snapshot = result.snapshot
assert snapshot.url == "http://example.com/"
assert snapshot.submitted_url == "http://example.com"
assert snapshot.hostname == "example.com"
assert snapshot.status == 200
assert snapshot.asn == "AS15133"
whois = result.whois
assert whois.content == "foo"
# har should be None
assert result.har is None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot_with_har():
browser = Browser(enable_har=True)
result = await browser.take_snapshot("http://example.com")
# har should be not None
assert result.har is not None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot_with_scripts():
browser = Browser()
result = await browser.take_snapshot("https://github.com/")
assert len(result.script_files) > 0
# it should record ip address
for script_file in result.script_files:
assert script_file.script.ip_address is not None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
async def test_take_snapshot_with_bad_ssl():
with pytest.raises(TakeSnapshotError):
browser = Browser()
result = await browser.take_snapshot("https://expired.badssl.com")
browser = Browser(ignore_https_errors=True)
result = await browser.take_snapshot(
"https://expired.badssl.com",
)
snapshot = result.snapshot
assert snapshot.url == "https://expired.badssl.com/"
@pytest.mark.asyncio
async def test_take_snapshot_httpx_fallback(mocker: MockerFixture):
mocker.patch(
"app.services.browsers.playwright.PlaywrightBrowser.take_snapshot", AsyncMock()
)
mocker.patch("app.services.browsers.httpx.HttpxBrowser.take_snapshot", AsyncMock())
# it should fallback to HTTPX if a host is given
browser = Browser(headers={"host": "example.com"})
await browser.take_snapshot("http://example.com")
PlaywrightBrowser.take_snapshot.assert_not_called()
HttpxBrowser.take_snapshot.assert_called_once() | tests/services/test_browser.py | from unittest.mock import AsyncMock
import pytest
from pytest_mock.plugin import MockerFixture
from app.core.exceptions import TakeSnapshotError
from app.services.browser import Browser
from app.services.browsers.httpx import HttpxBrowser
from app.services.browsers.playwright import PlaywrightBrowser
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot():
browser = Browser()
result = await browser.take_snapshot("http://example.com")
snapshot = result.snapshot
assert snapshot.url == "http://example.com/"
assert snapshot.submitted_url == "http://example.com"
assert snapshot.hostname == "example.com"
assert snapshot.status == 200
assert snapshot.asn == "AS15133"
whois = result.whois
assert whois.content == "foo"
# har should be None
assert result.har is None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot_with_har():
browser = Browser(enable_har=True)
result = await browser.take_snapshot("http://example.com")
# har should not be None
assert result.har is not None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
@pytest.mark.usefixtures("patch_certificate_load_from_url")
async def test_take_snapshot_with_scripts():
browser = Browser()
result = await browser.take_snapshot("https://github.com/")
assert len(result.script_files) > 0
# it should record ip address
for script_file in result.script_files:
assert script_file.script.ip_address is not None
@pytest.mark.asyncio
@pytest.mark.usefixtures("patch_whois_lookup")
@pytest.mark.usefixtures("patch_ip2asn_lookup")
async def test_take_snapshot_with_bad_ssl():
with pytest.raises(TakeSnapshotError):
browser = Browser()
result = await browser.take_snapshot("https://expired.badssl.com")
browser = Browser(ignore_https_errors=True)
result = await browser.take_snapshot(
"https://expired.badssl.com",
)
snapshot = result.snapshot
assert snapshot.url == "https://expired.badssl.com/"
@pytest.mark.asyncio
async def test_take_snapshot_httpx_fallback(mocker: MockerFixture):
mocker.patch(
"app.services.browsers.playwright.PlaywrightBrowser.take_snapshot", AsyncMock()
)
mocker.patch("app.services.browsers.httpx.HttpxBrowser.take_snapshot", AsyncMock())
# it should fallback to HTTPX if a host is given
browser = Browser(headers={"host": "example.com"})
await browser.take_snapshot("http://example.com")
PlaywrightBrowser.take_snapshot.assert_not_called()
HttpxBrowser.take_snapshot.assert_called_once() | 0.681091 | 0.452959 |
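The tests above rely on fixtures (patch_whois_lookup, patch_ip2asn_lookup, patch_certificate_load_from_url) that are defined elsewhere in the repository, presumably in a conftest.py. A minimal sketch of what such a fixture could look like with pytest-mock is shown below; the patched target app.services.whois.Whois.lookup and the shape of the fake return value are illustrative assumptions, not the project's actual API.
# Hypothetical conftest.py fixture in the spirit of "patch_whois_lookup".
# The patch target and the fake record are assumptions for illustration only.
import pytest
from unittest.mock import AsyncMock

@pytest.fixture
def patch_whois_lookup(mocker):
    fake_record = type("FakeWhois", (), {"content": "foo"})()
    # Replace the network-bound whois call with a canned async result
    mocker.patch("app.services.whois.Whois.lookup", AsyncMock(return_value=fake_record))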
import random
from pathlib import Path
import numpy as np
import textgrid
from scipy.io import wavfile
from vietTTS.nat.model import DurationModel
from .config import AcousticInput, DurationInput
def load_phonemes_set_from_lexicon_file(fn: Path):
S = set()
for line in open(fn, 'r').readlines():
word, phonemes = line.strip().lower().split('\t')
phonemes = phonemes.split()
S.update(phonemes)
S = ['sil', 'sp', 'spn'] + sorted(list(S))
return S
def pad_seq(s, maxlen, value=0):
assert maxlen >= len(s)
return tuple(s) + (value,) * (maxlen - len(s))
def load_textgrid(fn: Path):
tg = textgrid.TextGrid.fromFile(str(fn.resolve()))
data = []
for p in tg[1]:
data.append((p.mark.strip().lower(), p.duration()))
return data
def textgrid_data_loader(data_dir: Path, seq_len: int, batch_size: int, mode: str):
tg_files = sorted(data_dir.glob('*.TextGrid'))
random.Random(42).shuffle(tg_files)
L = len(tg_files) * 8 // 10
assert mode in ['train', 'val']
phonemes = load_phonemes_set_from_lexicon_file(data_dir / 'lexicon.txt')
if mode == 'train':
tg_files = tg_files[:L]
if mode == 'val':
tg_files = tg_files[L:]
data = []
for fn in tg_files:
ps, ds = zip(*load_textgrid(fn))
ps = [phonemes.index(p) for p in ps]
l = len(ps)
ps = pad_seq(ps, seq_len, 0)
ds = pad_seq(ds, seq_len, 0)
data.append((ps, ds, l))
batch = []
while True:
random.shuffle(data)
for e in data:
batch.append(e)
if len(batch) == batch_size:
ps, ds, lengths = zip(*batch)
ps = np.array(ps, dtype=np.int32)
ds = np.array(ds, dtype=np.float32) * 10
lengths = np.array(lengths, dtype=np.int32)
yield DurationInput(ps, lengths, ds)
batch = []
def load_textgrid_wav(data_dir: Path, token_seq_len: int, batch_size, pad_wav_len, mode: str):
tg_files = sorted(data_dir.glob('*.TextGrid'))
random.Random(42).shuffle(tg_files)
L = len(tg_files) * 8 // 10
assert mode in ['train', 'val']
phonemes = load_phonemes_set_from_lexicon_file(data_dir / 'lexicon.txt')
if mode == 'train':
tg_files = tg_files[:L]
if mode == 'val':
tg_files = tg_files[L:]
data = []
for fn in tg_files:
ps, ds = zip(*load_textgrid(fn))
ps = [phonemes.index(p) for p in ps]
l = len(ps)
ps = pad_seq(ps, token_seq_len, 0)
ds = pad_seq(ds, token_seq_len, 0)
wav_file = data_dir / f'{fn.stem}.wav'
sr, y = wavfile.read(wav_file)
if len(y) > pad_wav_len:
y = y[:pad_wav_len]
wav_length = len(y)
y = np.pad(y, (0, pad_wav_len - len(y)))
data.append((ps, ds, l, y, wav_length))
batch = []
while True:
random.shuffle(data)
for e in data:
batch.append(e)
if len(batch) == batch_size:
ps, ds, lengths, wavs, wav_lengths = zip(*batch)
ps = np.array(ps, dtype=np.int32)
ds = np.array(ds, dtype=np.float32) * 10
lengths = np.array(lengths, dtype=np.int32)
wavs = np.array(wavs)
wav_lengths = np.array(wav_lengths, dtype=np.int32)
yield AcousticInput(ps, lengths, ds, wavs, wav_lengths, None)
batch = [] | vietTTS/nat/data_loader.py | import random
from pathlib import Path
import numpy as np
import textgrid
from scipy.io import wavfile
from vietTTS.nat.model import DurationModel
from .config import AcousticInput, DurationInput
def load_phonemes_set_from_lexicon_file(fn: Path):
S = set()
for line in open(fn, 'r').readlines():
word, phonemes = line.strip().lower().split('\t')
phonemes = phonemes.split()
S.update(phonemes)
S = ['sil', 'sp', 'spn'] + sorted(list(S))
return S
def pad_seq(s, maxlen, value=0):
assert maxlen >= len(s)
return tuple(s) + (value,) * (maxlen - len(s))
def load_textgrid(fn: Path):
tg = textgrid.TextGrid.fromFile(str(fn.resolve()))
data = []
for p in tg[1]:
data.append((p.mark.strip().lower(), p.duration()))
return data
def textgrid_data_loader(data_dir: Path, seq_len: int, batch_size: int, mode: str):
tg_files = sorted(data_dir.glob('*.TextGrid'))
random.Random(42).shuffle(tg_files)
L = len(tg_files) * 8 // 10
assert mode in ['train', 'val']
phonemes = load_phonemes_set_from_lexicon_file(data_dir / 'lexicon.txt')
if mode == 'train':
tg_files = tg_files[:L]
if mode == 'val':
tg_files = tg_files[L:]
data = []
for fn in tg_files:
ps, ds = zip(*load_textgrid(fn))
ps = [phonemes.index(p) for p in ps]
l = len(ps)
ps = pad_seq(ps, seq_len, 0)
ds = pad_seq(ds, seq_len, 0)
data.append((ps, ds, l))
batch = []
while True:
random.shuffle(data)
for e in data:
batch.append(e)
if len(batch) == batch_size:
ps, ds, lengths = zip(*batch)
ps = np.array(ps, dtype=np.int32)
ds = np.array(ds, dtype=np.float32) * 10
lengths = np.array(lengths, dtype=np.int32)
yield DurationInput(ps, lengths, ds)
batch = []
def load_textgrid_wav(data_dir: Path, token_seq_len: int, batch_size, pad_wav_len, mode: str):
tg_files = sorted(data_dir.glob('*.TextGrid'))
random.Random(42).shuffle(tg_files)
L = len(tg_files) * 8 // 10
assert mode in ['train', 'val']
phonemes = load_phonemes_set_from_lexicon_file(data_dir / 'lexicon.txt')
if mode == 'train':
tg_files = tg_files[:L]
if mode == 'val':
tg_files = tg_files[L:]
data = []
for fn in tg_files:
ps, ds = zip(*load_textgrid(fn))
ps = [phonemes.index(p) for p in ps]
l = len(ps)
ps = pad_seq(ps, token_seq_len, 0)
ds = pad_seq(ds, token_seq_len, 0)
wav_file = data_dir / f'{fn.stem}.wav'
sr, y = wavfile.read(wav_file)
if len(y) > pad_wav_len:
y = y[:pad_wav_len]
wav_length = len(y)
y = np.pad(y, (0, pad_wav_len - len(y)))
data.append((ps, ds, l, y, wav_length))
batch = []
while True:
random.shuffle(data)
for e in data:
batch.append(e)
if len(batch) == batch_size:
ps, ds, lengths, wavs, wav_lengths = zip(*batch)
ps = np.array(ps, dtype=np.int32)
ds = np.array(ds, dtype=np.float32) * 10
lengths = np.array(lengths, dtype=np.int32)
wavs = np.array(wavs)
wav_lengths = np.array(wav_lengths, dtype=np.int32)
yield AcousticInput(ps, lengths, ds, wavs, wav_lengths, None)
batch = [] | 0.410756 | 0.352035 |
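Both loaders above are infinite generators, so a caller pulls batches with next(). A minimal usage sketch for the duration loader follows; './train_data' is a placeholder path, and it assumes the directory holds the *.TextGrid files plus lexicon.txt that the code expects.
from pathlib import Path

# Usage sketch: build the duration-data generator and fetch one padded batch.
# batch is a DurationInput built from (phoneme ids, sequence lengths, durations scaled by 10).
loader = textgrid_data_loader(Path('./train_data'), seq_len=128, batch_size=8, mode='train')
batch = next(loader)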
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.NewsAggregationValue import NewsAggregationValue
class NewsEntityAggregation(object):
def __init__(self):
self._cows = None
self._ogws = None
self._ppws = None
@property
def cows(self):
return self._cows
@cows.setter
def cows(self, value):
if isinstance(value, list):
self._cows = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._cows.append(i)
else:
self._cows.append(NewsAggregationValue.from_alipay_dict(i))
@property
def ogws(self):
return self._ogws
@ogws.setter
def ogws(self, value):
if isinstance(value, list):
self._ogws = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._ogws.append(i)
else:
self._ogws.append(NewsAggregationValue.from_alipay_dict(i))
@property
def ppws(self):
return self._ppws
@ppws.setter
def ppws(self, value):
if isinstance(value, list):
self._ppws = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._ppws.append(i)
else:
self._ppws.append(NewsAggregationValue.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.cows:
if isinstance(self.cows, list):
for i in range(0, len(self.cows)):
element = self.cows[i]
if hasattr(element, 'to_alipay_dict'):
self.cows[i] = element.to_alipay_dict()
if hasattr(self.cows, 'to_alipay_dict'):
params['cows'] = self.cows.to_alipay_dict()
else:
params['cows'] = self.cows
if self.ogws:
if isinstance(self.ogws, list):
for i in range(0, len(self.ogws)):
element = self.ogws[i]
if hasattr(element, 'to_alipay_dict'):
self.ogws[i] = element.to_alipay_dict()
if hasattr(self.ogws, 'to_alipay_dict'):
params['ogws'] = self.ogws.to_alipay_dict()
else:
params['ogws'] = self.ogws
if self.ppws:
if isinstance(self.ppws, list):
for i in range(0, len(self.ppws)):
element = self.ppws[i]
if hasattr(element, 'to_alipay_dict'):
self.ppws[i] = element.to_alipay_dict()
if hasattr(self.ppws, 'to_alipay_dict'):
params['ppws'] = self.ppws.to_alipay_dict()
else:
params['ppws'] = self.ppws
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = NewsEntityAggregation()
if 'cows' in d:
o.cows = d['cows']
if 'ogws' in d:
o.ogws = d['ogws']
if 'ppws' in d:
o.ppws = d['ppws']
return o | alipay/aop/api/domain/NewsEntityAggregation.py | import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.NewsAggregationValue import NewsAggregationValue
class NewsEntityAggregation(object):
def __init__(self):
self._cows = None
self._ogws = None
self._ppws = None
@property
def cows(self):
return self._cows
@cows.setter
def cows(self, value):
if isinstance(value, list):
self._cows = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._cows.append(i)
else:
self._cows.append(NewsAggregationValue.from_alipay_dict(i))
@property
def ogws(self):
return self._ogws
@ogws.setter
def ogws(self, value):
if isinstance(value, list):
self._ogws = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._ogws.append(i)
else:
self._ogws.append(NewsAggregationValue.from_alipay_dict(i))
@property
def ppws(self):
return self._ppws
@ppws.setter
def ppws(self, value):
if isinstance(value, list):
self._ppws = list()
for i in value:
if isinstance(i, NewsAggregationValue):
self._ppws.append(i)
else:
self._ppws.append(NewsAggregationValue.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.cows:
if isinstance(self.cows, list):
for i in range(0, len(self.cows)):
element = self.cows[i]
if hasattr(element, 'to_alipay_dict'):
self.cows[i] = element.to_alipay_dict()
if hasattr(self.cows, 'to_alipay_dict'):
params['cows'] = self.cows.to_alipay_dict()
else:
params['cows'] = self.cows
if self.ogws:
if isinstance(self.ogws, list):
for i in range(0, len(self.ogws)):
element = self.ogws[i]
if hasattr(element, 'to_alipay_dict'):
self.ogws[i] = element.to_alipay_dict()
if hasattr(self.ogws, 'to_alipay_dict'):
params['ogws'] = self.ogws.to_alipay_dict()
else:
params['ogws'] = self.ogws
if self.ppws:
if isinstance(self.ppws, list):
for i in range(0, len(self.ppws)):
element = self.ppws[i]
if hasattr(element, 'to_alipay_dict'):
self.ppws[i] = element.to_alipay_dict()
if hasattr(self.ppws, 'to_alipay_dict'):
params['ppws'] = self.ppws.to_alipay_dict()
else:
params['ppws'] = self.ppws
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = NewsEntityAggregation()
if 'cows' in d:
o.cows = d['cows']
if 'ogws' in d:
o.ogws = d['ogws']
if 'ppws' in d:
o.ppws = d['ppws']
return o | 0.426202 | 0.070848 |
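A small round-trip sketch for the class above: from_alipay_dict accepts plain dicts for each list and converts every entry through NewsAggregationValue.from_alipay_dict, while to_alipay_dict folds the objects back into dicts. The inner keys ('word', 'weight') are invented sample data, not the real NewsAggregationValue schema.
# Round-trip sketch with invented sample values.
payload = {
    'cows': [{'word': 'finance', 'weight': 0.8}],
    'ogws': [{'word': 'sports', 'weight': 0.3}],
}
agg = NewsEntityAggregation.from_alipay_dict(payload)
print(agg.to_alipay_dict())  # nested objects are serialised back to plain dicts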
'''ModelArts model v2 action implementations'''
import logging
from osc_lib import utils
from osc_lib.command import command
from otcextensions.common import sdk_utils
from otcextensions.i18n import _
LOG = logging.getLogger(__name__)
def _flatten_output(obj):
data = {
'model_name': obj.model_name,
'model_type': obj.model_type,
'model_version': obj.model_version,
'model_id': obj.model_id,
'model_size': obj.model_size,
'description': obj.description
}
return data
def _get_columns(item):
column_map = {
}
return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
class DeleteModel(command.Command):
_description = _('Delete ModelArts Model')
def get_parser(self, prog_name):
parser = super(DeleteModel, self).get_parser(prog_name)
parser.add_argument(
'model_id',
metavar='<model_id>',
help=_('ID of the model to delete.')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
client.delete_model(model=parsed_args.model_id, ignore_missing=False)
class CreateModel(command.ShowOne):
_description = _('Create a ModelArts model')
def get_parser(self, prog_name):
parser = super(CreateModel, self).get_parser(prog_name)
parser.add_argument(
'--model_name',
metavar='<model_name>',
required=True,
help=_('Model name. The value can contain 1 to 64 visible characters, '
'including Chinese characters. Only letters, Chinese characters, digits, '
'hyphens (-), and underscores (_) are allowed.')
)
parser.add_argument(
'--model_version',
metavar='<model_version>',
required=True,
help=_('Model version in the format of Digit.Digit.Digit. '
'The value range of the digits is [1, 99]. '
'Note that no part of the version number can start with 0. '
'For example, 01.01.01 is not allowed.')
)
parser.add_argument(
'--source_location',
metavar='<source_location>',
required=True,
help=_('OBS path where the model is located or the template address of the SWR image')
)
parser.add_argument(
'--source_job_id',
metavar='<source_job_id>',
help=_('ID of the source training job. If the model is generated from a training job, '
'input this parameter for source tracing. '
'If the model is imported from a third-party meta model, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--source_job_version',
metavar='<source_job_version>',
help=_('Version of the source training job. If the model is generated from a training job, '
'input this parameter for source tracing. '
'If the model is imported from a third-party meta model, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--source_type',
metavar='<source_type>',
help=_('Model source type. Currently, the value can only be auto, '
'which indicates ExeML models (model download is not supported). '
'If the model is deployed by a training job, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--model_type',
metavar='<model_type>',
required=True,
help=_('Model type. The value can be TensorFlow, MXNet, Caffe, Spark_MLlib, '
'Scikit_Learn, XGBoost, Image, or PyTorch, which is read from the configuration file.')
)
parser.add_argument(
'--runtime',
metavar='<runtime>',
help=_('Model running environment. The possible values of runtime are related to model_type.')
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Model remarks. The value contains a maximum of 100 characters and '
'cannot contain the following special characters and more: &!\'\"<>= ')
)
parser.add_argument(
'--execution_code',
metavar='<execution_code>',
help=_('OBS path for storing the execution code. By default, this parameter is left blank.'
' The name of the execution code file is fixed to customize_service.py. ')
)
parser.add_argument(
'--input_params',
metavar='<input_params>',
help=_('Collection of input parameters of a model. By default, this parameter is left blank.')
)
parser.add_argument(
'--output_params',
metavar='<output_params>',
help=_('Collection of output parameters of a model. By default, this parameter is left blank.')
)
parser.add_argument(
'--dependencies',
metavar='<dependencies>',
help=_('Package required for inference code and model. By default, this parameter is left blank.')
)
parser.add_argument(
'--model_algorithm',
metavar='<model_algorithm>',
help=_('Model algorithm. If the algorithm is read from the configuration file, '
'this parameter can be left blank. For example, the value can be predict_analysis,'
' object_detection, or image_classification. ')
)
parser.add_argument(
'--model_metrics',
metavar='<model_metrics>',
help=_('Model precision, which is read from the configuration file ')
)
parser.add_argument(
'--apis',
metavar='<apis>',
help=_('All apis input and output parameters of the model. '
'If the parameters are read from the configuration file, this parameter can be left blank.')
)
parser.add_argument(
'--initial_config',
metavar='<initial_config>',
help=_('Character string converted from the final model configuration file. '
'It is recommended that the initial_config file be used to provide information'
' about the fields such as apis, dependencies, input_params, and output_params.')
)
parser.add_argument(
'--workspace_id',
metavar='<workspace_id>',
help=_('Workspace ID. Default value: 0')
)
parser.add_argument(
'--model_docs',
metavar='<model_docs>',
help=_('List of model description documents. A maximum of three documents are supported.')
)
parser.add_argument(
'--install_type',
metavar='<install_type>',
help=_('Deployment type. Only lowercase letters are supported. '
'The value can be real-time, or batch. Default value: ["real-time","batch"]')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
attrs = {}
if parsed_args.model_name:
attrs['model_name'] = parsed_args.model_name
if parsed_args.model_version:
attrs['model_version'] = parsed_args.model_version
if parsed_args.source_location:
attrs['source_location'] = parsed_args.source_location
if parsed_args.source_job_id:
attrs['source_job_id'] = parsed_args.source_job_id
if parsed_args.source_job_version:
attrs['source_job_version'] = parsed_args.source_job_version
if parsed_args.source_type:
attrs['source_type'] = parsed_args.source_type
if parsed_args.model_type:
attrs['model_type'] = parsed_args.model_type
if parsed_args.runtime:
attrs['runtime'] = parsed_args.runtime
if parsed_args.description:
attrs['description'] = parsed_args.description
if parsed_args.execution_code:
attrs['execution_code'] = parsed_args.execution_code
if parsed_args.input_params:
attrs['input_params'] = parsed_args.input_params
if parsed_args.output_params:
attrs['output_params'] = parsed_args.output_params
if parsed_args.dependencies:
attrs['dependencies'] = parsed_args.dependencies
if parsed_args.model_algorithm:
attrs['model_algorithm'] = parsed_args.model_algorithm
if parsed_args.model_metrics:
attrs['model_metrics'] = parsed_args.model_metrics
if parsed_args.apis:
attrs['apis'] = parsed_args.apis
if parsed_args.initial_config:
attrs['initial_config'] = parsed_args.initial_config
if parsed_args.workspace_id:
attrs['workspace_id'] = parsed_args.workspace_id
if parsed_args.model_docs:
attrs['model_docs'] = parsed_args.model_docs
if parsed_args.install_type:
attrs['install_type'] = parsed_args.install_type
obj = client.create_model(**attrs)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns)
return (display_columns, data)
class ShowModel(command.ShowOne):
_description = _('Show details of a modelarts model')
def get_parser(self, prog_name):
parser = super(ShowModel, self).get_parser(prog_name)
parser.add_argument(
'model_id',
metavar='<model_id>',
help=_('Enter model id')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
data = client.show_model(
model=parsed_args.model_id,
)
display_columns, columns = _get_columns(data)
data = utils.get_item_properties(data, columns)
return (display_columns, data)
class Models(command.Lister):
_description = _('Get properties of a model')
columns = (
'model_id',
'model_name',
'model_version',
'model_size',
'description',
'dimensions',
'metric_name',
'unit',
)
table_columns = (
'model_name',
'dimensions.name',
'dimensions.value',
'metric_name',
'unit',
)
def get_parser(self, prog_name):
parser = super(Models, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
query = {}
data = client.models(**query)
table = (self.columns,
(utils.get_dict_properties(
_flatten_output(s), self.columns
) for s in data))
return table | otcextensions/osclient/modelarts/v1/models.py | '''ModelArts model v2 action implementations'''
import logging
from osc_lib import utils
from osc_lib.command import command
from otcextensions.common import sdk_utils
from otcextensions.i18n import _
LOG = logging.getLogger(__name__)
def _flatten_output(obj):
data = {
'model_name': obj.model_name,
'model_type': obj.model_type,
'model_version': obj.model_version,
'model_id': obj.model_id,
'model_size': obj.model_size,
'description': obj.description
}
return data
def _get_columns(item):
column_map = {
}
return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
class DeleteModel(command.Command):
_description = _('Delete ModelArts Model')
def get_parser(self, prog_name):
parser = super(DeleteModel, self).get_parser(prog_name)
parser.add_argument(
'model_id',
metavar='<model_id>',
help=_('ID of the model to delete.')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
client.delete_model(model=parsed_args.model_id, ignore_missing=False)
class CreateModel(command.ShowOne):
_description = _('Create a ModelArts model')
def get_parser(self, prog_name):
parser = super(CreateModel, self).get_parser(prog_name)
parser.add_argument(
'--model_name',
metavar='<model_name>',
required=True,
help=_('Model name. The value can contain 1 to 64 visible characters, '
'including Chinese characters. Only letters, Chinese characters, digits, '
'hyphens (-), and underscores (_) are allowed.')
)
parser.add_argument(
'--model_version',
metavar='<model_version>',
required=True,
help=_('Model version in the format of Digit.Digit.Digit. '
'The value range of the digits is [1, 99]. '
'Note that no part of the version number can start with 0. '
'For example, 01.01.01 is not allowed.')
)
parser.add_argument(
'--source_location',
metavar='<source_location>',
required=True,
help=_('OBS path where the model is located or the template address of the SWR image')
)
parser.add_argument(
'--source_job_id',
metavar='<source_job_id>',
help=_('ID of the source training job. If the model is generated from a training job, '
'input this parameter for source tracing. '
'If the model is imported from a third-party meta model, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--source_job_version',
metavar='<source_job_version>',
help=_('Version of the source training job. If the model is generated from a training job, '
'input this parameter for source tracing. '
'If the model is imported from a third-party meta model, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--source_type',
metavar='<source_type>',
help=_('Model source type. Currently, the value can only be auto, '
'which indicates ExeML models (model download is not supported). '
'If the model is deployed by a training job, leave this parameter blank. '
'By default, this parameter is left blank.')
)
parser.add_argument(
'--model_type',
metavar='<model_type>',
required=True,
help=_('Model type. The value can be TensorFlow, MXNet, Caffe, Spark_MLlib, '
'Scikit_Learn, XGBoost, Image, or PyTorch, which is read from the configuration file.')
)
parser.add_argument(
'--runtime',
metavar='<runtime>',
help=_('Model running environment. The possible values of runtime are related to model_type.')
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('Model remarks. The value contains a maximum of 100 characters and '
'cannot contain the following special characters and more: &!\'\"<>= ')
)
parser.add_argument(
'--execution_code',
metavar='<execution_code>',
help=_('OBS path for storing the execution code. By default, this parameter is left blank.'
' The name of the execution code file is fixed to customize_service.py. ')
)
parser.add_argument(
'--input_params',
metavar='<input_params>',
help=_('Collection of input parameters of a model. By default, this parameter is left blank.')
)
parser.add_argument(
'--output_params',
metavar='<output_params>',
help=_('Collection of output parameters of a model. By default, this parameter is left blank.')
)
parser.add_argument(
'--dependencies',
metavar='<dependencies>',
help=_('Package required for inference code and model. By default, this parameter is left blank.')
)
parser.add_argument(
'--model_algorithm',
metavar='<model_algorithm>',
help=_('Model algorithm. If the algorithm is read from the configuration file, '
'this parameter can be left blank. For example, the value can be predict_analysis,'
' object_detection, or image_classification. ')
)
parser.add_argument(
'--model_metrics',
metavar='<model_metrics>',
help=_('Model precision, which is read from the configuration file ')
)
parser.add_argument(
'--apis',
metavar='<apis>',
help=_('All apis input and output parameters of the model. '
'If the parameters are read from the configuration file, this parameter can be left blank.')
)
parser.add_argument(
'--initial_config',
metavar='<initial_config>',
help=_('Character string converted from the final model configuration file. '
'It is recommended that the initial_config file be used to provide information'
' about the fields such as apis, dependencies, input_params, and output_params.')
)
parser.add_argument(
'--workspace_id',
metavar='<workspace_id>',
help=_('Workspace ID. Default value: 0')
)
parser.add_argument(
'--model_docs',
metavar='<model_docs>',
help=_('List of model description documents. A maximum of three documents are supported.')
)
parser.add_argument(
'--install_type',
metavar='<install_type>',
help=_('Deployment type. Only lowercase letters are supported. '
'The value can be real-time, or batch. Default value: ["real-time","batch"]')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
attrs = {}
if parsed_args.model_name:
attrs['model_name'] = parsed_args.model_name
if parsed_args.model_version:
attrs['model_version'] = parsed_args.model_version
if parsed_args.source_location:
attrs['source_location'] = parsed_args.source_location
if parsed_args.source_job_id:
attrs['source_job_id'] = parsed_args.source_job_id
if parsed_args.source_job_version:
attrs['source_job_version'] = parsed_args.source_job_version
if parsed_args.source_type:
attrs['source_type'] = parsed_args.source_type
if parsed_args.model_type:
attrs['model_type'] = parsed_args.model_type
if parsed_args.runtime:
attrs['runtime'] = parsed_args.runtime
if parsed_args.description:
attrs['description'] = parsed_args.description
if parsed_args.execution_code:
attrs['execution_code'] = parsed_args.execution_code
if parsed_args.input_params:
attrs['input_params'] = parsed_args.input_params
if parsed_args.output_params:
attrs['output_params'] = parsed_args.output_params
if parsed_args.dependencies:
attrs['dependencies'] = parsed_args.dependencies
if parsed_args.model_algorithm:
attrs['model_algorithm'] = parsed_args.model_algorithm
if parsed_args.model_metrics:
attrs['model_metrics'] = parsed_args.model_metrics
if parsed_args.apis:
attrs['apis'] = parsed_args.apis
if parsed_args.initial_config:
attrs['initial_config'] = parsed_args.initial_config
if parsed_args.workspace_id:
attrs['workspace_id'] = parsed_args.workspace_id
if parsed_args.model_docs:
attrs['model_docs'] = parsed_args.model_docs
if parsed_args.install_type:
attrs['install_type'] = parsed_args.install_type
obj = client.create_model(**attrs)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns)
return (display_columns, data)
class ShowModel(command.ShowOne):
_description = _('Show details of a modelarts model')
def get_parser(self, prog_name):
parser = super(ShowModel, self).get_parser(prog_name)
parser.add_argument(
'model_id',
metavar='<model_id>',
help=_('Enter model id')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
data = client.show_model(
model=parsed_args.model_id,
)
display_columns, columns = _get_columns(data)
data = utils.get_item_properties(data, columns)
return (display_columns, data)
class Models(command.Lister):
_description = _('Get properties of a model')
columns = (
'model_id',
'model_name',
'model_version',
'model_size',
'description',
'dimensions',
'metric_name',
'unit',
)
table_columns = (
'model_name',
'dimensions.name',
'dimensions.value',
'metric_name',
'unit',
)
def get_parser(self, prog_name):
parser = super(Models, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.modelarts
query = {}
data = client.models(**query)
table = (self.columns,
(utils.get_dict_properties(
_flatten_output(s), self.columns
) for s in data))
return table | 0.717408 | 0.149314 |
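To see how the Models lister shapes each SDK resource into a table row, the sketch below feeds _flatten_output a stand-in object; SimpleNamespace only needs the six attributes the helper reads. Columns listed in Models.columns but absent from the flattened dict (such as dimensions or metric_name) simply render as blanks. The attribute values used here are invented sample data.
from types import SimpleNamespace

# Stand-in for an SDK model resource; only the attributes read by _flatten_output matter.
sdk_model = SimpleNamespace(model_name='resnet50', model_type='TensorFlow',
                            model_version='1.0.1', model_id='abc123',
                            model_size=2048, description='demo model')
row = _flatten_output(sdk_model)
print(row['model_name'], row['model_size'])  # -> resnet50 2048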
import argparse
import unittest
from unittest import mock
import skelebot as sb
class TestExecutor(unittest.TestCase):
@mock.patch('skelebot.systems.execution.executor.print')
@mock.patch('skelebot.systems.parsing.skeleParser')
@mock.patch('skelebot.systems.execution.executor.VERSION', '6.6.6')
def test_execute_version(self, mock_skeleParser, mock_print):
config = sb.objects.config.Config()
args = argparse.Namespace(job=None, version_global=True)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_print.assert_called_with("Skelebot v6.6.6")
@mock.patch('skelebot.systems.execution.executor.print')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_contact(self, mock_skeleParser, mock_print):
config = sb.objects.config.Config(contact="<EMAIL>")
args = argparse.Namespace(job=None, contact_global=True)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_print.assert_called_with("<EMAIL>")
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_help(self, mock_skeleParser):
config = sb.objects.config.Config()
args = argparse.Namespace(job=None)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_skeleParser.showHelp.assert_called_once()
@mock.patch('skelebot.systems.execution.executor.scaffold')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_scaffold(self, mock_skeleParser, mock_scaffold):
config = sb.objects.config.Config()
args = argparse.Namespace(job="scaffold", existing=False)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_scaffold.assert_called_once_with(False)
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_skip(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host=None)
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_global(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host1")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host1")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_job(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", host="host2")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host2")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host2")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_param(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", host="host2")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False, host="host3")
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host3")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host3")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_ports(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", ports=["10:10", "20:20"])
config = sb.objects.config.Config(jobs=[job], ports=["30:30", "10:10"])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host=None)
mock_run.assert_called_once_with(config, "python -u test.py", "i", ["10:10", "20:20", "30:30"], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.call')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_native(self, mock_skeleParser, mock_call):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_call.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_call.assert_called_once_with("python -u test.py", shell=True)
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_component(self, mock_skeleParser):
mock_component = mock.MagicMock()
mock_component.commands = ["test"]
config = sb.objects.config.Config(components=[mock_component])
args = argparse.Namespace(job="test")
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_component.execute.assert_called_once_with(config, args, host=None)
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_chain(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser, ["test", "+", "test"])
test_call = mock.call(config, "python -u test.py", "i", [], [], "test", host=None)
mock_run.assert_has_calls([test_call, test_call])
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_chain_fail(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 1
try:
sb.systems.execution.executor.execute(config, mock_skeleParser, ["test", "+", "test"])
self.fail('exception expected')
except SystemExit:
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
if __name__ == '__main__':
unittest.main() | test/test_systems_execution_executor.py | import argparse
import unittest
from unittest import mock
import skelebot as sb
class TestExecutor(unittest.TestCase):
@mock.patch('skelebot.systems.execution.executor.print')
@mock.patch('skelebot.systems.parsing.skeleParser')
@mock.patch('skelebot.systems.execution.executor.VERSION', '6.6.6')
def test_execute_version(self, mock_skeleParser, mock_print):
config = sb.objects.config.Config()
args = argparse.Namespace(job=None, version_global=True)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_print.assert_called_with("Skelebot v6.6.6")
@mock.patch('skelebot.systems.execution.executor.print')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_contact(self, mock_skeleParser, mock_print):
config = sb.objects.config.Config(contact="<EMAIL>")
args = argparse.Namespace(job=None, contact_global=True)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_print.assert_called_with("<EMAIL>")
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_help(self, mock_skeleParser):
config = sb.objects.config.Config()
args = argparse.Namespace(job=None)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_skeleParser.showHelp.assert_called_once()
@mock.patch('skelebot.systems.execution.executor.scaffold')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_scaffold(self, mock_skeleParser, mock_scaffold):
config = sb.objects.config.Config()
args = argparse.Namespace(job="scaffold", existing=False)
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_scaffold.assert_called_once_with(False)
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_skip(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host=None)
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_global(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host1")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host1")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_job(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", host="host2")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host2")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host2")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_host_param(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", host="host2")
config = sb.objects.config.Config(jobs=[job], host="host1")
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False, host="host3")
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host="host3")
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host="host3")
@mock.patch('skelebot.systems.execution.executor.buildDocker')
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_ports(self, mock_skeleParser, mock_run, mock_build):
job = sb.objects.job.Job(name="test", source="test.py", ports=["10:10", "20:20"])
config = sb.objects.config.Config(jobs=[job], ports=["30:30", "10:10"])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=False)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_build.assert_called_once_with(config, host=None)
mock_run.assert_called_once_with(config, "python -u test.py", "i", ["10:10", "20:20", "30:30"], [], "test", host=None)
@mock.patch('skelebot.systems.execution.executor.call')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_job_native(self, mock_skeleParser, mock_call):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_call.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_call.assert_called_once_with("python -u test.py", shell=True)
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_component(self, mock_skeleParser):
mock_component = mock.MagicMock()
mock_component.commands = ["test"]
config = sb.objects.config.Config(components=[mock_component])
args = argparse.Namespace(job="test")
mock_skeleParser.parseArgs.return_value = args
sb.systems.execution.executor.execute(config, mock_skeleParser)
mock_component.execute.assert_called_once_with(config, args, host=None)
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_chain(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 0
sb.systems.execution.executor.execute(config, mock_skeleParser, ["test", "+", "test"])
test_call = mock.call(config, "python -u test.py", "i", [], [], "test", host=None)
mock_run.assert_has_calls([test_call, test_call])
@mock.patch('skelebot.systems.execution.executor.runDocker')
@mock.patch('skelebot.systems.parsing.skeleParser')
def test_execute_chain_fail(self, mock_skeleParser, mock_run):
job = sb.objects.job.Job(name="test", source="test.py")
config = sb.objects.config.Config(jobs=[job])
args = argparse.Namespace(job="test", native_global=False, skip_build_global=True)
mock_skeleParser.parseArgs.return_value = args
mock_run.return_value = 1
try:
sb.systems.execution.executor.execute(config, mock_skeleParser, ["test", "+", "test"])
self.fail('exception expected')
except SystemExit:
mock_run.assert_called_once_with(config, "python -u test.py", "i", [], [], "test", host=None)
if __name__ == '__main__':
unittest.main() | 0.630116 | 0.443781 |
import math
import os
import sys
if(len(sys.argv)<2):
print "execution : $ python FHT.py <path to directory containing files>"
exit()
# path of the directory of files for generating the header/trailer matrices (taken from the command line)
path = sys.argv[1]
#path = "/Users/charanshampur/newAwsDump/dumpedContents/application/rdf+xml"
#path = "/Users/charanshampur/newAwsDump/dumpedContents/additional_test_files/gif/base_files"
# CSV output files for the header and trailer byte-frequency matrices
fhtFile4 = open("head4application_rdfXml.csv", "w")
fhtFile8 = open("head8application_rdfXml.csv", "w")
fhtFile16 = open("head16application_rdfXml.csv", "w")
fttFile4 = open("tail4application_rdfXml.csv", "w")
fttFile8 = open("tail8application_rdfXml.csv", "w")
fttFile16 = open("tail16application_rdfXml.csv", "w")
fttTable={}
fhtTable={}
for i in range (0,16):
fhtTable[i]=[0] * 256
fttTable[i]=[0] * 256
numberOfFiles = 0
def bytes2int(str):
return int(str.encode('hex'), 16)
# Traverse through each file in the repository and calculates the file header and trailer byte distribution
# matrix
for path, dirs, files in os.walk(path):
for file in files:
if file == ".DS_Store":
continue
else:
bytesDict = {}
path_to_file = path+"/"+str(file)
print path_to_file
filePointer = open(path_to_file,"rb")
bytesRead = filePointer.read()
if(len(bytesRead)==0):
continue
if(len(bytesRead)>16):
headEnd = 16
tailEndBlock = len(bytesRead) - 17
else:
headEnd = len(bytesRead)
tailEndBlock = -1
for i in range(0,headEnd):
byteStr = bytesRead[i]
byte = bytes2int(byteStr)
fhtTable[i][byte]+=1
for i in range(len(bytesRead)-1,tailEndBlock,-1):
byteStr=bytesRead[i]
byte = bytes2int(byteStr)
fttTable[len(bytesRead)-i-1][byte]+=1
numberOfFiles+=1
for key in fhtTable.keys():
fhtTable[key] = [math.sqrt(float(x)/float(numberOfFiles)) for x in fhtTable[key]]
fttTable[key] = [math.sqrt(float(x)/float(numberOfFiles)) for x in fttTable[key]]
header = ","+",".join(["Byte Value : "+ str(x) for x in range(0,256)])
fhtFile4.write(header+"\n")
fhtFile8.write(header+"\n")
fhtFile16.write(header+"\n")
fttFile4.write(header+"\n")
fttFile8.write(header+"\n")
fttFile16.write(header+"\n")
#Writes the header and trailer byte matrix to csv files.
for key in fhtTable.keys():
row = str(key)+" Header Byte,"+",".join([str(x) for x in fhtTable[key]])
if int(key) < 4 :
fhtFile4.write(row+"\n")
if int(key) < 8 :
fhtFile8.write(row+"\n")
fhtFile16.write(row+"\n")
for key in fttTable.keys():
row = str(key)+" Trailer Byte,"+",".join([str(x) for x in fttTable[key]])
if int(key) < 4 :
fttFile4.write(row+"\n")
if int(key) < 8 :
fttFile8.write(row+"\n")
fttFile16.write(row+"\n") | 6.FHT/FHT.py | import math
import os
import sys
if(len(sys.argv)<2):
print "execution : $ python FHT.py <path to directory containing files>"
exit()
# path of the directory of files for generating the header/trailer matrices (taken from the command line)
path = sys.argv[1]
#path = "/Users/charanshampur/newAwsDump/dumpedContents/application/rdf+xml"
#path = "/Users/charanshampur/newAwsDump/dumpedContents/additional_test_files/gif/base_files"
# CSV output files for the header and trailer byte-frequency matrices
fhtFile4 = open("head4application_rdfXml.csv", "w")
fhtFile8 = open("head8application_rdfXml.csv", "w")
fhtFile16 = open("head16application_rdfXml.csv", "w")
fttFile4 = open("tail4application_rdfXml.csv", "w")
fttFile8 = open("tail8application_rdfXml.csv", "w")
fttFile16 = open("tail16application_rdfXml.csv", "w")
fttTable={}
fhtTable={}
for i in range (0,16):
fhtTable[i]=[0] * 256
fttTable[i]=[0] * 256
numberOfFiles = 0
def bytes2int(str):
return int(str.encode('hex'), 16)
# Traverse through each file in the repository and calculates the file header and trailer byte distribution
# matrix
for path, dirs, files in os.walk(path):
for file in files:
if file == ".DS_Store":
continue
else:
bytesDict = {}
path_to_file = path+"/"+str(file)
print path_to_file
filePointer = open(path_to_file,"rb")
bytesRead = filePointer.read()
if(len(bytesRead)==0):
continue
if(len(bytesRead)>16):
headEnd = 16
tailEndBlock = len(bytesRead) - 17
else:
headEnd = len(bytesRead)
tailEndBlock = -1
for i in range(0,headEnd):
byteStr = bytesRead[i]
byte = bytes2int(byteStr)
fhtTable[i][byte]+=1
for i in range(len(bytesRead)-1,tailEndBlock,-1):
byteStr=bytesRead[i]
byte = bytes2int(byteStr)
fttTable[len(bytesRead)-i-1][byte]+=1
numberOfFiles+=1
for key in fhtTable.keys():
fhtTable[key] = [math.sqrt(float(x)/float(numberOfFiles)) for x in fhtTable[key]]
fttTable[key] = [math.sqrt(float(x)/float(numberOfFiles)) for x in fttTable[key]]
header = ","+",".join(["Byte Value : "+ str(x) for x in range(0,256)])
fhtFile4.write(header+"\n")
fhtFile8.write(header+"\n")
fhtFile16.write(header+"\n")
fttFile4.write(header+"\n")
fttFile8.write(header+"\n")
fttFile16.write(header+"\n")
#Writes the header and trailer byte matrix to csv files.
for key in fhtTable.keys():
row = str(key)+" Header Byte,"+",".join([str(x) for x in fhtTable[key]])
if int(key) < 4 :
fhtFile4.write(row+"\n")
if int(key) < 8 :
fhtFile8.write(row+"\n")
fhtFile16.write(row+"\n")
for key in fttTable.keys():
row = str(key)+" Trailer Byte,"+",".join([str(x) for x in fttTable[key]])
if int(key) < 4 :
fttFile4.write(row+"\n")
if int(key) < 8 :
fttFile8.write(row+"\n")
fttFile16.write(row+"\n") | 0.043305 | 0.100172 |
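The script above targets Python 2 (print statements, str.encode('hex')). As a rough sketch, the same header-byte frequency matrix can be computed under Python 3 as follows; trailer handling would mirror it by reading the last 16 bytes of each file. The function and variable names here are illustrative, not part of the original script.
import math
import os
import sys

def header_matrix(root, depth=16):
    """Python 3 sketch: sqrt-normalised byte frequencies for the first `depth` bytes."""
    table = [[0] * 256 for _ in range(depth)]
    n_files = 0
    for dirpath, _dirs, files in os.walk(root):
        for name in files:
            if name == '.DS_Store':
                continue
            with open(os.path.join(dirpath, name), 'rb') as fh:
                head = fh.read(depth)  # bytes: indexing yields ints in Python 3
            if not head:
                continue
            for pos, value in enumerate(head):
                table[pos][value] += 1
            n_files += 1
    n_files = max(n_files, 1)  # avoid division by zero on an empty directory
    return [[math.sqrt(count / n_files) for count in row] for row in table]

if __name__ == '__main__':
    matrix = header_matrix(sys.argv[1])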
import streamlit as st
import pandas as pd
import pyfolio as pf
import matplotlib.pyplot as plt
from fastbt.rapid import backtest
from fastbt.datasource import DataSource
@st.cache
def load_data(x, y):
tmp = x[x.symbol.isin(y)]
return tmp
@st.cache
def transform(data):
"""
Return the transformed data (adds the 'ret' and 'pret' columns)
"""
ds = DataSource(data)
ds.add_pct_change(col_name='ret', lag=1)
ds.add_formula('(open/prevclose)-1', col_name='pret')
return ds.data
@st.cache
def backtesting(data, **kwargs):
results = backtest(data=data, **kwargs)
return results
@st.cache
def results_frame(data):
byday = data.groupby('timestamp').net_profit.sum().reset_index()
byday['cum_profit'] = byday.net_profit.cumsum()
byday['max_profit'] = byday.cum_profit.expanding().max()
byday['year'] = byday.timestamp.dt.year
byday['month'] = byday.timestamp.dt.month
return byday.set_index('timestamp')
data_uploader = st.text_input(label='Enter the entire path of your file')
universe_uploader = st.file_uploader(label='Load your universe Excel file')
universes = []
symbols = None
xls = None
data = None
if universe_uploader:
xls = pd.ExcelFile(universe_uploader)
universes = xls.sheet_names
universe_select = st.selectbox(label='Select your universe', options=universes)
if universe_select:
st.write(universe_select)
symbols = xls.parse(sheet_name=universe_select, header=None).values.ravel()
symbols = list(symbols)
if st.checkbox('Show symbols'):
st.write(symbols)
order = st.radio('BUY or SELL', options=['B', 'S'])
price = st.text_input('Enter price formula', value='open')
stop_loss = st.number_input(label='stop loss', min_value=0.5, max_value=5.0, value=2.0, step=.5)
sort_by = st.selectbox('Select a metric to rank', ['pret', 'ret'])
sort_mode = st.radio('This is to select the bottom or top stocks', [True, False])
if data_uploader:
data = pd.read_hdf(data_uploader)
df2 = load_data(data, symbols)
df2 = transform(df2)
if st.checkbox('Run Backtest'):
result = backtesting(data=df2, order=order, price=price, stop_loss=stop_loss, sort_by=sort_by, sort_mode=sort_mode,
commission=0.035, slippage=0.03)
res = results_frame(result)
st.line_chart(res[['cum_profit', 'max_profit']])
by_month = res.groupby(['year', 'month']).net_profit.sum()
by_month.plot.bar(title='Net profit by month')
st.pyplot()
st.subheader('Statistics')
st.write(pf.timeseries.perf_stats(res.net_profit/100000))
st.subheader('Drawdown table')
st.table(pf.timeseries.gen_drawdown_table(res.net_profit/100000))
if st.checkbox('Export results to csv'):
result.to_csv('output.csv')
st.write('File saved') | examples/apps/simple.py | import streamlit as st
import pandas as pd
import pyfolio as pf
import matplotlib.pyplot as plt
from fastbt.rapid import backtest
from fastbt.datasource import DataSource
@st.cache
def load_data(x, y):
tmp = x[x.symbol.isin(y)]
return tmp
@st.cache
def transform(data):
"""
Return the transformed data (adds the 'ret' and 'pret' columns)
"""
ds = DataSource(data)
ds.add_pct_change(col_name='ret', lag=1)
ds.add_formula('(open/prevclose)-1', col_name='pret')
return ds.data
@st.cache
def backtesting(data, **kwargs):
results = backtest(data=data, **kwargs)
return results
@st.cache
def results_frame(data):
byday = data.groupby('timestamp').net_profit.sum().reset_index()
byday['cum_profit'] = byday.net_profit.cumsum()
byday['max_profit'] = byday.cum_profit.expanding().max()
byday['year'] = byday.timestamp.dt.year
byday['month'] = byday.timestamp.dt.month
return byday.set_index('timestamp')
data_uploader = st.text_input(label='Enter the entire path of your file')
universe_uploader = st.file_uploader(label='Load your universe Excel file')
universes = []
symbols = None
xls = None
data = None
if universe_uploader:
xls = pd.ExcelFile(universe_uploader)
universes = xls.sheet_names
universe_select = st.selectbox(label='Select your universe', options=universes)
if universe_select:
st.write(universe_select)
symbols = xls.parse(sheet_name=universe_select, header=None).values.ravel()
symbols = list(symbols)
if st.checkbox('Show symbols'):
st.write(symbols)
order = st.radio('BUY or SELL', options=['B', 'S'])
price = st.text_input('Enter price formula', value='open')
stop_loss = st.number_input(label='stop loss', min_value=0.5, max_value=5.0, value=2.0, step=.5)
sort_by = st.selectbox('Select a metric to rank', ['pret', 'ret'])
sort_mode = st.radio('This is to select the bottom or top stocks', [True, False])
if data_uploader:
data = pd.read_hdf(data_uploader)
df2 = load_data(data, symbols)
df2 = transform(df2)
if st.checkbox('Run Backtest'):
result = backtesting(data=df2, order=order, price=price, stop_loss=stop_loss, sort_by=sort_by, sort_mode=sort_mode,
commission=0.035, slippage=0.03)
res = results_frame(result)
st.line_chart(res[['cum_profit', 'max_profit']])
by_month = res.groupby(['year', 'month']).net_profit.sum()
by_month.plot.bar(title='Net profit by month')
st.pyplot()
st.subheader('Statistics')
st.write(pf.timeseries.perf_stats(res.net_profit/100000))
st.subheader('Drawdown table')
st.table(pf.timeseries.gen_drawdown_table(res.net_profit/100000))
if st.checkbox('Export results to csv'):
result.to_csv('output.csv')
st.write('File saved') | 0.506591 | 0.352369 |
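The @st.cache decorator in the row above memoizes a function on its arguments, which is why results_frame must read its data parameter rather than the module-level result (fixed above). A minimal sketch of the same pattern, assuming a Streamlit version that still ships st.cache (newer releases replace it with st.cache_data):

import pandas as pd
import streamlit as st

@st.cache
def daily_summary(trades: pd.DataFrame) -> pd.DataFrame:
    # Re-computed only when `trades` changes between script reruns.
    by_day = trades.groupby('timestamp').net_profit.sum().reset_index()
    by_day['cum_profit'] = by_day.net_profit.cumsum()
    return by_day.set_index('timestamp')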
import pytest
from django.core.validators import (
MaxValueValidator,
MinValueValidator,
)
from django.db import models
from django.test import TestCase
from rest_framework import (
exceptions,
metadata,
serializers,
status,
versioning,
views
)
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.test import APIRequestFactory
from complete_metadata import ApiMetadata
request = APIRequestFactory().options('/')
def test_metadata():
class ExampleView(views.APIView):
"""Example view."""
metadata_class = ApiMetadata
view = ExampleView.as_view()
response = view(request=request)
expected = {
'name': 'Example',
'description': 'Example view.',
'renders': [
'application/json',
'text/html'
],
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
]
}
assert response.status_code == status.HTTP_200_OK
assert response.data == expected
def test_actions():
class NestedField(serializers.Serializer):
child1 = serializers.IntegerField()
child2 = serializers.IntegerField()
class ExampleSerializer(serializers.Serializer):
choice_field = serializers.ChoiceField(['circle', 'triangle', 'square'])
integer_field = serializers.IntegerField(min_value=1, max_value=1024)
char_field = serializers.CharField(required=False, min_length=2, max_length=20)
list_field = serializers.ListField(child=serializers.ListField(child=serializers.IntegerField()))
nested_field = NestedField()
class ExampleView(views.APIView):
"""Example view."""
metadata_class = ApiMetadata
def post(self, request):
pass
def get_serializer(self):
return ExampleSerializer()
view = ExampleView.as_view()
response = view(request=request)
expected = {
'name': 'Example',
'description': 'Example view.',
'renders': [
'application/json',
'text/html'
],
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
],
'actions': {
'POST': {
'choice_field': {
'type': 'choice',
'required': True,
'read_only': False,
'label': 'Choice field',
'default': None,
'choices': [
{'display_name': 'circle', 'value': 'circle'},
{'display_name': 'triangle', 'value': 'triangle'},
{'display_name': 'square', 'value': 'square'}
],
'info_messages': []
},
'integer_field': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Integer field',
'min_value': 1,
'max_value': 1024,
'default': None,
'info_messages': []
},
'char_field': {
'type': 'string',
'required': False,
'read_only': False,
'label': 'Char field',
'min_length': 2,
'max_length': 20,
'default': None,
'info_messages': []
},
'list_field': {
'type': 'list',
'required': True,
'read_only': False,
'label': 'List field',
'default': None,
'child': {
'type': 'list',
'required': True,
'read_only': False,
'default': None,
'child': {
'type': 'integer',
'required': True,
'read_only': False,
'default': None,
'info_messages': []
},
'info_messages': []
},
'info_messages': []
},
'nested_field': {
'type': 'nested object',
'required': True,
'read_only': False,
'label': 'Nested field',
'default': None,
'children': {
'child1': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Child1',
'default': None,
'info_messages': []
},
'child2': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Child2',
'default': None,
'info_messages': []
}
},
'info_messages': []
}
}
},
'extra_metadata':{
'permitted_actions': {
'POST': True
}
}
}
assert response.status_code == status.HTTP_200_OK
assert response.data == expected | tests/tests.py | import pytest
from django.core.validators import (
MaxValueValidator,
MinValueValidator,
)
from django.db import models
from django.test import TestCase
from rest_framework import (
exceptions,
metadata,
serializers,
status,
versioning,
views
)
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.test import APIRequestFactory
from complete_metadata import ApiMetadata
request = APIRequestFactory().options('/')
def test_metadata():
class ExampleView(views.APIView):
"""Example view."""
metadata_class = ApiMetadata
view = ExampleView.as_view()
response = view(request=request)
expected = {
'name': 'Example',
'description': 'Example view.',
'renders': [
'application/json',
'text/html'
],
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
]
}
assert response.status_code == status.HTTP_200_OK
assert response.data == expected
def test_actions():
class NestedField(serializers.Serializer):
child1 = serializers.IntegerField()
child2 = serializers.IntegerField()
class ExampleSerializer(serializers.Serializer):
choice_field = serializers.ChoiceField(['circle', 'triangle', 'square'])
integer_field = serializers.IntegerField(min_value=1, max_value=1024)
char_field = serializers.CharField(required=False, min_length=2, max_length=20)
list_field = serializers.ListField(child=serializers.ListField(child=serializers.IntegerField()))
nested_field = NestedField()
class ExampleView(views.APIView):
"""Example view."""
metadata_class = ApiMetadata
def post(self, request):
pass
def get_serializer(self):
return ExampleSerializer()
view = ExampleView.as_view()
response = view(request=request)
expected = {
'name': 'Example',
'description': 'Example view.',
'renders': [
'application/json',
'text/html'
],
'parses': [
'application/json',
'application/x-www-form-urlencoded',
'multipart/form-data'
],
'actions': {
'POST': {
'choice_field': {
'type': 'choice',
'required': True,
'read_only': False,
'label': 'Choice field',
'default': None,
'choices': [
{'display_name': 'circle', 'value': 'circle'},
{'display_name': 'triangle', 'value': 'triangle'},
{'display_name': 'square', 'value': 'square'}
],
'info_messages': []
},
'integer_field': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Integer field',
'min_value': 1,
'max_value': 1024,
'default': None,
'info_messages': []
},
'char_field': {
'type': 'string',
'required': False,
'read_only': False,
'label': 'Char field',
'min_length': 2,
'max_length': 20,
'default': None,
'info_messages': []
},
'list_field': {
'type': 'list',
'required': True,
'read_only': False,
'label': 'List field',
'default': None,
'child': {
'type': 'list',
'required': True,
'read_only': False,
'default': None,
'child': {
'type': 'integer',
'required': True,
'read_only': False,
'default': None,
'info_messages': []
},
'info_messages': []
},
'info_messages': []
},
'nested_field': {
'type': 'nested object',
'required': True,
'read_only': False,
'label': 'Nested field',
'default': None,
'children': {
'child1': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Child1',
'default': None,
'info_messages': []
},
'child2': {
'type': 'integer',
'required': True,
'read_only': False,
'label': 'Child2',
'default': None,
'info_messages': []
}
},
'info_messages': []
}
}
},
'extra_metadata':{
'permitted_actions': {
'POST': True
}
}
}
assert response.status_code == status.HTTP_200_OK
assert response.data == expected | 0.6488 | 0.332785 |
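The tests above issue an OPTIONS request and assert on a custom metadata_class; the real complete_metadata.ApiMetadata is not part of this row, so the following is only a sketch of how such a class could add the extra_metadata block the tests expect, built on DRF's SimpleMetadata:

from rest_framework.metadata import SimpleMetadata

class ApiMetadataSketch(SimpleMetadata):
    # Hypothetical stand-in for complete_metadata.ApiMetadata.
    def determine_metadata(self, request, view):
        data = super().determine_metadata(request, view)
        writing_methods = [m for m in view.allowed_methods
                           if m not in ('GET', 'HEAD', 'OPTIONS')]
        if writing_methods:
            data['extra_metadata'] = {
                'permitted_actions': {m: True for m in writing_methods}
            }
        return data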
from abc import ABC, abstractmethod
import paho.mqtt.client as mqtt
from beamline.model.parameters import *
class AbstractMiner(ABC):
def __init__(self, id):
self._id = id
self._name = ""
self._description = ""
self._running = False
self._configured = True
self._stream = Stream()
self._client = mqtt.Client()
@abstractmethod
def configure(self, configuration):
pass
@abstractmethod
def consume_event(self, case_id, activity_name):
pass
@abstractmethod
def get_views(self, configuration):
pass
@abstractmethod
def get_configuration_parameters(self):
pass
@abstractmethod
def get_view_parameters(self):
pass
def stream(self, stream):
self._stream = stream
def on_message(self, client, userdata, msg):
structure = msg.topic.split("/")
activity_name = structure[-1]
case_id = structure[-2]
self.consume_event(case_id, activity_name)
def start(self):
if self._running :
raise Exception("Miner instance already running")
if not self._configured:
raise Exception("Miner instance not yet configured")
self._client.connect(self._stream.broker_host, 1883, 60)
self._client.subscribe(self._stream.topic_base + "/" + self._stream.process_name + "/#")
self._client.on_message = self.on_message
self._client.loop_start()
self._running = True
def stop(self):
if not self._running:
raise Exception("Miner instance not running")
self._client.disconnect()
self._client.loop_stop()
def serialize(self):
return {
"id": self._id,
"name": self._name,
"description": self._description,
"configurationParameters": [
x.serialize() for x in self.get_configuration_parameters()
],
"viewParameters": [
x.serialize() for x in self.get_view_parameters()
]
} | beamline/miners/abstract.py | from abc import ABC, abstractmethod
import paho.mqtt.client as mqtt
from beamline.model.parameters import *
class AbstractMiner(ABC):
def __init__(self, id):
self._id = id
self._name = ""
self._description = ""
self._running = False
self._configured = True
self._stream = Stream()
self._client = mqtt.Client()
@abstractmethod
def configure(self, configuration):
pass
@abstractmethod
def consume_event(self, case_id, activity_name):
pass
@abstractmethod
def get_views(self, configuration):
pass
@abstractmethod
def get_configuration_parameters(self):
pass
@abstractmethod
def get_view_parameters(self):
pass
def stream(self, stream):
self._stream = stream
def on_message(self, client, userdata, msg):
structure = msg.topic.split("/")
activity_name = structure[-1]
case_id = structure[-2]
self.consume_event(case_id, activity_name)
def start(self):
if self._running :
raise Exception("Miner instance already running")
if not self._configured:
raise Exception("Miner instance not yet configured")
self._client.connect(self._stream.broker_host, 1883, 60)
self._client.subscribe(self._stream.topic_base + "/" + self._stream.process_name + "/#")
self._client.on_message = self.on_message
self._client.loop_start()
self._running = True
def stop(self):
if not self._running:
raise Exception("Miner instance not running")
self._client.disconnect()
self._client.loop_stop()
def serialize(self):
return {
"id": self._id,
"name": self._name,
"description": self._description,
"configurationParameters": [
x.serialize() for x in self.get_configuration_parameters()
],
"viewParameters": [
x.serialize() for x in self.get_view_parameters()
]
} | 0.744285 | 0.086323 |
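A concrete miner has to implement the abstract hooks above. The sketch below simply counts events per activity to show the minimum surface a subclass needs; it assumes AbstractMiner from the file above is importable, and the names are illustrative rather than taken from the beamline package:

from collections import Counter

class CountingMiner(AbstractMiner):
    def __init__(self, id):
        super().__init__(id)
        self._name = 'counting-miner'
        self._counts = Counter()

    def configure(self, configuration):
        self._configured = True

    def consume_event(self, case_id, activity_name):
        self._counts[activity_name] += 1

    def get_views(self, configuration):
        return dict(self._counts)

    def get_configuration_parameters(self):
        return []

    def get_view_parameters(self):
        return []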
import json
import uuid
from typing import Any, Tuple
import numpy as np
from aea.exceptions import enforce
from aea.helpers.search.generic import (
AGENT_LOCATION_MODEL,
AGENT_PERSONALITY_MODEL,
AGENT_REMOVE_SERVICE_MODEL,
AGENT_SET_SERVICE_MODEL,
SIMPLE_DATA_MODEL,
)
from aea.helpers.search.models import Description, Location, Query
from aea.skills.base import Model
DEFAULT_PRICE_PER_DATA_BATCH = 10
DEFAULT_BATCH_SIZE = 32
DEFAULT_SELLER_TX_FEE = 0
DEFAULT_BUYER_TX_FEE = 0
DEFAULT_SERVICE_ID = "data_service"
DEFAULT_LOCATION = {"longitude": 0.1270, "latitude": 51.5194}
DEFAULT_PERSONALITY_DATA = {"piece": "genus", "value": "data"}
DEFAULT_SERVICE_DATA = {"key": "dataset_id", "value": "fmnist"}
DEFAULT_CLASSIFICATION = {"piece": "classification", "value": "seller"}
class NumpyArrayEncoder(json.JSONEncoder):
"""This class defines a custom JSON encoder for numpy ndarray objects."""
def default(self, obj: Any) -> Any: # pylint: disable=arguments-differ
"""Encode an object (including a numpy ndarray) into its JSON representation."""
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj) # pragma: nocover
class Strategy(Model):
"""This class defines a strategy for the agent."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the strategy of the agent."""
self.price_per_data_batch = kwargs.pop(
"price_per_data_batch", DEFAULT_PRICE_PER_DATA_BATCH
)
self.batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
self.seller_tx_fee = kwargs.pop("seller_tx_fee", DEFAULT_SELLER_TX_FEE)
self.buyer_tx_fee = kwargs.pop("buyer_tx_fee", DEFAULT_BUYER_TX_FEE)
currency_id = kwargs.pop("currency_id", None)
ledger_id = kwargs.pop("ledger_id", None)
self._is_ledger_tx = kwargs.pop("is_ledger_tx", False)
self._service_id = kwargs.pop("service_id", DEFAULT_SERVICE_ID)
location = kwargs.pop("location", DEFAULT_LOCATION)
self._agent_location = {
"location": Location(
latitude=location["latitude"], longitude=location["longitude"]
)
}
self._set_personality_data = kwargs.pop(
"personality_data", DEFAULT_PERSONALITY_DATA
)
enforce(
len(self._set_personality_data) == 2
and "piece" in self._set_personality_data
and "value" in self._set_personality_data,
"personality_data must contain keys `key` and `value`",
)
self._set_classification = kwargs.pop("classification", DEFAULT_CLASSIFICATION)
enforce(
len(self._set_classification) == 2
and "piece" in self._set_classification
and "value" in self._set_classification,
"classification must contain keys `key` and `value`",
)
self._set_service_data = kwargs.pop("service_data", DEFAULT_SERVICE_DATA)
enforce(
len(self._set_service_data) == 2
and "key" in self._set_service_data
and "value" in self._set_service_data,
"service_data must contain keys `key` and `value`",
)
self._remove_service_data = {"key": self._set_service_data["key"]}
self._simple_service_data = {
self._set_service_data["key"]: self._set_service_data["value"]
}
super().__init__(**kwargs)
self._ledger_id = (
ledger_id if ledger_id is not None else self.context.default_ledger_id
)
if currency_id is None:
currency_id = self.context.currency_denominations.get(self._ledger_id, None)
enforce(
currency_id is not None,
f"Currency denomination for ledger_id={self._ledger_id} not specified.",
)
self._currency_id = currency_id
# loading ML dataset
# (this could be parametrized)
from tensorflow import keras # pylint: disable=import-outside-toplevel
(
(self.train_x, self.train_y),
(self.test_x, self.test_y),
) = keras.datasets.fashion_mnist.load_data()
@property
def ledger_id(self) -> str:
"""Get the ledger id."""
return self._ledger_id
@property
def is_ledger_tx(self) -> bool:
"""Get the is_ledger_tx."""
return self._is_ledger_tx
def get_location_description(self) -> Description:
"""
Get the location description.
:return: a description of the agent's location
"""
description = Description(
self._agent_location, data_model=AGENT_LOCATION_MODEL,
)
return description
def get_register_personality_description(self) -> Description:
"""
Get the register personality description.
:return: a description of the personality
"""
description = Description(
self._set_personality_data, data_model=AGENT_PERSONALITY_MODEL,
)
return description
def get_register_classification_description(self) -> Description:
"""
Get the register classification description.
:return: a description of the classification
"""
description = Description(
self._set_classification, data_model=AGENT_PERSONALITY_MODEL,
)
return description
def get_register_service_description(self) -> Description:
"""
Get the register service description.
:return: a description of the offered services
"""
description = Description(
self._set_service_data, data_model=AGENT_SET_SERVICE_MODEL,
)
return description
def get_service_description(self) -> Description:
"""
Get the simple service description.
:return: a description of the offered services
"""
description = Description(
self._simple_service_data, data_model=SIMPLE_DATA_MODEL,
)
return description
def get_unregister_service_description(self) -> Description:
"""
Get the unregister service description.
:return: a description of the to be removed service
"""
description = Description(
self._remove_service_data, data_model=AGENT_REMOVE_SERVICE_MODEL,
)
return description
def sample_data(self, n: int) -> Tuple:
"""Sample N rows from data."""
idx = np.arange(self.train_x.shape[0])
mask = np.zeros_like(idx, dtype=bool)
selected = np.random.choice(idx, n, replace=False)
mask[selected] = True
x_sample = self.train_x[mask]
y_sample = self.train_y[mask]
return x_sample, y_sample
@staticmethod
def encode_sample_data(data: Tuple) -> bytes:
"""Serialize data (a tuple of two numpy ndarrays) into bytes."""
data_dict = {
"data_0": data[0],
"data_1": data[1],
}
return json.dumps(data_dict, cls=NumpyArrayEncoder).encode("utf-8")
def is_matching_supply(self, query: Query) -> bool:
"""
Check if the query matches the supply.
:param query: the query
:return: bool indicating whether matches or not
"""
service_desc = self.get_service_description()
return query.check(service_desc)
def generate_terms(self) -> Description:
"""
Generate a proposal.
:return: a proposal description
"""
address = self.context.agent_addresses[self.ledger_id]
proposal = Description(
{
"batch_size": self.batch_size,
"price": self.price_per_data_batch,
"seller_tx_fee": self.seller_tx_fee,
"buyer_tx_fee": self.buyer_tx_fee,
"currency_id": self._currency_id,
"ledger_id": self.ledger_id,
"address": address,
"service_id": self._service_id,
"nonce": uuid.uuid4().hex,
}
)
return proposal
def is_valid_terms(self, terms: Description) -> bool:
"""
Check the terms are valid.
:param terms: the terms
:return: boolean
"""
generated_terms = self.generate_terms()
return all(
[
terms.values[key] == generated_terms.values[key]
for key in [
"batch_size",
"price",
"seller_tx_fee",
"buyer_tx_fee",
"currency_id",
"ledger_id",
"address",
"service_id",
]
]
) | packages/fetchai/skills/ml_data_provider/strategy.py | import json
import uuid
from typing import Any, Tuple
import numpy as np
from aea.exceptions import enforce
from aea.helpers.search.generic import (
AGENT_LOCATION_MODEL,
AGENT_PERSONALITY_MODEL,
AGENT_REMOVE_SERVICE_MODEL,
AGENT_SET_SERVICE_MODEL,
SIMPLE_DATA_MODEL,
)
from aea.helpers.search.models import Description, Location, Query
from aea.skills.base import Model
DEFAULT_PRICE_PER_DATA_BATCH = 10
DEFAULT_BATCH_SIZE = 32
DEFAULT_SELLER_TX_FEE = 0
DEFAULT_BUYER_TX_FEE = 0
DEFAULT_SERVICE_ID = "data_service"
DEFAULT_LOCATION = {"longitude": 0.1270, "latitude": 51.5194}
DEFAULT_PERSONALITY_DATA = {"piece": "genus", "value": "data"}
DEFAULT_SERVICE_DATA = {"key": "dataset_id", "value": "fmnist"}
DEFAULT_CLASSIFICATION = {"piece": "classification", "value": "seller"}
class NumpyArrayEncoder(json.JSONEncoder):
"""This class defines a custom JSON encoder for numpy ndarray objects."""
def default(self, obj: Any) -> Any: # pylint: disable=arguments-differ
"""Encode an object (including a numpy ndarray) into its JSON representation."""
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj) # pragma: nocover
class Strategy(Model):
"""This class defines a strategy for the agent."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the strategy of the agent."""
self.price_per_data_batch = kwargs.pop(
"price_per_data_batch", DEFAULT_PRICE_PER_DATA_BATCH
)
self.batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
self.seller_tx_fee = kwargs.pop("seller_tx_fee", DEFAULT_SELLER_TX_FEE)
self.buyer_tx_fee = kwargs.pop("buyer_tx_fee", DEFAULT_BUYER_TX_FEE)
currency_id = kwargs.pop("currency_id", None)
ledger_id = kwargs.pop("ledger_id", None)
self._is_ledger_tx = kwargs.pop("is_ledger_tx", False)
self._service_id = kwargs.pop("service_id", DEFAULT_SERVICE_ID)
location = kwargs.pop("location", DEFAULT_LOCATION)
self._agent_location = {
"location": Location(
latitude=location["latitude"], longitude=location["longitude"]
)
}
self._set_personality_data = kwargs.pop(
"personality_data", DEFAULT_PERSONALITY_DATA
)
enforce(
len(self._set_personality_data) == 2
and "piece" in self._set_personality_data
and "value" in self._set_personality_data,
"personality_data must contain keys `key` and `value`",
)
self._set_classification = kwargs.pop("classification", DEFAULT_CLASSIFICATION)
enforce(
len(self._set_classification) == 2
and "piece" in self._set_classification
and "value" in self._set_classification,
"classification must contain keys `key` and `value`",
)
self._set_service_data = kwargs.pop("service_data", DEFAULT_SERVICE_DATA)
enforce(
len(self._set_service_data) == 2
and "key" in self._set_service_data
and "value" in self._set_service_data,
"service_data must contain keys `key` and `value`",
)
self._remove_service_data = {"key": self._set_service_data["key"]}
self._simple_service_data = {
self._set_service_data["key"]: self._set_service_data["value"]
}
super().__init__(**kwargs)
self._ledger_id = (
ledger_id if ledger_id is not None else self.context.default_ledger_id
)
if currency_id is None:
currency_id = self.context.currency_denominations.get(self._ledger_id, None)
enforce(
currency_id is not None,
f"Currency denomination for ledger_id={self._ledger_id} not specified.",
)
self._currency_id = currency_id
# loading ML dataset
# (this could be parametrized)
from tensorflow import keras # pylint: disable=import-outside-toplevel
(
(self.train_x, self.train_y),
(self.test_x, self.test_y),
) = keras.datasets.fashion_mnist.load_data()
@property
def ledger_id(self) -> str:
"""Get the ledger id."""
return self._ledger_id
@property
def is_ledger_tx(self) -> bool:
"""Get the is_ledger_tx."""
return self._is_ledger_tx
def get_location_description(self) -> Description:
"""
Get the location description.
:return: a description of the agent's location
"""
description = Description(
self._agent_location, data_model=AGENT_LOCATION_MODEL,
)
return description
def get_register_personality_description(self) -> Description:
"""
Get the register personality description.
:return: a description of the personality
"""
description = Description(
self._set_personality_data, data_model=AGENT_PERSONALITY_MODEL,
)
return description
def get_register_classification_description(self) -> Description:
"""
Get the register classification description.
:return: a description of the classification
"""
description = Description(
self._set_classification, data_model=AGENT_PERSONALITY_MODEL,
)
return description
def get_register_service_description(self) -> Description:
"""
Get the register service description.
:return: a description of the offered services
"""
description = Description(
self._set_service_data, data_model=AGENT_SET_SERVICE_MODEL,
)
return description
def get_service_description(self) -> Description:
"""
Get the simple service description.
:return: a description of the offered services
"""
description = Description(
self._simple_service_data, data_model=SIMPLE_DATA_MODEL,
)
return description
def get_unregister_service_description(self) -> Description:
"""
Get the unregister service description.
:return: a description of the to be removed service
"""
description = Description(
self._remove_service_data, data_model=AGENT_REMOVE_SERVICE_MODEL,
)
return description
def sample_data(self, n: int) -> Tuple:
"""Sample N rows from data."""
idx = np.arange(self.train_x.shape[0])
mask = np.zeros_like(idx, dtype=bool)
selected = np.random.choice(idx, n, replace=False)
mask[selected] = True
x_sample = self.train_x[mask]
y_sample = self.train_y[mask]
return x_sample, y_sample
@staticmethod
def encode_sample_data(data: Tuple) -> bytes:
"""Serialize data (a tuple of two numpy ndarrays) into bytes."""
data_dict = {
"data_0": data[0],
"data_1": data[1],
}
return json.dumps(data_dict, cls=NumpyArrayEncoder).encode("utf-8")
def is_matching_supply(self, query: Query) -> bool:
"""
Check if the query matches the supply.
:param query: the query
:return: bool indicating whether matches or not
"""
service_desc = self.get_service_description()
return query.check(service_desc)
def generate_terms(self) -> Description:
"""
Generate a proposal.
:return: a proposal description
"""
address = self.context.agent_addresses[self.ledger_id]
proposal = Description(
{
"batch_size": self.batch_size,
"price": self.price_per_data_batch,
"seller_tx_fee": self.seller_tx_fee,
"buyer_tx_fee": self.buyer_tx_fee,
"currency_id": self._currency_id,
"ledger_id": self.ledger_id,
"address": address,
"service_id": self._service_id,
"nonce": uuid.uuid4().hex,
}
)
return proposal
def is_valid_terms(self, terms: Description) -> bool:
"""
Check the terms are valid.
:param terms: the terms
:return: boolean
"""
generated_terms = self.generate_terms()
return all(
[
terms.values[key] == generated_terms.values[key]
for key in [
"batch_size",
"price",
"seller_tx_fee",
"buyer_tx_fee",
"currency_id",
"ledger_id",
"address",
"service_id",
]
]
) | 0.900311 | 0.246772 |
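Strategy.encode_sample_data above serializes a pair of NumPy arrays to JSON bytes. The buyer-side decoder is not part of this row, so the snippet below is only the assumed inverse of that encoding:

import json
import numpy as np

def decode_sample_data(payload: bytes):
    # Inverse of Strategy.encode_sample_data: bytes -> (ndarray, ndarray).
    data_dict = json.loads(payload.decode('utf-8'))
    return np.asarray(data_dict['data_0']), np.asarray(data_dict['data_1'])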
data = {
"apache-2.0": {
"name": "Apache-2.0",
"fullName": "Apache License 2.0",
"rules": {
"appendNoticeFileIfExists": True,
"distributeOriginalLicenseText": True,
}
},
"beerware": {
"name": "Beerware",
"fullName": "Beerware",
"rules": {}
},
"bsd-1": {
"name": "BSD 1-clause",
"fullName": "BSD 1-Clause License",
"rules": {}
},
"bsd-2": {
"name": "BSD 2-clause",
"fullName": "Simplified BSD License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"bsd-3": {
"name": "BSD 3-clause",
"fullName": "Modified BSD License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"bsl-1.0": {
"name": "BSD 3-clause",
"fullName": "Modified BSD License",
"rules": {}
},
"bzip2-1.0.6": {
"name": "bzip2-1.0.6",
"fullName": "bzip2 and libbzip2 License v1.0.6",
"rules": {}
},
"ftl": {
"name": "FTL",
"fullName": "MIT License",
"rules": {
"mentionSource": True,
}
},
"gpl-2-lsn": {
"name": "GPL-2.0 WITH Linux-syscall-note",
"fullName":
"GNU General Public License Version 2.0 WITH Linux-syscall-note",
"rules": {}
},
"hpnd": {
"name": "HPND",
"fullName": "Historical Permission Notice and Disclaimer",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"hpnd-sv": {
"name": "HPND-sell-variant",
"fullName":
"Historical Permission Notice and Disclaimer - sell variant",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"icu": {
"name": "ICU",
"fullName": "ICU License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"ijg": {
"name": "IJG",
"fullName": "Independent JPEG Group License",
"rules": {
"mentionSource": True,
}
},
"isc": {
"name": "ISC",
"fullName": "ISC license",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"lgpl-2": {
"name": "LGPL-2",
"fullName": "GNU Lesser General Public License Version 2.0",
"rules": {
"noStaticLink": True,
}
},
"lgpl-2.1": {
"name": "LGPL-2.1",
"fullName": "GNU Lesser General Public License Version 2.1",
"rules": {
"noStaticLink": True,
}
},
"lgpl-2.1+": {
"name": "LGPL-2.1 or later",
"fullName": "GNU Lesser General Public License Version 2.1 or later",
"rules": {
"noStaticLink": True,
}
},
"lgpl-3": {
"name": "LGPL-3",
"fullName": "GNU Lesser General Public License Version 3",
"rules": {
"noStaticLink": True,
}
},
"libpng-2.0": {
"name": "libpng-2.0",
"fullName": "PNG Reference Library License version 2",
"rules": {}
},
"libtiff": {
"name": "libtiff",
"fullName": "libtiff License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"mit": {
"name": "MIT",
"fullName": "MIT License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"naist": {
"name": "NAIST-2003",
"fullName": "Nara Institute License 2003",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"pd": {
"name": "Public Domain",
"fullName": "Public Domain",
"rules": {}
},
"sgi-b-1.1": {
"name": "SGI-B-1.1",
"fullName": "SGI Free Software License B v1.1",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"sgi-b-2.0": {
"name": "SGI-B-2.0",
"fullName": "SGI Free Software License B v2.0",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"smlnj": {
"name": "SML/NJ",
"fullName":
"Standard ML of New Jersey Copyright Notice, License And "
"Disclaimer",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"unicode": {
"name": "Unicode",
"fullName":
"Unicode, Inc. License Agreement - Data Files And Software",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"unlicense": {
"name": "Unlicense",
"fullName": "Unlicense",
"rules": {}
},
"x11": {
"name": "X11",
"fullName": "X11 License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"zlib": {
"name": "Zlib",
"fullName":
"Unicode, Inc. License Agreement - Data Files And Software",
"rules": {}
},
}
class LicensesData:
def __init__(self, customData={}):
self.data = {**data, **customData}
def AddData(self, customData):
self.data = {**self.data, **customData}
def GetData(self):
return self.data | vgazer/licenses.py | data = {
"apache-2.0": {
"name": "Apache-2.0",
"fullName": "Apache License 2.0",
"rules": {
"appendNoticeFileIfExists": True,
"distributeOriginalLicenseText": True,
}
},
"beerware": {
"name": "Beerware",
"fullName": "Beerware",
"rules": {}
},
"bsd-1": {
"name": "BSD 1-clause",
"fullName": "BSD 1-Clause License",
"rules": {}
},
"bsd-2": {
"name": "BSD 2-clause",
"fullName": "Simplified BSD License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"bsd-3": {
"name": "BSD 3-clause",
"fullName": "Modified BSD License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"bsl-1.0": {
"name": "BSD 3-clause",
"fullName": "Modified BSD License",
"rules": {}
},
"bzip2-1.0.6": {
"name": "bzip2-1.0.6",
"fullName": "bzip2 and libbzip2 License v1.0.6",
"rules": {}
},
"ftl": {
"name": "FTL",
"fullName": "MIT License",
"rules": {
"mentionSource": True,
}
},
"gpl-2-lsn": {
"name": "GPL-2.0 WITH Linux-syscall-note",
"fullName":
"GNU General Public License Version 2.0 WITH Linux-syscall-note",
"rules": {}
},
"hpnd": {
"name": "HPND",
"fullName": "Historical Permission Notice and Disclaimer",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"hpnd-sv": {
"name": "HPND-sell-variant",
"fullName":
"Historical Permission Notice and Disclaimer - sell variant",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"icu": {
"name": "ICU",
"fullName": "ICU License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"ijg": {
"name": "IJG",
"fullName": "Independent JPEG Group License",
"rules": {
"mentionSource": True,
}
},
"isc": {
"name": "ISC",
"fullName": "ISC license",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"lgpl-2": {
"name": "LGPL-2",
"fullName": "GNU Lesser General Public License Version 2.0",
"rules": {
"noStaticLink": True,
}
},
"lgpl-2.1": {
"name": "LGPL-2.1",
"fullName": "GNU Lesser General Public License Version 2.1",
"rules": {
"noStaticLink": True,
}
},
"lgpl-2.1+": {
"name": "LGPL-2.1 or later",
"fullName": "GNU Lesser General Public License Version 2.1 or later",
"rules": {
"noStaticLink": True,
}
},
"lgpl-3": {
"name": "LGPL-3",
"fullName": "GNU Lesser General Public License Version 3",
"rules": {
"noStaticLink": True,
}
},
"libpng-2.0": {
"name": "libpng-2.0",
"fullName": "PNG Reference Library License version 2",
"rules": {}
},
"libtiff": {
"name": "libtiff",
"fullName": "libtiff License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"mit": {
"name": "MIT",
"fullName": "MIT License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"naist": {
"name": "NAIST-2003",
"fullName": "Nara Institute License 2003",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"pd": {
"name": "Public Domain",
"fullName": "Public Domain",
"rules": {}
},
"sgi-b-1.1": {
"name": "SGI-B-1.1",
"fullName": "SGI Free Software License B v1.1",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"sgi-b-2.0": {
"name": "SGI-B-2.0",
"fullName": "SGI Free Software License B v2.0",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"smlnj": {
"name": "SML/NJ",
"fullName":
"Standard ML of New Jersey Copyright Notice, License And "
"Disclaimer",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"unicode": {
"name": "Unicode",
"fullName":
"Unicode, Inc. License Agreement - Data Files And Software",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"unlicense": {
"name": "Unlicense",
"fullName": "Unlicense",
"rules": {}
},
"x11": {
"name": "X11",
"fullName": "X11 License",
"rules": {
"distributeOriginalLicenseText": True,
}
},
"zlib": {
"name": "Zlib",
"fullName":
"Unicode, Inc. License Agreement - Data Files And Software",
"rules": {}
},
}
class LicensesData:
def __init__(self, customData={}):
self.data = {**data, **customData}
def AddData(self, customData):
self.data = {**self.data, **customData}
def GetData(self):
return self.data | 0.401219 | 0.465205 |
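The rules dictionaries above are plain flags, so querying them needs no extra machinery. A small illustrative helper (not part of vgazer/licenses.py, and assuming LicensesData from the row above):

def licenses_with_rule(licenses_data, rule):
    # e.g. licenses_with_rule(LicensesData(), 'distributeOriginalLicenseText')
    table = licenses_data.GetData()
    return sorted(key for key, entry in table.items() if entry['rules'].get(rule))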
from types import SimpleNamespace
import torch
import torch.nn as nn
from torch import Tensor
from transformers import AutoConfig, AutoModel
class AttentionHead(nn.Module):
def __init__(self, in_size: int = 768, hidden_size: int = 512) -> None:
super().__init__()
self.W = nn.Linear(in_size, hidden_size)
self.V = nn.Linear(hidden_size, 1)
def forward(self, features: Tensor) -> Tensor:
att = torch.tanh(self.W(features))
score = self.V(att)
attention_weights = torch.softmax(score, dim=1)
context_vector = attention_weights * features
context_vector = torch.sum(context_vector, dim=1)
return context_vector
class TransformerWithAttentionHead(nn.Module):
def __init__(
self,
transformer_checkpoint: str,
attn_hidden_size: int = 768,
hidden_dropout_prob: float = 0.0,
layer_norm_eps: float = 1e-7,
return_simplenamespace: bool = False
) -> None:
super(TransformerWithAttentionHead, self).__init__()
config = AutoConfig.from_pretrained(transformer_checkpoint)
config.update({
"output_hidden_states": True,
"hidden_dropout_prob": hidden_dropout_prob,
"layer_norm_eps": layer_norm_eps
})
self.transformer = AutoModel.from_pretrained(transformer_checkpoint, config=config)
self.attention = AttentionHead(
in_size=config.hidden_size,
hidden_size=attn_hidden_size
)
self.regressor = nn.Linear(config.hidden_size, 1)
self.return_simplenamespace = return_simplenamespace
def forward(self, input_ids: Tensor, attention_mask: Tensor) -> Tensor:
transformer_out = self.transformer(input_ids, attention_mask)
attention_out = self.attention(transformer_out.last_hidden_state)
regressor_out = self.regressor(attention_out)
if self.return_simplenamespace:
return SimpleNamespace(logits=regressor_out)
else:
return regressor_out | models.py |
from types import SimpleNamespace
import torch
import torch.nn as nn
from torch import Tensor
from transformers import AutoConfig, AutoModel
class AttentionHead(nn.Module):
def __init__(self, in_size: int = 768, hidden_size: int = 512) -> None:
super().__init__()
self.W = nn.Linear(in_size, hidden_size)
self.V = nn.Linear(hidden_size, 1)
def forward(self, features: Tensor) -> Tensor:
att = torch.tanh(self.W(features))
score = self.V(att)
attention_weights = torch.softmax(score, dim=1)
context_vector = attention_weights * features
context_vector = torch.sum(context_vector, dim=1)
return context_vector
class TransformerWithAttentionHead(nn.Module):
def __init__(
self,
transformer_checkpoint: str,
attn_hidden_size: int = 768,
hidden_dropout_prob: float = 0.0,
layer_norm_eps: float = 1e-7,
return_simplenamespace: bool = False
) -> None:
super(TransformerWithAttentionHead, self).__init__()
config = AutoConfig.from_pretrained(transformer_checkpoint)
config.update({
"output_hidden_states": True,
"hidden_dropout_prob": hidden_dropout_prob,
"layer_norm_eps": layer_norm_eps
})
self.transformer = AutoModel.from_pretrained(transformer_checkpoint, config=config)
self.attention = AttentionHead(
in_size=config.hidden_size,
hidden_size=attn_hidden_size
)
self.regressor = nn.Linear(config.hidden_size, 1)
self.return_simplenamespace = return_simplenamespace
def forward(self, input_ids: Tensor, attention_mask: Tensor) -> Tensor:
transformer_out = self.transformer(input_ids, attention_mask)
attention_out = self.attention(transformer_out.last_hidden_state)
regressor_out = self.regressor(attention_out)
if self.return_simplenamespace:
return SimpleNamespace(logits=regressor_out)
else:
return regressor_out | 0.954816 | 0.535766 |
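AttentionHead above pools the token dimension with softmax weights, so the regressor receives one vector per sequence. A quick shape check with random tensors (no pretrained weights needed, assuming the class above is importable):

import torch

head = AttentionHead(in_size=768, hidden_size=512)
features = torch.randn(2, 5, 768)   # batch of 2 sequences, 5 tokens each
pooled = head(features)
assert pooled.shape == (2, 768)     # one context vector per sequence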
import sys
sys.path.append('thirdparty/AdaptiveWingLoss')
import os, glob
import numpy as np
import cv2
import argparse
from src.dataset.image_translation import landmark_extraction, landmark_image_to_data
from approaches.train_image_translation import Image_translation_block
import platform
import torch
if platform.release() == '4.4.0-83-generic':
src_dir = r'/mnt/ntfs/Dataset/TalkingToon/VoxCeleb2_imagetranslation/raw_fl3d'
mp4_dir = r'/mnt/ntfs/Dataset/VoxCeleb2/train_set/dev/mp4'
jpg_dir = r'img_output'
ckpt_dir = r'img_output'
log_dir = r'img_output'
else: # 3.10.0-957.21.2.el7.x86_64
# root = r'/mnt/nfs/scratch1/yangzhou/VoxCeleb2_imagetranslation'
root = r'/mnt/nfs/scratch1/yangzhou/PreprocessedVox_imagetranslation'
src_dir = os.path.join(root, 'raw_fl3d')
# mp4_dir = r'/mnt/nfs/work1/kalo/yangzhou/VoxCeleb2/train_set/dev/mp4'
mp4_dir = r'/mnt/nfs/scratch1/yangzhou/PreprocessedVox_mp4'
jpg_dir = os.path.join(root, 'tmp_v')
ckpt_dir = os.path.join(root, 'ckpt')
log_dir = os.path.join(root, 'log')
''' Step 1. Data preparation '''
# landmark extraction
# landmark_extraction(int(sys.argv[1]), int(sys.argv[2]))
# save image data ahead -> saved file too large, will create data online
# landmark_image_to_data(0, 0, show=False)
''' Step 2. Train the network '''
parser = argparse.ArgumentParser()
parser.add_argument('--nepoch', type=int, default=150, help='number of epochs to train for')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--num_frames', type=int, default=1, help='number of frames extracted from each video')
parser.add_argument('--num_workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--lr', type=float, default=0.0001, help='')
parser.add_argument('--write', default=False, action='store_true')
parser.add_argument('--train', default=False, action='store_true')
parser.add_argument('--name', type=str, default='tmp')
parser.add_argument('--test_speed', default=False, action='store_true')
parser.add_argument('--jpg_dir', type=str, default=jpg_dir)
parser.add_argument('--ckpt_dir', type=str, default=ckpt_dir)
parser.add_argument('--log_dir', type=str, default=log_dir)
parser.add_argument('--jpg_freq', type=int, default=50, help='')
parser.add_argument('--ckpt_last_freq', type=int, default=1000, help='')
parser.add_argument('--ckpt_epoch_freq', type=int, default=1, help='')
parser.add_argument('--load_G_name', type=str, default='')
parser.add_argument('--use_vox_dataset', type=str, default='raw')
parser.add_argument('--add_audio_in', default=False, action='store_true')
parser.add_argument('--comb_fan_awing', default=False, action='store_true')
parser.add_argument('--fan_2or3D', type=str, default='3D')
parser.add_argument('--single_test', type=str, default='')
opt_parser = parser.parse_args()
model = Image_translation_block(opt_parser)
if(opt_parser.single_test != ''):
with torch.no_grad():
model.single_test()
if(opt_parser.train):
model.train()
else:
with torch.no_grad():
model.test() | Tencent/Video_Generation/MakeItTalk/main_train_image_translation.py | import sys
sys.path.append('thirdparty/AdaptiveWingLoss')
import os, glob
import numpy as np
import cv2
import argparse
from src.dataset.image_translation import landmark_extraction, landmark_image_to_data
from approaches.train_image_translation import Image_translation_block
import platform
import torch
if platform.release() == '4.4.0-83-generic':
src_dir = r'/mnt/ntfs/Dataset/TalkingToon/VoxCeleb2_imagetranslation/raw_fl3d'
mp4_dir = r'/mnt/ntfs/Dataset/VoxCeleb2/train_set/dev/mp4'
jpg_dir = r'img_output'
ckpt_dir = r'img_output'
log_dir = r'img_output'
else: # 3.10.0-957.21.2.el7.x86_64
# root = r'/mnt/nfs/scratch1/yangzhou/VoxCeleb2_imagetranslation'
root = r'/mnt/nfs/scratch1/yangzhou/PreprocessedVox_imagetranslation'
src_dir = os.path.join(root, 'raw_fl3d')
# mp4_dir = r'/mnt/nfs/work1/kalo/yangzhou/VoxCeleb2/train_set/dev/mp4'
mp4_dir = r'/mnt/nfs/scratch1/yangzhou/PreprocessedVox_mp4'
jpg_dir = os.path.join(root, 'tmp_v')
ckpt_dir = os.path.join(root, 'ckpt')
log_dir = os.path.join(root, 'log')
''' Step 1. Data preparation '''
# landmark extraction
# landmark_extraction(int(sys.argv[1]), int(sys.argv[2]))
# save image data ahead -> saved file too large, will create data online
# landmark_image_to_data(0, 0, show=False)
''' Step 2. Train the network '''
parser = argparse.ArgumentParser()
parser.add_argument('--nepoch', type=int, default=150, help='number of epochs to train for')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--num_frames', type=int, default=1, help='number of frames extracted from each video')
parser.add_argument('--num_workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--lr', type=float, default=0.0001, help='')
parser.add_argument('--write', default=False, action='store_true')
parser.add_argument('--train', default=False, action='store_true')
parser.add_argument('--name', type=str, default='tmp')
parser.add_argument('--test_speed', default=False, action='store_true')
parser.add_argument('--jpg_dir', type=str, default=jpg_dir)
parser.add_argument('--ckpt_dir', type=str, default=ckpt_dir)
parser.add_argument('--log_dir', type=str, default=log_dir)
parser.add_argument('--jpg_freq', type=int, default=50, help='')
parser.add_argument('--ckpt_last_freq', type=int, default=1000, help='')
parser.add_argument('--ckpt_epoch_freq', type=int, default=1, help='')
parser.add_argument('--load_G_name', type=str, default='')
parser.add_argument('--use_vox_dataset', type=str, default='raw')
parser.add_argument('--add_audio_in', default=False, action='store_true')
parser.add_argument('--comb_fan_awing', default=False, action='store_true')
parser.add_argument('--fan_2or3D', type=str, default='3D')
parser.add_argument('--single_test', type=str, default='')
opt_parser = parser.parse_args()
model = Image_translation_block(opt_parser)
if(opt_parser.single_test != ''):
with torch.no_grad():
model.single_test()
if(opt_parser.train):
model.train()
else:
with torch.no_grad():
model.test() | 0.2676 | 0.070592 |
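The script above is configured entirely through argparse flags. A typical invocation, using only flags defined in that parser (paths and the run name are placeholders, not real dataset locations):

    python main_train_image_translation.py --train --write --name demo_run --batch_size 8 --jpg_dir out/img --ckpt_dir out/ckpt --log_dir out/log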
import sys
import threading
import warnings
from pyaedt.generic.general_methods import is_ironpython
if not is_ironpython:
try:
import numpy as np
except ImportError:
warnings.warn(
"The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython."
)
class ThreadTrace(threading.Thread):
"""Control a thread with python"""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
self.__run_backup = self.run
self.run = self.__run
threading.Thread.start(self)
def __run(self):
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, event, arg):
if event == "call":
return self.localtrace
else:
return None
def localtrace(self, frame, event, arg):
if self.killed:
if event == "line":
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
class GeneticAlgorithm(object):
"""Genetic Algorithm for Python
Basic implementation of elitist genetic algorithm for solving problems with integers, continuous, boolean
or mixed variables.
Parameters
----------
function : callable
The Objective function to be minimized. This implementation minimizes the given objective function.
dim : int
Number of variables
reference_file : str, optional
Reference file used to create the chromosomes. If it is not specified, the function should create the chromosome.
goal : float, optional
If after 'max_iteration_no_improv' iterations the goal is not improved, the algorithm stops.
var_type: str
Type of the optimization variables. The default is 'bool'.
Other options are: 'int' if all variables are integer, and 'real' if all variables are
real value or continuous
boundaries: <numpy array/None>
By default is None. None if var_type is 'bool', otherwise provide an array of tuples
of length two as boundaries for each variable, the length of the array must be equal dimension.
For example, np.array([0,100],[0,200]) determines lower boundary 0 and upper boundary 100 for first
and upper boundary 200 for second variable where dimension is 2.
var_type_mixed: <numpy array/None> -
By default is None. None if all variables have the same type, otherwise this can be used to specify the type of
each variable separately.
For example if the first variable is integer but the second one is real the input is:
np.array(['int'],['real']). NOTE: it does not accept 'bool'. If variable type is Boolean use 'int' and provide
a boundary as [0,1] in variable_boundaries.
function_timeout: float
If the given function does not provide output before function_timeout (unit is seconds)
the algorithm raises an error.
For example, when there is an infinite loop in the given function.
algorithm_parameters: dict
Genetic algorithm parameters:
max_num_iteration : int
population_size : int
crossover_prob: float
parents_portion: float
crossover_type: string
The default is 'uniform'. Other options are 'one_point' or 'two_point'
mutation_prob : float
elite_ratio : float
max_iteration_no_improv: int
Successive iterations without improvement. If None it is ineffective
progress_bar: bool
Show progress bar. The default is True.
Examples
--------
Optimize a defined function using a genetic algorithm.
>>> import numpy as np
>>> from pyaedt.generic.python_optimizers import GeneticAlgorithm as ga
>>> def f(X):
...     return np.sum(X)
>>> varbound = np.array([[0, 10]] * 3)
>>> model = ga(function=f, dim=3, var_type='real', boundaries=varbound)
>>> model.run()
"""
def __init__(
self,
function,
dim,
reference_file=None,
population_file=None,
goal=0,
var_type="bool",
boundaries=None,
var_type_mixed=None,
function_timeout=0,
algorithm_parameters=None,
progress_bar=True,
):
self.population_file = None
self.goal = 1e10
if population_file:
self.population_file = population_file
self.function = function
self.dim = int(dim)
self.goal = float(goal)
if not var_type == "bool" and not var_type == "int" and not var_type == "real":
raise ValueError("Variable type is not correct")
if var_type_mixed is None:
if var_type == "real":
self.var_type = np.array([["real"]] * self.dim)
else:
self.var_type = np.array([["int"]] * self.dim)
else:
if type(var_type_mixed).__module__ != "numpy":
raise ValueError("var_type must be numpy array")
if len(var_type_mixed) != self.dim:
raise ValueError("var_type must have a length equal dimension")
self.var_type = var_type_mixed
if var_type != "bool" or type(var_type_mixed).__module__ == "numpy":
if len(boundaries) != self.dim:
raise ValueError("boundaries must have a length equal dimension")
if type(boundaries).__module__ != "numpy":
raise ValueError("boundaries must be numpy array")
for i in boundaries:
if len(i) != 2:
raise ValueError("boundary for each variable must be a tuple of length two")
if i[0] > i[1]:
raise ValueError("lower boundaries must be smaller than upper_boundaries")
self.var_bound = boundaries
else:
self.var_bound = np.array([[0, 1]] * self.dim)
self.timeout = float(function_timeout)
if progress_bar:
self.progress_bar = True
else:
self.progress_bar = False
# GA parameters
if not algorithm_parameters:
algorithm_parameters = {
"max_num_iteration": None,
"population_size": 50,
"crossover_prob": 0.5,
"parents_portion": 0.3,
"crossover_type": "uniform",
"mutation_prob": 0.2,
"elite_ratio": 0.05,
"max_iteration_no_improv": None,
}
self.ga_param = algorithm_parameters
if not (1 >= self.ga_param["parents_portion"] >= 0):
raise ValueError("parents_portion must be in range [0,1]")
self.population_size = int(self.ga_param["population_size"])
self.par_s = int(self.ga_param["parents_portion"] * self.population_size)
trl = self.population_size - self.par_s
if trl % 2 != 0:
self.par_s += 1
self.prob_mut = self.ga_param["mutation_prob"]
if not (1 >= self.prob_mut >= 0):
raise ValueError("mutation_prob must be in range [0,1]")
self.prob_cross = self.ga_param["crossover_prob"]
if not (1 >= self.prob_cross >= 0):
raise ValueError("prob_cross must be in range [0,1]")
if not (1 >= self.ga_param["elite_ratio"] >= 0):
raise ValueError("elite_ratio must be in range [0,1]")
trl = self.population_size * self.ga_param["elite_ratio"]
if trl < 1 and self.ga_param["elite_ratio"] > 0:
self.num_elit = 1
else:
self.num_elit = int(trl)
if self.par_s < self.num_elit:
raise ValueError("number of parents must be greater than number of elits")
if self.ga_param["max_num_iteration"] is None:
self.iterate = 0
for i in range(0, self.dim):
if self.var_type[i] == "int":
self.iterate += (
(self.var_bound[i][1] - self.var_bound[i][0]) * self.dim * (100 / self.population_size)
)
else:
self.iterate += (self.var_bound[i][1] - self.var_bound[i][0]) * 50 * (100 / self.population_size)
self.iterate = int(self.iterate)
if (self.iterate * self.population_size) > 10000000:
self.iterate = 10000000 / self.population_size
else:
self.iterate = int(self.ga_param["max_num_iteration"])
self.crossover_type = self.ga_param["crossover_type"]
if (
not self.crossover_type == "uniform"
and not self.crossover_type == "one_point"
and not self.crossover_type == "two_point"
):
raise ValueError("crossover_type must 'uniform', 'one_point', or 'two_point'")
self.stop_iterations = False
if self.ga_param["max_iteration_no_improv"] is None:
self.stop_iterations = self.iterate + 1
else:
self.stop_iterations = int(self.ga_param["max_iteration_no_improv"])
self.integers = np.where(self.var_type == "int")
self.reals = np.where(self.var_type == "real")
self.report = []
self.best_function = []
self.best_variable = []
self.output_dict = {}
self.pop = []
self.reference_file = reference_file
def run(self):
"""Implements the genetic algorithm"""
# Init Population
pop = np.array([np.zeros(self.dim + 1)] * self.population_size)
solo = np.zeros(self.dim + 1)
var = np.zeros(self.dim)
for p in range(0, self.population_size):
for i in self.integers[0]:
var[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
solo[i] = var[i].copy()
for i in self.reals[0]:
var[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
solo[i] = var[i].copy()
obj = self.sim(var)
solo[self.dim] = obj
pop[p] = solo.copy()
# Sort
pop = pop[pop[:, self.dim].argsort()]
self.best_function = pop[0, self.dim].copy()
self.best_variable = pop[0, : self.dim].copy()
t = 1
counter = 0
while t <= self.iterate:
if self.population_file:
# Save Population in CSV
np.savetxt(self.population_file, pop, delimiter=",")
if self.progress_bar:
self.progress(t, self.iterate, status="GA is running...")
# Sort
pop = pop[pop[:, self.dim].argsort()]
if pop[0, self.dim] < self.best_function:
self.best_function = pop[0, self.dim].copy()
self.best_variable = pop[0, : self.dim].copy()
if pop[0, self.dim] > self.goal:
counter = 0
else:
counter += 1
# Report
self.report.append(pop[0, self.dim])
# Normalizing objective function
# normobj = np.zeros(self.population_size)
minobj = pop[0, self.dim]
if minobj < 0:
normobj = pop[:, self.dim] + abs(minobj)
else:
normobj = pop[:, self.dim].copy()
maxnorm = np.amax(normobj)
normobj = maxnorm - normobj + 1
# Calculate probability
sum_normobj = np.sum(normobj)
# prob = np.zeros(self.population_size)
prob = normobj / sum_normobj
cumprob = np.cumsum(prob)
# Select parents
par = np.array([np.zeros(self.dim + 1)] * self.par_s)
# Elite
for k in range(0, self.num_elit):
par[k] = pop[k].copy()
# Random population. Not repeated parents
for k in range(self.num_elit, self.par_s):
repeated_parent = True
count = 0
while repeated_parent:
count += 1
index = np.searchsorted(cumprob, np.random.random())
is_in_list = np.any(np.all(pop[index] == par, axis=1))
if count >= 10 or not is_in_list:
repeated_parent = False
par[k] = pop[index].copy()
ef_par_list = np.array([False] * self.par_s)
par_count = 0
while par_count == 0:
for k in range(0, self.par_s):
if np.random.random() <= self.prob_cross:
ef_par_list[k] = True
par_count += 1
ef_par = par[ef_par_list].copy()
# New generation
pop = np.array([np.zeros(self.dim + 1)] * self.population_size)
# Parents
for k in range(0, self.par_s):
pop[k] = par[k].copy()
# Children. If children is repeated, try up to 10 times
for k in range(self.par_s, self.population_size, 2):
repeated_children = True
count = 0
while repeated_children:
r1 = np.random.randint(0, par_count)
r2 = np.random.randint(0, par_count)
pvar1 = ef_par[r1, : self.dim].copy()
pvar2 = ef_par[r2, : self.dim].copy()
ch = self.cross(pvar1, pvar2, self.crossover_type)
ch1 = ch[0].copy()
ch2 = ch[1].copy()
ch1 = self.mut(ch1)
ch2 = self.mutmiddle(ch2, pvar1, pvar2)
count += 1
for population in pop:
is_in_list_ch1 = np.all(ch1 == population[:-1])
is_in_list_ch2 = np.all(ch2 == population[:-1])
if count >= 1000 or (not is_in_list_ch1 and not is_in_list_ch2):
repeated_children = False
elif is_in_list_ch1 or is_in_list_ch2:
repeated_children = True
break
solo[: self.dim] = ch1.copy()
obj = self.sim(ch1)
solo[self.dim] = obj
pop[k] = solo.copy()
solo[: self.dim] = ch2.copy()
obj = self.sim(ch2)
solo[self.dim] = obj
pop[k + 1] = solo.copy()
t += 1
if counter > self.stop_iterations or self.best_function == 0:
pop = pop[pop[:, self.dim].argsort()]
text = str(t - 1)
print("\nInfo: GA is terminated after " + text + " iterations")
break
# Last generation Info
# Sort
if t - 1 == self.iterate:
text = str(t - 1)
print("\nInfo: GA is terminated after " + text + " iterations")
pop = pop[pop[:, self.dim].argsort()]
self.pop = pop
self.best_function = pop[0, self.dim].copy()
self.best_variable = pop[0, : self.dim].copy()
# Report
self.report.append(pop[0, self.dim])
self.output_dict = {"variable": self.best_variable, "function": self.best_function}
if self.progress_bar:
show = " " * 100
sys.stdout.write("\r%s" % (show))
sys.stdout.flush()
sys.stdout.write("\r Best solution:\n %s" % (self.best_variable))
sys.stdout.write("\n\n Objective:\n %s\n" % (self.best_function))
return True
def cross(self, x, y, c_type):
ofs1 = x.copy()
ofs2 = y.copy()
if c_type == "one_point":
ran = np.random.randint(0, self.dim)
for i in range(0, ran):
ofs1[i] = y[i].copy()
ofs2[i] = x[i].copy()
if c_type == "two_point":
ran1 = np.random.randint(0, self.dim)
ran2 = np.random.randint(ran1, self.dim)
for i in range(ran1, ran2):
ofs1[i] = y[i].copy()
ofs2[i] = x[i].copy()
if c_type == "uniform":
for i in range(0, self.dim):
ran = np.random.random()
if ran < 0.5:
ofs1[i] = y[i].copy()
ofs2[i] = x[i].copy()
return np.array([ofs1, ofs2])
def mut(self, x):
for i in self.integers[0]:
ran = np.random.random()
if ran < self.prob_mut:
x[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
for i in self.reals[0]:
ran = np.random.random()
if ran < self.prob_mut:
x[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
return x
def mutmiddle(self, x, p1, p2):
for i in self.integers[0]:
ran = np.random.random()
if ran < self.prob_mut:
if p1[i] < p2[i]:
x[i] = np.random.randint(p1[i], p2[i])
elif p1[i] > p2[i]:
x[i] = np.random.randint(p2[i], p1[i])
else:
x[i] = np.random.randint(self.var_bound[i][0], self.var_bound[i][1] + 1)
for i in self.reals[0]:
ran = np.random.random()
if ran < self.prob_mut:
if p1[i] < p2[i]:
x[i] = p1[i] + np.random.random() * (p2[i] - p1[i])
elif p1[i] > p2[i]:
x[i] = p2[i] + np.random.random() * (p1[i] - p2[i])
else:
x[i] = self.var_bound[i][0] + np.random.random() * (self.var_bound[i][1] - self.var_bound[i][0])
return x
    def evaluate(self):
        # Evaluate the objective for the chromosome stored in self.temp. The result is
        # written into self.goal (overwriting the target goal passed to __init__) and
        # read back by sim().
        self.goal = 1e10
        if not self.reference_file:
            self.goal = self.function(self.temp)
        else:
            self.goal = self.function(self.temp, self.reference_file)
        return True
def sim(self, X):
self.temp = X.copy()
if self.timeout > 0:
thread = ThreadTrace(target=self.evaluate, daemon=None)
thread.start()
thread.join(timeout=self.timeout)
if thread.is_alive():
print("After " + str(self.timeout) + " seconds delay the given function does not provide any output")
thread.kill()
# after the kill, you must call join to really kill it.
thread.join()
else:
self.evaluate()
return self.goal
def progress(self, count, total, status=""):
bar_len = 50
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = "|" * filled_len + "_" * (bar_len - filled_len)
sys.stdout.write("\r%s %s%s %s" % (bar, percents, "%", status))
        sys.stdout.flush() | pyaedt/generic/python_optimizers.py | 0.549157 | 0.368065
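# --- Usage sketch for the GeneticAlgorithm class in pyaedt/generic/python_optimizers.py
# --- above (editorial illustration, not part of that file). The objective, bounds and
# --- parameter values are invented; the argument names follow the constructor defined above.
import numpy as np

from pyaedt.generic.python_optimizers import GeneticAlgorithm


def objective(chromosome):
    # Toy objective: sphere function, minimized at the origin.
    return float(np.sum(chromosome ** 2))


params = {
    "max_num_iteration": 50,
    "population_size": 20,
    "crossover_prob": 0.5,
    "parents_portion": 0.3,
    "crossover_type": "uniform",
    "mutation_prob": 0.2,
    "elite_ratio": 0.05,
    "max_iteration_no_improv": 10,
}
ga = GeneticAlgorithm(
    function=objective,
    dim=3,
    goal=0.0,
    var_type="real",
    boundaries=np.array([[-5.0, 5.0]] * 3),
    algorithm_parameters=params,
    progress_bar=False,
)
ga.run()
print("best variable:", ga.best_variable, "best objective:", ga.best_function)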
from __future__ import annotations
from typing import Any, Callable, TYPE_CHECKING, Iterator, Tuple
from django.apps import apps
from maybe import Maybe
from subtypes import Str
from .config import SqlConfig
if TYPE_CHECKING:
from .sql import DjangoSql
class DjangoApp(SqlConfig.Sql.Constructors.Schema):
pass
class DjangoApps(SqlConfig.Sql.Constructors.Schemas):
schema_constructor = DjangoApp
def __repr__(self) -> str:
return f"""{type(self).__name__}(num_apps={len(self)}, apps=[{", ".join([f"{type(schema).__name__}(name='{schema._name}', tables={len(schema) if schema._ready else '?'})" for name, schema in self])}])"""
def __iter__(self) -> Iterator[Tuple[str, Any]]:
return super().__iter__()
class DjangoDatabase(SqlConfig.Sql.Constructors.Database):
def __init__(self, sql: DjangoSql) -> None:
self.django_mappings = {model._meta.db_table: model for models in apps.all_models.values() for model in models.values()}
self.sqlhandler_mappings = {}
super().__init__(sql=sql)
self.django = DjangoApps(database=self)
self._hierarchize()
def __repr__(self) -> str:
return f"{type(self).__name__}(name={repr(self.name)}, django={repr(self.django)})"
def _hierarchize(self) -> None:
for app, models in apps.all_models.items():
self.django[app] = schema = self.django.schema_constructor(name=app, parent=self.django)
schema._ready = True
            for name, model in models.items():
                table_name = model._meta.db_table
                # Map the Django model's table to the reflected sqlhandler model, if one exists.
                if (sqlhandler_model := self.shape[self.default_schema].registry.get(table_name)) is not None:
                    self.sqlhandler_mappings[table_name] = schema[name] = sqlhandler_model
def _scalar_name(self) -> Callable:
def scalar_name(base: Any, local_cls: Any, referred_cls: Any, constraint: Any) -> str:
return Maybe(self.django_mappings)[referred_cls.__name__]._meta.model_name.else_(referred_cls.__name__)
return scalar_name
def _collection_name(self) -> Callable:
def collection_name(base: Any, local_cls: Any, referred_cls: Any, constraint: Any) -> str:
real_name = Maybe(self.django_mappings)[referred_cls.__name__]._meta.model_name.else_(referred_cls.__name__)
return Str(real_name).case.plural()
        return collection_name | sqlhandler/django/database.py | 0.81721 | 0.10393
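# --- Editorial sketch of the relationship-naming rule used by DjangoDatabase._scalar_name
# --- and _collection_name above, rewritten as plain dict lookups. The mapping and names
# --- are invented, and the "+ s" pluralisation stands in for subtypes.Str(...).case.plural().
def resolve_scalar_name(django_model_names, referred_cls_name):
    # Prefer the Django model name when the referred class maps to a Django model,
    # otherwise keep the automapped class name unchanged.
    return django_model_names.get(referred_cls_name, referred_cls_name)


def resolve_collection_name(django_model_names, referred_cls_name):
    return resolve_scalar_name(django_model_names, referred_cls_name) + "s"  # naive plural


django_model_names = {"auth_user": "user", "auth_group": "group"}  # hypothetical mapping
print(resolve_scalar_name(django_model_names, "auth_user"))        # -> "user"
print(resolve_collection_name(django_model_names, "some_table"))   # -> "some_tables"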
import hashlib
import os
import sys
import time
import simplejson as json
import requests
from .utils import week_number
from .errors import CredentialsMissingError
API_BASE_URL = "https://searchlight.conductor.com"
class SearchlightService(object):
def __init__(self, **kwargs):
self._api_key = kwargs.get(
"api_key",
os.getenv("SEARCHLIGHT_API_KEY")
)
if not self._api_key:
raise CredentialsMissingError(token="Searchlight API Key")
self._secret = kwargs.get(
"secret",
os.getenv("SEARCHLIGHT_SHARED_SECRET")
)
if not self._secret:
raise CredentialsMissingError(token="Searchlight Shared Secret")
self._session = requests.Session()
self._base_url = API_BASE_URL
self._v3_url = "{base_url}/v3".format(
base_url=self._base_url
)
self.accounts = self.get_accounts()
assert self.accounts, "API Key or Secret is not valid"
def _generate_signature(self):
"""Generates API signature for request"""
return hashlib.md5(
"{key}{secret}{epoch}".format(
key=self._api_key,
secret=self._secret,
epoch=int(time.time())
).encode()
).hexdigest()
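    # Example (illustrative values): with api_key "abc", secret "def" and epoch 1700000000
    # the signature is hashlib.md5("abcdef1700000000".encode()).hexdigest(), and
    # _make_request appends "?apiKey=abc&sig=<that digest>" to the endpoint URL.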
def _make_request(self, url, retry=True, verify=True, redirects=True):
"""Generic function to make get requests to SL API"""
url += "?apiKey={key}&sig={sig}".format(
key=self._api_key, sig=self._generate_signature())
try:
res = self._session.get(
url,
verify=verify,
allow_redirects=redirects
)
if res.status_code >= 400:
if retry:
print("Status Code: {status_code}. Retrying".format(
status_code=res.status_code))
return self._make_request(url, retry=False)
else:
print("{url} failed to respond".format(url=url))
return
data = res.json()
except (ConnectionRefusedError,
ConnectionResetError,
ConnectionAbortedError) as e:
print("Error connecting to Searchlight: {error}".format(
error=e)
)
return
except json.JSONDecodeError:
print("Unable to decode response from server")
return
except requests.exceptions.ChunkedEncodingError:
print("Searchlight response delayed, skipping retrieval..:"
" {info}".format(info=sys.exc_info()[0]))
return
return data
# Searchlight Configuration Data
def get_locations(self):
"""All locations supported by Searchlight"""
return self._make_request(
"{v3_url}/locations".format(
v3_url=self._v3_url
)
)
def get_rank_sources(self):
"""Returns all supported rank sources"""
return self._make_request(
"{v3_url}/rank-sources".format(
v3_url=self._v3_url
)
)
def get_devices(self):
"""Returns all supported devices"""
return self._make_request(
"{v3_url}/devices".format(
v3_url=self._v3_url
)
)
# Searchlight Account Data
def get_accounts(self):
"""Returns all available Searchlight accounts"""
if hasattr(self, "accounts"):
return self.accounts
else:
return self._make_request(
"{v3_url}/accounts".format(
v3_url=self._v3_url
), retry=False
)
class AccountService(SearchlightService):
def __init__(self, account_id, **kwargs):
SearchlightService.__init__(self, **kwargs)
self.account_id = account_id
assert any([acct["accountId"] == str(self.account_id) for acct in
self.accounts]), "Invalid account ID. Confirm you have " \
"access to this account"
# Account Configuration Data
def get_web_properties(self):
"""Retrieves account web properties"""
return self._make_request(
"{v3_url}/accounts/{acct}/web-properties".format(
v3_url=self._v3_url,
acct=self.account_id
)
)
def get_domain_name(self, wpid):
"""Retrieves the domain name for a given web property"""
try:
return next(wp["name"] for wp in self.get_web_properties()
if wp["webPropertyId"] == str(wpid))
except StopIteration:
raise StopIteration(
"Unable to find web property {wpid}".format(
wpid=wpid
)
)
def get_web_properties_for_domain(self, domain):
"""Retrieves the web property IDs associated with a given domain"""
wps = [wp["webPropertyId"] for wp in self.get_web_properties()
if wp["name"] == domain]
if not wps:
raise StopIteration(
"Unable to find any web property for domain {domain}".format(
domain=domain
)
)
return wps
def get_tracked_searches(self, wpid):
"""Gets all searches for a given web property"""
return self._make_request(
"{v3_url}/accounts/{account}/web-properties/{wpid}/"
"tracked-searches".format(
v3_url=self._v3_url,
account=self.account_id,
wpid=wpid
)
)
def get_categories(self):
"""Returns categories and their tracked searches"""
return self._make_request(
"{v3_url}/accounts/{acct}/categories".format(
v3_url=self._v3_url,
acct=self.account_id
)
)
# Collection Data
def get_ranks(self, wpid, rsid, date="CURRENT"):
"""Ranks for searches in a web property and rank source for a date"""
tp = week_number(date) if date != "CURRENT" else date
return self._make_request(
"{v3_url}/{acct}/web-properties/{wpid}/rank-sources/{rsid}/"
"tp/{tp}/serp-items".format(
v3_url=self._v3_url,
acct=self.account_id,
wpid=wpid,
rsid=rsid,
tp=tp
)
)
def get_volume(self, wpid, rsid, date="CURRENT"):
"""Volume for searches in a web property and rank source for a date"""
tp = week_number(date) if date != "CURRENT" else date
return self._make_request(
"{v3_url}/{acct}/web-properties/{wpid}/rank-sources/{rsid}/"
"tp/{tp}/search-volumes".format(
v3_url=self._v3_url,
acct=self.account_id,
wpid=wpid,
rsid=rsid,
tp=tp
)
        ) | searchlight_api/client.py | 0.47658 | 0.101367
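# --- Editorial usage sketch for the searchlight_api client above. The account id, web
# --- property id, rank-source id and credentials are placeholders; the key and secret
# --- can also come from the SEARCHLIGHT_API_KEY / SEARCHLIGHT_SHARED_SECRET variables.
from searchlight_api.client import AccountService

service = AccountService(account_id=12345, api_key="my-key", secret="my-secret")
web_properties = service.get_web_properties()
searches = service.get_tracked_searches(wpid=43162)
ranks = service.get_ranks(wpid=43162, rsid=1)    # defaults to the CURRENT time period
volume = service.get_volume(wpid=43162, rsid=1)  # other dates are mapped through week_number()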
import io
import numpy
import os
import pandas
import matplotlib.pyplot as plt
class DataManager:
def __init__(self, filename, hasId=True):
print('I am going to open file: %s' % (filename,))
pd = pandas.read_table(filename, comment='#', delim_whitespace=True)
#print(pd)
fldArray = pd.keys()
mapping = {}
if 'T' in fldArray:
self._timeField = 'T'
elif 'TIME' in fldArray:
self._timeField = 'TIME'
else:
raise RuntimeError('Unknown time field in:' + str(fldArray))
self._uniqueTimes = numpy.unique(pd[self._timeField])
self.idMapData = {}
if hasId:
for uid in numpy.unique(pd['ID']):
# Use negative ID as heartbeat, so we get the complete time axis
if uid < 0:
continue
intUid = int(uid)
idx = numpy.where(pd['ID'] == uid)[0]
self.idMapData[intUid] = {}
for fld in fldArray:
#print(iFld)
self.idMapData[intUid][fld] = pd[fld][idx]
print(self.idMapData)
        else:
            # Without an ID column, treat the whole file as a single track with id 0.
            for fld in fldArray:
                mapping[fld] = pd[fld]
            self.idMapData[0] = mapping
print('Done reading file: {0:s}'.format(filename))
def getUniqueTimes(self):
return self._uniqueTimes
def interpolateToTimeAxis(self, timeAxis):
for id, thisIdData in self.idMapData.items():
for fld, fldData in thisIdData.items():
# Must interpolate time field at the end, otherwise the other interpolations won't be correct
if fld != self._timeField:
self.idMapData[id][fld] = numpy.interp(timeAxis, thisIdData[self._timeField], fldData, left=numpy.nan, right=numpy.nan)
self.idMapData[id][self._timeField] = timeAxis
def getIndividualDataAtTime(self, data, time):
returnData = None
idx = (data[self._timeField] == time)
count = numpy.count_nonzero(idx)
if count == 0:
pass
elif count == 1:
returnData = {}
for fld, fldData in data.items():
returnData[fld] = data[fld][idx]
        elif count > 1:
            track_id = numpy.asarray(data['ID'])[idx][0] if 'ID' in data else 0
            raise RuntimeError('Track {} has duplicate data at time {}'.format(track_id, time))
return returnData
if __name__ == '__main__':
dm = DataManager('test.txt', hasId=False)
print(dm.idMapData)
plt.figure()
    # plt.hold() was removed in matplotlib >= 3.0; overplotting on the same axes is now the default.
    lines = ()
for key, value in dm.idMapData.items():
lines += plt.plot(value['X'], value['Y'], 'o-', label=str(key)),
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
    plt.show() | DataManager.py | 0.252845 | 0.212099
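# --- Editorial usage sketch for DataManager above. The file name and column layout are
# --- assumptions: a whitespace-delimited table with '#' comments and columns ID, T (or
# --- TIME), X and Y, which is what the parser in __init__ expects.
import numpy

from DataManager import DataManager

dm = DataManager('tracks.txt', hasId=True)
times = dm.getUniqueTimes()
# Resample every track onto a common, evenly spaced time axis.
axis = numpy.linspace(times.min(), times.max(), 101)
dm.interpolateToTimeAxis(axis)
first_track = dm.idMapData[min(dm.idMapData)]
snapshot = dm.getIndividualDataAtTime(first_track, axis[0])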
import os
import torch
import torchvision
from time import time
try:
import wandb
except ImportError:
pass
from snf.train.statsrecorder import StatsRecorder
default_config = {
'name': None,
'notes': None,
'wandb': False,
    'wandb_project': 'YOUR_PROJECT_NAME',
'wandb_entity': 'YOUR_ENTITY_NAME',
'log_timing': False,
'eval_train': False,
'max_eval_ex': float('inf'),
'log_interval': 100,
'sample_epochs': 10_000,
'vis_epochs': 10_000,
'n_samples': 100,
'sample_dir': 'samples',
'epochs': 10_000,
'grad_clip_norm': None,
'eval_epochs': 1,
'lr': 1e-3,
'warmup_epochs': 10,
'modified_grad': True,
'add_recon_grad': True,
'sample_true_inv': False,
'plot_recon': False,
'checkpoint_path': None
}
class Experiment:
def __init__(self, model, train_loader, val_loader, test_loader,
optimizer, scheduler, **kwargs):
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.test_loader = test_loader
self.optimizer = optimizer
self.scheduler = scheduler
try:
self.data_shape = self.train_loader.dataset.dataset.data.shape[1:]
except AttributeError:
if type(train_loader.dataset.dataset) == torchvision.datasets.ImageFolder:
self.data_shape = train_loader.dataset.dataset[0][0].shape
else:
self.data_shape = self.train_loader.dataset.dataset.tensors[0].shape[2:]
self.to_bpd = lambda x: x / (torch.log(torch.tensor(2.0))
* torch.prod(torch.tensor(self.data_shape)))
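        # Example: for 28x28 inputs (784 dimensions), a log-likelihood of -1500 nats
        # corresponds to -1500 / (ln(2) * 784), about -2.76 bits per dimension.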
        # Copy the defaults so per-experiment overrides do not mutate the shared default_config.
        self.config = dict(default_config)
        self.config.update(**kwargs)
self.summary = {}
if self.config['wandb']:
wandb.init(name=self.config['name'],
notes=self.config['notes'],
project=self.config['wandb_project'],
entity=self.config['wandb_entity'],
config=self.config)
wandb.watch(self.model)
if self.config['checkpoint_path'] is None and self.config['wandb']:
self.config['checkpoint_path'] = os.path.join(wandb.run.dir,
'checkpoint.tar')
elif self.config['checkpoint_path'] is None:
checkpoint_path = f"./{str(self.config['name']).replace(' ', '_')}_checkpoint.tar"
self.log('Warning', f'No checkpoint path specified, defaulting to {checkpoint_path}')
self.config['checkpoint_path'] = checkpoint_path
self.update_summary('Epoch', 0)
self.update_summary("Best Val LogPx", float('-inf'))
self.update_summary("Test LogPx", float('-inf'))
if self.config['log_timing']:
self.batch_time = StatsRecorder()
self.sample_time = StatsRecorder()
def run(self):
for e in range(self.summary['Epoch'] + 1, self.config['epochs'] + 1):
self.update_summary('Epoch', e)
avg_loss = self.train_epoch(e)
self.log('Train Avg Loss', avg_loss)
if e % self.config['eval_epochs'] == 0:
if self.config['eval_train']:
train_logpx = self.eval_epoch(self.train_loader, e)
self.log('Train LogPx', train_logpx)
self.log('Train BPD', self.to_bpd(train_logpx))
val_logpx = self.eval_epoch(self.val_loader, e, split='Val')
self.log('Val LogPx', val_logpx)
self.log('Val BPD', self.to_bpd(val_logpx))
if val_logpx > self.summary['Best Val LogPx']:
self.update_summary('Best Val LogPx', val_logpx)
self.update_summary('Best Val BPD', self.to_bpd(val_logpx))
test_logpx = self.eval_epoch(self.test_loader, e, split='Test')
self.log('Test LogPx', test_logpx)
self.log('Test BPD', self.to_bpd(test_logpx))
self.update_summary('Test LogPx', test_logpx)
self.update_summary('Test BPD', self.to_bpd(test_logpx))
# Checkpoint model
self.save()
if e < 5 or e == 10 or e % self.config['sample_epochs'] == 0:
self.sample(e)
if e % self.config['vis_epochs'] == 0:
self.filter_vis()
self.scheduler.step()
def log(self, name, val):
print(f"{name}: {val}")
if self.config['wandb']: wandb.log({name: val})
def update_summary(self, name, val):
print(f"{name}: {val}")
self.summary[name] = val
if self.config['wandb']: wandb.run.summary[name] = val
def get_loss(self, x):
compute_expensive = not self.config['modified_grad']
lossval = -self.model.log_prob(x, compute_expensive=compute_expensive)
lossval[lossval != lossval] = 0.0 # Replace NaN's with 0
lossval = (lossval).sum() / len(x)
return lossval
def warmup_lr(self, epoch, num_batches):
if epoch <= self.config['warmup_epochs']:
for param_group in self.optimizer.param_groups:
s = (((num_batches+1) + (epoch-1) * len(self.train_loader))
/ (self.config['warmup_epochs'] * len(self.train_loader)))
param_group['lr'] = self.config['lr'] * s
def train_epoch(self, epoch):
total_loss = 0
num_batches = 0
batch_durations = []
self.model.train()
for x, _ in self.train_loader:
self.warmup_lr(epoch, num_batches)
self.optimizer.zero_grad()
x = x.float().to('cuda')
if self.config['log_timing']:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
lossval = self.get_loss(x)
lossval.backward()
if self.config['add_recon_grad']:
total_recon_loss = self.model.add_recon_grad()
if self.config['grad_clip_norm'] is not None:
torch.nn.utils.clip_grad_norm_(self.model.parameters(),
self.config['grad_clip_norm'])
self.optimizer.step()
if self.config['log_timing']:
end.record()
torch.cuda.synchronize()
batch_durations.append(start.elapsed_time(end))
total_loss += lossval.item()
num_batches += 1
if num_batches % self.config['log_interval'] == 0:
self.log('Train Batch Loss', lossval)
if self.config['add_recon_grad']:
self.log('Train Total Recon Loss', total_recon_loss)
if self.config['log_timing']:
# Take all but first 100 and last 100 batch times into account
self.batch_time.update(batch_durations[100:-100])
self.update_summary('Batch Time Mean', self.batch_time.mean)
self.update_summary('Batch Time Std', self.batch_time.std)
if self.config['plot_recon']:
self.plot_recon(x, epoch)
avg_loss = total_loss / num_batches
return avg_loss
def eval_epoch(self, dataloader, epoch, split='Val'):
total_logpx = 0.0
num_x = 0
with torch.no_grad():
self.model.eval()
for x, _ in dataloader:
x = x.float().to('cuda')
total_logpx += self.model.log_prob(x).sum()
num_x += len(x)
if num_x >= self.config['max_eval_ex']:
break
avg_logpx = total_logpx / num_x
return avg_logpx
def sample(self, e):
n = self.config['n_samples']
s_dir = self.config['sample_dir']
s_path = os.path.join(s_dir, f'{e}.png')
compute_expensive = not self.config['modified_grad']
if self.config['log_timing']:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
sample_durations = []
for idx in range(n):
start.record()
with torch.no_grad():
_, _ = self.model.sample(n_samples=1,
compute_expensive=compute_expensive,
also_true_inverse=False)
end.record()
torch.cuda.synchronize()
sample_durations.append(start.elapsed_time(end))
self.sample_time.update(sample_durations[n//5:-n//5])
self.update_summary('Sample Time Mean', self.sample_time.mean)
self.update_summary('Sample Time Std', self.sample_time.std)
with torch.no_grad():
x_sample, x_sample_trueinv = self.model.sample(n_samples=n,
compute_expensive=compute_expensive,
also_true_inverse=self.config['sample_true_inv']
)
        if len(self.data_shape) == 2:
            x_sample = x_sample.view(n, 1, *self.data_shape)
            x_sample_trueinv = x_sample_trueinv.view(n, 1, *self.data_shape)
os.makedirs(s_dir, exist_ok=True)
torchvision.utils.save_image(
x_sample / 256., s_path, nrow=10,
padding=2, normalize=False)
if self.config['wandb']:
wandb.log({'Samples_Approx_Inv': wandb.Image(s_path)})
if self.config['sample_true_inv']:
s_true_inv_path = os.path.join(s_dir, f'{e}_trueinv.png')
torchvision.utils.save_image(
x_sample_trueinv / 256., s_true_inv_path, nrow=10,
padding=2, normalize=False)
if self.config['wandb']:
wandb.log({'Samples_True_Inv': wandb.Image(s_true_inv_path)})
def filter_vis(self):
self.model.plot_filters()
def plot_recon(self, x, e, context=None):
n = self.config['n_samples']
s_dir = self.config['sample_dir']
x_path = os.path.join(s_dir, f'{e}_x.png')
xhat_path = os.path.join(s_dir, f'{e}_xrecon.png')
diff_path = os.path.join(s_dir, f'{e}_recon_diff.png')
compute_expensive = not self.config['modified_grad']
with torch.no_grad():
xhat = self.model.reconstruct(x, context, compute_expensive).view(x.shape)
os.makedirs(s_dir, exist_ok=True)
torchvision.utils.save_image(
xhat / 256., xhat_path, nrow=10,
padding=2, normalize=False)
torchvision.utils.save_image(
x / 256., x_path, nrow=10,
padding=2, normalize=False)
xdiff = torch.abs(x - xhat)
torchvision.utils.save_image(
xdiff / 256., diff_path, nrow=10,
padding=2, normalize=False)
if self.config['wandb']:
wandb.log({'X Original': wandb.Image(x_path)})
wandb.log({'X Recon': wandb.Image(xhat_path)})
wandb.log({'Recon diff': wandb.Image(diff_path)})
def save(self):
self.log('Note', f'Saving checkpoint to: {self.config["checkpoint_path"]}')
checkpoint = {
'summary': self.summary,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'scheduler_state_dict': self.scheduler.state_dict(),
'config': self.config
}
torch.save(checkpoint, self.config['checkpoint_path'])
if self.config['wandb']:
wandb.save(self.config['checkpoint_path'])
def load(self, path):
self.log('Note', f'Loading checkpoint from: {path}')
checkpoint = torch.load(path)
# Warning, config params overwritten
self.summary = checkpoint['summary']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
config_diff = set(self.config.items()) ^ set(checkpoint['config'].items())
if config_diff != set():
            self.log('Warning', f'Differences in loaded config: {config_diff}') | snf/train/experiment.py | 0.589007 | 0.172015
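# --- Editorial sketch of the linear learning-rate warm-up used by Experiment.warmup_lr
# --- above. The batch counts and epochs below are invented; the scale grows linearly
# --- from near zero to exactly 1.0 over the first `warmup_epochs` epochs.
def warmup_scale(epoch, batch_index, batches_per_epoch, warmup_epochs):
    # Mirrors: s = ((num_batches + 1) + (epoch - 1) * len(train_loader))
    #              / (warmup_epochs * len(train_loader))
    return ((batch_index + 1) + (epoch - 1) * batches_per_epoch) / (warmup_epochs * batches_per_epoch)


base_lr = 1e-3
for epoch, batch_index in [(1, 0), (5, 250), (10, 499)]:
    s = warmup_scale(epoch, batch_index, batches_per_epoch=500, warmup_epochs=10)
    print(epoch, batch_index, base_lr * s)
# With 500 batches per epoch and 10 warm-up epochs the scale is 1/5000 at the very first
# batch, about 0.45 midway through epoch 5, and exactly 1.0 at the last batch of epoch 10.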
# added warning
import pickle # nosec
import time
from argparse import ArgumentParser
import os
from itertools import repeat
from typing import Tuple, List
from glob import glob
from multiprocessing import Pool, cpu_count
import pathlib
from giant._typing import PATH
from giant.ray_tracer.kdtree import KDTree
from giant.relative_opnav.estimators.sfn import SurfaceFeature, FeatureCatalogue
from giant.utilities.stereophotoclinometry import Maplet
def _get_parser():
warning = "WARNING: This script saves some results to python pickle files. " \
"Pickle files can be used to execute arbitrary code, " \
"so you should never open one from an untrusted source."
parser = ArgumentParser(
description='Generate a feature catalog for Surface Feature Navigation (SFN) '
'containing locations of maplet topography files.', epilog=warning)
parser.add_argument('shape', help='path to the shape file directory')
parser.add_argument('-f', '--filter', help='a list of the landmark subset to be used', default=None)
parser.add_argument('-o', '--output', help='The file to save the results to', default='./spc_maps.pickle')
parser.add_argument('-d', '--output_dir', help='The directory to save the feature files to',
default=None)
parser.add_argument('-m', '--memory_efficient',
help='Use the memory efficient triangles instead of the regular ones', action='store_true')
parser.add_argument('-u', '--update',
help='use existing kdtree if available', action='store_true')
return parser
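# Example invocation (illustrative only; the shape directory, landmark list and output
# paths below are placeholders for whatever SPC products are on disk):
#   python spc_to_feature_catalogue.py /data/shape_model -f LMRKLIST.TXT \
#       -o ./spc_maps.pickle -d ./pickle_files -m -u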
def build_feature(inp: Tuple[int, Tuple[PATH, int, bool, PATH, bool]]) -> Tuple[SurfaceFeature, dict]:
"""
Load a maplet and convert it into a GIANT SurfaceFeature, returning the created feature.
    :param inp: the inputs that are needed as a tuple (current_index, (maplet_file, number_of_maplets,
                memory_efficient_flag, output_directory, update_flag))
:return: The surface feature and a dictionary containing the keys order and bounds about the feature
"""
ind, (file, n_maps, me, output, update) = inp
start = time.time()
maplet = Maplet(file_name=file)
print(file + ' -- loaded', flush=True)
# make the output path
shape_path = output / (maplet.name + '.pickle')
if update:
if os.path.exists(shape_path):
try:
with open(shape_path, 'rb') as ifile:
kd = pickle.load(ifile)
map_info = {'order': kd.order,
'bounds': kd.bounding_box.vertices}
# Store path to pickle into SurfaceFeature:
feat = SurfaceFeature(shape_path.resolve(),
maplet.rotation_maplet2body[:, 2],
maplet.position_objmap,
maplet.name,
maplet.scale)
print('map {} of {} finished in {:.3f} seconds'.format(ind, n_maps, time.time() - start), flush=True)
return feat, map_info
            except Exception:
                # The existing pickle could not be loaded; fall through and rebuild the KD tree.
                pass
tris = maplet.get_triangles(me=me)
print(file + ' -- tessellated', flush=True)
kd = KDTree(tris, max_depth=11)
kd.build(print_progress=False, force=False)
print(file + ' -- built', flush=True)
# Write KD tree as .pickle:
map_info = {'order': kd.order,
'bounds': kd.bounding_box.vertices}
with open(shape_path, 'wb') as f:
pickle.dump(kd, f)
# Store path to pickle into SurfaceFeature:
feat = SurfaceFeature(shape_path.resolve(),
maplet.rotation_maplet2body[:, 2],
maplet.position_objmap,
maplet.name,
maplet.scale)
print('map {} of {} finished in {:.3f} seconds'.format(ind, n_maps, time.time() - start), flush=True)
return feat, map_info
def main():
"""
The main code that is run
"""
parser = _get_parser()
args = parser.parse_args()
shape_path = args.shape
if args.filter is not None:
map_files: List[str] = []
filter_file = args.filter
try:
with open(filter_file, mode='r') as infile:
for line in infile:
if 'END' not in line:
# noinspection SpellCheckingInspection
temp = shape_path + '/MAPFILES/' + line.strip() + '.MAP'
map_files.append(temp)
except FileNotFoundError:
# noinspection SpellCheckingInspection
map_files = sorted(glob(shape_path + '/MAPFILES/*.MAP'))[::int(args.filter)]
else:
# noinspection SpellCheckingInspection
map_files = glob(shape_path + '/MAPFILES/*.MAP')
if args.output_dir is None:
output_dir = (pathlib.Path(shape_path) / 'pickle_files')
else:
output_dir = pathlib.Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
n_maps = len(map_files)
me: bool = args.memory_efficient
    with Pool(max(cpu_count() // 2, 1)) as pool:
res = pool.map(build_feature, enumerate(zip(map_files,
repeat(n_maps), repeat(me), repeat(output_dir),
repeat(args.update))))
sfs = [r[0] for r in res]
map_info = [r[1] for r in res]
fc = FeatureCatalogue(sfs, map_info=map_info)
start = time.time()
out_bytes = pickle.dumps(fc, protocol=pickle.HIGHEST_PROTOCOL)
print('serialized in {:.3f} seconds'.format(time.time() - start))
start = time.time()
chunk_size = 2 ** 30
n_chunks = (len(out_bytes) + chunk_size - 1) // chunk_size
with open(args.output, 'wb') as feature_catalogue_file:
for n, idx in enumerate(range(0, len(out_bytes), chunk_size)):
local_start = time.time()
feature_catalogue_file.write(out_bytes[idx:idx + chunk_size])
print('chunk {} of {} written in {} seconds'.format(n + 1, n_chunks, time.time() - local_start))
print('written in {} seconds'.format(time.time() - start))
if __name__ == "__main__":
main() | giant/scripts/spc_to_feature_catalogue.py | # added warning
import pickle # nosec
import time
from argparse import ArgumentParser
import os
from itertools import repeat
from typing import Tuple, List
from glob import glob
from multiprocessing import Pool, cpu_count
import pathlib
from giant._typing import PATH
from giant.ray_tracer.kdtree import KDTree
from giant.relative_opnav.estimators.sfn import SurfaceFeature, FeatureCatalogue
from giant.utilities.stereophotoclinometry import Maplet
def _get_parser():
warning = "WARNING: This script saves some results to python pickle files. " \
"Pickle files can be used to execute arbitrary code, " \
"so you should never open one from an untrusted source."
parser = ArgumentParser(
description='Generate a feature catalog for Surface Feature Navigation (SFN) '
'containing locations of maplet topography files.', epilog=warning)
parser.add_argument('shape', help='path to the shape file directory')
parser.add_argument('-f', '--filter', help='path to a file listing the landmark subset to use, or an integer stride for subsampling all maplets', default=None)
parser.add_argument('-o', '--output', help='The file to save the results to', default='./spc_maps.pickle')
parser.add_argument('-d', '--output_dir', help='The directory to save the feature files to',
default=None)
parser.add_argument('-m', '--memory_efficient',
help='Use the memory efficient triangles instead of the regular ones', action='store_true')
parser.add_argument('-u', '--update',
help='use existing kdtree if available', action='store_true')
return parser
def build_feature(inp: Tuple[int, Tuple[PATH, int, bool, PATH, bool]]) -> Tuple[SurfaceFeature, dict]:
"""
Load a maplet and convert it into a GIANT SurfaceFeature, returning the created feature.
:param inp: the inputs that are needed as a tuple (current_index, (maplet_file, number_of_maplets,
memory_efficient_flag, output_directory, update_flag))
:return: The surface feature and a dictionary containing the keys order and bounds about the feature
"""
ind, (file, n_maps, me, output, update) = inp
start = time.time()
maplet = Maplet(file_name=file)
print(file + ' -- loaded', flush=True)
# make the output path
shape_path = output / (maplet.name + '.pickle')
if update:
if os.path.exists(shape_path):
try:
with open(shape_path, 'rb') as ifile:
kd = pickle.load(ifile)
map_info = {'order': kd.order,
'bounds': kd.bounding_box.vertices}
# Store path to pickle into SurfaceFeature:
feat = SurfaceFeature(shape_path.resolve(),
maplet.rotation_maplet2body[:, 2],
maplet.position_objmap,
maplet.name,
maplet.scale)
print('map {} of {} finished in {:.3f} seconds'.format(ind, n_maps, time.time() - start), flush=True)
return feat, map_info
except Exception:
# fall back to rebuilding the KD tree if the cached pickle cannot be loaded
pass
tris = maplet.get_triangles(me=me)
print(file + ' -- tessellated', flush=True)
kd = KDTree(tris, max_depth=11)
kd.build(print_progress=False, force=False)
print(file + ' -- built', flush=True)
# Write KD tree as .pickle:
map_info = {'order': kd.order,
'bounds': kd.bounding_box.vertices}
with open(shape_path, 'wb') as f:
pickle.dump(kd, f)
# Store path to pickle into SurfaceFeature:
feat = SurfaceFeature(shape_path.resolve(),
maplet.rotation_maplet2body[:, 2],
maplet.position_objmap,
maplet.name,
maplet.scale)
print('map {} of {} finished in {:.3f} seconds'.format(ind, n_maps, time.time() - start), flush=True)
return feat, map_info
def main():
"""
The main code that is run
"""
parser = _get_parser()
args = parser.parse_args()
shape_path = args.shape
if args.filter is not None:
map_files: List[str] = []
filter_file = args.filter
try:
with open(filter_file, mode='r') as infile:
for line in infile:
if 'END' not in line:
# noinspection SpellCheckingInspection
temp = shape_path + '/MAPFILES/' + line.strip() + '.MAP'
map_files.append(temp)
except FileNotFoundError:
# noinspection SpellCheckingInspection
map_files = sorted(glob(shape_path + '/MAPFILES/*.MAP'))[::int(args.filter)]
else:
# noinspection SpellCheckingInspection
map_files = glob(shape_path + '/MAPFILES/*.MAP')
if args.output_dir is None:
output_dir = (pathlib.Path(shape_path) / 'pickle_files')
else:
output_dir = pathlib.Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
n_maps = len(map_files)
me: bool = args.memory_efficient
with Pool(cpu_count()//2) as pool:
res = pool.map(build_feature, enumerate(zip(map_files,
repeat(n_maps), repeat(me), repeat(output_dir),
repeat(args.update))))
sfs = [r[0] for r in res]
map_info = [r[1] for r in res]
fc = FeatureCatalogue(sfs, map_info=map_info)
start = time.time()
out_bytes = pickle.dumps(fc, protocol=pickle.HIGHEST_PROTOCOL)
print('serialized in {:.3f} seconds'.format(time.time() - start))
start = time.time()
chunk_size = 2 ** 30
n_chunks = (len(out_bytes) + chunk_size - 1) // chunk_size
with open(args.output, 'wb') as feature_catalogue_file:
for n, idx in enumerate(range(0, len(out_bytes), chunk_size)):
local_start = time.time()
feature_catalogue_file.write(out_bytes[idx:idx + chunk_size])
print('chunk {} of {} written in {} seconds'.format(n + 1, n_chunks, time.time() - local_start))
print('written in {} seconds'.format(time.time() - start))
if __name__ == "__main__":
main() | 0.489015 | 0.266451 |
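A quick way to see how the script above is meant to be driven is to exercise its argument parser directly. The snippet below is only an illustration: the shape directory, landmark file, and output paths are hypothetical placeholders, and it assumes it runs in the same module so that _get_parser is in scope.
# Hypothetical smoke test of the CLI options defined in _get_parser above;
# all paths are placeholders, not files from the original project.
parser = _get_parser()
args = parser.parse_args([
    '/data/shape_model',           # positional: shape file directory
    '-f', 'landmarks.txt',         # landmark subset file (or an integer stride)
    '-o', './spc_maps.pickle',     # feature catalogue output
    '-m',                          # use memory-efficient triangles
    '-u',                          # reuse existing KD-tree pickles when present
])
print(args.shape, args.filter, args.output, args.memory_efficient, args.update)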
from django.db.models import Q
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERROR
from spylunking.log.setup_logging import build_colorized_logger
from drf_network_pipeline.pipeline.build_worker_result_node import \
build_worker_result_node
from drf_network_pipeline.pipeline.models import MLJob
from drf_network_pipeline.pipeline.models import MLJobResult
name = 'ml_prc_results'
log = build_colorized_logger(
name=name)
def handle_worker_results_message(
body=None):
"""handle_worker_results_message
:param body: contents from the results
"""
label = "APIRES"
last_step = ""
try:
last_step = ("{} received worker results body={}").format(
label,
str(body)[0:32])
log.info(last_step)
manifest = body.get(
"manifest",
None)
parent_result_node = body.get(
"results",
None)
result = parent_result_node.get(
"data",
None)
job_id = int(manifest["job_id"])
result_id = int(manifest["result_id"])
job_query = (Q(id=job_id))
result_query = (Q(id=result_id))
db_job = MLJob.objects.select_related() \
.filter(job_query).first()
db_result = MLJobResult.objects.select_related() \
.filter(result_query).first()
log.info(("{} updating job_id={} result_id={}")
.format(
label,
job_id,
result_id))
model_json = result["model_json"]
model_weights = result["weights"]
scores = result["scores"]
acc_data = result["acc"]
error_data = result["err"]
predictions_json = {
"predictions": result["sample_predictions"]
}
acc_data = {
"accuracy": scores[1] * 100
}
db_result.acc_data = acc_data
db_result.error_data = error_data
db_result.model_json = model_json
db_result.model_weights = model_weights
db_result.predictions_json = predictions_json
db_job.status = "finished"
db_job.control_state = "finished"
db_result.status = "finished"
db_result.control_status = "finished"
log.info(("saving job_id={}")
.format(
job_id))
db_job.save()
log.info(("saving result_id={}")
.format(
result_id))
db_result.save()
except Exception as e:
log.error(("{} failed handling worker results for body={} "
"last_step='{}' ex={}").format(
label,
body,
last_step,
e))
# try/ex handling for updating the db
# end of handle_worker_results_message
def process_worker_results(
res_node=None):
"""process_worker_results
:param res_node: incoming request dictionary - not used right now
"""
status = SUCCESS
api_node = build_worker_result_node()
# the worker is disabled - nothing to process
if not api_node:
return status
label = "APIRES"
last_step = "not-started"
try:
last_step = ("{} - start").format(
label)
log.info(last_step)
handle_worker_results_message(
body=res_node)
log.info(("{} done")
.format(
label))
except Exception as e:
log.error(("{} failed processing core results last_step='{}' ex={}")
.format(
label,
last_step,
e))
status = ERROR
# end of try/ex
return status
# end of process_worker_results | webapp/drf_network_pipeline/pipeline/process_worker_results.py | from django.db.models import Q
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERROR
from spylunking.log.setup_logging import build_colorized_logger
from drf_network_pipeline.pipeline.build_worker_result_node import \
build_worker_result_node
from drf_network_pipeline.pipeline.models import MLJob
from drf_network_pipeline.pipeline.models import MLJobResult
name = 'ml_prc_results'
log = build_colorized_logger(
name=name)
def handle_worker_results_message(
body=None):
"""handle_worker_results_message
:param body: contents from the results
"""
label = "APIRES"
last_step = ""
try:
last_step = ("{} received worker results body={}").format(
label,
str(body)[0:32])
log.info(last_step)
manifest = body.get(
"manifest",
None)
parent_result_node = body.get(
"results",
None)
result = parent_result_node.get(
"data",
None)
job_id = int(manifest["job_id"])
result_id = int(manifest["result_id"])
job_query = (Q(id=job_id))
result_query = (Q(id=result_id))
db_job = MLJob.objects.select_related() \
.filter(job_query).first()
db_result = MLJobResult.objects.select_related() \
.filter(result_query).first()
log.info(("{} updating job_id={} result_id={}")
.format(
label,
job_id,
result_id))
model_json = result["model_json"]
model_weights = result["weights"]
scores = result["scores"]
acc_data = result["acc"]
error_data = result["err"]
predictions_json = {
"predictions": result["sample_predictions"]
}
acc_data = {
"accuracy": scores[1] * 100
}
db_result.acc_data = acc_data
db_result.error_data = error_data
db_result.model_json = model_json
db_result.model_weights = model_weights
db_result.predictions_json = predictions_json
db_job.status = "finished"
db_job.control_state = "finished"
db_result.status = "finished"
db_result.control_status = "finished"
log.info(("saving job_id={}")
.format(
job_id))
db_job.save()
log.info(("saving result_id={}")
.format(
result_id))
db_result.save()
except Exception as e:
log.error(("{} failed handling worker results for body={} "
"last_step='{}' ex={}").format(
label,
body,
last_step,
e))
# try/ex handling for updating the db
# end of handle_worker_results_message
def process_worker_results(
res_node=None):
"""process_worker_results
:param res_node: incoming request dictionary - not used right now
"""
status = SUCCESS
api_node = build_worker_result_node()
# the worker is disabled - nothing to process
if not api_node:
return status
label = "APIRES"
last_step = "not-started"
try:
last_step = ("{} - start").format(
label)
log.info(last_step)
handle_worker_results_message(
body=res_node)
log.info(("{} done")
.format(
label))
except Exception as e:
log.error(("{} failed processing core results last_step='{}' ex={}")
.format(
label,
last_step,
e))
status = ERROR
# end of try/ex
return status
# end of process_worker_results | 0.496826 | 0.082107 |
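The handler above never documents the message it consumes, but the keys it reads pin the shape down. The dictionary below is a reconstruction for illustration only; every value is a placeholder, not data from a real training run.
# Illustrative payload for handle_worker_results_message, reconstructed from
# the keys the handler reads; all values here are placeholders.
example_body = {
    "manifest": {
        "job_id": 1,
        "result_id": 1,
    },
    "results": {
        "data": {
            "model_json": "<serialized model architecture>",
            "weights": "<model weights>",
            "scores": [0.35, 0.92],        # scores[1] * 100 becomes the stored accuracy
            "acc": {},                     # raw accuracy data (replaced by the derived dict)
            "err": {},                     # stored as error_data on the MLJobResult
            "sample_predictions": [],      # wrapped into predictions_json
        },
    },
}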
# TODO: use ndarray of numpy replace original python list implementation(next PR)
from copy import deepcopy
def min_edit_distance(
source: str,
target: str,
del_cost=1,
ins_cost=1,
sub_cost=2,
):
"""Minimum-Edit-Distance(DP)
Args:
`source`: source chars.
`target`: target chars.
`del_cost`: delete cost.
`ins_cost`: insert cost.
`sub_cost`: substitute cost.
Returns:
minimum edit distance between `source` and `target`
Algorithm:
D[i, j] = min(
D[i-1, j] + Del-cost(source[i]),
D[i, j-1] + Insert-cost(target[j]),
D[i-1, j-1] + Sub-cost(source[i], target[j]),
)
D[i, j] is the cost of transforming the first i chars of source
into the first j chars of target; sub-cost is 0 when source[i]
and target[j] are the same char, otherwise 2.
"""
source_len = len(source)
target_len = len(target)
# Init matrix
matrix = [[0 for _ in range(target_len + 1)]
for _ in range(source_len + 1)]
for idx in range(source_len + 1):
matrix[idx][0] = idx
for idx in range(target_len + 1):
matrix[0][idx] = idx
for i in range(1, source_len + 1):
for j in range(1, target_len + 1):
up = matrix[i - 1][j] + del_cost # pylint: disable=invalid-name
left = matrix[i][j - 1] + ins_cost
northwest = matrix[i - 1][j - 1] \
+ (sub_cost if source[i - 1] != target[j - 1] else 0)
min_cost = min(up, left, northwest)
matrix[i][j] = min_cost
return matrix[source_len][target_len]
def min_edit_distance_pro(
source: str,
target: str,
del_cost=1,
ins_cost=1,
sub_cost=2,
):
"""Augmented minimum-edit-distance(DP) with alignment.
Args:
`source`: source chars.
`target`: target chars.
`del_cost`: delete cost.
`ins_cost`: insert cost.
`sub_cost`: substitute cost.
Returns:
med: minimum edit distance between `source` and `target`
alignment: list of (source_char, target_char, operation) triples
Algorithm:
D[i, j] = min(
D[i-1, j] + Del-cost(source[i]),
D[i, j-1] + Insert-cost(target[j]),
D[i-1, j-1] + Sub-cost(source[i], target[j]),
)
D[i, j] is the cost of transforming the first i chars of source
into the first j chars of target; sub-cost is 0 when source[i]
and target[j] are the same char, otherwise 2.
"""
# Instead of keeping three separate backtrace tables for up, left
# and northwest, use one table and encode up, left and northwest
# as 1, 3 and 5 respectively (sums encode ties):
# 1: up, 3: left, 5: northwest, 4: up + left
# 6: up + northwest, 8: left + northwest,
# 9: up + left + northwest
source_len = len(source)
target_len = len(target)
# Init matrix
matrix = [[0 for _ in range(target_len + 1)]
for _ in range(source_len + 1)]
backtrace_table = deepcopy(matrix)
backtrace_table[0][0] = -1
for idx in range(1, source_len + 1):
matrix[idx][0] = idx
backtrace_table[idx][0] = 1
for idx in range(1, target_len + 1):
matrix[0][idx] = idx
backtrace_table[0][idx] = 3
traces_map = [1, 3, 5]
min_cost = 0
for i in range(1, source_len + 1):
for j in range(1, target_len + 1):
up = matrix[i - 1][j] + del_cost # pylint: disable=invalid-name
left = matrix[i][j - 1] + ins_cost
northwest = matrix[i - 1][j - 1] \
+ (sub_cost if source[i - 1] != target[j - 1] else 0)
traces = [up, left, northwest]
min_cost = min(traces)
backtrace_table[i][j] = sum(traces_map[idx] for idx in range(3) \
if traces[idx] == min_cost)
matrix[i][j] = min_cost
alignment = trace_back(backtrace_table, source, target)
print('\n'.join(' '.join(triple[idx] for triple in alignment)
for idx in range(3)))
return min_cost, alignment
def trace_back(backtrace_table, source, target):
"""Return an alignment from source to target.
"""
# 1 for del, 3 for insert, 5 for nothing happened,
# others for substitute.
source_len = len(source)
target_len = len(target)
i, j = source_len, target_len
# triple: operation, source[i], target[j]
alignment = []
while backtrace_table[i][j] != -1: # i, j = 0, 0
operation = backtrace_table[i][j]
i -= 1
j -= 1
if operation == 1:
alignment.insert(0, (source[i], '*', 'd'))
j += 1
elif operation in (3, 4):
alignment.insert(0, ('*', target[j], 'i'))
i += 1
elif operation == 5:
alignment.insert(0, (source[i], target[j], ' '))
# Prefer substitution
else:
alignment.insert(0, (source[i], target[j], 's'))
return alignment | lna/algorithms/min_edit_distance.py | # TODO: use ndarray of numpy replace original python list implementation(next PR)
from copy import deepcopy
def min_edit_distance(
source: str,
target: str,
del_cost=1,
ins_cost=1,
sub_cost=2,
):
"""Minimum-Edit-Distance(DP)
Args:
`source`: source chars.
`target`: target chars.
`del_cost`: delete cost.
`ins_cost`: insert cost.
`sub_cost`: substitute cost.
Returns:
minimum edit distance between `source` and `target`
Algorithm:
D[i, j] = min(
D[i-1, j] + Del-cost(source[i]),
D[i, j-1] + Insert-cost(target[j]),
D[i-1, j-1] + Sub-cost(source[i], target[j]),
)
D[i, j] is the cost of transforming the first i chars of source
into the first j chars of target; sub-cost is 0 when source[i]
and target[j] are the same char, otherwise 2.
"""
source_len = len(source)
target_len = len(target)
# Init matrix
matrix = [[0 for _ in range(target_len + 1)]
for _ in range(source_len + 1)]
for idx in range(source_len + 1):
matrix[idx][0] = idx
for idx in range(target_len + 1):
matrix[0][idx] = idx
for i in range(1, source_len + 1):
for j in range(1, target_len + 1):
up = matrix[i - 1][j] + del_cost # pylint: disable=invalid-name
left = matrix[i][j - 1] + ins_cost
northwest = matrix[i - 1][j - 1] \
+ (sub_cost if source[i - 1] != target[j - 1] else 0)
min_cost = min(up, left, northwest)
matrix[i][j] = min_cost
return matrix[source_len][target_len]
def min_edit_distance_pro(
source: str,
target: str,
del_cost=1,
ins_cost=1,
sub_cost=2,
):
"""Augmented minimum-edit-distance(DP) with alignment.
Args:
`source`: source chars.
`target`: target chars.
`del_cost`: delete cost.
`ins_cost`: insert cost.
`sub_cost`: substitute cost.
Returns:
med: minimum edit distance between `source` and `target`
alignment: list of (source_char, target_char, operation) triples
Algorithm:
D[i, j] = min(
D[i-1, j] + Del-cost(source[i]),
D[i, j-1] + Insert-cost(target[j]),
D[i-1, j-1] + Sub-cost(source[i], target[j]),
)
D[i, j] is the cost of transforming the first i chars of source
into the first j chars of target; sub-cost is 0 when source[i]
and target[j] are the same char, otherwise 2.
"""
# Instead of keeping three separate backtrace tables for up, left
# and northwest, use one table and encode up, left and northwest
# as 1, 3 and 5 respectively (sums encode ties):
# 1: up, 3: left, 5: northwest, 4: up + left
# 6: up + northwest, 8: left + northwest,
# 9: up + left + northwest
source_len = len(source)
target_len = len(target)
# Init matrix
matrix = [[0 for _ in range(target_len + 1)]
for _ in range(source_len + 1)]
backtrace_table = deepcopy(matrix)
backtrace_table[0][0] = -1
for idx in range(1, source_len + 1):
matrix[idx][0] = idx
backtrace_table[idx][0] = 1
for idx in range(1, target_len + 1):
matrix[0][idx] = idx
backtrace_table[0][idx] = 3
traces_map = [1, 3, 5]
min_cost = 0
for i in range(1, source_len + 1):
for j in range(1, target_len + 1):
up = matrix[i - 1][j] + del_cost # pylint: disable=invalid-name
left = matrix[i][j - 1] + ins_cost
northwest = matrix[i - 1][j - 1] \
+ (sub_cost if source[i - 1] != target[j - 1] else 0)
traces = [up, left, northwest]
min_cost = min(traces)
backtrace_table[i][j] = sum(traces_map[idx] for idx in range(3) \
if traces[idx] == min_cost)
matrix[i][j] = min_cost
alignment = trace_back(backtrace_table, source, target)
print('\n'.join(' '.join(triple[idx] for triple in alignment)
for idx in range(3)))
return min_cost, alignment
def trace_back(backtrace_table, source, target):
"""Return an alignment from source to target.
"""
# 1 for del, 3 for insert, 5 for nothing happened,
# others for substitute.
source_len = len(source)
target_len = len(target)
i, j = source_len, target_len
# triple: operation, source[i], target[j]
alignment = []
while backtrace_table[i][j] != -1: # i, j = 0, 0
operation = backtrace_table[i][j]
i -= 1
j -= 1
if operation == 1:
alignment.insert(0, (source[i], '*', 'd'))
j += 1
elif operation in (3, 4):
alignment.insert(0, ('*', target[j], 'i'))
i += 1
elif operation == 5:
alignment.insert(0, (source[i], target[j], ' '))
# Prefer substitution
else:
alignment.insert(0, (source[i], target[j], 's'))
return alignment | 0.581541 | 0.670959 |
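As a quick sanity check of the recurrence documented above, the classic textbook pair below should give a distance of 8 with delete/insert cost 1 and substitute cost 2. The snippet assumes it is run in the same module so both functions are in scope.
# Worked example: with del/ins cost 1 and sub cost 2, the minimum edit
# distance from "intention" to "execution" is 8.
print(min_edit_distance("intention", "execution"))        # -> 8
med, alignment = min_edit_distance_pro("intention", "execution")
print(med)                                                 # -> 8
# alignment is a list of (source_char, target_char, op) triples where op is
# 'd' (delete), 'i' (insert), 's' (substitute) or ' ' (match).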
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from matplotlib.animation import FuncAnimation
from legendre_series import legendre_polynomials, legendre_series, \
step_function, v_function, convergence_rate, convergence_line_log
DEFAULT_DIR = os.path.join(os.path.dirname(__file__), "figures")
# Improved plot styles.
seaborn.set()
def plot_legendre_polynomials(x, n=5, name="legendre_polynomials", save=False,
dirname=DEFAULT_DIR):
"""Plot Legendre polynomials."""
plt.figure()
plt.xlabel("$x$")
plt.ylabel("$P_n(x)$")
p = legendre_polynomials(x)
for _ in range(n):
plt.plot(x, next(p))
if save:
os.makedirs(dirname, exist_ok=True)
filepath = os.path.join(dirname, f"{name}.png")
plt.savefig(filepath, dpi=300)
else:
plt.show()
def plot_piecewise_functions(x, a, name="piecewise_functions", save=False,
dirname=DEFAULT_DIR):
"""Plot Step and V-function."""
plt.figure()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.plot(x, v_function(x, a), label="$u(x)$")
plt.plot(x, step_function(x, a), label="$u'(x)$")
plt.legend()
if save:
os.makedirs(dirname, exist_ok=True)
plt.savefig(os.path.join(dirname, f"{name}.png"), dpi=300)
else:
plt.show()
def plot_legendre_series(x, a, n, coeff_func, name, f, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Create animation of the Legendre series."""
series = legendre_series(x, coeff_func(a))
# Legendre Series
start = np.min(x)
stop = np.max(x)
ymin = np.min(f(x, a)) - 0.3
ymax = np.max(f(x, a)) + 0.3
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
axes[0].set(
xlim=(start, stop),
ylim=(ymin, ymax),
xlabel="$x$",
ylabel="$f_k(x)$",
)
axes[1].set(
xlim=(start, stop),
ylim=(ylim_min, 1.1),
xlabel="$x$",
ylabel=r"$|\varepsilon_k(x)|$",
)
axes[0].set_title(f"k={n}")
axes[1].set_title(f"k={n}")
axes[0].plot(x, f(x, a))
fig.set_tight_layout(True)
for _ in range(n):
next(series)
y = next(series)
plot_series, = axes[0].plot(x, y)
error = np.abs(f(x, a) - y)
plot_error, = axes[1].semilogy(x, error)
if save:
os.makedirs(dirname, exist_ok=True)
plt.savefig(os.path.join(dirname, f"legendre_series.png"), dpi=300)
else:
plt.show()
plt.close(fig)
def animate_legendre_series(x, a, n, coeff_func, name, f, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Create animation of the Legendre series."""
series = legendre_series(x, coeff_func(a))
# Legendre Series
start = np.min(x)
stop = np.max(x)
ymin = np.min(f(x, a)) - 0.3
ymax = np.max(f(x, a)) + 0.3
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
axes[0].set(
xlim=(start, stop),
ylim=(ymin, ymax),
xlabel="$x$",
ylabel="$f_k(x)$",
)
axes[1].set(
xlim=(start, stop),
ylim=(ylim_min, 1.1),
xlabel="$x$",
ylabel=r"$|\varepsilon_k(x)|$",
)
axes[0].set_title(f"k={0}")
axes[1].set_title(f"k={0}")
axes[0].plot(x, f(x, a))
fig.set_tight_layout(True)
y = next(series)
plot_series, = axes[0].plot(x, y)
error = np.abs(f(x, a) - y)
plot_error, = axes[1].semilogy(x, error)
def update(i):
print(i)
y = next(series)
axes[0].set_title(f"k={i}")
axes[1].set_title(f"k={i}")
plot_series.set_data(x, y)
error = np.abs(f(x, a) - y)
plot_error.set_data(x, error)
return plot_series, plot_error
anim = FuncAnimation(fig, update, frames=n, interval=100)
if save:
# TODO: {function}/{a}
os.makedirs(dirname, exist_ok=True)
fpath = os.path.join(dirname, f'{name}.mp4')
anim.save(fpath, dpi=300, writer='ffmpeg')
# anim.save(os.path.join(dirname, f'{name}.gif'), dpi=80, writer='imagemagick')
else:
plt.show()
plt.close(fig)
def plot_pointwise_convergence(x, a, n, coeff_func, name, f, b, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Plot poinwise convergence of Legendre series."""
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
fig, ax = plt.subplots()
ax.set(
ylim=(ylim_min, 1e1),
title=f"x={x}, a={a}",
xlabel=r"$k$",
ylabel=r"$|\varepsilon_k(x)|$"
)
ax.loglog(degrees[1:], errors[1:])
# ax.loglog(degrees[indices], errors[indices])
ax.loglog(degrees[1:], beta * degrees[1:] ** alpha,
label=rf"$\alpha={-alpha:.3f}$"+'\n'+rf"$\beta={beta:.3f}$")
ax.legend()
if save:
fpath = os.path.join(dirname, "pointwise_convergence", name, str(a))
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{x:.7f}.png"), dpi=300)
else:
plt.show()
plt.close(fig)
def animate_pointwise_convergence(dirname=DEFAULT_DIR):
"""Create an animation of pointwise convergences."""
pass
def plot_convergence_distance(xs, a, n, coeff_func, func_name, f, b, save=False,
dirname=DEFAULT_DIR):
"""Create a plot of the behaviour of the intercepts."""
betas = []
for x in xs:
print(x)
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
betas.append(beta)
fig = plt.figure(figsize=(16, 8))
plt.xlabel(r"$x$")
plt.ylabel(r"$\beta(x)$")
plt.semilogy(xs, betas, '.', basey=10)
if save:
fpath = os.path.join(dirname, "convergence_distances", func_name)
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{a}.png"))
else:
plt.show()
plt.close(fig)
def plot_convergence_distance_loglog(xs, a, xi, n, coeff_func, func_name, f, b,
label, name, save=False, dirname=DEFAULT_DIR):
"""Create a plot of the behaviour of the intercepts near the singularity
and edges."""
betas = []
for x in xs:
print(x)
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
betas.append(beta)
# Fit a line
xi_log = np.log10(xi)
z = np.polyfit(xi_log, np.log10(betas), 1)
p = np.poly1d(z)
fig = plt.figure()
plt.xlabel(r"$\xi$")
plt.ylabel(rf"$\beta({label})$")
plt.loglog(xi, np.array(betas), '.', label=r"$\beta$")
# TODO: improve label, variable names
plt.loglog(xi, 10 ** p(xi_log),
label="\n".join((rf"$\rho={-z[0]:.5f}$", rf"$D={10**z[1]:.5f}$")))
plt.legend()
if save:
fpath = os.path.join(dirname, "convergence_distances_loglog", func_name, str(a))
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{name}.png"))
else:
plt.show()
plt.close(fig) | plots.py | import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from matplotlib.animation import FuncAnimation
from legendre_series import legendre_polynomials, legendre_series, \
step_function, v_function, convergence_rate, convergence_line_log
DEFAULT_DIR = os.path.join(os.path.dirname(__file__), "figures")
# Improved plot styles.
seaborn.set()
def plot_legendre_polynomials(x, n=5, name="legendre_polynomials", save=False,
dirname=DEFAULT_DIR):
"""Plot Legendre polynomials."""
plt.figure()
plt.xlabel("$x$")
plt.ylabel("$P_n(x)$")
p = legendre_polynomials(x)
for _ in range(n):
plt.plot(x, next(p))
if save:
os.makedirs(dirname, exist_ok=True)
filepath = os.path.join(dirname, f"{name}.png")
plt.savefig(filepath, dpi=300)
else:
plt.show()
def plot_piecewise_functions(x, a, name="piecewise_functions", save=False,
dirname=DEFAULT_DIR):
"""Plot Step and V-function."""
plt.figure()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.plot(x, v_function(x, a), label="$u(x)$")
plt.plot(x, step_function(x, a), label="$u'(x)$")
plt.legend()
if save:
os.makedirs(dirname, exist_ok=True)
plt.savefig(os.path.join(dirname, f"{name}.png"), dpi=300)
else:
plt.show()
def plot_legendre_series(x, a, n, coeff_func, name, f, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Create animation of the Legendre series."""
series = legendre_series(x, coeff_func(a))
# Legendre Series
start = np.min(x)
stop = np.max(x)
ymin = np.min(f(x, a)) - 0.3
ymax = np.max(f(x, a)) + 0.3
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
axes[0].set(
xlim=(start, stop),
ylim=(ymin, ymax),
xlabel="$x$",
ylabel="$f_k(x)$",
)
axes[1].set(
xlim=(start, stop),
ylim=(ylim_min, 1.1),
xlabel="$x$",
ylabel=r"$|\varepsilon_k(x)|$",
)
axes[0].set_title(f"k={n}")
axes[1].set_title(f"k={n}")
axes[0].plot(x, f(x, a))
fig.set_tight_layout(True)
for _ in range(n):
next(series)
y = next(series)
plot_series, = axes[0].plot(x, y)
error = np.abs(f(x, a) - y)
plot_error, = axes[1].semilogy(x, error)
if save:
os.makedirs(dirname, exist_ok=True)
plt.savefig(os.path.join(dirname, f"legendre_series.png"), dpi=300)
else:
plt.show()
plt.close(fig)
def animate_legendre_series(x, a, n, coeff_func, name, f, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Create animation of the Legendre series."""
series = legendre_series(x, coeff_func(a))
# Legendre Series
start = np.min(x)
stop = np.max(x)
ymin = np.min(f(x, a)) - 0.3
ymax = np.max(f(x, a)) + 0.3
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
axes[0].set(
xlim=(start, stop),
ylim=(ymin, ymax),
xlabel="$x$",
ylabel="$f_k(x)$",
)
axes[1].set(
xlim=(start, stop),
ylim=(ylim_min, 1.1),
xlabel="$x$",
ylabel=r"$|\varepsilon_k(x)|$",
)
axes[0].set_title(f"k={0}")
axes[1].set_title(f"k={0}")
axes[0].plot(x, f(x, a))
fig.set_tight_layout(True)
y = next(series)
plot_series, = axes[0].plot(x, y)
error = np.abs(f(x, a) - y)
plot_error, = axes[1].semilogy(x, error)
def update(i):
print(i)
y = next(series)
axes[0].set_title(f"k={i}")
axes[1].set_title(f"k={i}")
plot_series.set_data(x, y)
error = np.abs(f(x, a) - y)
plot_error.set_data(x, error)
return plot_series, plot_error
anim = FuncAnimation(fig, update, frames=n, interval=100)
if save:
# TODO: {function}/{a}
os.makedirs(dirname, exist_ok=True)
fpath = os.path.join(dirname, f'{name}.mp4')
anim.save(fpath, dpi=300, writer='ffmpeg')
# anim.save(os.path.join(dirname, f'{name}.gif'), dpi=80, writer='imagemagick')
else:
plt.show()
plt.close(fig)
def plot_pointwise_convergence(x, a, n, coeff_func, name, f, b, ylim_min,
save=False, dirname=DEFAULT_DIR):
"""Plot poinwise convergence of Legendre series."""
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
fig, ax = plt.subplots()
ax.set(
ylim=(ylim_min, 1e1),
title=f"x={x}, a={a}",
xlabel=r"$k$",
ylabel=r"$|\varepsilon_k(x)|$"
)
ax.loglog(degrees[1:], errors[1:])
# ax.loglog(degrees[indices], errors[indices])
ax.loglog(degrees[1:], beta * degrees[1:] ** alpha,
label=rf"$\alpha={-alpha:.3f}$"+'\n'+rf"$\beta={beta:.3f}$")
ax.legend()
if save:
fpath = os.path.join(dirname, "pointwise_convergence", name, str(a))
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{x:.7f}.png"), dpi=300)
else:
plt.show()
plt.close(fig)
def animate_pointwise_convergence(dirname=DEFAULT_DIR):
"""Create an animation of pointwise convergences."""
pass
def plot_convergence_distance(xs, a, n, coeff_func, func_name, f, b, save=False,
dirname=DEFAULT_DIR):
"""Create a plot of the behaviour of the intercepts."""
betas = []
for x in xs:
print(x)
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
betas.append(beta)
fig = plt.figure(figsize=(16, 8))
plt.xlabel(r"$x$")
plt.ylabel(r"$\beta(x)$")
plt.semilogy(xs, betas, '.', basey=10)
if save:
fpath = os.path.join(dirname, "convergence_distances", func_name)
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{a}.png"))
else:
plt.show()
plt.close(fig)
def plot_convergence_distance_loglog(xs, a, xi, n, coeff_func, func_name, f, b,
label, name, save=False, dirname=DEFAULT_DIR):
"""Create a plot of the behaviour of the intercepts near the singularity
and edges."""
betas = []
for x in xs:
print(x)
series = legendre_series(x, coeff_func(a))
degrees = np.arange(n)
values = np.array([next(series) for _ in degrees])
errors = np.abs(f(x, a) - values)
a_min = -convergence_rate(x, a, b)
alpha, beta = convergence_line_log(degrees, errors, a_min)
betas.append(beta)
# Fit a line
xi_log = np.log10(xi)
z = np.polyfit(xi_log, np.log10(betas), 1)
p = np.poly1d(z)
fig = plt.figure()
plt.xlabel(r"$\xi$")
plt.ylabel(rf"$\beta({label})$")
plt.loglog(xi, np.array(betas), '.', label=r"$\beta$")
# TODO: improve label, variable names
plt.loglog(xi, 10 ** p(xi_log),
label="\n".join((rf"$\rho={-z[0]:.5f}$", rf"$D={10**z[1]:.5f}$")))
plt.legend()
if save:
fpath = os.path.join(dirname, "convergence_distances_loglog", func_name, str(a))
os.makedirs(fpath, exist_ok=True)
plt.savefig(os.path.join(fpath, f"{name}.png"))
else:
plt.show()
plt.close(fig) | 0.588771 | 0.508361 |
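A minimal usage sketch for the plotting helpers above. The grid and singularity location are illustrative values, and the sketch assumes the accompanying legendre_series module from this project is importable, since every helper calls into it.
# Illustrative driver for the helpers above (values are placeholders).
import numpy as np

x = np.linspace(-1, 1, 1001)   # evaluation grid
a = 0.5                        # singularity location of the piecewise functions

plot_legendre_polynomials(x, n=5, save=False)
plot_piecewise_functions(x, a, save=False)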
from typing import Callable
import pandas as pd
from random import choice
import numpy as np
from datasets.base_dataset import PathBaseDataset
class TripletsCSVDataset(PathBaseDataset):
'''
Csv dataset representation (csv will be in RAM) for triplets
'''
def __init__(
self,
csv_path: str,
image_prefix: str = '',
path_transform: Callable = None,
transform=None,
return_triplets: bool = True
):
'''
:param csv_path: path to csv file with one row per label: the first column is the label id and the second column is a space-separated list of image paths
:param image_prefix: path prefix which will be added to paths of images in csv file
:param path_transform: None or function for transform of path. Will be os.path.join(image_prefix,
path_transform(image_path))
:param transform: albumentations transform class or None
:param return_triplets: if True, then return ((anchor, positive, negative), label)
else return ((image,), label)
'''
super().__init__(image_prefix=image_prefix, path_transform=path_transform, transform=transform)
self.csv_path = csv_path
self.dt = pd.read_csv(csv_path)
self.return_triplets = return_triplets
images_per_classes = self.dt.iloc[:, 1].apply(lambda x: len(x.split(' '))).values
self.dt = self.dt.values
self.idxs = np.zeros((images_per_classes.sum(), 2), dtype=np.int64)
it = 0
for i in range(len(images_per_classes)):
self.idxs[it: it + images_per_classes[i], 0] = i
self.idxs[it: it + images_per_classes[i], 1] = np.arange(images_per_classes[i])
it += images_per_classes[i]
def __len__(self):
return len(self.idxs)
def __get_negative_id(self, anchor_id):
negative_ids = list(range(anchor_id)) + list(range(anchor_id + 1, len(self.dt)))
if len(negative_ids) == 0:
raise ValueError(f'Dataset {self.csv_path} has only one label id')
negative_id = choice(negative_ids)
return choice(self.dt[negative_id][1].split(' '))
def __get_positive_id(self, positive_image_ids, anchor_image_idx):
positive_ids = list(range(anchor_image_idx)) + list(range(anchor_image_idx + 1, len(positive_image_ids)))
if len(positive_ids) == 0:
return positive_image_ids[anchor_image_idx]
positive_image_idx = choice(positive_ids)
return positive_image_ids[positive_image_idx]
def __getitem__(self, idx):
label_idx, image_idx = self.idxs[idx]
row = self.dt[label_idx]
positive_image_ids = row[1].split(' ')
anchor_id = positive_image_ids[image_idx]
anchor_image = self._read_image(anchor_id)
if not self.return_triplets:
return (anchor_image,), row[0]
positive_id = self.__get_positive_id(positive_image_ids, image_idx)
positive_image = self._read_image(positive_id)
negative_id = self.__get_negative_id(label_idx)
negative_image = self._read_image(negative_id)
return (anchor_image, positive_image, negative_image), row[0] | src/datasets/triplets_csv_dataset.py | from typing import Callable
import pandas as pd
from random import choice
import numpy as np
from datasets.base_dataset import PathBaseDataset
class TripletsCSVDataset(PathBaseDataset):
'''
Csv dataset representation (csv will be in RAM) for triplets
'''
def __init__(
self,
csv_path: str,
image_prefix: str = '',
path_transform: Callable = None,
transform=None,
return_triplets: bool = True
):
'''
:param csv_path: path to csv file with one row per label: the first column is the label id and the second column is a space-separated list of image paths
:param image_prefix: path prefix which will be added to paths of images in csv file
:param path_transform: None or function for transform of path. Will be os.path.join(image_prefix,
path_transform(image_path))
:param transform: albumentations transform class or None
:param return_triplets: if True, then return ((anchor, positive, negative), label)
else return ((image,), label)
'''
super().__init__(image_prefix=image_prefix, path_transform=path_transform, transform=transform)
self.csv_path = csv_path
self.dt = pd.read_csv(csv_path)
self.return_triplets = return_triplets
images_per_classes = self.dt.iloc[:, 1].apply(lambda x: len(x.split(' '))).values
self.dt = self.dt.values
self.idxs = np.zeros((images_per_classes.sum(), 2), dtype=np.int64)
it = 0
for i in range(len(images_per_classes)):
self.idxs[it: it + images_per_classes[i], 0] = i
self.idxs[it: it + images_per_classes[i], 1] = np.arange(images_per_classes[i])
it += images_per_classes[i]
def __len__(self):
return len(self.idxs)
def __get_negative_id(self, anchor_id):
negative_ids = list(range(anchor_id)) + list(range(anchor_id + 1, len(self.dt)))
if len(negative_ids) == 0:
raise ValueError(f'Dataset {self.csv_path} has only one label id')
negative_id = choice(negative_ids)
return choice(self.dt[negative_id][1].split(' '))
def __get_positive_id(self, positive_image_ids, anchor_image_idx):
positive_ids = list(range(anchor_image_idx)) + list(range(anchor_image_idx + 1, len(positive_image_ids)))
if len(positive_ids) == 0:
return positive_image_ids[anchor_image_idx]
positive_image_idx = choice(positive_ids)
return positive_image_ids[positive_image_idx]
def __getitem__(self, idx):
label_idx, image_idx = self.idxs[idx]
row = self.dt[label_idx]
positive_image_ids = row[1].split(' ')
anchor_id = positive_image_ids[image_idx]
anchor_image = self._read_image(anchor_id)
if not self.return_triplets:
return (anchor_image,), row[0]
positive_id = self.__get_positive_id(positive_image_ids, image_idx)
positive_image = self._read_image(positive_id)
negative_id = self.__get_negative_id(label_idx)
negative_image = self._read_image(negative_id)
return (anchor_image, positive_image, negative_image), row[0] | 0.758242 | 0.323293 |
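The indexing code above implies a two-column CSV: a label per row and a space-separated list of image paths for that label. The layout and the constructor call below are illustrative only; the header names, file paths, and prefix are placeholders.
# Hypothetical CSV layout implied by the indexing code above:
#
#   label,images
#   0,cats/001.jpg cats/002.jpg cats/003.jpg
#   1,dogs/001.jpg dogs/002.jpg
#
# and a usage sketch with placeholder paths:
dataset = TripletsCSVDataset(
    csv_path='train_triplets.csv',
    image_prefix='/data/images',
    transform=None,
    return_triplets=True,
)
(anchor, positive, negative), label = dataset[0]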
import config
import errors
import flask
def call(func):
"""Call API wrapper.
Gracefully responds to requests that raise exceptions.
:param func [function]: function to call
:returns [tuple[dict, int]]: JSON response via helper functions
"""
try:
return _success(func())
except Exception as error:
return _failure(error)
def parse(name, format, optional=False):
"""Parse request parameter.
:param name [str]: parameter name
:param format [type]: type of variable to parse parameter into
:param optional [bool]: if False, fail if unable to parse parameter
:returns [any]: converted request parameter
:raises MissingParameter: if parameter is missing from request body
:raises UnprocessableRequest: if parameter is in invalid format
"""
param = flask.request.form.get(name)
if param:
try:
return format(param)
except ValueError as error:
raise errors.UnprocessableRequest(
f"unable to parse '{param}' into {format.__name__}: "
f'{str(error)}')
elif not optional:
raise errors.MissingParameter(f'{name}')
return param # optional parameter is None
def _failure(error):
"""Failed request response.
Format:
{
"response": {
"error": <error type>,
"message": <error message>
},
"success": false
}
:param error [str]: description of error
:returns [tuple[dict, int]]: JSON response as (response, status code)
"""
resp = config.RESPONSE_TEMPLATE.copy()
resp['success'] = False
resp['response'] = {}
errorType = type(error).__name__
resp['response']['error'] = errorType
resp['response']['message'] = str(error)
# Default status 500 Internal Server Error
responseCode = 500
if isinstance(error, errors.CustomException):
responseCode = error.httpResponseCode
return resp, responseCode
def _success(response=None):
"""Successful request response.
Format:
{
"response": <response>,
"success": true
}
:param response [str]: request response
:returns [tuple[dict, int]]: JSON response as (response, status code)
"""
resp = config.RESPONSE_TEMPLATE.copy()
if response is not None:
resp['response'] = response
# Status 200 OK
return resp, 200 | utils/handler.py | import config
import errors
import flask
def call(func):
"""Call API wrapper.
Gracefully responds to requests that raise exceptions.
:param func [function]: function to call
:returns [tuple[dict, int]]: JSON response via helper functions
"""
try:
return _success(func())
except Exception as error:
return _failure(error)
def parse(name, format, optional=False):
"""Parse request parameter.
:param name [str]: parameter name
:param format [type]: type of variable to parse parameter into
:param optional [bool]: if False, fail if unable to parse parameter
:returns [any]: converted request parameter
:raises MissingParameter: if parameter is missing from request body
:raises UnprocessableRequest: if parameter is in invalid format
"""
param = flask.request.form.get(name)
if param:
try:
return format(param)
except ValueError as error:
raise errors.UnprocessableRequest(
f"unable to parse '{param}' into {format.__name__}: "
f'{str(error)}')
elif not optional:
raise errors.MissingParameter(f'{name}')
return param # optional parameter is None
def _failure(error):
"""Failed request response.
Format:
{
"response": {
"error": <error type>,
"message": <error message>
},
"success": false
}
:param error [str]: description of error
:returns [tuple[dict, int]]: JSON response as (response, status code)
"""
resp = config.RESPONSE_TEMPLATE.copy()
resp['success'] = False
resp['response'] = {}
errorType = type(error).__name__
resp['response']['error'] = errorType
resp['response']['message'] = str(error)
# Default status 500 Internal Server Error
responseCode = 500
if isinstance(error, errors.CustomException):
responseCode = error.httpResponseCode
return resp, responseCode
def _success(response=None):
"""Successful request response.
Format:
{
"response": <response>,
"success": true
}
:param response [str]: request response
:returns [tuple[dict, int]]: JSON response as (response, status code)
"""
resp = config.RESPONSE_TEMPLATE.copy()
if response is not None:
resp['response'] = response
# Status 200 OK
return resp, 200 | 0.642769 | 0.397061 |
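A sketch of how call and parse are typically wired into a Flask view; the app, route name, module import path, and deposit logic below are hypothetical and only illustrate the pattern of wrapping the business logic in handler.call.
# Hypothetical Flask view built on the helpers above.
import flask
import handler   # the module defined above (import path is an assumption)

app = flask.Flask(__name__)

@app.route('/api/deposit', methods=['POST'])
def deposit():
    def _do_deposit():
        account_id = handler.parse('account_id', int)
        amount = handler.parse('amount', float)
        note = handler.parse('note', str, optional=True)
        # ... apply the deposit here ...
        return {'account_id': account_id, 'amount': amount, 'note': note}
    return handler.call(_do_deposit)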
import re
import sys
from hashlib import sha1
import logging
import subprocess as sp
from pathlib import Path
from remake.util import sysrun
from remake.setup_logging import setup_stdout_logging
from remake.loader import load_remake
from remake.task import Task, RescanFileTask
from remake.executor.base_executor import Executor
SLURM_SCRIPT_TPL = """#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH -p {queue}
#SBATCH -o {task_slurm_output}/{task_type}_%j.out
#SBATCH -e {task_slurm_output}/{task_type}_%j.err
#SBATCH --time={max_runtime}
#SBATCH --mem={mem}
{dependencies}
python {script_path} {remakefile_path} {remakefile_path_hash} {task_type} {task_key}
"""
logger = logging.getLogger(__name__)
def _parse_jobid(output):
match = re.match(r'Submitted batch job (?P<jobid>\d+)', output)
if match:
jobid = match['jobid']
return jobid
else:
raise Exception(f'Could not parse {output}')
def _submit_slurm_script(slurm_script_path):
try:
comp_proc = sysrun(f'sbatch {slurm_script_path}')
output = comp_proc.stdout
logger.debug(output.strip())
except sp.CalledProcessError as cpe:
logger.error(f'Error submitting {slurm_script_path}')
logger.error(cpe)
logger.error('===ERROR===')
logger.error(cpe.stderr)
logger.error('===ERROR===')
raise
return output
class SlurmExecutor(Executor):
handles_dependencies = True
def __init__(self, task_ctrl, slurm_config):
super().__init__(task_ctrl)
default_slurm_kwargs = {'queue': 'short-serial',
'max_runtime': '4:00:00',
'mem': 50000}
slurm_kwargs = {**default_slurm_kwargs}
slurm_kwargs.update(slurm_config)
self.slurm_dir = Path('.remake/slurm/scripts')
self.slurm_dir.mkdir(exist_ok=True, parents=True)
self.slurm_output = Path('.remake/slurm/output')
self.slurm_output.mkdir(exist_ok=True, parents=True)
self.remakefile_path = Path(task_ctrl.name + '.py').absolute()
self.slurm_kwargs = slurm_kwargs
self.task_jobid_map = {}
self.remakefile_path_hash = sha1(self.remakefile_path.read_bytes()).hexdigest()
self.pending_tasks = []
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
for task in self.pending_tasks:
self._submit_task(task)
def _write_submit_script(self, task):
remakefile_name = self.remakefile_path.stem
script_path = Path(__file__)
script_name = script_path.stem
rule_name = task.__class__.__name__
rule_slurm_output = self.slurm_output / rule_name
if hasattr(task, 'var_matrix'):
task_path_hash_key = task.path_hash_key()
task_dir = [task_path_hash_key[:2], task_path_hash_key[2:]]
# Doesn't work if val is e.g. a datetime.
# task_dir = [f'{k}-{getattr(task, k)}' for k in task.var_matrix.keys()]
task_slurm_output = rule_slurm_output.joinpath(*task_dir)
else:
task_slurm_output = rule_slurm_output
logger.debug(f' creating {task_slurm_output}')
task_slurm_output.mkdir(exist_ok=True, parents=True)
slurm_script_filepath = self.slurm_dir / f'{script_name}_{remakefile_name}_{task.path_hash_key()}.sbatch'
prev_jobids = []
prev_tasks = self.task_ctrl.task_dag.predecessors(task)
for prev_task in prev_tasks:
# N.B. not all dependencies have to have been run; they might not require rerunning.
if prev_task in self.task_jobid_map:
prev_jobids.append(self.task_jobid_map[prev_task])
if prev_jobids:
dependencies = '#SBATCH --dependency=afterok:' + ':'.join(prev_jobids)
else:
dependencies = ''
if isinstance(task, Task):
task_type = 'task'
task_key = task.path_hash_key()
elif isinstance(task, RescanFileTask):
task_type = 'rescan'
task_key = str(task.inputs['filepath'])
else:
raise ValueError(f'Unknown task type: {task}')
slurm_script = SLURM_SCRIPT_TPL.format(script_name=script_name,
script_path=script_path,
task_slurm_output=task_slurm_output,
remakefile_name=remakefile_name,
remakefile_path=self.remakefile_path,
remakefile_path_hash=self.remakefile_path_hash,
task_type=task_type,
task_key=task_key,
dependencies=dependencies,
job_name=task_key[:10],  # truncated; SLURM adds a leading * to longer names.
**self.slurm_kwargs)
logger.debug(f' writing {slurm_script_filepath}')
with open(slurm_script_filepath, 'w') as fp:
fp.write(slurm_script)
return slurm_script_filepath
def _submit_task(self, task):
slurm_script_path = self._write_submit_script(task)
output = _submit_slurm_script(slurm_script_path)
logger.info(f'Submitted: {task}')
jobid = _parse_jobid(output)
self.task_jobid_map[task] = jobid
def can_accept_task(self):
return True
def enqueue_task(self, task):
self._submit_task(task)
def get_completed_task(self):
raise NotImplementedError('Should not be called for SlurmExecutor')
def has_finished(self):
raise NotImplementedError('Should not be called for SlurmExecutor')
def run_job(remakefile, remakefile_hash, task_type, task_key):
setup_stdout_logging('DEBUG', colour=False, detailed=True)
remakefile = Path(remakefile).absolute()
curr_remakefile_hash = sha1(remakefile.read_bytes()).hexdigest()
if remakefile_hash != curr_remakefile_hash:
raise Exception(f'config file {remakefile} has changed -- cannot run task.')
remake = load_remake(remakefile)
task_ctrl = remake.task_ctrl
assert not task_ctrl.finalized, f'task control {task_ctrl} already finalized'
# Note, task_ctrl is not finalized.
# This is because another task could be finishing, and writing its output's metadata
# when this is called, and finalize can be trying to read it at the same time.
# Can perhaps fix if instead Task is responsible for working out if rerun needed,
# and removing finalize here.
# But the task DAG needs to be built.
task_ctrl.build_task_DAG()
if task_type == 'task':
task = task_ctrl.task_from_path_hash_key[task_key]
elif task_type == 'rescan':
task = task_ctrl.gen_rescan_task(task_key)
force = False
# Task might not be required anymore -- find out.
requires_rerun = task_ctrl.task_requires_rerun(task, print_reasons=True)
if force or task.force or requires_rerun & task_ctrl.remake_on:
logger.info(f'Running task: {task}')
# Can't run this; not finalized.
# task_ctrl.run_requested([task])
task.run(force=True, use_task_control=False)
task.update_status('COMPLETED')
else:
print(f'Run task not required: {task}')
logger.info(f'Run task not required: {task}')
if __name__ == '__main__':
print(sys.argv)
run_job(*sys.argv[1:]) | remake/executor/slurm_executor.py | import re
import sys
from hashlib import sha1
import logging
import subprocess as sp
from pathlib import Path
from remake.util import sysrun
from remake.setup_logging import setup_stdout_logging
from remake.loader import load_remake
from remake.task import Task, RescanFileTask
from remake.executor.base_executor import Executor
SLURM_SCRIPT_TPL = """#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH -p {queue}
#SBATCH -o {task_slurm_output}/{task_type}_%j.out
#SBATCH -e {task_slurm_output}/{task_type}_%j.err
#SBATCH --time={max_runtime}
#SBATCH --mem={mem}
{dependencies}
python {script_path} {remakefile_path} {remakefile_path_hash} {task_type} {task_key}
"""
logger = logging.getLogger(__name__)
def _parse_jobid(output):
match = re.match(r'Submitted batch job (?P<jobid>\d+)', output)
if match:
jobid = match['jobid']
return jobid
else:
raise Exception(f'Could not parse {output}')
def _submit_slurm_script(slurm_script_path):
try:
comp_proc = sysrun(f'sbatch {slurm_script_path}')
output = comp_proc.stdout
logger.debug(output.strip())
except sp.CalledProcessError as cpe:
logger.error(f'Error submitting {slurm_script_path}')
logger.error(cpe)
logger.error('===ERROR===')
logger.error(cpe.stderr)
logger.error('===ERROR===')
raise
return output
class SlurmExecutor(Executor):
handles_dependencies = True
def __init__(self, task_ctrl, slurm_config):
super().__init__(task_ctrl)
default_slurm_kwargs = {'queue': 'short-serial',
'max_runtime': '4:00:00',
'mem': 50000}
slurm_kwargs = {**default_slurm_kwargs}
slurm_kwargs.update(slurm_config)
self.slurm_dir = Path('.remake/slurm/scripts')
self.slurm_dir.mkdir(exist_ok=True, parents=True)
self.slurm_output = Path('.remake/slurm/output')
self.slurm_output.mkdir(exist_ok=True, parents=True)
self.remakefile_path = Path(task_ctrl.name + '.py').absolute()
self.slurm_kwargs = slurm_kwargs
self.task_jobid_map = {}
self.remakefile_path_hash = sha1(self.remakefile_path.read_bytes()).hexdigest()
self.pending_tasks = []
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
for task in self.pending_tasks:
self._submit_task(task)
def _write_submit_script(self, task):
remakefile_name = self.remakefile_path.stem
script_path = Path(__file__)
script_name = script_path.stem
rule_name = task.__class__.__name__
rule_slurm_output = self.slurm_output / rule_name
if hasattr(task, 'var_matrix'):
task_path_hash_key = task.path_hash_key()
task_dir = [task_path_hash_key[:2], task_path_hash_key[2:]]
# Doesn't work if val is e.g. a datetime.
# task_dir = [f'{k}-{getattr(task, k)}' for k in task.var_matrix.keys()]
task_slurm_output = rule_slurm_output.joinpath(*task_dir)
else:
task_slurm_output = rule_slurm_output
logger.debug(f' creating {task_slurm_output}')
task_slurm_output.mkdir(exist_ok=True, parents=True)
slurm_script_filepath = self.slurm_dir / f'{script_name}_{remakefile_name}_{task.path_hash_key()}.sbatch'
prev_jobids = []
prev_tasks = self.task_ctrl.task_dag.predecessors(task)
for prev_task in prev_tasks:
# N.B. not all dependencies have to have been run; they might not require rerunning.
if prev_task in self.task_jobid_map:
prev_jobids.append(self.task_jobid_map[prev_task])
if prev_jobids:
dependencies = '#SBATCH --dependency=afterok:' + ':'.join(prev_jobids)
else:
dependencies = ''
if isinstance(task, Task):
task_type = 'task'
task_key = task.path_hash_key()
elif isinstance(task, RescanFileTask):
task_type = 'rescan'
task_key = str(task.inputs['filepath'])
else:
raise ValueError(f'Unknown task type: {task}')
slurm_script = SLURM_SCRIPT_TPL.format(script_name=script_name,
script_path=script_path,
task_slurm_output=task_slurm_output,
remakefile_name=remakefile_name,
remakefile_path=self.remakefile_path,
remakefile_path_hash=self.remakefile_path_hash,
task_type=task_type,
task_key=task_key,
dependencies=dependencies,
job_name=task_key[:10],  # truncated; SLURM adds a leading * to longer names.
**self.slurm_kwargs)
logger.debug(f' writing {slurm_script_filepath}')
with open(slurm_script_filepath, 'w') as fp:
fp.write(slurm_script)
return slurm_script_filepath
def _submit_task(self, task):
slurm_script_path = self._write_submit_script(task)
output = _submit_slurm_script(slurm_script_path)
logger.info(f'Submitted: {task}')
jobid = _parse_jobid(output)
self.task_jobid_map[task] = jobid
def can_accept_task(self):
return True
def enqueue_task(self, task):
self._submit_task(task)
def get_completed_task(self):
raise NotImplementedError('Should not be called for SlurmExecutor')
def has_finished(self):
raise NotImplementedError('Should not be called for SlurmExecutor')
def run_job(remakefile, remakefile_hash, task_type, task_key):
setup_stdout_logging('DEBUG', colour=False, detailed=True)
remakefile = Path(remakefile).absolute()
curr_remakefile_hash = sha1(remakefile.read_bytes()).hexdigest()
if remakefile_hash != curr_remakefile_hash:
raise Exception(f'config file {remakefile} has changed -- cannot run task.')
remake = load_remake(remakefile)
task_ctrl = remake.task_ctrl
assert not task_ctrl.finalized, f'task control {task_ctrl} already finalized'
# Note, task_ctrl is not finalized.
# This is because another task could be finishing, and writing its output's metadata
# when this is called, and finalize can be trying to read it at the same time.
# Can perhaps fix if instead Task is responsible for working out if rerun needed,
# and removing finalize here.
# But the task DAG needs to be built.
task_ctrl.build_task_DAG()
if task_type == 'task':
task = task_ctrl.task_from_path_hash_key[task_key]
elif task_type == 'rescan':
task = task_ctrl.gen_rescan_task(task_key)
force = False
# Task might not be required anymore -- find out.
requires_rerun = task_ctrl.task_requires_rerun(task, print_reasons=True)
if force or task.force or requires_rerun & task_ctrl.remake_on:
logger.info(f'Running task: {task}')
# Can't run this; not finalized.
# task_ctrl.run_requested([task])
task.run(force=True, use_task_control=False)
task.update_status('COMPLETED')
else:
print(f'Run task not required: {task}')
logger.info(f'Run task not required: {task}')
if __name__ == '__main__':
print(sys.argv)
run_job(*sys.argv[1:]) | 0.266739 | 0.065187 |
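To see what actually gets submitted, the template at the top of the file can be rendered by hand. Every value below is a made-up placeholder chosen only to show the shape of the generated sbatch script; the snippet assumes it runs in the same module so SLURM_SCRIPT_TPL is in scope.
# Render SLURM_SCRIPT_TPL with placeholder values to inspect the generated
# sbatch script (nothing here comes from a real run).
example_script = SLURM_SCRIPT_TPL.format(
    job_name='a1b2c3d4e5',
    queue='short-serial',
    task_slurm_output='.remake/slurm/output/MyRule/a1/b2c3d4e5',
    task_type='task',
    max_runtime='4:00:00',
    mem=50000,
    dependencies='#SBATCH --dependency=afterok:1234567',
    script_path='/path/to/remake/executor/slurm_executor.py',
    remakefile_path='/path/to/my_remakefile.py',
    remakefile_path_hash='0123456789abcdef0123456789abcdef01234567',
    task_key='a1b2c3d4e5f6',
)
print(example_script)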
from unittest import TestCase
from day7.part1.get_signal_for_wire import get_signal_for_wire
class TestGetSignalForWire(TestCase):
def test_get_signal_for_wire_1(self):
expected_value = 72
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d"
]
value = get_signal_for_wire(instructions, "d")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_2(self):
expected_value = 507
instructions = [
"123 -> x",
"456 -> y",
"x OR y -> e"
]
value = get_signal_for_wire(instructions, "e")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_3(self):
expected_value = 492
instructions = [
"123 -> x",
"456 -> y",
"x LSHIFT 2 -> f"
]
value = get_signal_for_wire(instructions, "f")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_4(self):
expected_value = 114
instructions = [
"123 -> x",
"456 -> y",
"y RSHIFT 2 -> g"
]
value = get_signal_for_wire(instructions, "g")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_5(self):
expected_value = -124
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d",
"x OR y -> e",
"x LSHIFT 2 -> f",
"y RSHIFT 2 -> g",
"NOT x -> h"
]
value = get_signal_for_wire(instructions, "h")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_6(self):
expected_value = -457
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d",
"x OR y -> e",
"x LSHIFT 2 -> f",
"y RSHIFT 2 -> g",
"NOT y -> i"
]
value = get_signal_for_wire(instructions, "i")
self.assertEqual(expected_value, value) | day7/part1/test_get_signal_for_wire.py | from unittest import TestCase
from day7.part1.get_signal_for_wire import get_signal_for_wire
class TestGetSignalForWire(TestCase):
def test_get_signal_for_wire_1(self):
expected_value = 72
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d"
]
value = get_signal_for_wire(instructions, "d")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_2(self):
expected_value = 507
instructions = [
"123 -> x",
"456 -> y",
"x OR y -> e"
]
value = get_signal_for_wire(instructions, "e")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_3(self):
expected_value = 492
instructions = [
"123 -> x",
"456 -> y",
"x LSHIFT 2 -> f"
]
value = get_signal_for_wire(instructions, "f")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_4(self):
expected_value = 114
instructions = [
"123 -> x",
"456 -> y",
"y RSHIFT 2 -> g"
]
value = get_signal_for_wire(instructions, "g")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_5(self):
expected_value = -124
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d",
"x OR y -> e",
"x LSHIFT 2 -> f",
"y RSHIFT 2 -> g",
"NOT x -> h"
]
value = get_signal_for_wire(instructions, "h")
self.assertEqual(expected_value, value)
def test_get_signal_for_wire_6(self):
expected_value = -457
instructions = [
"123 -> x",
"456 -> y",
"x AND y -> d",
"x OR y -> e",
"x LSHIFT 2 -> f",
"y RSHIFT 2 -> g",
"NOT y -> i"
]
value = get_signal_for_wire(instructions, "i")
self.assertEqual(expected_value, value) | 0.782746 | 0.691344 |
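The test cases above fix the behaviour of get_signal_for_wire without showing its implementation. A minimal sketch consistent with those expected values is given below; note that it deliberately applies Python's ~ without 16-bit masking, which is exactly why the NOT cases in the tests come out negative. The function name is suffixed with _sketch to make clear it is not the project's actual implementation.
# Minimal sketch consistent with the expected values in the tests above.
def get_signal_for_wire_sketch(instructions, wire):
    rules = {}
    for line in instructions:
        expression, target = line.split(' -> ')
        rules[target] = expression.split()

    def evaluate(token):
        if token.isdigit():                       # numeric literal
            return int(token)
        parts = rules[token]
        if len(parts) == 1:                       # plain assignment, e.g. "123 -> x"
            return evaluate(parts[0])
        if len(parts) == 2:                       # "NOT x -> h" (no 16-bit mask)
            return ~evaluate(parts[1])
        left, op, right = parts
        if op == 'AND':
            return evaluate(left) & evaluate(right)
        if op == 'OR':
            return evaluate(left) | evaluate(right)
        if op == 'LSHIFT':
            return evaluate(left) << evaluate(right)
        return evaluate(left) >> evaluate(right)  # RSHIFT

    return evaluate(wire)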
# Import Packages
import pandas as pd
import numpy as np
import math
from pylab import *
from scipy import linalg as la
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.transforms as mtrans
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.gridspec as gridspec
from matplotlib.patches import FancyBboxPatch
from matplotlib import patheffects
plt.rcParams['font.family'] = ["Georgia"]
class unavco_data:
def __init__(self, **kwargs):
self.start_time = kwargs.get('start_time', '')
self.end_time = kwargs.get('end_time', '')
def get_stations(self, minlon, maxlon, minlat, maxlat):
# Returns a pandas dataframe with all sites within a specific set of coordinates
import requests, io
url_ = "https://web-services.unavco.org/gps/metadata/sites/v1?minlatitude="
coordinates = str(minlat) + "&maxlatitude=" + str(maxlat) + "&minlongitude=" + str(minlon) + "&maxlongitude=" + str(maxlon)
srt_ = "&starttime=" + str(self.start_time)
end_ = "&endtime=" + str(self.end_time)
full_url = url_ + coordinates + "&summary=true"
urlData = requests.get(full_url).content
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
return rawData
def site_data(self, sites, **kwargs):
# Generates a pandas dataframe with all of the site information
period = kwargs.get('period', 365)
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse
data_ = []
for i in range(3):
location = sites[i]
file = "https://web-services.unavco.org/gps/data/position/"
start_ = "/v3?analysisCenter=cwu&referenceFrame=nam14&starttime=" + str(self.start_time)
end_ = "&endtime=" + str(self.end_time)
query = "&report=long&dataPostProcessing=Cleaned&refCoordOption=first_epoch"
data_loop = pd.read_csv(file + location + start_ + end_ + query, skiprows=[i for i in range(0,8)])
site = location
lon = data_loop.head(1)[' E longitude'][0] - 360
lat = data_loop.head(1)[' N latitude'][0]
difference_in_years = relativedelta(parse(max(data_loop['Datetime'])), parse(min(data_loop['Datetime']))).years
difference_in_years = difference_in_years if difference_in_years > 0 else 1
e_vel = (data_loop[' delta E'].mean() / difference_in_years) * 1000
e_unc = 0.01
n_vel = (data_loop[' delta N'].mean() / difference_in_years) * 1000
n_unc = 0.01
data_.append(dict(zip(['site', 'longitude', 'latitude', 'E velocity (mm/yr)', 'E uncertainty (mm/yr)',
'N velocity (mm/yr)', 'N uncertainty (mm/yr)'],
[site, lon, lat, e_vel, e_unc, n_vel, n_unc])))
data_df = pd.DataFrame(data_)
return data_df
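# --- Editor's usage sketch (not part of the original file) ---
# Minimal illustration of how unavco_data is meant to be driven; the station
# IDs and date range are placeholders and the calls need network access to the
# UNAVCO web services referenced above.
def _example_fetch_sites():
    gps = unavco_data(start_time='2015-01-01T00:00:00',
                      end_time='2020-01-01T00:00:00')
    # Bounding box (degrees) used only to list candidate stations.
    stations = gps.get_stations(minlon=-123.0, maxlon=-120.0, minlat=44.0, maxlat=46.0)
    # site_data expects exactly three station IDs (it loops over range(3)).
    sites_df = gps.site_data(['P395', 'P396', 'P397'])
    return stations, sites_df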
class strain_data:
def __init__(self, data_unav):
self.data_unav = data_unav
class computation:
pass
def output_data(self, **kwargs):
# read in computation for easier data retrieval
computation = strain_data.computation()
# Primary output for the sites
data_df = self.data_unav
pwr = kwargs.get('pwr', 7)
# Convert to radians
l_rads = data_df[['site', 'longitude', 'latitude']].copy()
l_rads['longitude'] = l_rads['longitude'].apply(lambda x: x * (math.pi/180))
l_rads['latitude'] = l_rads['latitude'].apply(lambda x: x * (math.pi/180))
computation.l_rads = l_rads
# Determine UTM Zone
utm_z = data_df[['site', 'longitude']].copy()
utm_z['UTM_Zone'] = utm_z['longitude'].apply(lambda x: (x + 180)/6)
def utm_zone(x):
if x - int(x) > 0:
return int(x) + 1
else:
return int(x)
utm_z['UTM_Zone'] = utm_z['UTM_Zone'].apply(lambda x: utm_zone(x))
utm_z = utm_z[['site', 'UTM_Zone']]
computation.utm_z = utm_z
# Central Meridian of Zone (long0)
cm_long0 = utm_z.copy()
cm_long0['long0'] = cm_long0['UTM_Zone'].apply(lambda x: -183 + (6 * x))
cm_long0 = cm_long0[['site', 'long0']]
computation.cm_long0 = cm_long0
# Central Meridian of Zone (long0) in radians
cm_long0_r = cm_long0.copy()
cm_long0_r['long0_r'] = cm_long0_r['long0'].apply(lambda x: x * math.pi/180)
cm_long0_r = cm_long0_r[['site', 'long0_r']]
computation.cm_long0_r = cm_long0_r
# Central meridian of zone to the west (the 'pseudo' zone)
def cm_west(x):
if x == -177:
return 177
else:
return x - 6
p_z = cm_long0.copy()
p_z['cm_pseudo_zone'] = cm_long0['long0'].apply(lambda x: cm_west(x))
p_z = p_z[['site', 'cm_pseudo_zone']]
computation.p_z = p_z
# Central meridian of zone to the west (the 'pseudo' zone) in radians
p_z_r = p_z.copy()
p_z_r['cm_pseudo_zone_r'] = p_z_r['cm_pseudo_zone'].apply(lambda x: x * math.pi/180)
p_z_r = p_z_r[['site', 'cm_pseudo_zone_r']]
computation.p_z_r = p_z_r
# UTM 'pseudo' zone
utm_p_z = p_z.copy()
utm_p_z['UTM_Pseudo_Zone'] = utm_p_z['cm_pseudo_zone'].apply(lambda x: (x + 180)/6)
utm_p_z['UTM_Pseudo_Zone'] = utm_p_z['UTM_Pseudo_Zone'].apply(lambda x: utm_zone(x))
computation.utm_p_z = utm_p_z
# WGS84 datum
a_wgs84 = 6378137
b_wgs84 = 6356752.3142
computation.a_wgs84 = a_wgs84
computation.b_wgs84 = b_wgs84
# Calculate key components
k0 = 0.9996
computation.k0 = k0
e = math.sqrt(1-b_wgs84**2/a_wgs84**2)
computation.e = e
e_2 = ((e * a_wgs84)/b_wgs84)**2
computation.e_2 = e_2
n = (a_wgs84 - b_wgs84)/(a_wgs84 + b_wgs84)
computation.n = n
# Calculate rho
def calc_rho(x, e_, a_):
p_1 = a_*(1-e_**2)
p_2 = (1-(e_**2 * math.sin(x)**2))**(3/2)
return p_1/p_2
rho = l_rads[['site', 'latitude']].copy()
        # Use a raw string so '\r' in the TeX label is not read as a carriage return
        rho[r'$\rho$'] = rho['latitude'].apply(lambda x: calc_rho(x, e, a_wgs84))
        rho = rho[['site', r'$\rho$']]
computation.rho = rho
# Calculate nu
def calc_nu(x, e_, a_):
p_1 = a_
p_2 = math.sqrt(1-(e_**2 * math.sin(x)**2))
return p_1/p_2
nu = l_rads[['site', 'latitude']].copy()
        # Use a raw string so '\n' in the TeX label is not read as a newline
        nu[r'$\nu$'] = nu['latitude'].apply(lambda x: calc_nu(x, e, a_wgs84))
        nu = nu[['site', r'$\nu$']]
computation.nu = nu
# Calculate p
p_0 = l_rads[['site', 'longitude']].copy()
p_1 = cm_long0_r.copy()
p_merge = p_0.merge(p_1, on='site')
p_merge['p'] = p_merge.longitude - p_merge.long0_r
p = p_merge[['site', 'p']]
computation.p = p
# Calculate pseudo p
p_p0 = l_rads[['site', 'longitude']].copy()
p_p1 = p_z.copy()
p_p2 = p_z_r.copy()
p_p_m = p_p0.merge(p_p1, on='site').merge(p_p2, on='site')
computation.p_p_m = p_p_m
def pseudo_p(x, y, z):
if y == 177:
return abs(x) - z
else:
return x - z
p_p_m['pseudo_p'] = p_p_m.apply(lambda x: pseudo_p(x.longitude, x.cm_pseudo_zone, x.cm_pseudo_zone_r), axis=1)
pseudo_p = p_p_m[['site', 'pseudo_p']]
computation.pseudo_p = pseudo_p
# Matrix Components
def mat_comps(x, e, m):
if m == 'm1':
return x*(1-((e**2)/4)-((3*(e**4))/64)-((5*(e**6))/256))
elif m == 'm2':
return math.sin(2*x)*(((3*(e**2))/8)+((3*(e**4))/32)+((45*(e**6))/1024))
elif m == 'm3':
return math.sin(4*x)*(((15*(e**4))/256)+((45*(e**6))/1024))
else:
return math.sin(6*x)*((35*(e**6))/3072)
def m_comp(m):
m_ = l_rads[['site', 'latitude']].copy()
i = 0
while i < len(m):
m_[m[i]] = m_['latitude'].apply(lambda x: mat_comps(x, e, m[i]))
i+=1
return m_
m_comps = m_comp(m=['m1', 'm2', 'm3', 'm4'])
computation.m_comps = m_comps
# Calculate M
def calc_M(x0, x1, x2, x3, a):
eq_ = (x0 - x1 + x2 - x3)
return a*eq_
M = m_comps.copy()
M['M'] = M.apply(lambda x: calc_M(x.m1, x.m2, x.m3, x.m4, a_wgs84), axis=1)
M = M[['site', 'M']]
computation.M = M
# Calculate the K components
def k_comps(x, M, nu, k0, e_2, k):
if k == 'K1':
return M*k0
elif k == 'K2':
return k0*nu*math.sin(2*x)/4
elif k == 'K3':
return (k0*nu*math.sin(x)*((math.cos(x))**3)/24)*(5-((math.tan(x))**2)+(9*e_2*((math.cos(x))**2))+(4*(e_2**2)*((math.cos(x))**4)))
elif k == 'K4':
return k0*nu*math.cos(x)
else:
return (k0*nu*((math.cos(x))**3)/6)*(1-((math.tan(x))**2)+(e_2*((math.cos(x))**2)))
def k_comp(k):
k_0 = l_rads[['site', 'latitude']].copy()
k_1 = M.copy()
k_2 = nu.copy()
k_ = k_0.merge(k_1, on='site').merge(k_2, on='site')
i = 0
while i < len(k):
                k_[k[i]] = k_.apply(lambda x: k_comps(x.latitude, x.M, x[r'$\nu$'], k0, e_2, k[i]), axis=1)
i+=1
k_ = k_[['site'] + k]
return k_
k_c = k_comp(k=['K1', 'K2', 'K3', 'K4', 'K5'])
computation.k_c = k_c
# True Northing and Easting
def t_ne(K1, K2, K3, K4, K5, p, ne):
if ne == 'northing':
return K1+(K2*(p**2))+(K3*(p**4))
else:
return 500000+(K4*p)+(K5*(p**3))
t_n_0 = k_c.merge(p, on='site')
computation.t_n_0 = t_n_0
t_n_0['true_northing'] = t_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.p, 'northing'), axis = 1)
t_n_0['true_easting'] = t_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.p, 'easting'), axis = 1)
t_n_e = t_n_0[['site', 'true_northing', 'true_easting']]
computation.t_n_e = t_n_e
# Pseudo Northing and Easting
p_n_0 = k_c.merge(pseudo_p, on='site')
p_n_0['pseudo_northing'] = p_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.pseudo_p, 'northing'), axis = 1)
p_n_0['pseudo_easting'] = p_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.pseudo_p, 'easting'), axis = 1)
p_n_e = p_n_0[['site', 'pseudo_northing', 'pseudo_easting']]
computation.p_n_e = p_n_e
# Westernmost Zone
def w_z_():
if np.std(utm_z.UTM_Zone) > 5:
return 60
else:
return (np.sum(utm_z.UTM_Zone)/3)//1
w_z = w_z_()
w_z_avg = (np.sum(utm_z.UTM_Zone)/3)
w_z_std = np.std(utm_z.UTM_Zone)
computation.w_z = w_z
computation.w_z_avg = w_z_avg
computation.w_z_std = w_z_std
# UTM coordinates relative to the westernmost zone, to be used in strain analysis
def utm_w_z(x, w, t, p):
if x == w:
return t
else:
return p
utm_0 = utm_z.copy()
utm_1 = t_n_e.copy()
utm_2 = p_n_e.copy()
utm_w = utm_0.merge(utm_1, on='site').merge(utm_2, on='site')
utm_w['UTM_w_z_easting'] = utm_w.apply(lambda x: utm_w_z(x.UTM_Zone, w_z, x.true_easting, x.pseudo_easting), axis=1)
utm_w['UTM_w_z_northing'] = utm_w.apply(lambda x: utm_w_z(x.UTM_Zone, w_z, x.true_northing, x.pseudo_northing), axis=1)
utm_w = utm_w[['site', 'UTM_w_z_easting', 'UTM_w_z_northing']]
computation.utm_w = utm_w
# Center of Triangle
mean_n = utm_w.UTM_w_z_northing.mean()
mean_e = utm_w.UTM_w_z_easting.mean()
computation.mean_n = mean_n
computation.mean_e = mean_e
# Revised Locations
sites_r = utm_w.copy()
sites_r['revised_easting'] = sites_r['UTM_w_z_easting'].apply(lambda x: x - mean_e)
sites_r['revised_northing'] = sites_r['UTM_w_z_northing'].apply(lambda x: x - mean_n)
sites_r = sites_r[['site', 'revised_easting', 'revised_northing']]
computation.sites_r = sites_r
# Velocities converted from mm/yr to m/yr
vel_m = self.data_unav.copy().drop(['longitude', 'latitude'], axis=1)
vel_m['E velocity (m/yr)'] = vel_m['E velocity (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['E uncertainty (m/yr)'] = vel_m['E uncertainty (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['N velocity (m/yr)'] = vel_m['N velocity (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['N uncertainty (m/yr)'] = vel_m['N uncertainty (mm/yr)'].apply(lambda x: x * 0.001)
vel_m = vel_m.drop(['E velocity (mm/yr)', 'E uncertainty (mm/yr)', 'N velocity (mm/yr)', 'N uncertainty (mm/yr)'], axis=1)
computation.vel_m = vel_m
# Matrix 1
M1 = np.array([[sites_r.revised_easting], [sites_r.revised_northing]]).transpose()
computation.M1 = M1
# Matrix 2
        def mat2(sites):
            # Build the 6x6 design matrix; the unknown vector is ordered
            # [t_E, t_N, rotation, eEE, eEN, eNN] (cf. how M5 is unpacked below).
            # DataFrame.append was removed in pandas 2.0, so the rows are
            # collected in a list and turned into a DataFrame once.
            rows = []
            for i in range(3):
                x = sites.revised_easting[i]
                y = sites.revised_northing[i]
                rows.append(pd.Series(np.array([1, 0, (-1 * y), x, y, 0])))
                rows.append(pd.Series(np.array([0, 1, x, 0, x, y])))
            return pd.DataFrame(rows)
        M2 = mat2(sites_r)
computation.M2 = np.array(M2)
# Matrix 3
M3 = la.inv(M2)
M3 = pd.DataFrame(M3)
computation.M3 = np.array(M3)
# Matrix 4
M4_ = pd.concat([vel_m['E velocity (m/yr)'], vel_m['N velocity (m/yr)']]).sort_index()
M4 = np.array(M4_)[np.newaxis].T
computation.M4 = M4
# Matrix 5
M5 = np.matrix(M3).dot(np.matrix(M4))
computation.M5 = M5
# North Unit Vector
n_v_unit = [0, 1]
computation.n_v_unit = n_v_unit
# Translation Vector
t_v = [float(M5[0]), float(M5[1])]
computation.t_v = t_v
# Magnitude of translation vector, or speed (m/yr)
t_v_s = np.sqrt((t_v[0]**2)+(t_v[1]**2))
computation.t_v_s = t_v_s
# Unit Translation Vector
t_v_unit = [(t_v[0]/t_v_s), (t_v[1]/t_v_s)]
computation.t_v_unit = t_v_unit
# Angle between north vector and unit trans vector
n_t_a = math.acos((t_v_unit[0]*n_v_unit[0])+(t_v_unit[1]*n_v_unit[1]))*(180/math.pi)
computation.n_t_a = n_t_a
# Azimuth of trans vect (degrees clockwise from north)
def trans_azi(x, y):
if x < 0:
return 360 - y
else:
return y
t_v_azi = trans_azi(t_v[0], n_t_a)
computation.t_v_azi = t_v_azi
# Matrix M6
M6 = np.array([[M5[-3], M5[-2]], [M5[-2], M5[-1]]])
computation.M6 = M6
# Eigen System
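        # For a symmetric 2x2 tensor [[exx, exy], [exy, eyy]] the eigenvalues are
        #   lambda = ((exx + eyy) +/- sqrt((exx - eyy)**2 + 4*exy**2)) / 2,
        # which is what eigen_s computes below (x1 and x2 are both exy here).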
def eigen_s(x0, x1, x2, x3):
ev_0 = x0 + x3
ev_1 = 4 * x1 * x2
ev_2 = (x0 - x3)**2
ev_3 = np.sqrt(ev_1 + ev_2)
ev_a = (ev_0 + ev_3) / 2
ev_b = (ev_0 - ev_3) / 2
eigen = [ev_a, ev_b]
return eigen
e_s = eigen_s(float(M6[0][0]), float(M6[0][1]), float(M6[1][0]), float(M6[1][1]))
computation.e_s = e_s
# Calculate e1 and e2
def det_e(e_sys):
if e_sys[0] > e_sys[1]:
return [e_sys[0], e_sys[1]]
else:
return [e_sys[1], e_sys[0]]
e1_2 = det_e(e_s)
computation.e1_2 = e1_2
# Calculate e1 and e2 unit eigenvectors
def unit_eigen(x, y, z):
x_c = 1/np.sqrt(1+((x-y)/z)**2)
y_c = ((x-y)/z)/np.sqrt(1+((x-y)/z)**2)
return [x_c, y_c]
e1_unit = unit_eigen(e1_2[0], float(M6[0][0]), float(M6[0][1]))
computation.e1_unit = e1_unit
e2_unit = unit_eigen(e1_2[1], float(M6[0][0]), float(M6[0][1]))
computation.e2_unit = e2_unit
# Angle between north vector and e1/e2 unit eigenvectors (Degrees)
def find_angle(w, x, y, z):
return math.acos((w*x)+(y*z))*(180/math.pi)
nv_e1 = find_angle(e1_unit[0], n_v_unit[0], e1_unit[1], n_v_unit[1])
computation.nv_e1 = nv_e1
nv_e2 = find_angle(e2_unit[0], n_v_unit[0], e2_unit[1], n_v_unit[1])
computation.nv_e2 = nv_e2
# Azimuth of e1/e2 unit eigenvectors
def az_e(x, y):
if x < 0:
return 360 - y
else:
return y
e1_azi = az_e(e1_unit[0], nv_e1)
computation.e1_azi = e1_azi
e2_azi = az_e(e2_unit[0], nv_e2)
computation.e2_azi = e2_azi
# Alternate Azimuth of e1/e2 unit eigenvectors
def a_az_e(x):
if x < 180:
return x + 180
else:
return x - 180
e1_azi_a = a_az_e(e1_azi)
computation.e1_azi_a = e1_azi_a
e2_azi_a = a_az_e(e2_azi)
computation.e2_azi_a = e2_azi_a
# Maximum infinitesimal shear strain
mis_strain = 2 * np.sqrt(((float(M6[0][0]) - float(M6[1][1])) / 2)**2 + (float(M6[0][1])**2))
computation.mis_strain = mis_strain
# Area Strain
a_strain = e1_2[0] + e1_2[1]
computation.a_strain = a_strain
# Invariants of the infinitesimal strain rate tensor
inv_0 = a_strain
computation.inv_0 = inv_0
inv_1 = e1_2[0] * e1_2[1]
computation.inv_1 = inv_1
inv_2 = inv_1
computation.inv_2 = inv_2
# Matrix 7
def m7(x, y):
v = pd.concat([x, y]).sort_index()
v = np.array(list(v.apply(lambda x: 1 / (x**2))))
return np.diag(v)
M7 = pd.DataFrame(m7(vel_m['E uncertainty (m/yr)'], vel_m['N uncertainty (m/yr)']))
computation.M7 = np.array(M7)
# Matrix 8
M8 = M2.T
computation.M8 = M8
# Matrix (m9.1 = m7 dot m2)
M9_1 = M7.dot(M2)
computation.M9_1 = M9_1
# Matrix (m9.2 = m8 dot m9.1)
M9_2 = M8.dot(M9_1)
computation.M9_2 = M9_2
# Matrix 9
M9 = la.inv(M9_2)
computation.M9 = M9
# Primary Data Output
fields_ = ['E component ± uncert [m/yr]', 'N component ± uncert [m/yr]', 'Azimuth [degrees]',
'Speed [m/yr]', 'Rotation ± uncertainty [degrees/yr]', 'Rotation ± uncertainty [nano-rad/yr]', 'Direction of rotation',
'Max horizontal extension (e1H) [nano-strain]', 'Azimuth of S1H [degrees]', 'Min horizontal extension (e2H) [nano-strain]',
'Azimuth of S2H [degrees]', 'Max shear strain [nano-strain]', 'Area strain [nano-strain]']
        data_1 = str(round(float(M5[0]), 4)) + r' $\pm$ ' + str(round(float(M9[0][0]), 12))
        data_2 = str(round(float(M5[1]), 4)) + r' $\pm$ ' + str(round(float(M9[1][1]), 12))
        data_3 = str(round(float(M5[2]) * (180 / math.pi), 10)) + r' $\pm$ ' + str(round(np.sqrt(float(M9[2][2])) * (180 / math.pi), 12))
        data_4 = str(round(float(M5[2]) * (10**9), 4)) + r' $\pm$ ' + str(round(np.sqrt(float(M9[2][2])) * (10**9), 4))
data_5 = 'Clockwise' if (float(M5[2]) * (10**9)) < 0 else 'Anti-Clockwise'
data_6 = str(round(float(e1_2[0]) * (10**9), 4))
data_7 = str(round(e1_azi, 4)) + ' or ' + str(round(e1_azi_a, 4))
data_8 = str(round(float(e1_2[1]) * (10**9), 4))
data_9 = str(round(e2_azi, 4)) + ' or ' + str(round(e2_azi_a, 4))
values_ = [data_1, data_2, str(round(t_v_azi, 4)), str(round(t_v_s, 4)), data_3, data_4, data_5,
data_6, data_7, data_8, data_9, str(round(mis_strain*(10**9), 4)), str(round(a_strain*(10**9), 4))]
primary = pd.DataFrame(values_, index=fields_)
primary.columns = ['Translation Vector']
computation.primary_data = primary
# Calculate the strain ellipse
stretch = np.array([[float(M5[3]), 0], [0, float(M5[5])]])
computation.stretch = stretch
shear = np.array([[0, float(M5[4])/2], [float(M5[4])/2, 0]])
computation.shear = shear
theta = float(M5[2]) * (180/math.pi)
rotation = array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
computation.rotation_tensor = rotation
S = (stretch + shear)
computation.stretch_tensor = S
R = rotation
F_ = R.dot(S) * 10**6 + np.array([[1, 0], [0, 1]])
F = dot(F_, F_.T)
computation.deformation_matrix = F
B = F @ F.T
C = F.T @ F
V = la.sqrtm(B)
computation.left_stretch_tensor = V
U = la.sqrtm(C)
computation.right_stretch_tensor = U
R_r = la.inv(V) @ F
R_l = F @ la.inv(U)
return computation
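# --- Editor's usage sketch (not part of the original file) ---
# Sketch of how the computation object returned by output_data is typically
# consumed; sites_df is assumed to be the three-station dataframe produced by
# unavco_data.site_data above.
def _example_strain_summary(sites_df):
    comp = strain_data(sites_df).output_data()
    # primary_data is a one-column dataframe of formatted results; e1_2 holds
    # the principal strain rates and t_v_azi the translation-vector azimuth.
    print(comp.primary_data)
    print('e1/e2 (strain/yr):', comp.e1_2)
    print('translation azimuth (deg):', comp.t_v_azi)
    return comp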
class strain_viz:
    # Some of the Python functions below are adapted from <NAME>'s GitHub repository
def __init__(self, strain_data):
self.strain_data = strain_data
def def_ellipse(self, V):
# Draw strain ellipse from deformation gradient
theta = linspace(0, 2*pi, 180)
xc, yc = cos(theta), sin(theta)
x,y = dot(V, [xc,yc])
plt.plot(xc, yc, 'slategrey', x, y, lw=2, linestyle='--')
plt.fill(xc, yc, 'w', alpha=0.45)
u, s, v = svd(V)
plt.plot(x, y, 'k', lw=2, zorder=40)
plt.quiver(zeros(2), zeros(2),
hstack((s*u[0],-s*u[0])), hstack((s*u[1],-s*u[1])),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, headaxislength=0, headlength=0, zorder=30)
plt.quiver(zeros(2), zeros(2),
hstack((1,0)), hstack((0,1)),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, linestyle='dashed', alpha=0.25, headaxislength=0, headlength=0, zorder=10)
plt.quiver(zeros(2), zeros(2),
hstack((-1,0)), hstack((0,-1)),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, linestyle='dashed', alpha=0.25, headaxislength=0, headlength=0, zorder=10)
axis('equal')
axis('off')
def def_field(self, V, **kwargs):
# Visualize displacement field from
# displacement gradient
alpha_ = kwargs.get('alpha', '1')
F = asarray(V)
J = F - eye(2)
X, Y = meshgrid(linspace(-3, 3, 21),
linspace(-2, 2, 17))
u, v = tensordot(J, [X, Y], axes=1)
plt.quiver(X, Y, u, v, angles='xy', color='black', alpha=alpha_)
axis('off')
def get_center(sites_):
# Locate the center of the triangle
lonc = sites_.longitude.sum() / 3
latc = sites_.latitude.sum() /3
if lonc < -180:
lonc = lonc + 360
elif lonc > 180:
lonc = lonc - 360
return lonc, latc
def end_df(sites_):
sites = sites_
first_site = pd.DataFrame(sites.head(1))
last_site = pd.DataFrame(sites.tail(1))
end_sites = pd.concat([first_site, last_site]).reset_index(drop=True)
return end_sites
def ellipse_plot(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', 'off')
ax = kwargs.get('ax', None)
fig = kwargs.get('fig', None)
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
if ax is None:
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
figx = kwargs.get('figx', 15)
figy = kwargs.get('figy', 15)
fig = plt.figure(figsize=(figx, figy))
ax = fig.add_subplot(1, 1, 1, projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
ax.add_image(tiler, tiler_size, interpolation='spline36')
ax.set_aspect(1, 'datalim')
ax.gridlines(draw_labels=True)
plt.plot(sites.longitude, sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(end_sites.longitude, end_sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(sites.longitude, sites.latitude, color='black', linewidth=0, marker=',', transform=ccrs.PlateCarree(), label=sites.site, zorder=20)
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
my_dpi = fig.dpi
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
sbllx = (llx1 + llx0) / 2
sblly = lly0 + (lly1 - lly0) * scale_loc[1]
tmc = ccrs.TransverseMercator(sbllx, sblly)
x0, x1, y0, y1 = ax.get_extent(tmc)
sbx = x0 + (x1 - x0) * scale_loc[0]
sby = y0 + (y1 - y0) * scale_loc[1]
# print(sbx, sby)
sbxe = ((sbx + length * 500)/5)*2
sbxf = round(sbx - length * 500)
j = sbxf
k = 1
while k <= 5:
bar_xs = [j, j + sbxe]
if k % 2 == 0:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
else:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
j += sbxe
k += 1
buffer = [patheffects.withStroke(linewidth=1.5, foreground="w")]
hei_ = kwargs.get('hei_', 5)
ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'show')
for i in range(len(sites)):
plt.draw()
lon, lat = sites.longitude[i], sites.latitude[i]
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
colors = color_list
scale_arrow = kwargs.get('scale_arrow', 40)
if arrows == 'show':
axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
axi.axis('equal')
axi.axis('off')
sites_h = []
for i in range(3):
site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
sites_h.append(site_0)
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Add Legend
leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.75)
# Add Strain Ellipse
        if not (isinstance(V, str) and V == 'off'):
plt.draw()
lon, lat = lonc, latc
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
ax2 = fig.add_axes([(x_), (y_), 0.2, 0.2])
ax2.set_xlim([-1,1])
ax2.set_ylim([-1,1])
strain_viz.def_ellipse(self, V)
ax2.axis('equal')
ax2.axis('off')
p1 = ax.get_position()
p2 = ax2.get_position()
ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
axn.axis('equal')
axn.axis('off')
p3 = ax.get_position()
p4 = axn.get_position()
axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def symbol_map(self, **kwargs):
sites = self.strain_data
ax = kwargs.get('ax', None)
fig = kwargs.get('fig', None)
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
if ax is None:
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
figx = kwargs.get('figx', 15)
figy = kwargs.get('figy', 15)
fig = plt.figure(figsize=(figx, figy))
ax = fig.add_subplot(1, 1, 1, projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
ax.add_image(tiler, tiler_size, interpolation='spline36')
ax.set_aspect(1, 'datalim')
ax.gridlines(draw_labels=True)
plt.plot(sites.longitude, sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(end_sites.longitude, end_sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(sites.longitude, sites.latitude, color='black', linewidth=0, marker=',', transform=ccrs.PlateCarree(), label=sites.site, zorder=20)
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
my_dpi = fig.dpi
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
sbllx = (llx1 + llx0) / 2
sblly = lly0 + (lly1 - lly0) * scale_loc[1]
tmc = ccrs.TransverseMercator(sbllx, sblly)
x0, x1, y0, y1 = ax.get_extent(tmc)
sbx = x0 + (x1 - x0) * scale_loc[0]
sby = y0 + (y1 - y0) * scale_loc[1]
sbxe = ((sbx + length * 500)/5)*2
sbxf = round(sbx - length * 500)
j = sbxf
k = 1
while k <= 5:
bar_xs = [j, j + sbxe]
if k % 2 == 0:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
else:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
j += sbxe
k += 1
buffer = [patheffects.withStroke(linewidth=2.5, foreground="w")]
hei_ = kwargs.get('hei_', 5)
ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'off')
for i in range(len(sites)):
plt.draw()
lon, lat = sites.longitude[i], sites.latitude[i]
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
colors = color_list
scale_arrow = kwargs.get('scale_arrow', 40)
if arrows == 'show':
axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
axi.axis('equal')
axi.axis('off')
sites_h = []
for i in range(3):
site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
sites_h.append(site_0)
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Add Legend
leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.75)
plt.draw()
# Add in the e1 and e2 symbols
e1 = kwargs.get('e1', None)
e2 = kwargs.get('e2', None)
#e_loc = kwargs.get('e_loc', 'lower left')
e_rot = kwargs.get('e_rot', 0)
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
# Add Map Symbol
if None not in (e1, e2):
plt.draw()
lon, lat = lonc, latc
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
ax2 = fig.add_axes([(x_), (y_), (5/width), (5/height)])
ax2.set_xlim([-1,1])
ax2.set_ylim([-1,1])
strain_viz.map_symbol(self, e1, e2, rot=e_rot, old_range=old_range, new_range_a=new_range_a, new_range_b=new_range_b, max_strain=max_strain, min_strain=min_strain, ax=ax2)
ax2.axis('equal')
#ax2.axis('off')
p1 = ax.get_position()
p2 = ax2.get_position()
ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
ax2.autoscale(False)
plt.draw()
axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
axn.axis('equal')
axn.axis('off')
p3 = ax.get_position()
p4 = axn.get_position()
axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def scale_arrow(value, old_range, new_range):
tmin, tmax = old_range
xmin, xmax = new_range
percent = abs((value - tmin) / (tmax - tmin))
return ((xmax - xmin) * percent) + xmin
def scale_arrow_percent(value, old_range):
tmin, tmax = old_range
return abs((value - tmin) / (tmax - tmin))
def map_symbol(self, e1, e2, **kwargs):
# Add Figure to plot
ax = kwargs.get('ax', 'none')
rot = kwargs.get('rot', 0)
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
sz_e1 = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_a)
sz_e2 = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_a)
sz_e1_d = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_b)
sz_e2_d = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_b)
sz_p_e1 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
sz_p_e2 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
scale_arrow_percent_0 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle0_d = f"darrow,pad=%s" % (scale_arrow_percent_0)
scale_arrow_percent_1 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle1_d = f"darrow,pad=%s" % (scale_arrow_percent_1)
#scale_arrow_percent_1 = str(round(strain_viz.scale_arrow_percent(e2 * 10**9, old_range), 1))
#boxstyle1_l = f"larrow,pad=%s" % (scale_arrow_percent_1)
#boxstyle1_r = f"rarrow,pad=%s" % (scale_arrow_percent_1)
if ax == 'none':
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
if (e1 == 0) and (e2 < 0):
rot0 = mtrans.Affine2D().rotate_deg(rot)
x0, y0 = rot0.transform_point((0.0, sz_p_e2))
x1, y1 = rot0.transform_point((0.0, -sz_p_e2))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
elif (e1 > 0) and (e2 == 0):
bbox_props1 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.0, 0.0, sz_text1, ha="center", va="center", rotation=rot + 90,
size=sz_e1_d, color='w',
bbox=bbox_props1)
elif (e1 > 0) and (e2 > 0):
bbox_props2 = dict(boxstyle=boxstyle1_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.0, 0.0, sz_text1, ha="center", va="center", rotation=rot,
size=sz_e2_d, color='w',
bbox=bbox_props2)
sz_text0 = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
bbox_props3 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
ax.text(0.0, 0.0, sz_text0, ha="center", va="center", rotation=rot+90,
size=sz_e1_d, color='w',
bbox=bbox_props3)
elif (e1 > 0) and (e2 < 0):
angle_phi = rot
l2 = np.array((5, 5))
trans_angle = plt.gca().transData.transform_angles(np.array((angle_phi,)),
l2.reshape((1, 2)))[0]
bbox_props = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
sz_text = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
t = ax.text(0.0, 0.0, sz_text, ha="center", va="center",
size=sz_e1_d, color='w', rotation=trans_angle, bbox=bbox_props)
rot1 = mtrans.Affine2D().rotate_deg(angle_phi)
x0, y0 = rot1.transform_point((0.0, sz_p_e2))
x1, y1 = rot1.transform_point((0.0, -sz_p_e2))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
elif (e1 < 0) and (e2 < 0):
rot0 = mtrans.Affine2D().rotate_deg(rot)
x0, y0 = rot0.transform_point((0.0, sz_p_e2))
x1, y1 = rot0.transform_point((0.0, -sz_p_e2))
x2, y2 = rot0.transform_point((sz_p_e1, 0.0))
x3, y3 = rot0.transform_point((-sz_p_e1, 0.0))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x2, y2),
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x3, y3),
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
axis('off')
def symbol_map_full(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', None)
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'off')
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Get data for plot
e1 = kwargs.get('e1', None)
e2 = kwargs.get('e2', None)
e_loc = kwargs.get('e_loc', 'lower left')
e_rot = kwargs.get('e_rot', 0)
# Import Site data and find center
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
fig = plt.figure(figsize=(20, 15), constrained_layout=False)
gs = gridspec.GridSpec(30, 40, figure=fig, wspace=0.0, hspace=0.0)
ax = fig.add_subplot(gs[:, 11:], projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
scale_arrow = kwargs.get('scale_arrow', 40)
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
hei_ = kwargs.get('hei_', 5)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
strain_viz.symbol_map(self, e1=e1, e2=e2, e_loc=e_loc, e_rot=e_rot, hei_=hei_, old_range=old_range, new_range_a=new_range_a,
new_range_b=new_range_b, max_strain=max_strain, min_strain=min_strain,
arrows=arrows, color_list=color_list, tiler_size=tiler_size, map_tile_type=map_tile_type,
scale_arrow=scale_arrow, length=length, scale_loc=scale_loc, loc_=loc_, ax=ax, fig=fig)
ax1 = fig.add_subplot(gs[27:30, 1:9])
image = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
strain_viz.unavco_logo(image=image, ax=ax1)
ax1_1 = fig.add_subplot(gs[:3, :10])
title_ = kwargs.get('title', "GPS Triangle-Strain Map\nUsing UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 24)
ha_ = kwargs.get('ha', 'center')
va_ = kwargs.get('va', 'top')
xy_ = kwargs.get('xy', (0.5, 0.925))
strain_viz.map_title(title=str(title_), xy=xy_, fontsize=fontsize_, ha=ha_, va=va_, ax=ax1_1)
ax2 = fig.add_subplot(gs[4:12, 1:9])
strain_viz.ellipse_subplot(self, V=V, ax=ax2)
ax3 = fig.add_subplot(gs[13:18, :10])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
old_range = kwargs.get('old_range', [0.1, 300])
strain_viz.contraction(old_range=old_range, max_strain=max_strain, min_strain=min_strain, ax=ax3)
ax4 = fig.add_subplot(gs[20:25, :10])
strain_viz.elongation(old_range=old_range, max_strain=max_strain, min_strain=min_strain, ax=ax4)
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def strain_map_full(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', None)
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'show')
size = kwargs.get('size', 10)
label = kwargs.get('label', '10 mm/yr')
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Import Site data and find center
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
fig = plt.figure(figsize=(15, 20), constrained_layout=False)
gs = gridspec.GridSpec(40, 30, figure=fig)
ax = fig.add_subplot(gs[:30, :], projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
scale_arrow = kwargs.get('scale_arrow', 40)
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
hei_ = kwargs.get('hei_', 5)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
strain_viz.ellipse_plot(self, V=V, arrows=arrows, color_list=color_list, tiler_size=tiler_size, map_tile_type=map_tile_type,
hei_=hei_, scale_arrow=scale_arrow, length=length, scale_loc=scale_loc, loc_=loc_, ax=ax, fig=fig)
fig.canvas.draw()
ax1 = fig.add_subplot(gs[30:34, 23:])
image = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
strain_viz.unavco_logo(image=image, ax=ax1)
ax1_1 = fig.add_subplot(gs[31:33, :23])
title_ = kwargs.get('title', "GPS Triangle-Strain Map Using UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 24)
ha_ = kwargs.get('ha', 'left')
va_ = kwargs.get('va', 'center')
strain_viz.map_title(title=str(title_), fontsize=fontsize_, ha=ha_, va=va_, ax=ax1_1)
ax2 = fig.add_subplot(gs[30:40, 10:24])
strain_viz.quiver_legend(self, sites=sites, size=size, label=label, scale_arrow=scale_arrow, ax=ax2)
ax3 = fig.add_subplot(gs[33:37, 1:10])
strain_viz.strain_legend(ax=ax3)
ax4 = fig.add_subplot(gs[38:, :])
strain_viz.table_data(self, sites=sites, ax=ax4)
ax5 = fig.add_subplot(gs[34:36, 21:])
strain_viz.speed_data(self, sites=sites, ax=ax5)
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def unavco_logo(**kwargs):
im_read = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
a = plt.imread(im_read)
plt.imshow(a, aspect='equal')
axis('off')
def map_title(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 1.5))
ax = fig.add_subplot(1, 1, 1)
title_ = kwargs.get('title', "GPS Triangle-Strain Map Using UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 20)
ha_ = kwargs.get('ha', 'center')
va_ = kwargs.get('va', 'top')
xy_ = kwargs.get('xy', (0.0, 0.5))
ax.annotate(str(title_), xy=xy_, va=va_, ha=ha_, fontsize=fontsize_)
ax.axis('off')
def ellipse_subplot(self, V, **kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
strain_viz.def_ellipse(self, V)
ax.set_title("Infinitesimal Strain Ellipse", x=0.5, y=1.05, fontsize=16, fontweight='light')
sites_h = []
colors = ['tomato', 'cornflowerblue']
strain_ = ['$S_{1H}$', '$S_{2H}$']
for i in range(2):
site_0 = Line2D([0], [0], color=colors[i], linestyle='-', linewidth=1.5, fillstyle='full', label=strain_[i])
sites_h.append(site_0)
leg = ax.legend(handles=[sites_h[0], sites_h[1]], ncol=2, loc='upper center', bbox_to_anchor=(0.5, 1.1), fontsize="x-large", frameon=False)
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.5)
ax.axis('off')
def contraction(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
rot0 = mtrans.Affine2D().rotate_deg(0)
x0, y0 = rot0.transform_point((0.35, -strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [0.25, 0.75]) + 0.75))
x1, y1 = rot0.transform_point((-0.35, -strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [0.25, 0.75]) + 0.5))
sz_e1 = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [40, 80])
sz_e2 = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [40, 80])
x = np.array([-0.35, 0.35])
y_1 = np.array([0.48, 0.73])
y_2 = np.array([y1+0.01, y0+0.01])
plt.plot((-0.35, 0.35), (0.48, 0.73), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.plot((-0.35, 0.35), (y1+0.01, y0+0.01), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.fill_between(x, y_1, y_2, where=(y_1 > y_2), color='slategrey', alpha=0.15, interpolate=True)
ax.annotate("",
xy=(0.35, 0.75),
xytext=(x0, y0), textcoords='data',
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2)
)
ax.annotate("",
xy=(-0.35,0.5),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2)
)
ax.annotate("Infinitesimal Strain (Contraction)", xy=(0.0, 0.9), xycoords="data",
va="top", ha="center", fontsize=16)
ax.annotate("%s\nnano-strain" % (min_strain), xy=(-0.75, 0.3), xycoords="data",
va="center", ha="center", fontsize=12)
ax.annotate("%s\nnano-strain" % (max_strain), xy=(0.75, 0.3), xycoords="data",
va="center", ha="center", fontsize=12)
ax.set_xlim([-1,1])
ax.set_ylim([0,1])
ax.axis('off')
def elongation(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
scale_arrow_percent_0 = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [0.2, 0.6])
boxstyle0_d = f"darrow,pad=%s" % (scale_arrow_percent_0)
scale_arrow_percent_1 = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [0.2, 0.6])
boxstyle1_d = f"darrow,pad=%s" % (scale_arrow_percent_1)
sz_e1_d = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [10, 15])
sz_e2_d = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [10, 15])
x = np.array([0.85, 0.35, -0.21, -0.32])
y_2 = np.array([scale_arrow_percent_1+0.1, scale_arrow_percent_0 + 0.0975, 0.65, 0.125])
ax.fill(x, y_2, color='slategrey', alpha=0.15)
plt.plot((-0.21, -0.32), (0.65, 0.125), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.plot((0.8, 0.35), (scale_arrow_percent_1+0.15, scale_arrow_percent_0 + 0.0975), color='slategrey', linewidth=1, linestyle='--', marker=',')
bbox_props2 = dict(boxstyle=boxstyle1_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.1, 0.68, sz_text1, ha="center", va="top", rotation=0,
size=sz_e2_d, color='w', bbox=bbox_props2)
sz_text0 = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
bbox_props3 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
ax.text(0.35, 0.2, sz_text0, ha="center", va="top", rotation=0,
size=sz_e1_d, color='w', bbox=bbox_props3)
ax.annotate("Infinitesimal Strain (Elongation)", xy=(0.0, 0.925), xycoords="data",
va="top", ha="center", fontsize=16, fontweight='book')
ax.annotate("%s\nnano-strain" % (min_strain), xy=(-0.65, 0.63), xycoords="data",
va="center", ha="center", fontsize=12)
ax.annotate("%s\nnano-strain" % (max_strain), xy=(-0.65, 0.05), xycoords="data",
va="center", ha="center", fontsize=12)
bboxprops = dict(boxstyle="round,pad=1", facecolor='white', edgecolor='black', lw=3)
ax.annotate("", xy=(-0.65, 0.05), xycoords="data",
va="center", ha="center", fontsize=12, bbox=bboxprops)
ax.set_xlim([-1,1])
ax.set_ylim([0, 1])
ax.axis('off')
def table_data(self, sites, **kwargs):
ax = kwargs.get('ax', None)
fontsize = kwargs.get('fontsize', 11.25)
        scale = kwargs.get('scale', 1.75)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
table = ax.table(cellText=sites.round(6).values, colLabels=sites.columns, cellLoc='center', rowLoc='center',loc='center')
table.auto_set_font_size(False)
table.set_fontsize(fontsize)
table.scale(1, scale)
ax.axis('off')
def speed_data(self, sites, **kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
sites_t = sites.copy().drop(['E uncertainty (mm/yr)', 'N uncertainty (mm/yr)'], axis=1)
sites_t.columns = ['sites', 'longitude', 'latitude', 'east_v', 'north_v']
sites_t['Speed (mm/yr)'] = sites_t[['east_v', 'north_v']].apply(lambda x: np.sqrt((x.east_v**2)+(x.north_v**2)), axis=1)
sites_t = sites_t[['sites', 'Speed (mm/yr)']]
table = ax.table(cellText=sites_t.round(6).values, colLabels=sites_t.columns, cellLoc='center', rowLoc='center',loc='center')
table.auto_set_font_size(False)
table.set_fontsize(11.25)
table.scale(1, 1.75)
ax.axis('off')
def quiver_legend(self, sites, **kwargs):
ax = kwargs.get('ax', None)
size = kwargs.get('size', 10)
label = kwargs.get('label', '10 mm/yr')
scale_arrow = kwargs.get('scale_arrow', 40)
if ax is None:
#fig_ = plt.figure(figsize=(5, 5))
fig_ = plt.figure()
ax = fig_.add_subplot(1, 1, 1)
Q = ax.quiver(sites['E velocity (mm/yr)'], sites['N velocity (mm/yr)'], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
ax.clear()
p_fancy = FancyBboxPatch((0.115, 0.415),
0.59, 0.17,
boxstyle="square,pad=0.05",
fc='w', ec='k', lw=1, alpha=0.25)
ax.add_patch(p_fancy)
annotate("Velocity Relative to SNARF", xy=(0.4, 0.6), xycoords="data",
va="top", ha="center", fontsize=14, fontweight='book')
ax.quiverkey(Q, 0.45, 0.45, size, label, labelpos='E', fontproperties=dict(size=12.5), labelsep=0.2,
coordinates='axes')
ax.axis('off')
def strain_legend(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
sites_h = []
colors = ['tomato', 'cornflowerblue']
strain_ = ['$S_{1H}$', '$S_{2H}$']
for i in range(2):
site_0 = Line2D([0], [0], color=colors[i], linestyle='-', linewidth=1.5, fillstyle='full', label=strain_[i])
sites_h.append(site_0)
site_1 = Line2D([0], [0], marker='$\u25CC$', color='w', linestyle='--', markeredgecolor='slategrey',
markeredgewidth=0.5, label='Initial State', markerfacecolor='slategrey', markersize=20)
site_2 = Line2D([0], [0], marker='o', color='w', linestyle='--', markeredgecolor='k',
markeredgewidth=1.1, label='Strain Ellipse', markerfacecolor='w', markersize=18)
leg = ax.legend(handles=[sites_h[0], site_1, sites_h[1], site_2], ncol=2, loc='center', fontsize="x-large", frameon=True, title="Strain Ellipse Legend")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.5)
plt.setp(leg.get_title(),fontsize=14)
        ax.axis('off')
GPS_Strain/GPS_Strain.py
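# --- Editor's usage sketch (not part of the original file) ---
# Putting the pieces together: fetch three stations, run the strain
# computation, then draw the annotated maps. Station IDs, dates and output
# filenames are placeholders.
def _example_full_workflow():
    gps = unavco_data(start_time='2015-01-01T00:00:00',
                      end_time='2020-01-01T00:00:00')
    sites_df = gps.site_data(['P395', 'P396', 'P397'])
    comp = strain_data(sites_df).output_data()
    viz = strain_viz(sites_df)
    # V is the left stretch tensor computed above; e1/e2 scale the map symbol.
    viz.strain_map_full(V=comp.left_stretch_tensor, save_fig='strain_map.png')
    viz.symbol_map_full(V=comp.left_stretch_tensor,
                        e1=comp.e1_2[0], e2=comp.e1_2[1], e_rot=comp.e1_azi,
                        save_fig='symbol_map.png')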
# Import Packages
import pandas as pd
import numpy as np
import math
from pylab import *
from scipy import linalg as la
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.transforms as mtrans
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.gridspec as gridspec
from matplotlib.patches import FancyBboxPatch
from matplotlib import patheffects
plt.rcParams['font.family'] = ["Georgia"]
class unavco_data:
def __init__(self, **kwargs):
self.start_time = kwargs.get('start_time', '')
self.end_time = kwargs.get('end_time', '')
def get_stations(self, minlon, maxlon, minlat, maxlat):
# Returns a pandas dataframe with all sites within a specific set of coordinates
import requests, io
url_ = "https://web-services.unavco.org/gps/metadata/sites/v1?minlatitude="
coordinates = str(minlat) + "&maxlatitude=" + str(maxlat) + "&minlongitude=" + str(minlon) + "&maxlongitude=" + str(maxlon)
srt_ = "&starttime=" + str(self.start_time)
end_ = "&endtime=" + str(self.end_time)
full_url = url_ + coordinates + "&summary=true"
urlData = requests.get(full_url).content
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
return rawData
def site_data(self, sites, **kwargs):
# Generates a pandas dataframe with all of the site information
period = kwargs.get('period', 365)
from dateutil.relativedelta import relativedelta
from dateutil.parser import parse
data_ = []
for i in range(3):
location = sites[i]
file = "https://web-services.unavco.org/gps/data/position/"
start_ = "/v3?analysisCenter=cwu&referenceFrame=nam14&starttime=" + str(self.start_time)
end_ = "&endtime=" + str(self.end_time)
query = "&report=long&dataPostProcessing=Cleaned&refCoordOption=first_epoch"
data_loop = pd.read_csv(file + location + start_ + end_ + query, skiprows=[i for i in range(0,8)])
site = location
lon = data_loop.head(1)[' E longitude'][0] - 360
lat = data_loop.head(1)[' N latitude'][0]
difference_in_years = relativedelta(parse(max(data_loop['Datetime'])), parse(min(data_loop['Datetime']))).years
difference_in_years = difference_in_years if difference_in_years > 0 else 1
e_vel = (data_loop[' delta E'].mean() / difference_in_years) * 1000
e_unc = 0.01
n_vel = (data_loop[' delta N'].mean() / difference_in_years) * 1000
n_unc = 0.01
data_.append(dict(zip(['site', 'longitude', 'latitude', 'E velocity (mm/yr)', 'E uncertainty (mm/yr)',
'N velocity (mm/yr)', 'N uncertainty (mm/yr)'],
[site, lon, lat, e_vel, e_unc, n_vel, n_unc])))
data_df = pd.DataFrame(data_)
return data_df
class strain_data:
def __init__(self, data_unav):
self.data_unav = data_unav
class computation:
pass
def output_data(self, **kwargs):
# read in computation for easier data retrieval
computation = strain_data.computation()
# Primary output for the sites
data_df = self.data_unav
pwr = kwargs.get('pwr', 7)
# Convert to radians
l_rads = data_df[['site', 'longitude', 'latitude']].copy()
l_rads['longitude'] = l_rads['longitude'].apply(lambda x: x * (math.pi/180))
l_rads['latitude'] = l_rads['latitude'].apply(lambda x: x * (math.pi/180))
computation.l_rads = l_rads
# Determine UTM Zone
utm_z = data_df[['site', 'longitude']].copy()
utm_z['UTM_Zone'] = utm_z['longitude'].apply(lambda x: (x + 180)/6)
def utm_zone(x):
if x - int(x) > 0:
return int(x) + 1
else:
return int(x)
utm_z['UTM_Zone'] = utm_z['UTM_Zone'].apply(lambda x: utm_zone(x))
utm_z = utm_z[['site', 'UTM_Zone']]
computation.utm_z = utm_z
# Central Meridian of Zone (long0)
cm_long0 = utm_z.copy()
cm_long0['long0'] = cm_long0['UTM_Zone'].apply(lambda x: -183 + (6 * x))
cm_long0 = cm_long0[['site', 'long0']]
computation.cm_long0 = cm_long0
# Central Meridian of Zone (long0) in radians
cm_long0_r = cm_long0.copy()
cm_long0_r['long0_r'] = cm_long0_r['long0'].apply(lambda x: x * math.pi/180)
cm_long0_r = cm_long0_r[['site', 'long0_r']]
computation.cm_long0_r = cm_long0_r
# Central meridian of zone to the west (the 'pseudo' zone)
def cm_west(x):
if x == -177:
return 177
else:
return x - 6
p_z = cm_long0.copy()
p_z['cm_pseudo_zone'] = cm_long0['long0'].apply(lambda x: cm_west(x))
p_z = p_z[['site', 'cm_pseudo_zone']]
computation.p_z = p_z
# Central meridian of zone to the west (the 'pseudo' zone) in radians
p_z_r = p_z.copy()
p_z_r['cm_pseudo_zone_r'] = p_z_r['cm_pseudo_zone'].apply(lambda x: x * math.pi/180)
p_z_r = p_z_r[['site', 'cm_pseudo_zone_r']]
computation.p_z_r = p_z_r
# UTM 'pseudo' zone
utm_p_z = p_z.copy()
utm_p_z['UTM_Pseudo_Zone'] = utm_p_z['cm_pseudo_zone'].apply(lambda x: (x + 180)/6)
utm_p_z['UTM_Pseudo_Zone'] = utm_p_z['UTM_Pseudo_Zone'].apply(lambda x: utm_zone(x))
computation.utm_p_z = utm_p_z
# WGS84 datum
a_wgs84 = 6378137
b_wgs84 = 6356752.3142
computation.a_wgs84 = a_wgs84
computation.b_wgs84 = b_wgs84
# Calculate key components
k0 = 0.9996
computation.k0 = k0
e = math.sqrt(1-b_wgs84**2/a_wgs84**2)
computation.e = e
e_2 = ((e * a_wgs84)/b_wgs84)**2
computation.e_2 = e_2
n = (a_wgs84 - b_wgs84)/(a_wgs84 + b_wgs84)
computation.n = n
# Calculate rho
def calc_rho(x, e_, a_):
p_1 = a_*(1-e_**2)
p_2 = (1-(e_**2 * math.sin(x)**2))**(3/2)
return p_1/p_2
rho = l_rads[['site', 'latitude']].copy()
rho['$\rho$'] = rho['latitude'].apply(lambda x: calc_rho(x, e, a_wgs84))
rho = rho[['site', '$\rho$']]
computation.rho = rho
# Calculate nu
def calc_nu(x, e_, a_):
p_1 = a_
p_2 = math.sqrt(1-(e_**2 * math.sin(x)**2))
return p_1/p_2
nu = l_rads[['site', 'latitude']].copy()
nu['$\nu$'] = nu['latitude'].apply(lambda x: calc_nu(x, e, a_wgs84))
nu = nu[['site', '$\nu$']]
computation.nu = nu
# Calculate p
p_0 = l_rads[['site', 'longitude']].copy()
p_1 = cm_long0_r.copy()
p_merge = p_0.merge(p_1, on='site')
p_merge['p'] = p_merge.longitude - p_merge.long0_r
p = p_merge[['site', 'p']]
computation.p = p
# Calculate pseudo p
p_p0 = l_rads[['site', 'longitude']].copy()
p_p1 = p_z.copy()
p_p2 = p_z_r.copy()
p_p_m = p_p0.merge(p_p1, on='site').merge(p_p2, on='site')
computation.p_p_m = p_p_m
def pseudo_p(x, y, z):
if y == 177:
return abs(x) - z
else:
return x - z
p_p_m['pseudo_p'] = p_p_m.apply(lambda x: pseudo_p(x.longitude, x.cm_pseudo_zone, x.cm_pseudo_zone_r), axis=1)
pseudo_p = p_p_m[['site', 'pseudo_p']]
computation.pseudo_p = pseudo_p
# Matrix Components
def mat_comps(x, e, m):
if m == 'm1':
return x*(1-((e**2)/4)-((3*(e**4))/64)-((5*(e**6))/256))
elif m == 'm2':
return math.sin(2*x)*(((3*(e**2))/8)+((3*(e**4))/32)+((45*(e**6))/1024))
elif m == 'm3':
return math.sin(4*x)*(((15*(e**4))/256)+((45*(e**6))/1024))
else:
return math.sin(6*x)*((35*(e**6))/3072)
def m_comp(m):
m_ = l_rads[['site', 'latitude']].copy()
i = 0
while i < len(m):
m_[m[i]] = m_['latitude'].apply(lambda x: mat_comps(x, e, m[i]))
i+=1
return m_
m_comps = m_comp(m=['m1', 'm2', 'm3', 'm4'])
computation.m_comps = m_comps
# Calculate M
def calc_M(x0, x1, x2, x3, a):
eq_ = (x0 - x1 + x2 - x3)
return a*eq_
M = m_comps.copy()
M['M'] = M.apply(lambda x: calc_M(x.m1, x.m2, x.m3, x.m4, a_wgs84), axis=1)
M = M[['site', 'M']]
computation.M = M
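# M is the meridional arc length from the equator to each site's latitude, using the
# standard series truncated at the e**6 term and assembled as a*(m1 - m2 + m3 - m4).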
# Calculate the K components
def k_comps(x, M, nu, k0, e_2, k):
if k == 'K1':
return M*k0
elif k == 'K2':
return k0*nu*math.sin(2*x)/4
elif k == 'K3':
return (k0*nu*math.sin(x)*((math.cos(x))**3)/24)*(5-((math.tan(x))**2)+(9*e_2*((math.cos(x))**2))+(4*(e_2**2)*((math.cos(x))**4)))
elif k == 'K4':
return k0*nu*math.cos(x)
else:
return (k0*nu*((math.cos(x))**3)/6)*(1-((math.tan(x))**2)+(e_2*((math.cos(x))**2)))
def k_comp(k):
k_0 = l_rads[['site', 'latitude']].copy()
k_1 = M.copy()
k_2 = nu.copy()
k_ = k_0.merge(k_1, on='site').merge(k_2, on='site')
i = 0
while i < len(k):
k_[k[i]] = k_.apply(lambda x: k_comps(x.latitude, x.M, x[r'$\nu$'], k0, e_2, k[i]), axis=1)
i+=1
k_ = k_[['site'] + k]
return k_
k_c = k_comp(k=['K1', 'K2', 'K3', 'K4', 'K5'])
computation.k_c = k_c
# True Northing and Easting
def t_ne(K1, K2, K3, K4, K5, p, ne):
if ne == 'northing':
return K1+(K2*(p**2))+(K3*(p**4))
else:
return 500000+(K4*p)+(K5*(p**3))
t_n_0 = k_c.merge(p, on='site')
computation.t_n_0 = t_n_0
t_n_0['true_northing'] = t_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.p, 'northing'), axis = 1)
t_n_0['true_easting'] = t_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.p, 'easting'), axis = 1)
t_n_e = t_n_0[['site', 'true_northing', 'true_easting']]
computation.t_n_e = t_n_e
# Pseudo Northing and Easting
p_n_0 = k_c.merge(pseudo_p, on='site')
p_n_0['pseudo_northing'] = p_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.pseudo_p, 'northing'), axis = 1)
p_n_0['pseudo_easting'] = p_n_0.apply(lambda x: t_ne(x.K1, x.K2, x.K3, x.K4, x.K5, x.pseudo_p, 'easting'), axis = 1)
p_n_e = p_n_0[['site', 'pseudo_northing', 'pseudo_easting']]
computation.p_n_e = p_n_e
# Westernmost Zone
def w_z_():
if np.std(utm_z.UTM_Zone) > 5:
return 60
else:
return (np.sum(utm_z.UTM_Zone)/3)//1
w_z = w_z_()
w_z_avg = (np.sum(utm_z.UTM_Zone)/3)
w_z_std = np.std(utm_z.UTM_Zone)
computation.w_z = w_z
computation.w_z_avg = w_z_avg
computation.w_z_std = w_z_std
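# If the three sites straddle a UTM zone boundary, each site also carries coordinates in the
# 'pseudo' zone to its west; below, sites outside the westernmost zone use those pseudo values
# so that all three share a single common easting/northing origin.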
# UTM coordinates relative to the westernmost zone, to be used in strain analysis
def utm_w_z(x, w, t, p):
if x == w:
return t
else:
return p
utm_0 = utm_z.copy()
utm_1 = t_n_e.copy()
utm_2 = p_n_e.copy()
utm_w = utm_0.merge(utm_1, on='site').merge(utm_2, on='site')
utm_w['UTM_w_z_easting'] = utm_w.apply(lambda x: utm_w_z(x.UTM_Zone, w_z, x.true_easting, x.pseudo_easting), axis=1)
utm_w['UTM_w_z_northing'] = utm_w.apply(lambda x: utm_w_z(x.UTM_Zone, w_z, x.true_northing, x.pseudo_northing), axis=1)
utm_w = utm_w[['site', 'UTM_w_z_easting', 'UTM_w_z_northing']]
computation.utm_w = utm_w
# Center of Triangle
mean_n = utm_w.UTM_w_z_northing.mean()
mean_e = utm_w.UTM_w_z_easting.mean()
computation.mean_n = mean_n
computation.mean_e = mean_e
# Revised Locations
sites_r = utm_w.copy()
sites_r['revised_easting'] = sites_r['UTM_w_z_easting'].apply(lambda x: x - mean_e)
sites_r['revised_northing'] = sites_r['UTM_w_z_northing'].apply(lambda x: x - mean_n)
sites_r = sites_r[['site', 'revised_easting', 'revised_northing']]
computation.sites_r = sites_r
# Velocities converted from mm/yr to m/yr
vel_m = self.data_unav.copy().drop(['longitude', 'latitude'], axis=1)
vel_m['E velocity (m/yr)'] = vel_m['E velocity (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['E uncertainty (m/yr)'] = vel_m['E uncertainty (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['N velocity (m/yr)'] = vel_m['N velocity (mm/yr)'].apply(lambda x: x * 0.001)
vel_m['N uncertainty (m/yr)'] = vel_m['N uncertainty (mm/yr)'].apply(lambda x: x * 0.001)
vel_m = vel_m.drop(['E velocity (mm/yr)', 'E uncertainty (mm/yr)', 'N velocity (mm/yr)', 'N uncertainty (mm/yr)'], axis=1)
computation.vel_m = vel_m
# Matrix 1
M1 = np.array([[sites_r.revised_easting], [sites_r.revised_northing]]).transpose()
computation.M1 = M1
# Matrix 2
def mat2(x):
mat_2 = pd.DataFrame()
for i in range(3):
x = sites_r.revised_easting[i]
y = sites_r.revised_northing[i]
list_s = [pd.Series(np.array([1, 0, (-1 * y), x, y, 0]).transpose()), pd.Series(np.array([0, 1, x, 0, x, y]).transpose())]
# DataFrame.append was removed in pandas 2.0; build each pair of rows with pd.concat instead
mat_2 = pd.concat([mat_2, pd.DataFrame(list_s)], ignore_index=True)
return mat_2
M2 = mat2(x=sites_r)
computation.M2 = np.array(M2)
# Matrix 3
M3 = la.inv(M2)
M3 = pd.DataFrame(M3)
computation.M3 = np.array(M3)
# Matrix 4
M4_ = pd.concat([vel_m['E velocity (m/yr)'], vel_m['N velocity (m/yr)']]).sort_index()
M4 = np.array(M4_)[np.newaxis].T
computation.M4 = M4
# Matrix 5
M5 = np.matrix(M3).dot(np.matrix(M4))
computation.M5 = M5
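# M5 = inv(M2) . M4 solves the 6x6 linear system exactly (three sites give six velocity
# components), yielding the two translation components, the rigid-body rotation rate, and
# the three independent components of the infinitesimal strain-rate tensor.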
# North Unit Vector
n_v_unit = [0, 1]
computation.n_v_unit = n_v_unit
# Translation Vector
t_v = [float(M5[0]), float(M5[1])]
computation.t_v = t_v
# Magnitude of translation vector, or speed (m/yr)
t_v_s = np.sqrt((t_v[0]**2)+(t_v[1]**2))
computation.t_v_s = t_v_s
# Unit Translation Vector
t_v_unit = [(t_v[0]/t_v_s), (t_v[1]/t_v_s)]
computation.t_v_unit = t_v_unit
# Angle between north vector and unit trans vector
n_t_a = math.acos((t_v_unit[0]*n_v_unit[0])+(t_v_unit[1]*n_v_unit[1]))*(180/math.pi)
computation.n_t_a = n_t_a
# Azimuth of trans vect (degrees clockwise from north)
def trans_azi(x, y):
if x < 0:
return 360 - y
else:
return y
t_v_azi = trans_azi(t_v[0], n_t_a)
computation.t_v_azi = t_v_azi
# Matrix M6
M6 = np.array([[M5[-3], M5[-2]], [M5[-2], M5[-1]]])
computation.M6 = M6
# Eigen System
def eigen_s(x0, x1, x2, x3):
ev_0 = x0 + x3
ev_1 = 4 * x1 * x2
ev_2 = (x0 - x3)**2
ev_3 = np.sqrt(ev_1 + ev_2)
ev_a = (ev_0 + ev_3) / 2
ev_b = (ev_0 - ev_3) / 2
eigen = [ev_a, ev_b]
return eigen
e_s = eigen_s(float(M6[0][0]), float(M6[0][1]), float(M6[1][0]), float(M6[1][1]))
computation.e_s = e_s
# Calculate e1 and e2
def det_e(e_sys):
if e_sys[0] > e_sys[1]:
return [e_sys[0], e_sys[1]]
else:
return [e_sys[1], e_sys[0]]
e1_2 = det_e(e_s)
computation.e1_2 = e1_2
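# e1 and e2 are the principal strain rates: the eigenvalues of the symmetric 2x2 tensor M6,
# ordered so that e1 is the maximum horizontal extension and e2 the minimum.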
# Calculate e1 and e2 unit eigenvectors
def unit_eigen(x, y, z):
x_c = 1/np.sqrt(1+((x-y)/z)**2)
y_c = ((x-y)/z)/np.sqrt(1+((x-y)/z)**2)
return [x_c, y_c]
e1_unit = unit_eigen(e1_2[0], float(M6[0][0]), float(M6[0][1]))
computation.e1_unit = e1_unit
e2_unit = unit_eigen(e1_2[1], float(M6[0][0]), float(M6[0][1]))
computation.e2_unit = e2_unit
# Angle between north vector and e1/e2 unit eigenvectors (Degrees)
def find_angle(w, x, y, z):
return math.acos((w*x)+(y*z))*(180/math.pi)
nv_e1 = find_angle(e1_unit[0], n_v_unit[0], e1_unit[1], n_v_unit[1])
computation.nv_e1 = nv_e1
nv_e2 = find_angle(e2_unit[0], n_v_unit[0], e2_unit[1], n_v_unit[1])
computation.nv_e2 = nv_e2
# Azimuth of e1/e2 unit eigenvectors
def az_e(x, y):
if x < 0:
return 360 - y
else:
return y
e1_azi = az_e(e1_unit[0], nv_e1)
computation.e1_azi = e1_azi
e2_azi = az_e(e2_unit[0], nv_e2)
computation.e2_azi = e2_azi
# Alternate Azimuth of e1/e2 unit eigenvectors
def a_az_e(x):
if x < 180:
return x + 180
else:
return x - 180
e1_azi_a = a_az_e(e1_azi)
computation.e1_azi_a = e1_azi_a
e2_azi_a = a_az_e(e2_azi)
computation.e2_azi_a = e2_azi_a
# Maximum infinitesimal shear strain
mis_strain = 2 * np.sqrt(((float(M6[0][0]) - float(M6[1][1])) / 2)**2 + (float(M6[0][1])**2))
computation.mis_strain = mis_strain
# Area Strain
a_strain = e1_2[0] + e1_2[1]
computation.a_strain = a_strain
# Invariants of the infinitesimal strain rate tensor
inv_0 = a_strain
computation.inv_0 = inv_0
inv_1 = e1_2[0] * e1_2[1]
computation.inv_1 = inv_1
inv_2 = inv_1
computation.inv_2 = inv_2
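# In 2D the strain-rate tensor has two independent invariants: the trace (e1 + e2, the area
# strain above) and the determinant (e1 * e2). As written, inv_2 simply repeats inv_1.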
# Matrix 7
def m7(x, y):
v = pd.concat([x, y]).sort_index()
v = np.array(list(v.apply(lambda x: 1 / (x**2))))
return np.diag(v)
M7 = pd.DataFrame(m7(vel_m['E uncertainty (m/yr)'], vel_m['N uncertainty (m/yr)']))
computation.M7 = np.array(M7)
# Matrix 8
M8 = M2.T
computation.M8 = M8
# Matrix (m9.1 = m7 dot m2)
M9_1 = M7.dot(M2)
computation.M9_1 = M9_1
# Matrix (m9.2 = m8 dot m9.1)
M9_2 = M8.dot(M9_1)
computation.M9_2 = M9_2
# Matrix 9
M9 = la.inv(M9_2)
computation.M9 = M9
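# M9 = inv(M2.T @ W @ M2) with W = diag(1/sigma**2) is the weighted-least-squares covariance
# of the six estimated parameters; its diagonal supplies the +/- uncertainties reported below.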
# Primary Data Output
fields_ = ['E component ± uncert [m/yr]', 'N component ± uncert [m/yr]', 'Azimuth [degrees]',
'Speed [m/yr]', 'Rotation ± uncertainty [degrees/yr]', 'Rotation ± uncertainty [nano-rad/yr]', 'Direction of rotation',
'Max horizontal extension (e1H) [nano-strain]', 'Azimuth of S1H [degrees]', 'Min horizontal extension (e2H) [nano-strain]',
'Azimuth of S2H [degrees]', 'Max shear strain [nano-strain]', 'Area strain [nano-strain]']
data_1 = str(round(float(M5[0]), 4)) + r' $\pm$ ' + str(round(float(M9[0][0]), 12))
data_2 = str(round(float(M5[1]), 4)) + r' $\pm$ ' + str(round(float(M9[1][1]), 12))
data_3 = str(round(float(M5[2]) * (180 / math.pi), 10)) + r' $\pm$ ' + str(round(np.sqrt(float(M9[2][2])) * (180 / math.pi), 12))
data_4 = str(round(float(M5[2]) * (10**9), 4)) + r' $\pm$ ' + str(round(np.sqrt(float(M9[2][2])) * (10**9), 4))
data_5 = 'Clockwise' if (float(M5[2]) * (10**9)) < 0 else 'Anti-Clockwise'
data_6 = str(round(float(e1_2[0]) * (10**9), 4))
data_7 = str(round(e1_azi, 4)) + ' or ' + str(round(e1_azi_a, 4))
data_8 = str(round(float(e1_2[1]) * (10**9), 4))
data_9 = str(round(e2_azi, 4)) + ' or ' + str(round(e2_azi_a, 4))
values_ = [data_1, data_2, str(round(t_v_azi, 4)), str(round(t_v_s, 4)), data_3, data_4, data_5,
data_6, data_7, data_8, data_9, str(round(mis_strain*(10**9), 4)), str(round(a_strain*(10**9), 4))]
primary = pd.DataFrame(values_, index=fields_)
primary.columns = ['Translation Vector']
computation.primary_data = primary
# Calculate the strain ellipse
stretch = np.array([[float(M5[3]), 0], [0, float(M5[5])]])
computation.stretch = stretch
shear = np.array([[0, float(M5[4])/2], [float(M5[4])/2, 0]])
computation.shear = shear
theta = float(M5[2]) * (180/math.pi)
rotation = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
computation.rotation_tensor = rotation
S = (stretch + shear)
computation.stretch_tensor = S
R = rotation
F_ = R.dot(S) * 10**6 + np.array([[1, 0], [0, 1]])
F = np.dot(F_, F_.T)
computation.deformation_matrix = F
B = F @ F.T
C = F.T @ F
V = la.sqrtm(B)
computation.left_stretch_tensor = V
U = la.sqrtm(C)
computation.right_stretch_tensor = U
R_r = la.inv(V) @ F
R_l = F @ la.inv(U)
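# Polar decomposition: F = R.U = V.R, so R_r = inv(V) @ F and R_l = F @ inv(U) should both
# recover the same rotation tensor (a useful consistency check on the deformation matrix).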
return computation
class strain_viz:
# Some of the functions below are adapted from <NAME>'s GitHub repository
def __init__(self, strain_data):
self.strain_data = strain_data
def def_ellipse(self, V):
# Draw strain ellipse from deformation gradient
theta = linspace(0, 2*pi, 180)
xc, yc = cos(theta), sin(theta)
x,y = dot(V, [xc,yc])
plt.plot(xc, yc, 'slategrey', x, y, lw=2, linestyle='--')
plt.fill(xc, yc, 'w', alpha=0.45)
u, s, v = svd(V)
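# The SVD gives the principal stretch directions (columns of u) and magnitudes (s);
# the quiver calls below draw +/- s*u as the ellipse's principal axes.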
plt.plot(x, y, 'k', lw=2, zorder=40)
plt.quiver(zeros(2), zeros(2),
hstack((s*u[0],-s*u[0])), hstack((s*u[1],-s*u[1])),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, headaxislength=0, headlength=0, zorder=30)
plt.quiver(zeros(2), zeros(2),
hstack((1,0)), hstack((0,1)),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, linestyle='dashed', alpha=0.25, headaxislength=0, headlength=0, zorder=10)
plt.quiver(zeros(2), zeros(2),
hstack((-1,0)), hstack((0,-1)),
scale=1, units='xy', color=['tomato', 'cornflowerblue'],
width=0.065, linestyle='dashed', alpha=0.25, headaxislength=0, headlength=0, zorder=10)
axis('equal')
axis('off')
def def_field(self, V, **kwargs):
# Visualize displacement field from
# displacement gradient
alpha_ = kwargs.get('alpha', '1')
F = asarray(V)
J = F - eye(2)
X, Y = meshgrid(linspace(-3, 3, 21),
linspace(-2, 2, 17))
u, v = tensordot(J, [X, Y], axes=1)
plt.quiver(X, Y, u, v, angles='xy', color='black', alpha=alpha_)
axis('off')
def get_center(sites_):
# Locate the center of the triangle
lonc = sites_.longitude.sum() / 3
latc = sites_.latitude.sum() /3
if lonc < -180:
lonc = lonc + 360
elif lonc > 180:
lonc = lonc - 360
return lonc, latc
def end_df(sites_):
sites = sites_
first_site = pd.DataFrame(sites.head(1))
last_site = pd.DataFrame(sites.tail(1))
end_sites = pd.concat([first_site, last_site]).reset_index(drop=True)
return end_sites
def ellipse_plot(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', 'off')
ax = kwargs.get('ax', None)
fig = kwargs.get('fig', None)
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
if ax is None:
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
figx = kwargs.get('figx', 15)
figy = kwargs.get('figy', 15)
fig = plt.figure(figsize=(figx, figy))
ax = fig.add_subplot(1, 1, 1, projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
ax.add_image(tiler, tiler_size, interpolation='spline36')
ax.set_aspect(1, 'datalim')
ax.gridlines(draw_labels=True)
plt.plot(sites.longitude, sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(end_sites.longitude, end_sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(sites.longitude, sites.latitude, color='black', linewidth=0, marker=',', transform=ccrs.PlateCarree(), label=sites.site, zorder=20)
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
my_dpi = fig.dpi
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
sbllx = (llx1 + llx0) / 2
sblly = lly0 + (lly1 - lly0) * scale_loc[1]
tmc = ccrs.TransverseMercator(sbllx, sblly)
x0, x1, y0, y1 = ax.get_extent(tmc)
sbx = x0 + (x1 - x0) * scale_loc[0]
sby = y0 + (y1 - y0) * scale_loc[1]
# print(sbx, sby)
sbxe = ((sbx + length * 500)/5)*2
sbxf = round(sbx - length * 500)
j = sbxf
k = 1
while k <= 5:
bar_xs = [j, j + sbxe]
if k % 2 == 0:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
else:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
j += sbxe
k += 1
buffer = [patheffects.withStroke(linewidth=1.5, foreground="w")]
hei_ = kwargs.get('hei_', 5)
ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
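# The loop above draws a classic alternating black/white map scale bar in a local Transverse
# Mercator frame centered on the map, with the '0 km' and total-length labels at either end.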
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'show')
for i in range(len(sites)):
plt.draw()
lon, lat = sites.longitude[i], sites.latitude[i]
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
colors = color_list
scale_arrow = kwargs.get('scale_arrow', 40)
if arrows == 'show':
axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
axi.axis('equal')
axi.axis('off')
sites_h = []
for i in range(3):
site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
sites_h.append(site_0)
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Add Legend
leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.75)
# Add Strain Ellipse
if V != 'off':
plt.draw()
lon, lat = lonc, latc
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
ax2 = fig.add_axes([(x_), (y_), 0.2, 0.2])
ax2.set_xlim([-1,1])
ax2.set_ylim([-1,1])
strain_viz.def_ellipse(self, V)
ax2.axis('equal')
ax2.axis('off')
p1 = ax.get_position()
p2 = ax2.get_position()
ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
axn.axis('equal')
axn.axis('off')
p3 = ax.get_position()
p4 = axn.get_position()
axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def symbol_map(self, **kwargs):
sites = self.strain_data
ax = kwargs.get('ax', None)
fig = kwargs.get('fig', None)
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
if ax is None:
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
figx = kwargs.get('figx', 15)
figy = kwargs.get('figy', 15)
fig = plt.figure(figsize=(figx, figy))
ax = fig.add_subplot(1, 1, 1, projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
ax.add_image(tiler, tiler_size, interpolation='spline36')
ax.set_aspect(1, 'datalim')
ax.gridlines(draw_labels=True)
plt.plot(sites.longitude, sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(end_sites.longitude, end_sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
plt.plot(sites.longitude, sites.latitude, color='black', linewidth=0, marker=',', transform=ccrs.PlateCarree(), label=sites.site, zorder=20)
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
my_dpi = fig.dpi
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
sbllx = (llx1 + llx0) / 2
sblly = lly0 + (lly1 - lly0) * scale_loc[1]
tmc = ccrs.TransverseMercator(sbllx, sblly)
x0, x1, y0, y1 = ax.get_extent(tmc)
sbx = x0 + (x1 - x0) * scale_loc[0]
sby = y0 + (y1 - y0) * scale_loc[1]
sbxe = ((sbx + length * 500)/5)*2
sbxf = round(sbx - length * 500)
j = sbxf
k = 1
while k <= 5:
bar_xs = [j, j + sbxe]
if k % 2 == 0:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
else:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
j += sbxe
k += 1
buffer = [patheffects.withStroke(linewidth=2.5, foreground="w")]
hei_ = kwargs.get('hei_', 5)
ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'off')
for i in range(len(sites)):
plt.draw()
lon, lat = sites.longitude[i], sites.latitude[i]
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
colors = color_list
scale_arrow = kwargs.get('scale_arrow', 40)
if arrows == 'show':
axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
axi.axis('equal')
axi.axis('off')
sites_h = []
for i in range(3):
site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
sites_h.append(site_0)
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Add Legend
leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.75)
plt.draw()
# Add in the e1 and e2 symbols
e1 = kwargs.get('e1', None)
e2 = kwargs.get('e2', None)
#e_loc = kwargs.get('e_loc', 'lower left')
e_rot = kwargs.get('e_rot', 0)
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
# Add Map Symbol
if None not in (e1, e2):
plt.draw()
lon, lat = lonc, latc
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
ax2 = fig.add_axes([(x_), (y_), (5/width), (5/height)])
ax2.set_xlim([-1,1])
ax2.set_ylim([-1,1])
strain_viz.map_symbol(self, e1, e2, rot=e_rot, old_range=old_range, new_range_a=new_range_a, new_range_b=new_range_b, max_strain=max_strain, min_strain=min_strain, ax=ax2)
ax2.axis('equal')
#ax2.axis('off')
p1 = ax.get_position()
p2 = ax2.get_position()
ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
ax2.autoscale(False)
plt.draw()
axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
axn.axis('equal')
axn.axis('off')
p3 = ax.get_position()
p4 = axn.get_position()
axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def scale_arrow(value, old_range, new_range):
tmin, tmax = old_range
xmin, xmax = new_range
percent = abs((value - tmin) / (tmax - tmin))
return ((xmax - xmin) * percent) + xmin
def scale_arrow_percent(value, old_range):
tmin, tmax = old_range
return abs((value - tmin) / (tmax - tmin))
def map_symbol(self, e1, e2, **kwargs):
# Add Figure to plot
ax = kwargs.get('ax', 'none')
rot = kwargs.get('rot', 0)
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
sz_e1 = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_a)
sz_e2 = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_a)
sz_e1_d = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_b)
sz_e2_d = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_b)
sz_p_e1 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
sz_p_e2 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
scale_arrow_percent_0 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle0_d = f"darrow,pad={scale_arrow_percent_0}"
scale_arrow_percent_1 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle1_d = f"darrow,pad={scale_arrow_percent_1}"
#scale_arrow_percent_1 = str(round(strain_viz.scale_arrow_percent(e2 * 10**9, old_range), 1))
#boxstyle1_l = f"larrow,pad=%s" % (scale_arrow_percent_1)
#boxstyle1_r = f"rarrow,pad=%s" % (scale_arrow_percent_1)
if ax == 'none':
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
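# The symbol drawn depends on the signs of the principal strains: two inward arrows for pure
# contraction (e1 == 0, e2 < 0), double-headed 'darrow' boxes for extension, a darrow box plus
# two inward arrows for mixed extension/contraction, and four inward arrows when both e1 and
# e2 are negative. Sizes are scaled from the nano-strain magnitude via scale_arrow.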
if (e1 == 0) and (e2 < 0):
rot0 = mtrans.Affine2D().rotate_deg(rot)
x0, y0 = rot0.transform_point((0.0, sz_p_e2))
x1, y1 = rot0.transform_point((0.0, -sz_p_e2))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
elif (e1 > 0) and (e2 == 0):
bbox_props1 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.0, 0.0, sz_text1, ha="center", va="center", rotation=rot + 90,
size=sz_e1_d, color='w',
bbox=bbox_props1)
elif (e1 > 0) and (e2 > 0):
bbox_props2 = dict(boxstyle=boxstyle1_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.0, 0.0, sz_text1, ha="center", va="center", rotation=rot,
size=sz_e2_d, color='w',
bbox=bbox_props2)
sz_text0 = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
bbox_props3 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
ax.text(0.0, 0.0, sz_text0, ha="center", va="center", rotation=rot+90,
size=sz_e1_d, color='w',
bbox=bbox_props3)
elif (e1 > 0) and (e2 < 0):
angle_phi = rot
l2 = np.array((5, 5))
trans_angle = plt.gca().transData.transform_angles(np.array((angle_phi,)),
l2.reshape((1, 2)))[0]
bbox_props = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
sz_text = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
t = ax.text(0.0, 0.0, sz_text, ha="center", va="center",
size=sz_e1_d, color='w', rotation=trans_angle, bbox=bbox_props)
rot1 = mtrans.Affine2D().rotate_deg(angle_phi)
x0, y0 = rot1.transform_point((0.0, sz_p_e2))
x1, y1 = rot1.transform_point((0.0, -sz_p_e2))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
elif (e1 < 0) and (e2 < 0):
rot0 = mtrans.Affine2D().rotate_deg(rot)
x0, y0 = rot0.transform_point((0.0, sz_p_e2))
x1, y1 = rot0.transform_point((0.0, -sz_p_e2))
x2, y2 = rot0.transform_point((sz_p_e1, 0.0))
x3, y3 = rot0.transform_point((-sz_p_e1, 0.0))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x2, y2),
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x3, y3),
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
axis('off')
def symbol_map_full(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', None)
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'off')
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Get data for plot
e1 = kwargs.get('e1', None)
e2 = kwargs.get('e2', None)
e_loc = kwargs.get('e_loc', 'lower left')
e_rot = kwargs.get('e_rot', 0)
# Import Site data and find center
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
fig = plt.figure(figsize=(20, 15), constrained_layout=False)
gs = gridspec.GridSpec(30, 40, figure=fig, wspace=0.0, hspace=0.0)
ax = fig.add_subplot(gs[:, 11:], projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
scale_arrow = kwargs.get('scale_arrow', 40)
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
hei_ = kwargs.get('hei_', 5)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
strain_viz.symbol_map(self, e1=e1, e2=e2, e_loc=e_loc, e_rot=e_rot, hei_=hei_, old_range=old_range, new_range_a=new_range_a,
new_range_b=new_range_b, max_strain=max_strain, min_strain=min_strain,
arrows=arrows, color_list=color_list, tiler_size=tiler_size, map_tile_type=map_tile_type,
scale_arrow=scale_arrow, length=length, scale_loc=scale_loc, loc_=loc_, ax=ax, fig=fig)
ax1 = fig.add_subplot(gs[27:30, 1:9])
image = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
strain_viz.unavco_logo(image=image, ax=ax1)
ax1_1 = fig.add_subplot(gs[:3, :10])
title_ = kwargs.get('title', "GPS Triangle-Strain Map\nUsing UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 24)
ha_ = kwargs.get('ha', 'center')
va_ = kwargs.get('va', 'top')
xy_ = kwargs.get('xy', (0.5, 0.925))
strain_viz.map_title(title=str(title_), xy=xy_, fontsize=fontsize_, ha=ha_, va=va_, ax=ax1_1)
ax2 = fig.add_subplot(gs[4:12, 1:9])
strain_viz.ellipse_subplot(self, V=V, ax=ax2)
ax3 = fig.add_subplot(gs[13:18, :10])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
old_range = kwargs.get('old_range', [0.1, 300])
strain_viz.contraction(old_range=old_range, max_strain=max_strain, min_strain=min_strain, ax=ax3)
ax4 = fig.add_subplot(gs[20:25, :10])
strain_viz.elongation(old_range=old_range, max_strain=max_strain, min_strain=min_strain, ax=ax4)
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def strain_map_full(self, **kwargs):
sites = self.strain_data
V = kwargs.get('V', None)
# Tiler Size
tiler_size = kwargs.get('tiler_size', 1)
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'show')
size = kwargs.get('size', 10)
label = kwargs.get('label', '10 mm/yr')
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Import Site data and find center
end_sites = strain_viz.end_df(sites)
lonc, latc = strain_viz.get_center(sites)
# To shift the Strain Ellipse about the center
shiftx = kwargs.get('shiftx', 0)
shifty = kwargs.get('shifty', 0)
bound_ = kwargs.get('bounds', 0.5)
# Pick tiler type (http://maps.stamen.com/)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
tiler = cimgt.Stamen(map_tile_type)
mercator = tiler.crs
# Figure Size
fig = plt.figure(figsize=(15, 20), constrained_layout=False)
gs = gridspec.GridSpec(40, 30, figure=fig)
ax = fig.add_subplot(gs[:30, :], projection=mercator)
ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
scale_arrow = kwargs.get('scale_arrow', 40)
length = kwargs.get('length', 25)
scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
hei_ = kwargs.get('hei_', 5)
map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
strain_viz.ellipse_plot(self, V=V, arrows=arrows, color_list=color_list, tiler_size=tiler_size, map_tile_type=map_tile_type,
hei_=hei_, scale_arrow=scale_arrow, length=length, scale_loc=scale_loc, loc_=loc_, ax=ax, fig=fig)
fig.canvas.draw()
ax1 = fig.add_subplot(gs[30:34, 23:])
image = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
strain_viz.unavco_logo(image=image, ax=ax1)
ax1_1 = fig.add_subplot(gs[31:33, :23])
title_ = kwargs.get('title', "GPS Triangle-Strain Map Using UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 24)
ha_ = kwargs.get('ha', 'left')
va_ = kwargs.get('va', 'center')
strain_viz.map_title(title=str(title_), fontsize=fontsize_, ha=ha_, va=va_, ax=ax1_1)
ax2 = fig.add_subplot(gs[30:40, 10:24])
strain_viz.quiver_legend(self, sites=sites, size=size, label=label, scale_arrow=scale_arrow, ax=ax2)
ax3 = fig.add_subplot(gs[33:37, 1:10])
strain_viz.strain_legend(ax=ax3)
ax4 = fig.add_subplot(gs[38:, :])
strain_viz.table_data(self, sites=sites, ax=ax4)
ax5 = fig.add_subplot(gs[34:36, 21:])
strain_viz.speed_data(self, sites=sites, ax=ax5)
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def unavco_logo(**kwargs):
im_read = kwargs.get('image', "https://www.unavco.org/education/resources/lib/images/unavco-logo-red-white-shadow.png")
a = plt.imread(im_read)
plt.imshow(a, aspect='equal')
axis('off')
def map_title(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 1.5))
ax = fig.add_subplot(1, 1, 1)
title_ = kwargs.get('title', "GPS Triangle-Strain Map Using UNAVCO PBO Data")
fontsize_ = kwargs.get('fontsize', 20)
ha_ = kwargs.get('ha', 'center')
va_ = kwargs.get('va', 'top')
xy_ = kwargs.get('xy', (0.0, 0.5))
ax.annotate(str(title_), xy=xy_, va=va_, ha=ha_, fontsize=fontsize_)
ax.axis('off')
def ellipse_subplot(self, V, **kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
strain_viz.def_ellipse(self, V)
ax.set_title("Infinitesimal Strain Ellipse", x=0.5, y=1.05, fontsize=16, fontweight='light')
sites_h = []
colors = ['tomato', 'cornflowerblue']
strain_ = ['$S_{1H}$', '$S_{2H}$']
for i in range(2):
site_0 = Line2D([0], [0], color=colors[i], linestyle='-', linewidth=1.5, fillstyle='full', label=strain_[i])
sites_h.append(site_0)
leg = ax.legend(handles=[sites_h[0], sites_h[1]], ncol=2, loc='upper center', bbox_to_anchor=(0.5, 1.1), fontsize="x-large", frameon=False)
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.5)
ax.axis('off')
def contraction(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
rot0 = mtrans.Affine2D().rotate_deg(0)
x0, y0 = rot0.transform_point((0.35, -strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [0.25, 0.75]) + 0.75))
x1, y1 = rot0.transform_point((-0.35, -strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [0.25, 0.75]) + 0.5))
sz_e1 = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [40, 80])
sz_e2 = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [40, 80])
x = np.array([-0.35, 0.35])
y_1 = np.array([0.48, 0.73])
y_2 = np.array([y1+0.01, y0+0.01])
plt.plot((-0.35, 0.35), (0.48, 0.73), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.plot((-0.35, 0.35), (y1+0.01, y0+0.01), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.fill_between(x, y_1, y_2, where=(y_1 > y_2), color='slategrey', alpha=0.15, interpolate=True)
ax.annotate("",
xy=(0.35, 0.75),
xytext=(x0, y0), textcoords='data',
size=sz_e1, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2)
)
ax.annotate("",
xy=(-0.35,0.5),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2)
)
ax.annotate("Infinitesimal Strain (Contraction)", xy=(0.0, 0.9), xycoords="data",
va="top", ha="center", fontsize=16)
ax.annotate("%s\nnano-strain" % (min_strain), xy=(-0.75, 0.3), xycoords="data",
va="center", ha="center", fontsize=12)
ax.annotate("%s\nnano-strain" % (max_strain), xy=(0.75, 0.3), xycoords="data",
va="center", ha="center", fontsize=12)
ax.set_xlim([-1,1])
ax.set_ylim([0,1])
ax.axis('off')
def elongation(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(5, 2.5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
scale_arrow_percent_0 = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [0.2, 0.6])
boxstyle0_d = f"darrow,pad={scale_arrow_percent_0}"
scale_arrow_percent_1 = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [0.2, 0.6])
boxstyle1_d = f"darrow,pad={scale_arrow_percent_1}"
sz_e1_d = strain_viz.scale_arrow(max_strain, [min_strain, max_strain], [10, 15])
sz_e2_d = strain_viz.scale_arrow(min_strain, [min_strain, max_strain], [10, 15])
x = np.array([0.85, 0.35, -0.21, -0.32])
y_2 = np.array([scale_arrow_percent_1+0.1, scale_arrow_percent_0 + 0.0975, 0.65, 0.125])
ax.fill(x, y_2, color='slategrey', alpha=0.15)
plt.plot((-0.21, -0.32), (0.65, 0.125), color='slategrey', linewidth=1, linestyle='--', marker=',')
plt.plot((0.8, 0.35), (scale_arrow_percent_1+0.15, scale_arrow_percent_0 + 0.0975), color='slategrey', linewidth=1, linestyle='--', marker=',')
bbox_props2 = dict(boxstyle=boxstyle1_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.1, 0.68, sz_text1, ha="center", va="top", rotation=0,
size=sz_e2_d, color='w', bbox=bbox_props2)
sz_text0 = "---------------" + ('-' * int(20*float(scale_arrow_percent_0)))
bbox_props3 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
ax.text(0.35, 0.2, sz_text0, ha="center", va="top", rotation=0,
size=sz_e1_d, color='w', bbox=bbox_props3)
ax.annotate("Infinitesimal Strain (Elongation)", xy=(0.0, 0.925), xycoords="data",
va="top", ha="center", fontsize=16, fontweight='book')
ax.annotate("%s\nnano-strain" % (min_strain), xy=(-0.65, 0.63), xycoords="data",
va="center", ha="center", fontsize=12)
ax.annotate("%s\nnano-strain" % (max_strain), xy=(-0.65, 0.05), xycoords="data",
va="center", ha="center", fontsize=12)
bboxprops = dict(boxstyle="round,pad=1", facecolor='white', edgecolor='black', lw=3)
ax.annotate("", xy=(-0.65, 0.05), xycoords="data",
va="center", ha="center", fontsize=12, bbox=bboxprops)
ax.set_xlim([-1,1])
ax.set_ylim([0, 1])
ax.axis('off')
def table_data(self, sites, **kwargs):
ax = kwargs.get('ax', None)
fontsize = kwargs.get('fontsize', 11.25)
scale = kwargs.get('scale', 1.75)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
table = ax.table(cellText=sites.round(6).values, colLabels=sites.columns, cellLoc='center', rowLoc='center',loc='center')
table.auto_set_font_size(False)
table.set_fontsize(fontsize)
table.scale(1, scale)
ax.axis('off')
def speed_data(self, sites, **kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
sites_t = sites.copy().drop(['E uncertainty (mm/yr)', 'N uncertainty (mm/yr)'], axis=1)
sites_t.columns = ['sites', 'longitude', 'latitude', 'east_v', 'north_v']
sites_t['Speed (mm/yr)'] = sites_t[['east_v', 'north_v']].apply(lambda x: np.sqrt((x.east_v**2)+(x.north_v**2)), axis=1)
sites_t = sites_t[['sites', 'Speed (mm/yr)']]
table = ax.table(cellText=sites_t.round(6).values, colLabels=sites_t.columns, cellLoc='center', rowLoc='center',loc='center')
table.auto_set_font_size(False)
table.set_fontsize(11.25)
table.scale(1, 1.75)
ax.axis('off')
def quiver_legend(self, sites, **kwargs):
ax = kwargs.get('ax', None)
size = kwargs.get('size', 10)
label = kwargs.get('label', '10 mm/yr')
scale_arrow = kwargs.get('scale_arrow', 40)
if ax is None:
#fig_ = plt.figure(figsize=(5, 5))
fig_ = plt.figure()
ax = fig_.add_subplot(1, 1, 1)
Q = ax.quiver(sites['E velocity (mm/yr)'], sites['N velocity (mm/yr)'], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
ax.clear()
p_fancy = FancyBboxPatch((0.115, 0.415),
0.59, 0.17,
boxstyle="square,pad=0.05",
fc='w', ec='k', lw=1, alpha=0.25)
ax.add_patch(p_fancy)
ax.annotate("Velocity Relative to SNARF", xy=(0.4, 0.6), xycoords="data",
va="top", ha="center", fontsize=14, fontweight='book')
ax.quiverkey(Q, 0.45, 0.45, size, label, labelpos='E', fontproperties=dict(size=12.5), labelsep=0.2,
coordinates='axes')
ax.axis('off')
def strain_legend(**kwargs):
ax = kwargs.get('ax', None)
if ax is None:
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
sites_h = []
colors = ['tomato', 'cornflowerblue']
strain_ = ['$S_{1H}$', '$S_{2H}$']
for i in range(2):
site_0 = Line2D([0], [0], color=colors[i], linestyle='-', linewidth=1.5, fillstyle='full', label=strain_[i])
sites_h.append(site_0)
site_1 = Line2D([0], [0], marker='$\u25CC$', color='w', linestyle='--', markeredgecolor='slategrey',
markeredgewidth=0.5, label='Initial State', markerfacecolor='slategrey', markersize=20)
site_2 = Line2D([0], [0], marker='o', color='w', linestyle='--', markeredgecolor='k',
markeredgewidth=1.1, label='Strain Ellipse', markerfacecolor='w', markersize=18)
leg = ax.legend(handles=[sites_h[0], site_1, sites_h[1], site_2], ncol=2, loc='center', fontsize="x-large", frameon=True, title="Strain Ellipse Legend")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.5)
plt.setp(leg.get_title(),fontsize=14)
ax.axis('off') | 0.67971 | 0.398787 |
import sys
import os
import glob as gb
from subprocess import check_output, Popen, PIPE, STDOUT
import pytest
from idftags import __version__ as VERSION
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHelp():
"""
Py.test class for the help
"""
def test_help(self):
"""
Py.test for -h or --help
"""
output = check_output(['idf-tags', '-h'])
assert 'Usage:' in output.decode('utf-8')
output = check_output(['idf-tags', '--help'])
assert 'Usage:' in output.decode('utf-8')
def test_recursive_and_path(self):
"""
Py.test to check that if both --recursive and a path are given it
shows the help
"""
# Cannot call check_output here: it raises CalledProcessError because the return
# code isn't 0 in this case (it is, after all, an invalid call!)
output = Popen(['idf-tags', '-r', 'i.idf'],
stdout=PIPE, stderr=STDOUT).communicate()[0]
assert 'Usage:' in output.decode('utf-8')
output = Popen(['idf-tags', '--recursive', 'i.idf'],
stdout=PIPE, stderr=STDOUT).communicate()[0]
assert 'Usage:' in output.decode('utf-8')
class TestVersion():
"""
Py.test class for version
"""
def test_version_short(self):
"""
Py.test for -v
"""
output = check_output(['idf-tags', '-v'])
assert output.decode('utf-8').strip() == VERSION
def test_version_long(self):
"""
Py.test for --version
"""
output = check_output(['idf-tags', '--version'])
assert output.decode('utf-8').strip() == VERSION
class TestIdfTagsCLI():
"""
Py.test class to test that the arguments are understood correctly by the
CLI
"""
@pytest.fixture(autouse=True)
def cleanup_out_files(self):
"""
Fixture run around each test. Changes the current working directory to
test_files, then deletes all '*out.idf' files created, to avoid a
proliferation of output files.
"""
curdir = os.getcwd()
os.chdir("{}/test_files".format(TEST_DIR))
yield
# This runs even if the test failed
if sys.version_info[0] < 3:
# Python 2's glob doesn't support the recursive '**' pattern, so walk the tree with fnmatch instead
import fnmatch
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*out.idf'):
idf_path = os.path.join(root, filename)
os.remove(idf_path)
else:
for filepath in gb.iglob("**/*out.idf", recursive=True):
os.remove(filepath)
# Teardown
os.chdir(curdir)
def test_without_recursive(self):
"""
Py.test when recursive isn't used
"""
output = check_output(['idf-tags']).decode('utf-8')
lines = output.split('\n')
assert len(lines) == 4
def test_with_recursive(self):
"""
Py.test when recursive is used
"""
output = check_output(['idf-tags', '-r']).decode('utf-8')
lines = output.split('\n')
assert len(lines) == 5
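# Presumably the recursive run picks up one additional .idf file in a subdirectory of
# test_files, hence one more line of output than the non-recursive case.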
def test_with_path(self):
"""
Py.test for a single file
"""
output = check_output(['idf-tags',
'WaterHeaterStandAlone.idf']).decode('utf-8')
lines = output.split('\n')
# The trailing newline produces an empty final element; the user actually sees two lines:
# "Processing xxxx.idf" and "Generated tag"
assert len(lines) == 3 | tests/test_cli.py |
| 0.399343 | 0.317876 |
import time
import requests
from bs4 import BeautifulSoup
from crawlers.generic import BaseCrawler
from settings import BEGIN_CRAWL_SINCE, UTC_HOUR_DIFF
class OneJuxCrawler(BaseCrawler):
def __init__(self, *args, **kwargs):
super(OneJuxCrawler, self).__init__(source='one_jux', *args, **kwargs)
self.url = 'https://en.1jux.net/tag/meme/1'
def get_feed(self, page=1, last_post=None):
images = []
page_url = self.url
if page > 1:
data = {
"post_level": None,
"post_id": last_post,
"task": "tag",
"tdata[tag]": "meme",
"tdata[level]": 1,
"tdata[start]": None
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
}
response = requests.post("https://en.1jux.net/ajax/tag", data=data, headers=headers)
else:
response = requests.get(page_url)
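# The first page is a plain GET of the tag URL; later pages go through the site's AJAX tag
# endpoint, posting the id of the last post already seen so the server returns the next slice
# (presumably mirroring the infinite-scroll request the browser would make).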
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
posts = soup.findAll('li', {"class": "post-item"})
for p in posts:
try:
a = p.find("a", {"class": "post-image"})
i = a.find("img")
t = p.find("span", {"class": "time"})
if 'njf_s.jpg' in i.attrs.get('src'):
continue
images.append({
"id": p.attrs.get("id").replace("post-", "", 1),  # replace() drops the prefix; str.strip() removes characters, not a prefix
"title": i.attrs.get('alt'),
"url": "https://en.1jux.net{}".format(i.attrs.get('src')),
"created_at": time.mktime(
time.strptime(t.attrs.get("title"), '%Y-%m-%d %H:%M:%S')) - 3600 * (
UTC_HOUR_DIFF + 1)
})
except Exception as e:
print(e)
return images
def _pre_process_data(self, data):
results = []
for d in data:
results.append(
{
"id": d['id'],
"title": d.get('title'),
"image_url": d.get('url'),
"file_name": 'data/{}/{}.jpg'.format(self.source, d['id']),
"source": self.source,
"created_at": d.get('created_at')
}
)
return results
def run(self):
self._log_console("Starting up {} crawler ...".format(self.source))
self._create_mongo_db_connection()
next_page = 0
last_post = None
while self.running:
try:
next_page += 1
data = self.get_feed(next_page, last_post)
if len(data):
last_post = data[-1]['id']
pre_processed_data = self._pre_process_data(data)
inserted, oldest_timestamp = self.process_data(pre_processed_data)
self._log_console("Iteration ended with {} results".format(len(pre_processed_data)))
time.sleep(4)
if oldest_timestamp < BEGIN_CRAWL_SINCE or not inserted:
next_page = 0
last_post = None
if (oldest_timestamp - BEGIN_CRAWL_SINCE) > 300:
time.sleep(60)
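# When the oldest post in this batch predates BEGIN_CRAWL_SINCE, or nothing new was inserted,
# pagination restarts from the first page; if the batch is still well ahead of the cutoff
# (by more than 300 s) the crawler also backs off for a minute before starting over.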
except Exception as e:
print(e)
self._log_console("Exception on main thread run()") | crawlers/one_jux.py |
| 0.209955 | 0.122156 |
import csv
import os
from six import string_types, Iterator
from toolz.functoolz import curry, identity
from functools import partial
def sortcsv(input_filename, output_filename, on_cols, input_file_callable=lambda x: open(x, 'r'),
output_file_callable=lambda x: open(x, 'w'), input_csv_config={},
output_csv_config={}, conversions=None,
input_header=True, output_header=True, tmp_reader_callable=lambda x: open(x, 'r'),
tmp_writer_callable=lambda x: open(x, 'w'), tmpdir=None, tmp_size=100000, tmp_csv_config={}):
input_file = input_file_callable(input_filename)
reader = csv.reader(input_file, **input_csv_config)
if input_header:
header = next(reader)
else:
header = None
header_to_idx = dict(zip(header, range(len(header))))
sort_key_idx = []
for col in on_cols:
if isinstance(col, string_types):
sort_key_idx.append(header_to_idx[col])
else:
sort_key_idx.append(col)
if conversions is None:
conversions = lambda x: identity
else:
conversions = dict(zip(sort_key_idx, conversions)).__getitem__
key = rowkey(conversions, sort_key_idx)
if tmpdir is None:
tmpdir = os.path.dirname(os.path.abspath(output_filename))
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
tmpdir_is_tmp = True
else:
tmpdir_is_tmp = False
tmppaths = splitcsv(reader, tmpdir, tmp_size, tmp_writer_callable, tmp_csv_config)
for tmppath in tmppaths:
sortpart(tmppath, tmp_reader_callable, tmp_writer_callable, tmp_csv_config, key)
mergeparts(tmppaths, tmp_reader_callable, tmp_csv_config, output_filename, output_file_callable,
output_csv_config, key, header if output_header else None)
for tmppath in tmppaths:
os.remove(tmppath)
if tmpdir_is_tmp:
os.removedirs(tmpdir)
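# Illustrative usage sketch (the file names and column names here are hypothetical, not from the repo):
#     sortcsv('events.csv', 'events_sorted.csv', on_cols=['user_id', 'timestamp'],
#             conversions=[int, str], tmp_size=50000)
# splits the input into tmp_size-row chunks, sorts each chunk on the given columns, and then
# k-way merges the sorted chunks into the output file, so the whole CSV never has to fit in memory.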
@curry
def rowkey(conversions, indices, row):
result = []
for i in indices:
result.append(conversions(i)(row[i]))
return result
def sortpart(path, read_callable, write_callable, csv_config, key):
infile = read_callable(path)
reader = csv.reader(infile, **csv_config)
data = list(reader)
infile.close()
data.sort(key=key)
outfile = write_callable(path)
writer = csv.writer(outfile, **csv_config)
writer.writerows(data)
outfile.close()
def splitcsv(reader, tmpdir, tmp_size, file_callable, csv_config):
outrownum = tmp_size
outfilenum = 0
outfile = None
result = []
for row in reader:
if outrownum >= tmp_size:
if outfile is not None:
outfile.close()
outpath = os.path.join(tmpdir, 'tmp_%d.csv'%outfilenum)
if os.path.exists(outpath):
raise ValueError('Path %s already exists!' % outpath)
outfile = file_callable(outpath)
writer = csv.writer(outfile, **csv_config)
result.append(outpath)
outrownum = 0
outfilenum += 1
writer.writerow(row)
outrownum += 1
if outfile is not None:
    outfile.close()
return result
def next_or_none(it):
try:
return next(it)
except StopIteration:
return None
class MergeIterator(Iterator):
def __init__(self, readers, key):
self.readers = readers
self.key = key
self.current_rows = list(map(next_or_none, readers))
self.current_keys = list(map(key, self.current_rows))
def __iter__(self):
return self
def __next__(self):
lowest_key = None
lowest_key_idx = None
for i in range(len(self.current_keys)):
current_key = self.current_keys[i]
if current_key is None:
continue
if lowest_key is None or current_key < lowest_key:
lowest_key = self.current_keys[i]
lowest_key_idx = i
if lowest_key_idx is None:
raise StopIteration()
result = self.current_rows[lowest_key_idx]
self.current_rows[lowest_key_idx] = next_or_none(self.readers[lowest_key_idx])
self.current_keys[lowest_key_idx] = self.key(self.current_rows[lowest_key_idx]) if self.current_rows[lowest_key_idx] is not None else None
return result
def mergeparts(input_paths, input_callable, input_csv_config, output_path, output_callable, output_csv_config, key, header):
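# Merge the sorted chunk files into output_path, writing the header row first
# when one is supplied.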
infiles = list(map(input_callable, input_paths))
readers = list(map(partial(csv.reader, **input_csv_config), infiles))
merger = MergeIterator(readers, key)
if os.path.exists(output_path):
raise ValueError('Path %s already exists!' % output_path)
outfile = output_callable(output_path)
writer = csv.writer(outfile, **output_csv_config)
if header is not None:
writer.writerow(header)
writer.writerows(merger) | oreader/sortcsv.py | import csv
import os
from six import string_types, Iterator
from toolz.functoolz import curry, identity
from functools import partial
def sortcsv(input_filename, output_filename, on_cols, input_file_callable=lambda x: open(x, 'r'),
output_file_callable=lambda x: open(x, 'w'), input_csv_config={},
output_csv_config={}, conversions=None,
input_header=True, output_header=True, tmp_reader_callable=lambda x: open(x, 'r'),
tmp_writer_callable=lambda x: open(x, 'w'), tmpdir=None, tmp_size=100000, tmp_csv_config={}):
input_file = input_file_callable(input_filename)
reader = csv.reader(input_file, **input_csv_config)
if input_header:
header = next(reader)
else:
header = None
# Column-name lookup is only possible when the input has a header row.
header_to_idx = dict(zip(header, range(len(header)))) if header is not None else {}
sort_key_idx = []
for col in on_cols:
if isinstance(col, string_types):
sort_key_idx.append(header_to_idx[col])
else:
sort_key_idx.append(col)
if conversions is None:
conversions = lambda x: identity
else:
conversions = dict(zip(sort_key_idx, conversions)).__getitem__
key = rowkey(conversions, sort_key_idx)
if tmpdir is None:
tmpdir = os.path.dirname(os.path.abspath(output_filename))
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
tmpdir_is_tmp = True
else:
tmpdir_is_tmp = False
tmppaths = splitcsv(reader, tmpdir, tmp_size, tmp_writer_callable, tmp_csv_config)
for tmppath in tmppaths:
sortpart(tmppath, tmp_reader_callable, tmp_writer_callable, tmp_csv_config, key)
mergeparts(tmppaths, tmp_reader_callable, tmp_csv_config, output_filename, output_file_callable,
output_csv_config, key, header if output_header else None)
for tmppath in tmppaths:
os.remove(tmppath)
if tmpdir_is_tmp:
os.removedirs(tmpdir)
@curry
def rowkey(conversions, indices, row):
result = []
for i in indices:
result.append(conversions(i)(row[i]))
return result
def sortpart(path, read_callable, write_callable, csv_config, key):
infile = read_callable(path)
reader = csv.reader(infile, **csv_config)
data = list(reader)
infile.close()
data.sort(key=key)
outfile = write_callable(path)
writer = csv.writer(outfile, **csv_config)
writer.writerows(data)
outfile.close()
def splitcsv(reader, tmpdir, tmp_size, file_callable, csv_config):
outrownum = tmp_size
outfilenum = 0
outfile = None
result = []
for row in reader:
if outrownum >= tmp_size:
if outfile is not None:
outfile.close()
outpath = os.path.join(tmpdir, 'tmp_%d.csv'%outfilenum)
if os.path.exists(outpath):
raise ValueError('Path %s already exists!' % outpath)
outfile = file_callable(outpath)
writer = csv.writer(outfile, **csv_config)
result.append(outpath)
outrownum = 0
outfilenum += 1
writer.writerow(row)
outrownum += 1
try:
outfile.close()
except:
pass
return result
def next_or_none(it):
try:
return next(it)
except StopIteration:
return None
class MergeIterator(Iterator):
def __init__(self, readers, key):
self.readers = readers
self.key = key
self.current_rows = list(map(next_or_none, readers))
# A reader may already be exhausted, in which case its current row is None.
self.current_keys = [key(row) if row is not None else None for row in self.current_rows]
def __iter__(self):
return self
def __next__(self):
lowest_key = None
lowest_key_idx = None
for i in range(len(self.current_keys)):
current_key = self.current_keys[i]
if current_key is None:
continue
if lowest_key is None or current_key < lowest_key:
lowest_key = self.current_keys[i]
lowest_key_idx = i
if lowest_key_idx is None:
raise StopIteration()
result = self.current_rows[lowest_key_idx]
self.current_rows[lowest_key_idx] = next_or_none(self.readers[lowest_key_idx])
self.current_keys[lowest_key_idx] = self.key(self.current_rows[lowest_key_idx]) if self.current_rows[lowest_key_idx] is not None else None
return result
def mergeparts(input_paths, input_callable, input_csv_config, output_path, output_callable, output_csv_config, key, header):
infiles = list(map(input_callable, input_paths))
readers = list(map(partial(csv.reader, **input_csv_config), infiles))
merger = MergeIterator(readers, key)
if os.path.exists(output_path):
raise ValueError('Path %s already exists!' % output_path)
outfile = output_callable(output_path)
writer = csv.writer(outfile, **output_csv_config)
if header is not None:
writer.writerow(header)
writer.writerows(merger) | 0.182025 | 0.09947 |
class Error(Exception):
pass
class Line(str):
"""A line of text with associated filename and line number."""
def error(self, message):
"""Return an error relating to this line."""
return Error("{0}({1}): {2}\n{3}"
.format(self.filename, self.lineno, message, self))
class Lines(object):
"""Lines(filename, iterator) wraps 'iterator' so that it yields Line
objects, with line numbers starting from 1. 'filename' is used in
error messages.
"""
def __init__(self, filename, iterator):
self.filename = filename
self.lines = enumerate(iterator, start=1)
def __iter__(self):
return self
def __next__(self):
lineno, s = next(self.lines)
line = Line(s)
line.filename = self.filename
line.lineno = lineno
return line
# For compatibility with Python 2.
next = __next__
def read_fastq(filename, iterator):
"""Read FASTQ data from 'iterator' (which may be a file object or any
other iterator that yields strings) and generate tuples (sequence
name, sequence data, quality data). 'filename' is used in error
messages.
"""
# This implementation follows the FASTQ specification given here:
# <http://nar.oxfordjournals.org/content/38/6/1767.full>
import re
at_seqname_re = re.compile(r'@(.+)$')
sequence_re = re.compile(r'[!-*,-~]*$')
plus_seqname_re = re.compile(r'\+(.*)$')
quality_re = re.compile(r'[!-~]*$')
lines = Lines(filename, iterator)
for line in lines:
# First line of block is @<seqname>.
m = at_seqname_re.match(line)
if not m:
raise line.error("Expected @<seqname> but found:")
seqname = m.group(1)
try:
# One or more lines of sequence data.
sequence = []
for line in lines:
m = sequence_re.match(line)
if not m:
break
sequence.append(m.group(0))
if not sequence:
raise line.error("Expected <sequence> but found:")
# The line following the sequence data consists of a plus
# sign and an optional sequence name (if supplied, it must
# match the sequence name from the start of the block).
m = plus_seqname_re.match(line)
if not m:
raise line.error("Expected +[<seqname>] but found:")
if m.group(1) not in ['', seqname]:
raise line.error("Expected +{} but found:".format(seqname))
# One or more lines of quality data, containing the same
# number of characters as the sequence data.
quality = []
n = sum(map(len, sequence))
while n > 0:
line = next(lines)
m = quality_re.match(line)
if not m:
raise line.error("Expected <quality> but found:")
n -= len(m.group(0))
if n < 0:
raise line.error("<quality> is longer than <sequence>:")
quality.append(m.group(0))
yield seqname, ''.join(sequence), ''.join(quality)
except StopIteration:
raise line.error("End of input before sequence was complete:") | readFastQ.py | class Error(Exception):
pass
class Line(str):
"""A line of text with associated filename and line number."""
def error(self, message):
"""Return an error relating to this line."""
return Error("{0}({1}): {2}\n{3}"
.format(self.filename, self.lineno, message, self))
class Lines(object):
"""Lines(filename, iterator) wraps 'iterator' so that it yields Line
objects, with line numbers starting from 1. 'filename' is used in
error messages.
"""
def __init__(self, filename, iterator):
self.filename = filename
self.lines = enumerate(iterator, start=1)
def __iter__(self):
return self
def __next__(self):
lineno, s = next(self.lines)
line = Line(s)
line.filename = self.filename
line.lineno = lineno
return line
# For compatibility with Python 2.
next = __next__
def read_fastq(filename, iterator):
"""Read FASTQ data from 'iterator' (which may be a file object or any
other iterator that yields strings) and generate tuples (sequence
name, sequence data, quality data). 'filename' is used in error
messages.
"""
# This implementation follows the FASTQ specification given here:
# <http://nar.oxfordjournals.org/content/38/6/1767.full>
import re
at_seqname_re = re.compile(r'@(.+)$')
sequence_re = re.compile(r'[!-*,-~]*$')
plus_seqname_re = re.compile(r'\+(.*)$')
quality_re = re.compile(r'[!-~]*$')
lines = Lines(filename, iterator)
for line in lines:
# First line of block is @<seqname>.
m = at_seqname_re.match(line)
if not m:
raise line.error("Expected @<seqname> but found:")
seqname = m.group(1)
try:
# One or more lines of sequence data.
sequence = []
for line in lines:
m = sequence_re.match(line)
if not m:
break
sequence.append(m.group(0))
if not sequence:
raise line.error("Expected <sequence> but found:")
# The line following the sequence data consists of a plus
# sign and an optional sequence name (if supplied, it must
# match the sequence name from the start of the block).
m = plus_seqname_re.match(line)
if not m:
raise line.error("Expected +[<seqname>] but found:")
if m.group(1) not in ['', seqname]:
raise line.error("Expected +{} but found:".format(seqname))
# One or more lines of quality data, containing the same
# number of characters as the sequence data.
quality = []
n = sum(map(len, sequence))
while n > 0:
line = next(lines)
m = quality_re.match(line)
if not m:
raise line.error("Expected <quality> but found:")
n -= len(m.group(0))
if n < 0:
raise line.error("<quality> is longer than <sequence>:")
quality.append(m.group(0))
yield seqname, ''.join(sequence), ''.join(quality)
except StopIteration:
raise line.error("End of input before sequence was complete:") | 0.856242 | 0.335215 |
import unittest
import os
from shutil import rmtree
import numpy as np
import torch
import torch.nn as nn
from inferno.trainers.basic import Trainer
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.extensions.layers.reshape import AsMatrix
class TestTensorboard(unittest.TestCase):
ROOT_DIR = os.path.dirname(__file__)
PRECISION = 'float'
SAVE_DIRECTORY = os.path.join(ROOT_DIR, 'saves')
LOG_DIRECTORY = os.path.join(ROOT_DIR, 'logs')
@staticmethod
def _make_test_model(input_channels):
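# Tiny conv net: three conv/ELU blocks with pooling, a global max-pool to 1x1,
# flatten, then a 10-way linear classifier.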
toy_net = nn.Sequential(nn.Conv2d(input_channels, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 16, 3, 1, 1),
nn.ELU(),
nn.AdaptiveMaxPool2d((1, 1)),
AsMatrix(),
nn.Linear(16, 10))
return toy_net
def tearDown(self):
for d in [self.SAVE_DIRECTORY, self.LOG_DIRECTORY]:
try:
rmtree(d)
except OSError:
pass
def get_random_dataloaders(self, input_channels=3):
# Build a random tensor dataset
data_shape = (1, input_channels, 64, 64)
target_shape = (1,)
random_array = torch.from_numpy(np.random.rand(*data_shape)).float()
target_array = torch.from_numpy(np.random.randint(0, 9, size=target_shape))
train_dataset = TensorDataset(random_array, target_array)
test_dataset = TensorDataset(random_array, target_array)
# Build dataloaders from dataset
train_loader = DataLoader(train_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
test_loader = DataLoader(test_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
return train_loader, test_loader
def get_trainer(self, input_channels):
# Build model
net = self._make_test_model(input_channels)
# Build trainer
trainer = Trainer(net)\
.build_logger(TensorboardLogger(send_image_at_batch_indices=0,
send_image_at_channel_indices='all',
log_images_every=(20, 'iterations')),
log_directory=self.LOG_DIRECTORY)\
.build_criterion('CrossEntropyLoss')\
.build_metric('CategoricalError')\
.build_optimizer('Adam')\
.validate_every((1, 'epochs'))\
.save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
.save_at_best_validation_score()\
.set_max_num_epochs(2)\
.set_precision(self.PRECISION)
# Bind loaders
train_loader, test_loader = self.get_random_dataloaders(input_channels=input_channels)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
return trainer
def test_tensorboard(self):
trainer = self.get_trainer(3)
trainer.fit()
def test_tensorboard_grayscale(self):
trainer = self.get_trainer(1)
trainer.fit()
def test_serialization(self):
trainer = self.get_trainer(3)
# Serialize
trainer.save()
# Unserialize
trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
train_loader, test_loader = self.get_random_dataloaders(input_channels=3)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
trainer.fit()
if __name__ == '__main__':
unittest.main() | tests/test_training/test_callbacks/test_logging/test_tensorboard.py | import unittest
import os
from shutil import rmtree
import numpy as np
import torch
import torch.nn as nn
from inferno.trainers.basic import Trainer
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.extensions.layers.reshape import AsMatrix
class TestTensorboard(unittest.TestCase):
ROOT_DIR = os.path.dirname(__file__)
PRECISION = 'float'
SAVE_DIRECTORY = os.path.join(ROOT_DIR, 'saves')
LOG_DIRECTORY = os.path.join(ROOT_DIR, 'logs')
@staticmethod
def _make_test_model(input_channels):
toy_net = nn.Sequential(nn.Conv2d(input_channels, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 16, 3, 1, 1),
nn.ELU(),
nn.AdaptiveMaxPool2d((1, 1)),
AsMatrix(),
nn.Linear(16, 10))
return toy_net
def tearDown(self):
for d in [self.SAVE_DIRECTORY, self.LOG_DIRECTORY]:
try:
rmtree(d)
except OSError:
pass
def get_random_dataloaders(self, input_channels=3):
# Build a random tensor dataset
data_shape = (1, input_channels, 64, 64)
target_shape = (1,)
random_array = torch.from_numpy(np.random.rand(*data_shape)).float()
target_array = torch.from_numpy(np.random.randint(0, 9, size=target_shape))
train_dataset = TensorDataset(random_array, target_array)
test_dataset = TensorDataset(random_array, target_array)
# Build dataloaders from dataset
train_loader = DataLoader(train_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
test_loader = DataLoader(test_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
return train_loader, test_loader
def get_trainer(self, input_channels):
# Build model
net = self._make_test_model(input_channels)
# Build trainer
trainer = Trainer(net)\
.build_logger(TensorboardLogger(send_image_at_batch_indices=0,
send_image_at_channel_indices='all',
log_images_every=(20, 'iterations')),
log_directory=self.LOG_DIRECTORY)\
.build_criterion('CrossEntropyLoss')\
.build_metric('CategoricalError')\
.build_optimizer('Adam')\
.validate_every((1, 'epochs'))\
.save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
.save_at_best_validation_score()\
.set_max_num_epochs(2)\
.set_precision(self.PRECISION)
# Bind loaders
train_loader, test_loader = self.get_random_dataloaders(input_channels=input_channels)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
return trainer
def test_tensorboard(self):
trainer = self.get_trainer(3)
trainer.fit()
def test_tensorboard_grayscale(self):
trainer = self.get_trainer(1)
trainer.fit()
def test_serialization(self):
trainer = self.get_trainer(3)
# Serialize
trainer.save()
# Unserialize
trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
train_loader, test_loader = self.get_random_dataloaders(input_channels=3)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
trainer.fit()
if __name__ == '__main__':
unittest.main() | 0.792625 | 0.430387 |
import tweepy as tw
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pandas as pd
from tqdm import tqdm
from os import getenv, path, remove
class Tweet:
def __init__(self) -> None:
self.file_path = 'tmp/tweets_to_work.csv'
self._auth = tw.OAuthHandler(
getenv('API_KEY'),
getenv('API_KEY_SECRET')
)
self._api = tw.API(self._auth)
self._auth.set_access_token(
getenv('ACCESS_TOKEN'),
getenv('ACCESS_TOKEN_SECRET')
)
def tweet(self, hashtag='#BAT',
limit_tweet=100, lang="en") -> list:
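# Fetch up to limit_tweet tweets matching `hashtag` and score each one with
# VADER's compound polarity weighted by (user followers + 1) * (tweet favourites + 1).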
ret = [[
'Id',
'Text',
'Username',
'UserFollowerCount',
'FavouritesCount',
'CreatedAt',
'score' # TODO
]]
analyzer = SentimentIntensityAnalyzer()
with tqdm(total=limit_tweet) as pbar:
for tweet in tqdm(
tw.Cursor(self._api.search,
lang=lang,
q=hashtag,
rpp=100)
.items(limit_tweet),
ascii=True,
desc="Obteniendo Tweets"):
ret.append([
str(tweet.id),
tweet.text,
tweet.user.name,
tweet.user.followers_count,
tweet.user.favourites_count,
tweet.created_at,
analyzer.polarity_scores(
tweet.text
)['compound'] * (
(tweet.user.followers_count + 1) *
(tweet.favorite_count + 1))
])
pbar.update(1)
return ret
def csv_tweet(self, list_to_convert) -> None:
df = pd.DataFrame(list_to_convert)
if path.exists(self.file_path):
remove(self.file_path)
print("guardado en " + self.file_path)
df.to_csv(self.file_path, index=False, header=False)
def get_interval_tweet(self,
limit_tweet,
interval_date_list,
query='#BAT',
lang='es'):
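# Collect up to limit_tweet tweets for every [since, until] pair in
# interval_date_list and score them the same way as tweet().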
ret = [[
'Id',
'Text',
'Username',
'UserFollowerCount',
'FavouritesCount',
'CreatedAt',
'score'
]]
i=0
for interval in interval_date_list:
analyzer = SentimentIntensityAnalyzer()
for tweet in tw.Cursor(
self._api.search,
lang=lang,
q=query,
rpp=100,
tweet_mode='extended',
# result_type='mixed',
since=interval[0],
until=interval[1])\
.items(limit_tweet):
ana = analyzer.polarity_scores(tweet.full_text)
score = ana['compound'] * (
(tweet.user.followers_count + 1) *
(tweet.favorite_count + 1))
ret.append([
str(tweet.id),
tweet.full_text,
tweet.user.name,
tweet.user.followers_count,
tweet.user.favourites_count,
tweet.created_at,
score
])
i = i+1
print(interval[0] + '-' + interval[1] + ': ' + str(i))
return ret
if __name__ == '__main__':
tws = Tweet()
tws.file_path='tmp/tweet_work.csv'
# tws.csv_tweet(tws.tweet(limit_tweet=100))
list_date =[
["2021-08-12","2021-08-13"],
["2021-08-11","2021-08-12"],
["2021-08-10","2021-08-11"],
["2021-08-09","2021-08-10"],
["2021-08-08","2021-08-09"],
# ["2021-08-07","2021-08-08"]
]
tws.file_path='tmp/tweet_work.csv'
tws.csv_tweet(
tws.get_interval_tweet(
100, list_date, query='#BAT', lang='es'
)
) | Tweet.py |
import tweepy as tw
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pandas as pd
from tqdm import tqdm
from os import getenv, path, remove
class Tweet:
def __init__(self) -> None:
self.file_path = 'tmp/tweets_to_work.csv'
self._auth = tw.OAuthHandler(
getenv('API_KEY'),
getenv('API_KEY_SECRET')
)
self._api = tw.API(self._auth)
self._auth.set_access_token(
getenv('ACCESS_TOKEN'),
getenv('ACCESS_TOKEN_SECRET')
)
def tweet(self, hashtag='#BAT',
limit_tweet=100, lang="en") -> list:
ret = [[
'Id',
'Text',
'Username',
'UserFollowerCount',
'FavouritesCount',
'CreatedAt',
'score' # TODO
]]
analyzer = SentimentIntensityAnalyzer()
with tqdm(total=limit_tweet) as pbar:
for tweet in tqdm(
tw.Cursor(self._api.search,
lang=lang,
q=hashtag,
rpp=100)
.items(limit_tweet),
ascii=True,
desc="Obteniendo Tweets"):
ret.append([
str(tweet.id),
tweet.text,
tweet.user.name,
tweet.user.followers_count,
tweet.user.favourites_count,
tweet.created_at,
analyzer.polarity_scores(
tweet.text
)['compound'] * (
(tweet.user.followers_count + 1) *
(tweet.favorite_count + 1))
])
pbar.update(1)
return ret
def csv_tweet(self, list_to_convert) -> None:
df = pd.DataFrame(list_to_convert)
if path.exists(self.file_path):
remove(self.file_path)
print("guardado en " + self.file_path)
df.to_csv(self.file_path, index=False, header=False)
def get_interval_tweet(self,
limit_tweet,
interval_date_list,
query='#BAT',
lang='es'):
ret = [[
'Id',
'Text',
'Username',
'UserFollowerCount',
'FavouritesCount',
'CreatedAt',
'score'
]]
i=0
for interval in interval_date_list:
analyzer = SentimentIntensityAnalyzer()
for tweet in tw.Cursor(
self._api.search,
lang=lang,
q=query,
rpp=100,
tweet_mode='extended',
# result_type='mixed',
since=interval[0],
until=interval[1])\
.items(limit_tweet):
ana = analyzer.polarity_scores(tweet.full_text)
score = ana['compound'] * (
(tweet.user.followers_count + 1) *
(tweet.favorite_count + 1))
ret.append([
str(tweet.id),
tweet.full_text,
tweet.user.name,
tweet.user.followers_count,
tweet.user.favourites_count,
tweet.created_at,
score
])
i = i+1
print(interval[0] + '-' + interval[1] + ': ' + str(i))
return ret
if __name__ == '__main__':
tws = Tweet()
tws.file_path='tmp/tweet_work.csv'
# tws.csv_tweet(tws.tweet(limit_tweet=100))
list_date =[
["2021-08-12","2021-08-13"],
["2021-08-11","2021-08-12"],
["2021-08-10","2021-08-11"],
["2021-08-09","2021-08-10"],
["2021-08-08","2021-08-09"],
# ["2021-08-07","2021-08-08"]
]
tws.file_path='tmp/tweet_work.csv'
tws.csv_tweet(
tws.get_interval_tweet(
100, list_date, query='#BAT', lang='es'
)
) | 0.273769 | 0.075176 |
import math
import torch
import torch.nn as nn
def weights_init_reg(m):
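# He-style normal initialisation for conv layers, constant init for BatchNorm,
# and a small normal init for Linear layers.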
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
xx_channel, yy_channel = xx_channel.type_as(input_tensor), yy_channel.type_as(input_tensor)
ret = torch.cat([
input_tensor,
xx_channel,
yy_channel], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
class CoordConv(nn.Module):
def __init__(self, in_channels, out_channels, with_r=False, **kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels+2
if with_r:
in_size += 1
self.conv = nn.Conv2d(in_size, out_channels, **kwargs)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
return ret
def conv_bn(inp, oup, kernels, stride, pad):
return nn.Sequential(
nn.Conv2d(inp, oup, kernels, stride, pad, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2REG(nn.Module):
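# MobileNetV2-style backbone followed by two CoordConv stages; the flattened
# feature maps are concatenated and regressed to pts_num (x, y) point coordinates.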
def __init__(self, input_dim, input_channel, width_mult, pts_num):
super(MobileNetV2REG, self).__init__()
self.pts_num = pts_num
block = InvertedResidual
interverted_residual_setting = [
# t, c, n, s
[1, 48 , 1, 1],
[2, 48 , 5, 2],
[2, 96 , 1, 2],
[4, 96 , 6, 1],
[2, 16 , 1, 1],
]
input_channel = int(input_channel * width_mult)
features = [conv_bn(input_dim, input_channel, (3,3), 2, 1)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0: stride = s
else : stride = 1
features.append( block(input_channel, output_channel, stride, expand_ratio=t) )
input_channel = output_channel
features.append( nn.AdaptiveAvgPool2d( (14,14) ) )
self.features = nn.Sequential(*features)
self.S1 = nn.Sequential(
CoordConv(input_channel , input_channel*2, True, kernel_size=3, padding=1),
conv_bn(input_channel*2, input_channel*2, (3,3), 2, 1))
self.S2 = nn.Sequential(
CoordConv(input_channel*2, input_channel*4, True, kernel_size=3, padding=1),
conv_bn(input_channel*4, input_channel*8, (7,7), 1, 0))
output_neurons = 14*14*input_channel + 7*7*input_channel*2 + input_channel*8
self.locator = nn.Sequential(
nn.Linear(output_neurons, pts_num*2))
#self.classifier = nn.Linear(output_neurons, pts_num)
#self.classifier = nn.Sequential(
# block(input_channel*1, input_channel*4, 1, 2),
# nn.AdaptiveAvgPool2d( (16,12) ),
# block(input_channel*4, input_channel*4, 1, 2),
# nn.AdaptiveAvgPool2d( (8,6) ),
# nn.Conv2d(input_channel*4, pts_num, (8,6)))
self.apply( weights_init_reg )
def forward(self, x):
batch, C, H, W = x.size()
features = self.features(x)
S1 = self.S1( features )
S2 = self.S2( S1 )
tensors = torch.cat((features.view(batch, -1), S1.view(batch, -1), S2.view(batch, -1)), dim=1)
batch_locs = self.locator(tensors).view(batch, self.pts_num, 2)
#batch_scos = self.classifier(tensors).view(batch, self.pts_num, 1)
return batch_locs
if __name__ == '__main__':
model = MobileNetV2REG(3, 24, 1, 18) # REG on AFLW | .backup/ProREG.py | import torch
import math
import torch
import torch.nn as nn
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
xx_channel, yy_channel = xx_channel.type_as(input_tensor), yy_channel.type_as(input_tensor)
ret = torch.cat([
input_tensor,
xx_channel,
yy_channel], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
class CoordConv(nn.Module):
def __init__(self, in_channels, out_channels, with_r=False, **kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels+2
if with_r:
in_size += 1
self.conv = nn.Conv2d(in_size, out_channels, **kwargs)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
return ret
def conv_bn(inp, oup, kernels, stride, pad):
return nn.Sequential(
nn.Conv2d(inp, oup, kernels, stride, pad, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2REG(nn.Module):
def __init__(self, input_dim, input_channel, width_mult, pts_num):
super(MobileNetV2REG, self).__init__()
self.pts_num = pts_num
block = InvertedResidual
interverted_residual_setting = [
# t, c, n, s
[1, 48 , 1, 1],
[2, 48 , 5, 2],
[2, 96 , 1, 2],
[4, 96 , 6, 1],
[2, 16 , 1, 1],
]
input_channel = int(input_channel * width_mult)
features = [conv_bn(input_dim, input_channel, (3,3), 2, 1)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0: stride = s
else : stride = 1
features.append( block(input_channel, output_channel, stride, expand_ratio=t) )
input_channel = output_channel
features.append( nn.AdaptiveAvgPool2d( (14,14) ) )
self.features = nn.Sequential(*features)
self.S1 = nn.Sequential(
CoordConv(input_channel , input_channel*2, True, kernel_size=3, padding=1),
conv_bn(input_channel*2, input_channel*2, (3,3), 2, 1))
self.S2 = nn.Sequential(
CoordConv(input_channel*2, input_channel*4, True, kernel_size=3, padding=1),
conv_bn(input_channel*4, input_channel*8, (7,7), 1, 0))
output_neurons = 14*14*input_channel + 7*7*input_channel*2 + input_channel*8
self.locator = nn.Sequential(
nn.Linear(output_neurons, pts_num*2))
#self.classifier = nn.Linear(output_neurons, pts_num)
#self.classifier = nn.Sequential(
# block(input_channel*1, input_channel*4, 1, 2),
# nn.AdaptiveAvgPool2d( (16,12) ),
# block(input_channel*4, input_channel*4, 1, 2),
# nn.AdaptiveAvgPool2d( (8,6) ),
# nn.Conv2d(input_channel*4, pts_num, (8,6)))
self.apply( weights_init_reg )
def forward(self, x):
batch, C, H, W = x.size()
features = self.features(x)
S1 = self.S1( features )
S2 = self.S2( S1 )
tensors = torch.cat((features.view(batch, -1), S1.view(batch, -1), S2.view(batch, -1)), dim=1)
batch_locs = self.locator(tensors).view(batch, self.pts_num, 2)
#batch_scos = self.classifier(tensors).view(batch, self.pts_num, 1)
return batch_locs
if __name__ == '__main__':
model = MobileNetV2REG(3, 24, 1, 18) # REG on AFLW | 0.937397 | 0.546194 |
import tkinter as tk
import subprocess
import os
import signal
from tkinter import *
from tkinter import ttk, filedialog, messagebox, colorchooser
from copy import copy, deepcopy
from time import sleep
from threading import Timer
sign = lambda x: (1, -1)[x < 0]
global colour_selected, colour_possible_moves
colour_selected = "khaki"
colour_possible_moves = "orange"
LARGE_FONT = ("Verdana", 40)
ai_players = ['b']
#ai_players = ['w', 'b']
passes = {
'w' : 0,
'b' : 0,
}
depth_ai_player = {
'w' : 10,
'b' : 10,
}
def write(msg):
log['state'] = 'normal'
if log.index('end-1c')!='1.0':
log.insert('end', '\n')
log.see(END)
log.insert('end', msg)
log['state'] = 'disabled'
def clear_log():
log['state'] = 'normal'
log.delete('1.0', END)
log['state'] = 'disabled'
#os.killpg(os.getpgid(p.pid), signal.SIGTERM)
def start_engine():
global p
p = subprocess.Popen(['out/console_interface.exe'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
start_engine()
def get_legal_movements_subprocess(board, x, y):
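# Ask the external engine for the legal move chains of the piece at (x, y);
# the reply is '|'-separated, each chain being a flat list of row/col pairs.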
global p
data = 'get_legal_movements\n'
data += "%d %d\n" % (x, y)
for i in range(8):
data += "%s\n" % board[i]
print(data)
try:
outs, errs = p.communicate(bytes(data, 'ascii'), timeout=10)
p.kill()
print(outs)
print(outs.decode('ascii'))
lines = list(filter(None, outs.decode('ascii').split('|')))
movements = []
for m in lines[1:]:
l = list(map(int, list(filter(None, m.split(' ')))))
lis = []
for i in range(0, len(l), 2):
lis.append([l[i], l[i + 1]])
movements.append(lis)
start_engine()
return movements
except Exception as e:
print(e)
return []
game_over = False
def call_ai_movement():
global game_over
if game_over:
return
movements = board.ai_movement()
board.make_movement_ai(movements)
board.next_turn()
win = board.check_mate()
if win != None:
if win == "b":
write("Black Wins")
game_over = True
if win == "w":
write("White Wins")
game_over = True
if win == "t":
write("Tie")
game_over = True
if board.turn() in ai_players:
task_delay()
def task_delay():
t = Timer(0.5, call_ai_movement)
t.start()
def get_next_move_subprocess(board, player):
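# Ask the engine for its chosen move chain for `player`, searched to
# depth_ai_player[player]; '!' characters in the reply are node markers and are stripped.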
global p
data = 'get_next_movement\n'
data += "%s %d\n" % (player, depth_ai_player[player])
for i in range(8):
data += "%s\n" % board[i]
print(data)
try:
outs, errs = p.communicate(bytes(data, 'ascii'), timeout=15)
p.kill()
line = outs.decode('ascii')
print(line)
print("Number of nodes: ",line.count("!"))
l = list(map(int, list(filter(None, line.replace("!", "").split(' ')))))
lis = []
for i in range(0, len(l), 2):
lis.append([l[i], l[i + 1]])
movements = lis
start_engine()
return movements
except Exception as e:
print(e)
return []
"""
p.stdin.write(b'abc\n')
p.stdin.close()
print("Reading result 1:", p.stdout.readline().decode(encoding='ascii'))
exit(0)
"""
class symbols(object):
b = ' '
w = '#'
bm = '+'
bk = '*'
wm = '-'
wk = '%'
def upgrade_to_king(b, r, c):
ans = deepcopy(b)
if b[r][c] in [symbols.wm, symbols.wk]:
ans[r][c] = symbols.wk
elif b[r][c] in [symbols.bm, symbols.bk]:
ans[r][c] = symbols.bk
return ans
def should_upgrade(b, r, c):
if b[r][c] in [symbols.wk, symbols.bk]:
return False
if b[r][c] in [symbols.wm, symbols.wk] and r == 0:
return True
elif b[r][c] in [symbols.bm, symbols.bk] and r == 7:
return True
return False
def color_square(c, r):
if (c + r) % 2 == 1:
return symbols.w
return symbols.b
def opposite(p1, p2):
if ((p1 == symbols.wm or p1 == symbols.wk) and (p2 == symbols.bm or p2 == symbols.bk)):
return True
if ((p2 == symbols.wm or p2 == symbols.wk) and (p1 == symbols.bm or p1 == symbols.bk)):
return True
return False
def write_movement(player, row, col, movements):
message = ""
if player == 'w':
player_name = "White"
else:
player_name = "Black"
if len(movements) == 0:
message = "%s player cannot move this turn." % player_name
else:
message = "%s player move from (%d,%d) to " % (player_name, row, col)
for i in range(len(movements)):
if i > 0:
message += ","
message += "(%d, %d)" % (movements[i][0], movements[i][1])
write(message)
class Board(object):
# The checkers board is represented as a list of eight 8-character strings (one per row)
def __init__(self):
self._turn = "w"
self.board = [
symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm,
symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b,
symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b,
symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm,
symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b,
]
"""
self.board = [
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.bm + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.wm + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.wk + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
]
"""
self.legal_movements = None
if self.turn() in ai_players:
task_delay()
def legalmoves(self, from_coords, board):
# print(from_coords)
self.legal_movements = get_legal_movements_subprocess(self.board, from_coords[0], from_coords[1])
return self.legal_movements
def is_legal(self, from_coords):
for i in range(0, len(self.legal_movements)):
for j in range(0, len(self.legal_movements[i])):
move = self.legal_movements[i][j]
if move[0] == from_coords[0] and move[1] == from_coords[1]:
return True
return False
def make_movement(self, row1, col1, row2, col2):
for i in range(0, len(self.legal_movements)):
move = self.legal_movements[i][-1]
if move[0] == row2 and move[1] == col2:
write_movement(self.turn(), row1, col1, self.legal_movements[i])
for j in range(0, len(self.legal_movements[i])):
if j == 0:
self.make_simple_movement(row1, col1, self.legal_movements[i][j][0], self.legal_movements[i][j][1])
else:
self.make_simple_movement(self.legal_movements[i][j - 1][0], self.legal_movements[i][j - 1][1], self.legal_movements[i][j][0], self.legal_movements[i][j][1])
PiecesImagesUpdate()
app.update()
sleep(0.33)
return True
return False
def make_movement_ai(self, movements):
if len(movements) == 0:
passes[self.turn()] += 1
write_movement(self.turn(), 0, 0, [])
else:
passes[self.turn()] = 0
print(movements[1:])
write_movement(self.turn(), movements[0][0], movements[0][1], movements[1:])
for j in range(1, len(movements)):
self.make_simple_movement(movements[j - 1][0], movements[j - 1][1], movements[j][0], movements[j][1])
PiecesImagesUpdate()
app.update()
sleep(0.33)
return True
def make_simple_movement(self, row1, col1, row2, col2):
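# Apply one diagonal hop on the board strings: remove the first opposing piece
# jumped over, move the piece, and crown it if it reaches the far row.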
delta_row = sign(row2 - row1)
delta_col = sign(col2 - col1)
ans = deepcopy(self.board)
for i in range(8):
ans[i] = list(ans[i])
c = col1 + delta_col
r = row1 + delta_row
while r != row2:
if opposite(ans[r][c], ans[row1][col1]):
ans[r][c] = color_square(r, c)
break
r += delta_row
c += delta_col
ans[row2][col2] = ans[row1][col1]
ans[row1][col1] = color_square(row1, col1)
if should_upgrade(ans, row2, col2):
ans = upgrade_to_king(ans, row2, col2)
for i in range(8):
ans[i] = "".join(ans[i])
self.board = ans
for i in range(8):
#ans[i] = "".join(ans[i])
print(self.board[i])
def ai_movement(self):
moves = get_next_move_subprocess(self.board, self.turn())
print(moves)
return moves
def check_mate(self):
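# Return 'w'/'b' for a win, 't' for a tie, or None while the game continues,
# based on consecutive passes and the remaining piece counts.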
whitePieces = 0
blackPieces = 0
if (passes['b'] >= 3 and passes['w'] >= 2) or (passes['b'] >= 2 and passes['w'] >= 3):
return 't'
elif passes['b'] >= 3:
return 'w'
elif passes['w'] >= 3:
return 'b'
for i in range(8):
for j in range(8):
if self.board[i][j] == symbols.wm or self.board[i][j] == symbols.wk:
whitePieces += 1
if self.board[i][j] == symbols.bm or self.board[i][j] == symbols.bk:
blackPieces += 1
if whitePieces > 1 and blackPieces > 1:
return None
if whitePieces == blackPieces:
return "t"
if whitePieces == 0:
return "b"
elif blackPieces == 0:
return "w"
return None
def turn(self):
return self._turn
def next_turn(self):
self._turn = 'w' if self._turn == 'b' else 'b'
board = Board()
class CheckersApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
#tk.Tk.iconbitmap(self, default = "chessgame.ico")
tk.Tk.wm_title(self, "Checkers GAMA")
container = tk.Frame(self)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_columnconfigure(0, weight = 1)
global selected, piece_selected, square_selected
selected = False
square_selected = ""
piece_selected = ""
global game_played
game_played = False
self.frames = {}
for F in [Game]:
frame = F(container, self)
self.frames[F] = frame
frame.grid(row = 0, column = 0, sticky = "NSEW")
self.show_frame(Game)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
def Reset_Board(controller):
global selected, square_selected, piece_selected, game_over
game_over = False
selected = False
square_selected = ""
piece_selected = ""
class Game(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self,parent)
loadimages()
Reset_Board(controller)
grid = Frame(self)
grid.grid(sticky=N + S + E + W, column=0, row=7, columnspan=2)
Grid.rowconfigure(self, 7, weight=1, minsize=70)
Grid.columnconfigure(self, 0, weight=1, minsize=70)
global board, stack, log
global game_played
game_played = True
global Buttons, Colours
Buttons = []
Colours = []
# Import images for pieces
# where
# colour + PIECE + .gif = file name
for x in range(0, 8):
Even = True
if x % 2:
Even = False
Buttons.append([])
Colours.append([])
for y in range(0, 8):
if Even:
btn = tk.Button(self, bg="white", image=Empty)
Colours[x].append("white")
Even = False
else:
btn = tk.Button(self, bg="black", image=Empty)
Colours[x].append("black")
Even = True
if board.board[y][x] != symbols.w and board.board[y][x] != symbols.b:
if board.board[y][x] == symbols.bm:
btn.configure(image=bM)
if board.board[y][x] == symbols.bk:
btn.configure(image=bK)
if board.board[y][x] == symbols.wm:
btn.configure(image=wM)
if board.board[y][x] == symbols.wk:
btn.configure(image=wK)
btn.grid(column=x, row=y, sticky=N + S + E + W)
Buttons[x].append(btn)
for x in range(0, 8):
Grid.columnconfigure(self, x, weight=1, minsize=70)
for y in range(0, 8):
Grid.rowconfigure(self, y, weight=1, minsize=70)
global Turn_var
Turn_var = StringVar()
PlayerTurnLabel = tk.Label(self, textvariable=Turn_var ,font=30)
PlayerTurnLabel.grid(column=10, row=0, sticky=(N,S))
gap = ttk.Label(self, text=" "*25)
gap.grid(column=9, row=0,rowspan=8, sticky=(N,S))
gap2 = ttk.Label(self, text=" "*20)
gap2.grid(column=16, row=1, rowspan=8, sticky=(N,S))
log = Text(self, state='disabled',height=25, width=60, wrap="none",font=26)
log.grid(column=10, row=1, rowspan=6,sticky=(N,S,E,W))
button = ttk.Button(self, text = "Print Board", command = lambda: board.boardshow())
button.grid(column=9, row=7,ipady=10,ipadx=10)
def PiecesImagesUpdate():
for i in range(0, 8):
for j in range(0, 8):
btn = Buttons[i][j]
if board.board[j][i] != symbols.w and board.board[j][i] != symbols.b:
if board.board[j][i] == symbols.bm:
btn.configure(image=bM)
if board.board[j][i] == symbols.bk:
btn.configure(image=bK)
if board.board[j][i] == symbols.wm:
btn.configure(image=wM)
if board.board[j][i] == symbols.wk:
btn.configure(image=wK)
if selected != True:
btn = Buttons[i][j]
colour = Colours[j][i]
if colour == "white":
btn.configure(bg="white")
if colour == "black":
btn.configure(bg="black")
if board.board[j][i] == symbols.w or board.board[j][i] == symbols.b:
btn = Buttons[i][j]
btn.configure(image=Empty)
turn = board.turn()
if turn == "w":
Turn_var.set("White's turn")
if turn == "b":
Turn_var.set("Black's turn")
def click(event):
global selected, square_selected, piece_selected, Playing, game_over
try:
PiecesImagesUpdate()
grid_info = event.widget.grid_info()
z = grid_info["column"]
w = grid_info["row"]
if 0 <= z <= 8 and 0 <= w <= 8:
coords = z,w
Playing = True
except (KeyError, AttributeError):
Playing = False
if Playing == True and 0 <= z <= 8 and 0 <= w <= 8 and game_over == False and not (board.turn() in ai_players):
if 0 <= z <= 8 and 0 <= w <= 8:
if selected == True and (square_selected == coords):
try:
selected = False
square_selected = ""
piece_selected = None
except IndexError:
selected = True
elif selected == False:
try:
currentTurn = board.turn()
print(board.board[w][z], w, z)
if board.board[w][z] != symbols.w and board.board[w][z] != symbols.b:
btn = Buttons[z][w]
square_selected = z, w
piece_selected = w, z
if currentTurn == "w":
if board.board[square_selected[1]][square_selected[0]][0] in [symbols.wm, symbols.wk]:
btn.configure(bg=colour_selected)
selected = True
if currentTurn == "b":
if board.board[square_selected[1]][square_selected[0]][0] in [symbols.bm, symbols.bk]:
btn.configure(bg=colour_selected)
selected = True
if selected != True:
square_selected = ""
piece_selected = None
except IndexError:
selected = False
elif selected == True:
if piece_selected != None:
try:
pw, pz = piece_selected
#print("JEEE ", pw, pz, w, z)
if board.is_legal([w, z]):
board.make_movement(pw, pz, w, z)
board.next_turn()
selected = False
square_selected = ""
piece_selected = None
PiecesImagesUpdate()
except AttributeError as e:
print(e)
print(selected, piece_selected)
if selected == True and piece_selected is not None:
from_coords = [square_selected[1], square_selected[0]]
possiblemoves = board.legalmoves(from_coords, board)
#print(possiblemoves)
for i in range(0, len(possiblemoves)):
for j in range(0, len(possiblemoves[i])):
moves = possiblemoves[i][j]
btn = Buttons[moves[1]][moves[0]]
btn.configure(bg=colour_possible_moves)
elif selected == False:
for i in range(0, 8):
for j in range(0, 8):
btn = Buttons[i][j]
colour = Colours[i][j]
if colour == "white":
btn.configure(bg="white")
if colour == "black":
btn.configure(bg="black")
win = board.check_mate()
if win != None:
if win == "b":
write("Black Wins")
game_over = True
if win == "w":
write("White Wins")
game_over = True
if win == "t":
write("Tie")
game_over = True
if not game_over and board.turn() in ai_players:
task_delay()
def loadimages():
global bM, bK
global wM, wK, Empty
# black pieces
bM = PhotoImage(file="Pieces/bm.gif")
bK = PhotoImage(file="Pieces/bk.gif")
# white pieces
wM = PhotoImage(file="Pieces/wm.gif")
wK = PhotoImage(file="Pieces/wk.gif")
Empty = PhotoImage(file="Pieces/Empty.gif")
app = CheckersApp()
app.bind("<Button-1>", click)
app.mainloop() | checkers_gui.py | import tkinter as tk
import subprocess
import os
import signal
from tkinter import *
from tkinter import ttk, filedialog, messagebox, colorchooser
from copy import copy, deepcopy
from time import sleep
from threading import Timer
sign = lambda x: (1, -1)[x < 0]
global colour_selected, colour_possible_moves
colour_selected = "khaki"
colour_possible_moves = "orange"
LARGE_FONT = ("Verdana", 40)
ai_players = ['b']
#ai_players = ['w', 'b']
passes = {
'w' : 0,
'b' : 0,
}
depth_ai_player = {
'w' : 10,
'b' : 10,
}
def write(msg):
log['state'] = 'normal'
if log.index('end-1c')!='1.0':
log.insert('end', '\n')
log.see(END)
log.insert('end', msg)
log['state'] = 'disabled'
def clear_log():
log['state'] = 'normal'
log.delete('1.0', END)
log['state'] = 'disabled'
#os.killpg(os.getpgid(p.pid), signal.SIGTERM)
def start_engine():
global p
p = subprocess.Popen(['out/console_interface.exe'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
start_engine()
def get_legal_movements_subprocess(board, x, y):
global p
data = 'get_legal_movements\n'
data += "%d %d\n" % (x, y)
for i in range(8):
data += "%s\n" % board[i]
print(data)
try:
outs, errs = p.communicate(bytes(data, 'ascii'), timeout=10)
p.kill()
print(outs)
print(outs.decode('ascii'))
lines = list(filter(None, outs.decode('ascii').split('|')))
movements = []
for m in lines[1:]:
l = list(map(int, list(filter(None, m.split(' ')))))
lis = []
for i in range(0, len(l), 2):
lis.append([l[i], l[i + 1]])
movements.append(lis)
start_engine()
return movements
except Exception as e:
print(e)
return []
game_over = False
def call_ai_movement():
global game_over
if game_over:
return
movements = board.ai_movement()
board.make_movement_ai(movements)
board.next_turn()
win = board.check_mate()
if win != None:
if win == "b":
write("Black Wins")
game_over = True
if win == "w":
write("White Wins")
game_over = True
if win == "t":
write("Tie")
game_over = True
if board.turn() in ai_players:
task_delay()
def task_delay():
t = Timer(0.5, call_ai_movement)
t.start()
def get_next_move_subprocess(board, player):
global p
data = 'get_next_movement\n'
data += "%s %d\n" % (player, depth_ai_player[player])
for i in range(8):
data += "%s\n" % board[i]
print(data)
try:
outs, errs = p.communicate(bytes(data, 'ascii'), timeout=15)
p.kill()
line = outs.decode('ascii')
print(line)
print("Number of nodes: ",line.count("!"))
l = list(map(int, list(filter(None, line.replace("!", "").split(' ')))))
lis = []
for i in range(0, len(l), 2):
lis.append([l[i], l[i + 1]])
movements = lis
start_engine()
return movements
except Exception as e:
print(e)
return []
"""
p.stdin.write(b'abc\n')
p.stdin.close()
print("Reading result 1:", p.stdout.readline().decode(encoding='ascii'))
exit(0)
"""
class symbols(object):
b = ' '
w = '#'
bm = '+'
bk = '*'
wm = '-'
wk = '%'
def upgrade_to_king(b, r, c):
ans = deepcopy(b)
if b[r][c] in [symbols.wm, symbols.wk]:
ans[r][c] = symbols.wk
elif b[r][c] in [symbols.bm, symbols.bk]:
ans[r][c] = symbols.bk
return ans
def should_upgrade(b, r, c):
if b[r][c] in [symbols.wk, symbols.bk]:
return False
if b[r][c] in [symbols.wm, symbols.wk] and r == 0:
return True
elif b[r][c] in [symbols.bm, symbols.bk] and r == 7:
return True
return False
def color_square(c, r):
if (c + r) % 2 == 1:
return symbols.w
return symbols.b
def opposite(p1, p2):
if ((p1 == symbols.wm or p1 == symbols.wk) and (p2 == symbols.bm or p2 == symbols.bk)):
return True
if ((p2 == symbols.wm or p2 == symbols.wk) and (p1 == symbols.bm or p1 == symbols.bk)):
return True
return False
def write_movement(player, row, col, movements):
message = ""
if player == 'w':
player_name = "White"
else:
player_name = "Black"
if len(movements) == 0:
message = "%s player cannot move this turn." % player_name
else:
message = "%s player move from (%d,%d) to " % (player_name, row, col)
for i in range(len(movements)):
if i > 0:
message += ","
message += "(%d, %d)" % (movements[i][0], movements[i][1])
write(message)
class Board(object):
# The checkers board is represented as a list of eight 8-character strings (one per row)
def __init__(self):
self._turn = "w"
self.board = [
symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm,
symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b,
symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm + symbols.b + symbols.bm,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b,
symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm,
symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b + symbols.wm + symbols.b,
]
"""
self.board = [
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.bm + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.w + symbols.b + symbols.wm + symbols.b + symbols.w + symbols.b,
symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b + symbols.w,
symbols.w + symbols.b + symbols.wk + symbols.b + symbols.w + symbols.b + symbols.w + symbols.b,
]
"""
self.legal_movements = None
if self.turn() in ai_players:
task_delay()
def legalmoves(self, from_coords, board):
# print(from_coords)
self.legal_movements = get_legal_movements_subprocess(self.board, from_coords[0], from_coords[1])
return self.legal_movements
def is_legal(self, from_coords):
for i in range(0, len(self.legal_movements)):
for j in range(0, len(self.legal_movements[i])):
move = self.legal_movements[i][j]
if move[0] == from_coords[0] and move[1] == from_coords[1]:
return True
return False
def make_movement(self, row1, col1, row2, col2):
for i in range(0, len(self.legal_movements)):
move = self.legal_movements[i][-1]
if move[0] == row2 and move[1] == col2:
write_movement(self.turn(), row1, col1, self.legal_movements[i])
for j in range(0, len(self.legal_movements[i])):
if j == 0:
self.make_simple_movement(row1, col1, self.legal_movements[i][j][0], self.legal_movements[i][j][1])
else:
self.make_simple_movement(self.legal_movements[i][j - 1][0], self.legal_movements[i][j - 1][1], self.legal_movements[i][j][0], self.legal_movements[i][j][1])
PiecesImagesUpdate()
app.update()
sleep(0.33)
return True
return False
def make_movement_ai(self, movements):
if len(movements) == 0:
passes[self.turn()] += 1
write_movement(self.turn(), 0, 0, [])
else:
passes[self.turn()] = 0
print(movements[1:])
write_movement(self.turn(), movements[0][0], movements[0][1], movements[1:])
for j in range(1, len(movements)):
self.make_simple_movement(movements[j - 1][0], movements[j - 1][1], movements[j][0], movements[j][1])
PiecesImagesUpdate()
app.update()
sleep(0.33)
return True
def make_simple_movement(self, row1, col1, row2, col2):
delta_row = sign(row2 - row1)
delta_col = sign(col2 - col1)
ans = deepcopy(self.board)
for i in range(8):
ans[i] = list(ans[i])
c = col1 + delta_col
r = row1 + delta_row
while r != row2:
if opposite(ans[r][c], ans[row1][col1]):
ans[r][c] = color_square(r, c)
break
r += delta_row
c += delta_col
ans[row2][col2] = ans[row1][col1]
ans[row1][col1] = color_square(row1, col1)
if should_upgrade(ans, row2, col2):
ans = upgrade_to_king(ans, row2, col2)
for i in range(8):
ans[i] = "".join(ans[i])
self.board = ans
for i in range(8):
#ans[i] = "".join(ans[i])
print(self.board[i])
def ai_movement(self):
moves = get_next_move_subprocess(self.board, self.turn())
print(moves)
return moves
def check_mate(self):
whitePieces = 0
blackPieces = 0
if (passes['b'] >= 3 and passes['w'] >= 2) or (passes['b'] >= 2 and passes['w'] >= 3):
return 't'
elif passes['b'] >= 3:
return 'w'
elif passes['w'] >= 3:
return 'b'
for i in range(8):
for j in range(8):
if self.board[i][j] == symbols.wm or self.board[i][j] == symbols.wk:
whitePieces += 1
if self.board[i][j] == symbols.bm or self.board[i][j] == symbols.bk:
blackPieces += 1
if whitePieces > 1 and blackPieces > 1:
return None
if whitePieces == blackPieces:
return "t"
if whitePieces == 0:
return "b"
elif blackPieces == 0:
return "w"
return None
def turn(self):
return self._turn
def next_turn(self):
self._turn = 'w' if self._turn == 'b' else 'b'
board = Board()
class CheckersApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
#tk.Tk.iconbitmap(self, default = "chessgame.ico")
tk.Tk.wm_title(self, "Checkers GAMA")
container = tk.Frame(self)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_columnconfigure(0, weight = 1)
global selected, piece_selected, square_selected
selected = False
square_selected = ""
piece_selected = ""
global game_played
game_played = False
self.frames = {}
for F in [Game]:
frame = F(container, self)
self.frames[F] = frame
frame.grid(row = 0, column = 0, sticky = "NSEW")
self.show_frame(Game)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
def Reset_Board(controller):
global selected, square_selected, piece_selected, game_over
game_over = False
selected = False
square_selected = ""
piece_selected = ""
class Game(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self,parent)
loadimages()
Reset_Board(controller)
grid = Frame(self)
grid.grid(sticky=N + S + E + W, column=0, row=7, columnspan=2)
Grid.rowconfigure(self, 7, weight=1, minsize=70)
Grid.columnconfigure(self, 0, weight=1, minsize=70)
global board, stack, log
global game_played
game_played = True
global Buttons, Colours
Buttons = []
Colours = []
# Import images for pieces
# where
# colour + PIECE + .gif = file name
for x in range(0, 8):
Even = True
if x % 2:
Even = False
Buttons.append([])
Colours.append([])
for y in range(0, 8):
if Even:
btn = tk.Button(self, bg="white", image=Empty)
Colours[x].append("white")
Even = False
else:
btn = tk.Button(self, bg="black", image=Empty)
Colours[x].append("black")
Even = True
if board.board[y][x] != symbols.w and board.board[y][x] != symbols.b:
if board.board[y][x] == symbols.bm:
btn.configure(image=bM)
if board.board[y][x] == symbols.bk:
btn.configure(image=bK)
if board.board[y][x] == symbols.wm:
btn.configure(image=wM)
if board.board[y][x] == symbols.wk:
btn.configure(image=wK)
btn.grid(column=x, row=y, sticky=N + S + E + W)
Buttons[x].append(btn)
for x in range(0, 8):
Grid.columnconfigure(self, x, weight=1, minsize=70)
for y in range(0, 8):
Grid.rowconfigure(self, y, weight=1, minsize=70)
global Turn_var
Turn_var = StringVar()
PlayerTurnLabel = tk.Label(self, textvariable=Turn_var ,font=30)
PlayerTurnLabel.grid(column=10, row=0, sticky=(N,S))
gap = ttk.Label(self, text=" "*25)
gap.grid(column=9, row=0,rowspan=8, sticky=(N,S))
gap2 = ttk.Label(self, text=" "*20)
gap2.grid(column=16, row=1, rowspan=8, sticky=(N,S))
log = Text(self, state='disabled',height=25, width=60, wrap="none",font=26)
log.grid(column=10, row=1, rowspan=6,sticky=(N,S,E,W))
button = ttk.Button(self, text = "Print Board", command = lambda: board.boardshow())
button.grid(column=9, row=7,ipady=10,ipadx=10)
def PiecesImagesUpdate():
for i in range(0, 8):
for j in range(0, 8):
btn = Buttons[i][j]
if board.board[j][i] != symbols.w and board.board[j][i] != symbols.b:
if board.board[j][i] == symbols.bm:
btn.configure(image=bM)
if board.board[j][i] == symbols.bk:
btn.configure(image=bK)
if board.board[j][i] == symbols.wm:
btn.configure(image=wM)
if board.board[j][i] == symbols.wk:
btn.configure(image=wK)
if selected != True:
btn = Buttons[i][j]
colour = Colours[j][i]
if colour == "white":
btn.configure(bg="white")
if colour == "black":
btn.configure(bg="black")
if board.board[j][i] == symbols.w or board.board[j][i] == symbols.b:
btn = Buttons[i][j]
btn.configure(image=Empty)
turn = board.turn()
if turn == "w":
Turn_var.set("White's turn")
if turn == "b":
Turn_var.set("Black's turn")
def click(event):
global selected, square_selected, piece_selected, Playing, game_over
try:
PiecesImagesUpdate()
grid_info = event.widget.grid_info()
z = grid_info["column"]
w = grid_info["row"]
if 0 <= z <= 8 and 0 <= w <= 8:
coords = z,w
Playing = True
except KeyError or AttributeError:
Playing = False
if Playing == True and 0 <= z <= 8 and 0 <= w <= 8 and game_over == False and not (board.turn() in ai_players):
if 0 <= z <= 8 and 0 <= w <= 8:
if selected == True and (square_selected == coords):
try:
selected = False
square_selected = ""
piece_selected = None
except IndexError:
selected = True
elif selected == False:
try:
currentTurn = board.turn()
print(board.board[w][z], w, z)
if board.board[w][z] != symbols.w or board.board[w][z] != symbols.b:
btn = Buttons[z][w]
square_selected = z, w
piece_selected = w, z
if currentTurn == "w":
if board.board[square_selected[1]][square_selected[0]][0] in [symbols.wm, symbols.wk]:
btn.configure(bg=colour_selected)
selected = True
if currentTurn == "b":
if board.board[square_selected[1]][square_selected[0]][0] in [symbols.bm, symbols.bk]:
btn.configure(bg=colour_selected)
selected = True
if selected != True:
square_selected = ""
piece_selected = None
except IndexError:
selected = False
elif selected == True:
if piece_selected != None:
try:
pw, pz = piece_selected
#print("JEEE ", pw, pz, w, z)
if board.is_legal([w, z]):
board.make_movement(pw, pz, w, z)
board.next_turn()
selected = False
square_selected = ""
piece_selected = None
PiecesImagesUpdate()
except AttributeError as e:
print(e)
print(selected, piece_selected)
if selected == True and piece_selected is not None:
from_coords = [square_selected[1], square_selected[0]]
possiblemoves = board.legalmoves(from_coords, board)
#print(possiblemoves)
for i in range(0, len(possiblemoves)):
for j in range(0, len(possiblemoves[i])):
moves = possiblemoves[i][j]
btn = Buttons[moves[1]][moves[0]]
btn.configure(bg=colour_possible_moves)
elif selected == False:
for i in range(0, 8):
for j in range(0, 8):
btn = Buttons[i][j]
colour = Colours[i][j]
if colour == "white":
btn.configure(bg="white")
if colour == "black":
btn.configure(bg="black")
win = board.check_mate()
if win != None:
if win == "b":
write("Black Wins")
game_over = True
if win == "w":
write("White Wins")
game_over = True
if win == "t":
write("Tie")
game_over = True
if not game_over and board.turn() in ai_players:
task_delay()
def loadimages():
global bM, bK
global wM, wK, Empty
# black pieces
bM = PhotoImage(file="Pieces/bm.gif")
bK = PhotoImage(file="Pieces/bk.gif")
# white pieces
wM = PhotoImage(file="Pieces/wm.gif")
wK = PhotoImage(file="Pieces/wk.gif")
Empty = PhotoImage(file="Pieces/Empty.gif")
app = CheckersApp()
app.bind("<Button-1>", click)
app.mainloop() | 0.219923 | 0.105257 |
def load_result_file(path):
results = []
with open(path) as f:
r = Result()
for line in f:
if line.startswith('==='):
results.append(r)
r = Result()
continue
name, val = line.split(':')
if name == 'total ports':
r.total_ports = int(val)
elif name == 'excess ports':
r.excess_ports = int(val)
elif name == 'mean latency (us)':
r.mean_latency_us = float(val)
elif name == 'pkt per sec':
r.pkt_per_sec = float(val)
elif name == 'pkt send failure':
r.send_failure = int(val)
elif name == 'total pkt sent':
r.total_pkt_send = int(val)
elif name == 'bess_drops':
r.bess_drops = int(val)
elif name == 'experiment duration':
r.exp_duration = float(val)
else:
print('unknown name while parsing results file:', name, val)
return results
class Result:
@classmethod
def from_netperf_stdout(cls, txt):
r = Result()
lines = txt.split('\n')
for line in lines:
if 'ran for' in line:
raw = line.split()
t = float(raw[2])
r.exp_duration = t
pkts = int(raw[5])
r.total_pkt_send = pkts
elif line.startswith('client reqs/s'):
raw = line.split()
v = float(raw[2])
r.pkt_per_sec = v
elif line.startswith('mean latency (us):'):
raw = line.split()
v = float(raw[3])
r.mean_latency_us = v
elif line.startswith('send failures:'):
raw = line.split()
v = int(raw[2])
r.send_failure = v
return r
def __init__(self):
self.excess_ports = -1
self.total_ports = -1
self.mean_latency_us = -1
self.pkt_per_sec = -1
self.send_failure = -1
self.total_pkt_send = -1
self.bess_drops = -1
self.exp_duration = -1
def set_excess_ports(self, count):
self.excess_ports = count
self.total_ports = count + 2
def generate_report(self):
txt = '\n'.join([
'total ports: {}'.format(self.total_ports),
'excess ports: {}'.format(self.excess_ports),
'mean latency (us): {}'.format(self.mean_latency_us),
'pkt per sec: {}'.format(self.pkt_per_sec),
'pkt send failure: {}'.format(self.send_failure),
'total pkt sent: {}'.format(self.total_pkt_send),
'bess_drops: {}'.format(self.bess_drops),
'experiment duration: {}'.format(self.exp_duration),
'',
])
return txt
def __repr__(self):
return '<More Ports Exp Result>' | exp/motivation/more_ports_exp/exp_result.py | def load_result_file(path):
results = []
with open(path) as f:
r = Result()
for line in f:
if line.startswith('==='):
results.append(r)
r = Result()
continue
name, val = line.split(':')
if name == 'total ports':
r.total_ports = int(val)
elif name == 'excess ports':
r.excess_ports = int(val)
elif name == 'mean latency (us)':
r.mean_latency_us = float(val)
elif name == 'pkt per sec':
r.pkt_per_sec = float(val)
elif name == 'pkt send failure':
r.send_failure = int(val)
elif name == 'total pkt sent':
r.total_pkt_send = int(val)
elif name == 'bess_drops':
r.bess_drops = int(val)
elif name == 'experiment duration':
r.exp_duration = float(val)
else:
print('unknown name while parsing results file:', name, val)
return results
class Result:
@classmethod
def from_netperf_stdout(cls, txt):
r = Result()
lines = txt.split('\n')
for line in lines:
if 'ran for' in line:
raw = line.split()
t = float(raw[2])
r.exp_duration = t
pkts = int(raw[5])
r.total_pkt_send = pkts
elif line.startswith('client reqs/s'):
raw = line.split()
v = float(raw[2])
r.pkt_per_sec = v
elif line.startswith('mean latency (us):'):
raw = line.split()
v = float(raw[3])
r.mean_latency_us = v
elif line.startswith('send failures:'):
raw = line.split()
v = int(raw[2])
r.send_failure = v
return r
def __init__(self):
self.excess_ports = -1
self.total_ports = -1
self.mean_latency_us = -1
self.pkt_per_sec = -1
self.send_failure = -1
self.total_pkt_send = -1
self.bess_drops = -1
self.exp_duration = -1
def set_excess_ports(self, count):
self.excess_ports = count
self.total_ports = count + 2
def generate_report(self):
txt = '\n'.join([
'total ports: {}'.format(self.total_ports),
'excess ports: {}'.format(self.excess_ports),
'mean latency (us): {}'.format(self.mean_latency_us),
'pkt per sec: {}'.format(self.pkt_per_sec),
'pkt send failure: {}'.format(self.send_failure),
'total pkt sent: {}'.format(self.total_pkt_send),
'bess_drops: {}'.format(self.bess_drops),
'experiment duration: {}'.format(self.exp_duration),
'',
])
return txt
def __repr__(self):
return '<More Ports Exp Result>' | 0.408631 | 0.194349 |
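# Editorial usage sketch (not part of the dataset row above): exercises the Result class
# and load_result_file from exp_result.py in a small round-trip. The module name
# 'exp_result', the temp path, and the '===' record terminator written after each report
# are assumptions inferred from the parsing code, not confirmed by the source.
from exp_result import Result, load_result_file
r = Result()
r.set_excess_ports(6)            # total_ports becomes excess + 2 = 8
r.mean_latency_us = 42.5
r.pkt_per_sec = 120000.0
with open('/tmp/more_ports_results.txt', 'w') as f:
    f.write(r.generate_report())
    f.write('===\n')             # load_result_file closes a record when it sees '==='
parsed = load_result_file('/tmp/more_ports_results.txt')
assert parsed[0].total_ports == 8 and parsed[0].excess_ports == 6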
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from sklearn import metrics
import pandas as pd
import numpy as np
import seaborn as sns
import os
import matplotlib.pyplot as plt
from scipy import stats
diabetes = load_diabetes()
diabetes_df = pd.DataFrame(data=np.c_[diabetes.data, diabetes.target], columns=diabetes.feature_names + ['target'])
diabetes_df.columns = ['Age', 'Sex', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Target']
encoded_sex = pd.get_dummies(diabetes_df['Sex'], drop_first=True)
diabetes_df = pd.concat([diabetes_df, encoded_sex], axis=1)
diabetes_df.rename(columns = {list(diabetes_df)[11]: "Encoded Sex"}, inplace=True)
diabetes_df.drop(['Sex'], axis=1, inplace=True)
z = np.abs(stats.zscore(diabetes_df))
diabetes_df_o = diabetes_df[(z < 3).all(axis=1)]
print(diabetes_df.shape)
print(diabetes_df_o.shape)
X = diabetes_df_o.loc[:, ['Age', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Encoded Sex']]
y = diabetes_df_o['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)
print(f"Intercept: {lm.intercept_}\n")
print(f"Coeficients: {lm.coef_}\n")
print(f"Named Coeficients: {pd.DataFrame(lm.coef_, X.columns)}")
pd.DataFrame(lm.coef_, X.columns).to_csv("Linear Regression Coefficients")
predicted_values = lm.predict(X_test)
os.makedirs('plots/', exist_ok=True)
sns.set(palette="Paired")
residuals = y_test - predicted_values
sns.scatterplot(y_test, predicted_values, marker="H")
plt.plot([0, 300], [0, 300], ':', linewidth=2.0, color='g')
plt.xlabel('Real Value')
plt.ylabel('Predicted Value')
plt.title('Linear Regression Real Value vs Predicted Values')
plt.savefig('plots/Linear_Predicted.png')
plt.clf()
sns.scatterplot(y_test, residuals, marker=5)
plt.plot([300, 0], [0, 0], ':',linewidth=2.0, color='g')
plt.xlabel('Real Value')
plt.ylabel('Residuals')
plt.title('Linear Regression Real Value vs Residuals')
plt.savefig('plots/Linear_Residuals.png')
plt.clf()
sns.distplot(residuals, bins=20, kde=False)
plt.plot([0, 0], [50, 0], ':', linewidth=2.0, )
plt.title('Linear Regression Residual Distribution', color='g')
plt.savefig('plots/Linear_Residual_Distn.png')
plt.clf()
print(f"MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}")
print(f"MSE error: {metrics.mean_squared_error(y_test, predicted_values)}")
print(f"RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}") | Python_Scripts/linear_regression.py | from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from sklearn import metrics
import pandas as pd
import numpy as np
import seaborn as sns
import os
import matplotlib.pyplot as plt
from scipy import stats
diabetes = load_diabetes()
diabetes_df = pd.DataFrame(data=np.c_[diabetes.data, diabetes.target], columns=diabetes.feature_names + ['target'])
diabetes_df.columns = ['Age', 'Sex', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Target']
encoded_sex = pd.get_dummies(diabetes_df['Sex'], drop_first=True)
diabetes_df = pd.concat([diabetes_df, encoded_sex], axis=1)
diabetes_df.rename(columns = {list(diabetes_df)[11]: "Encoded Sex"}, inplace=True)
diabetes_df.drop(['Sex'], axis=1, inplace=True)
z = np.abs(stats.zscore(diabetes_df))
diabetes_df_o = diabetes_df[(z < 3).all(axis=1)]
print(diabetes_df.shape)
print(diabetes_df_o.shape)
X = diabetes_df_o.loc[:, ['Age', 'BMI', 'BP', 'map', 'tc', 'ldl', 'hdl', 'tch', 'glu', 'Encoded Sex']]
y = diabetes_df_o['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)
print(f"Intercept: {lm.intercept_}\n")
print(f"Coeficients: {lm.coef_}\n")
print(f"Named Coeficients: {pd.DataFrame(lm.coef_, X.columns)}")
pd.DataFrame(lm.coef_, X.columns).to_csv("Linear Regression Coefficients")
predicted_values = lm.predict(X_test)
os.makedirs('plots/', exist_ok=True)
sns.set(palette="Paired")
residuals = y_test - predicted_values
sns.scatterplot(y_test, predicted_values, marker="H")
plt.plot([0, 300], [0, 300], ':', linewidth=2.0, color='g')
plt.xlabel('Real Value')
plt.ylabel('Predicted Value')
plt.title('Linear Regression Real Value vs Predicted Values')
plt.savefig('plots/Linear_Predicted.png')
plt.clf()
sns.scatterplot(y_test, residuals, marker=5)
plt.plot([300, 0], [0, 0], ':',linewidth=2.0, color='g')
plt.xlabel('Real Value')
plt.ylabel('Residuals')
plt.title('Linear Regression Real Value vs Residuals')
plt.savefig('plots/Linear_Residuals.png')
plt.clf()
sns.distplot(residuals, bins=20, kde=False)
plt.plot([0, 0], [50, 0], ':', linewidth=2.0, )
plt.title('Linear Regression Residual Distribution', color='g')
plt.savefig('plots/Linear_Residual_Distn.png')
plt.clf()
print(f"MAE error(avg abs residual): {metrics.mean_absolute_error(y_test, predicted_values)}")
print(f"MSE error: {metrics.mean_squared_error(y_test, predicted_values)}")
print(f"RMSE error: {np.sqrt(metrics.mean_squared_error(y_test, predicted_values))}") | 0.629319 | 0.670258 |
import numpy
import os
folder_gdsc = os.path.dirname(__file__)+"/"
gdsc_file = folder_gdsc+"ic50_excl_empty_filtered_cell_lines_drugs.txt"
gdsc_file_std = folder_gdsc+"ic50_excl_empty_filtered_cell_lines_drugs_standardised.txt"
def load_gdsc(location=None,standardised=False,sep=","):
"""
Load in data. We get a masked array, and set masked values to 0.
Returns:
X Drug sensitivity values (original)
    X_min Drug sensitivity values, shifted by subtracting (the lowest value in the dataset - 1), so the smallest known value becomes 1
M Mask of known vs unknown values
drug_names List of drug names
cell_lines List of which cell lines they are
cancer_types List of the cancer types of the cell lines
tissues List of tissue types of the cell lines
"""
if location:
fin = location
else:
fin = gdsc_file if not standardised else gdsc_file_std
lines = [line.split("\n")[0].split("\r")[0].split(sep) for line in open(fin,'r').readlines()]
drug_names = lines[0][3:]
cell_lines = []
cancer_types = []
tissues = []
X = []
M = []
for line in lines[1:]:
cell_lines.append(line[0])
cancer_types.append(line[1])
tissues.append(line[2])
X.append([float(v) if v != '' else 0.0 for v in line[3:]])
M.append([1.0 if v != '' else 0.0 for v in line[3:]])
X = numpy.array(X,dtype=float)
M = numpy.array(M,dtype=float)
minimum = X.min()-1
X_min = []
for row,row_M in zip(X,M):
X_min.append([v-minimum if m else 0.0 for v,m in zip(row,row_M)])
X_min = numpy.array(X_min,dtype=float)
return (X,X_min,M,drug_names,cell_lines,cancer_types,tissues)
def negate_gdsc(X,M):
''' Take in the Sanger dataset, take the negative of all values, and shift
to positive values (+1), for interpretability. '''
lowest_value = 0
X = -X
minimum = X.min()-lowest_value
X_min = []
for row,row_M in zip(X,M):
X_min.append([v-minimum if m else 0.0 for v,m in zip(row,row_M)])
X_min = numpy.array(X_min,dtype=float)
return X_min
def store_gdsc(location,X,M,drug_names,cell_lines,cancer_types,tissues):
''' Store the data X. First line is drug names, then comes the data.
For the data, first column is cell line name, second is cancer type,
third is tissue, then follows the drug sensitivity values.
For missing values we store nothing '''
fout = open(location,'w')
fout.write("Cell Line\tCancer Type\tTissue\t" + "\t".join(drug_names) + "\n")
for i,(cell_line,cancer_type,tissue,row) in enumerate(zip(cell_lines,cancer_types,tissues,X)):
line = cell_line+"\t"+cancer_type+"\t"+tissue+"\t"
data = [str(val) if M[i][j] else "" for (j,val) in enumerate(row)]
line += "\t".join(data) + "\n"
fout.write(line)
fout.close()
def load_kernels(folder,file_names):
''' Load in all the files specified in the list <file_names> in <folder>,
and return as a list, along with the drug/cell line names.'''
kernels = []
for name in file_names:
lines = open(folder+name,'r').readlines()
#entity_names = lines[0]
values = [line.split("\t") for line in lines[1:]]
kernel = numpy.array(values,dtype=float)
kernels.append(kernel)
return kernels
def load_features(location,delim="\t"):
''' Load in the features at the specified location, ignoring the first
row (column names) and column (row names). '''
lines = open(location,'r').readlines()
lines = numpy.array([line.split("\n")[0].split(delim) for line in lines[1:]])
values = numpy.array(lines[0:,1:],dtype=float)
return (values)
'''
(X,X_min,M,drug_names,cell_lines,cancer_types,tissues) = load_gdsc()
(I,J)= X.shape
print I,J
print I*J, M.sum(), M.sum()/(I*J)
''' | data_drug_sensitivity/gdsc/load_data.py | import numpy
import os
folder_gdsc = os.path.dirname(__file__)+"/"
gdsc_file = folder_gdsc+"ic50_excl_empty_filtered_cell_lines_drugs.txt"
gdsc_file_std = folder_gdsc+"ic50_excl_empty_filtered_cell_lines_drugs_standardised.txt"
def load_gdsc(location=None,standardised=False,sep=","):
"""
Load in data. We get a masked array, and set masked values to 0.
Returns:
X Drug sensitivity values (original)
    X_min Drug sensitivity values, shifted by subtracting (the lowest value in the dataset - 1), so the smallest known value becomes 1
M Mask of known vs unknown values
drug_names List of drug names
cell_lines List of which cell lines they are
cancer_types List of the cancer types of the cell lines
tissues List of tissue types of the cell lines
"""
if location:
fin = location
else:
fin = gdsc_file if not standardised else gdsc_file_std
lines = [line.split("\n")[0].split("\r")[0].split(sep) for line in open(fin,'r').readlines()]
drug_names = lines[0][3:]
cell_lines = []
cancer_types = []
tissues = []
X = []
M = []
for line in lines[1:]:
cell_lines.append(line[0])
cancer_types.append(line[1])
tissues.append(line[2])
X.append([float(v) if v != '' else 0.0 for v in line[3:]])
M.append([1.0 if v != '' else 0.0 for v in line[3:]])
X = numpy.array(X,dtype=float)
M = numpy.array(M,dtype=float)
minimum = X.min()-1
X_min = []
for row,row_M in zip(X,M):
X_min.append([v-minimum if m else 0.0 for v,m in zip(row,row_M)])
X_min = numpy.array(X_min,dtype=float)
return (X,X_min,M,drug_names,cell_lines,cancer_types,tissues)
def negate_gdsc(X,M):
''' Take in the Sanger dataset, take the negative of all values, and shift
to positive values (+1), for interpretability. '''
lowest_value = 0
X = -X
minimum = X.min()-lowest_value
X_min = []
for row,row_M in zip(X,M):
X_min.append([v-minimum if m else 0.0 for v,m in zip(row,row_M)])
X_min = numpy.array(X_min,dtype=float)
return X_min
def store_gdsc(location,X,M,drug_names,cell_lines,cancer_types,tissues):
''' Store the data X. First line is drug names, then comes the data.
For the data, first column is cell line name, second is cancer type,
third is tissue, then follows the drug sensitivity values.
For missing values we store nothing '''
fout = open(location,'w')
fout.write("Cell Line\tCancer Type\tTissue\t" + "\t".join(drug_names) + "\n")
for i,(cell_line,cancer_type,tissue,row) in enumerate(zip(cell_lines,cancer_types,tissues,X)):
line = cell_line+"\t"+cancer_type+"\t"+tissue+"\t"
data = [str(val) if M[i][j] else "" for (j,val) in enumerate(row)]
line += "\t".join(data) + "\n"
fout.write(line)
fout.close()
def load_kernels(folder,file_names):
''' Load in all the files specified in the list <file_names> in <folder>,
and return as a list, along with the drug/cell line names.'''
kernels = []
for name in file_names:
lines = open(folder+name,'r').readlines()
#entity_names = lines[0]
values = [line.split("\t") for line in lines[1:]]
kernel = numpy.array(values,dtype=float)
kernels.append(kernel)
return kernels
def load_features(location,delim="\t"):
''' Load in the features at the specified location, ignoring the first
row (column names) and column (row names). '''
lines = open(location,'r').readlines()
lines = numpy.array([line.split("\n")[0].split(delim) for line in lines[1:]])
values = numpy.array(lines[0:,1:],dtype=float)
return (values)
'''
(X,X_min,M,drug_names,cell_lines,cancer_types,tissues) = load_gdsc()
(I,J)= X.shape
print I,J
print I*J, M.sum(), M.sum()/(I*J)
''' | 0.305801 | 0.442576 |
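# Editorial round-trip sketch for the helpers above: store_gdsc writes a tab-separated
# file, so reading it back with load_gdsc needs sep='\t' (the default is ','). The module
# name 'load_data', the temp path, and the toy values are illustrative assumptions.
import numpy
from load_data import store_gdsc, load_gdsc
X = numpy.array([[1.5, 2.0], [0.5, 3.5]])
M = numpy.array([[1.0, 0.0], [1.0, 1.0]])      # entry (0, 1) is treated as unknown
store_gdsc('/tmp/toy_gdsc.txt', X, M,
           drug_names=['drugA', 'drugB'], cell_lines=['CL1', 'CL2'],
           cancer_types=['lung', 'breast'], tissues=['lung', 'breast'])
X2, X2_min, M2, drugs, cells, cancers, tissues = load_gdsc('/tmp/toy_gdsc.txt', sep='\t')
assert M2[0][1] == 0.0 and X2[0][1] == 0.0      # the masked entry comes back as unknown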
import pandas as pd
import numpy as np
import MAIN.Basics as basics
import MAIN.Reinforcement as RL
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
from UTIL import FileIO
from STRATEGY.Cointegration import EGCointegration
# Read config
config_path = 'CONFIG\config_train.yml'
config_train = FileIO.read_yaml(config_path)
# Read prices
x = pd.read_csv('STATICS\PRICE\JNJ.csv')
y = pd.read_csv('STATICS\PRICE\PG.csv')
x, y = EGCointegration.clean_data(x, y, 'date', 'close')
# Separate training and testing sets
train_pct = 0.7
train_len = round(len(x) * train_pct)
idx_train = list(range(0, train_len))
idx_test = list(range(train_len, len(x)))
EG_Train = EGCointegration(x.iloc[idx_train, :], y.iloc[idx_train, :], 'date', 'close')
EG_Test = EGCointegration(x.iloc[idx_test, :], y.iloc[idx_test, :], 'date', 'close')
# Create action space
n_hist = list(np.arange(60, 601, 60))
n_forward = list(np.arange(120, 1201, 120))
trade_th = list(np.arange(1, 5.1, 1))
stop_loss = list(np.arange(1, 2.1, 0.5))
cl = list(np.arange(0.05, 0.11, 0.05))
actions = {'n_hist': n_hist,
'n_forward': n_forward,
'trade_th': trade_th,
'stop_loss': stop_loss,
'cl': cl}
n_action = int(np.product([len(actions[key]) for key in actions.keys()]))
# Create state space
transaction_cost = [0.001]
states = {'transaction_cost': transaction_cost}
n_state = len(states)
# Assign state and action spaces to config
config_train['StateSpaceState'] = states
config_train['ActionSpaceAction'] = actions
# Create and build network
one_hot = {'one_hot': {'func_name': 'one_hot',
'input_arg': 'indices',
'layer_para': {'indices': None,
'depth': n_state}}}
output_layer = {'final': {'func_name': 'fully_connected',
'input_arg': 'inputs',
'layer_para': {'inputs': None,
'num_outputs': n_action,
'biases_initializer': None,
'activation_fn': tf.nn.relu,
'weights_initializer': tf.ones_initializer()}}}
state_in = tf.placeholder(shape=[1], dtype=tf.int32)
N = basics.Network(state_in)
N.build_layers(one_hot)
N.add_layer_duplicates(output_layer, 1)
# Create learning object and perform training
RL_Train = RL.ContextualBandit(N, config_train, EG_Train)
sess = tf.Session()
RL_Train.process(sess, save=False, restore=False)
# Extract training results
action = RL_Train.recorder.record['NETWORK_ACTION']
reward = RL_Train.recorder.record['ENGINE_REWARD']
print(np.mean(reward))
df1 = pd.DataFrame()
df1['action'] = action
df1['reward'] = reward
mean_reward = df1.groupby('action').mean()
sns.distplot(mean_reward)
# Test by trading continuously
[opt_action] = sess.run([RL_Train.output], feed_dict=RL_Train.feed_dict)
opt_action = np.argmax(opt_action)
action_dict = RL_Train.action_space.convert(opt_action, 'index_to_dict')
indices = range(601, len(EG_Test.x) - 1200)
pnl = pd.DataFrame()
pnl['Time'] = EG_Test.timestamp
pnl['Trade_Profit'] = 0
pnl['Cost'] = 0
pnl['N_Trade'] = 0
import warnings
warnings.filterwarnings('ignore')
for i in indices:
if i % 100 == 0:
print(i)
EG_Test.process(index=i, transaction_cost=0.001, **action_dict)
trade_record = EG_Test.record
if (trade_record is not None) and (len(trade_record) > 0):
print('value at {}'.format(i))
trade_record = pd.DataFrame(trade_record)
trade_cost = trade_record.groupby('trade_time')['trade_cost'].sum()
close_cost = trade_record.groupby('close_time')['close_cost'].sum()
profit = trade_record.groupby('close_time')['profit'].sum()
open_pos = trade_record.groupby('trade_time')['long_short'].sum()
close_pos = trade_record.groupby('close_time')['long_short'].sum() * -1
pnl['Cost'].loc[pnl['Time'].isin(trade_cost.index)] += trade_cost.values
pnl['Cost'].loc[pnl['Time'].isin(close_cost.index)] += close_cost.values
pnl['Trade_Profit'].loc[pnl['Time'].isin(close_cost.index)] += profit.values
pnl['N_Trade'].loc[pnl['Time'].isin(trade_cost.index)] += open_pos.values
pnl['N_Trade'].loc[pnl['Time'].isin(close_cost.index)] += close_pos.values
warnings.filterwarnings(action='once')
# Plot the testing result
pnl['PnL'] = (pnl['Trade_Profit'] - pnl['Cost']).cumsum()
plt.plot(pnl['PnL'])
plt.plot(pnl['N_Trade'])
plt.plot(pnl['Time'], pnl['PnL'])
plt.plot(pnl['Time'], pnl['N_Trade'])
sess.close() | EXAMPLE/RunningScript.py | import pandas as pd
import numpy as np
import MAIN.Basics as basics
import MAIN.Reinforcement as RL
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
from UTIL import FileIO
from STRATEGY.Cointegration import EGCointegration
# Read config
config_path = 'CONFIG\config_train.yml'
config_train = FileIO.read_yaml(config_path)
# Read prices
x = pd.read_csv('STATICS\PRICE\JNJ.csv')
y = pd.read_csv('STATICS\PRICE\PG.csv')
x, y = EGCointegration.clean_data(x, y, 'date', 'close')
# Separate training and testing sets
train_pct = 0.7
train_len = round(len(x) * train_pct)
idx_train = list(range(0, train_len))
idx_test = list(range(train_len, len(x)))
EG_Train = EGCointegration(x.iloc[idx_train, :], y.iloc[idx_train, :], 'date', 'close')
EG_Test = EGCointegration(x.iloc[idx_test, :], y.iloc[idx_test, :], 'date', 'close')
# Create action space
n_hist = list(np.arange(60, 601, 60))
n_forward = list(np.arange(120, 1201, 120))
trade_th = list(np.arange(1, 5.1, 1))
stop_loss = list(np.arange(1, 2.1, 0.5))
cl = list(np.arange(0.05, 0.11, 0.05))
actions = {'n_hist': n_hist,
'n_forward': n_forward,
'trade_th': trade_th,
'stop_loss': stop_loss,
'cl': cl}
n_action = int(np.product([len(actions[key]) for key in actions.keys()]))
# Create state space
transaction_cost = [0.001]
states = {'transaction_cost': transaction_cost}
n_state = len(states)
# Assign state and action spaces to config
config_train['StateSpaceState'] = states
config_train['ActionSpaceAction'] = actions
# Create and build network
one_hot = {'one_hot': {'func_name': 'one_hot',
'input_arg': 'indices',
'layer_para': {'indices': None,
'depth': n_state}}}
output_layer = {'final': {'func_name': 'fully_connected',
'input_arg': 'inputs',
'layer_para': {'inputs': None,
'num_outputs': n_action,
'biases_initializer': None,
'activation_fn': tf.nn.relu,
'weights_initializer': tf.ones_initializer()}}}
state_in = tf.placeholder(shape=[1], dtype=tf.int32)
N = basics.Network(state_in)
N.build_layers(one_hot)
N.add_layer_duplicates(output_layer, 1)
# Create learning object and perform training
RL_Train = RL.ContextualBandit(N, config_train, EG_Train)
sess = tf.Session()
RL_Train.process(sess, save=False, restore=False)
# Extract training results
action = RL_Train.recorder.record['NETWORK_ACTION']
reward = RL_Train.recorder.record['ENGINE_REWARD']
print(np.mean(reward))
df1 = pd.DataFrame()
df1['action'] = action
df1['reward'] = reward
mean_reward = df1.groupby('action').mean()
sns.distplot(mean_reward)
# Test by trading continuously
[opt_action] = sess.run([RL_Train.output], feed_dict=RL_Train.feed_dict)
opt_action = np.argmax(opt_action)
action_dict = RL_Train.action_space.convert(opt_action, 'index_to_dict')
indices = range(601, len(EG_Test.x) - 1200)
pnl = pd.DataFrame()
pnl['Time'] = EG_Test.timestamp
pnl['Trade_Profit'] = 0
pnl['Cost'] = 0
pnl['N_Trade'] = 0
import warnings
warnings.filterwarnings('ignore')
for i in indices:
if i % 100 == 0:
print(i)
EG_Test.process(index=i, transaction_cost=0.001, **action_dict)
trade_record = EG_Test.record
if (trade_record is not None) and (len(trade_record) > 0):
print('value at {}'.format(i))
trade_record = pd.DataFrame(trade_record)
trade_cost = trade_record.groupby('trade_time')['trade_cost'].sum()
close_cost = trade_record.groupby('close_time')['close_cost'].sum()
profit = trade_record.groupby('close_time')['profit'].sum()
open_pos = trade_record.groupby('trade_time')['long_short'].sum()
close_pos = trade_record.groupby('close_time')['long_short'].sum() * -1
pnl['Cost'].loc[pnl['Time'].isin(trade_cost.index)] += trade_cost.values
pnl['Cost'].loc[pnl['Time'].isin(close_cost.index)] += close_cost.values
pnl['Trade_Profit'].loc[pnl['Time'].isin(close_cost.index)] += profit.values
pnl['N_Trade'].loc[pnl['Time'].isin(trade_cost.index)] += open_pos.values
pnl['N_Trade'].loc[pnl['Time'].isin(close_cost.index)] += close_pos.values
warnings.filterwarnings(action='once')
# Plot the testing result
pnl['PnL'] = (pnl['Trade_Profit'] - pnl['Cost']).cumsum()
plt.plot(pnl['PnL'])
plt.plot(pnl['N_Trade'])
plt.plot(pnl['Time'], pnl['PnL'])
plt.plot(pnl['Time'], pnl['N_Trade'])
sess.close() | 0.488771 | 0.288043 |
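# Editorial sketch: reproduces the action-space enumeration behind n_action in the script
# above with itertools.product, and shows the idea behind mapping a single network output
# index back to a parameter dictionary. The real mapping lives in
# RL_Train.action_space.convert and may order keys differently (assumption).
import itertools
import numpy as np
actions = {'n_hist': list(np.arange(60, 601, 60)),
           'n_forward': list(np.arange(120, 1201, 120)),
           'trade_th': list(np.arange(1, 5.1, 1)),
           'stop_loss': list(np.arange(1, 2.1, 0.5)),
           'cl': list(np.arange(0.05, 0.11, 0.05))}
keys = list(actions.keys())
combos = list(itertools.product(*(actions[k] for k in keys)))
print(len(combos))                     # 3000, the same product the script computes
opt_action = 123                       # pretend this index came from the network
action_dict = dict(zip(keys, combos[opt_action]))
print(action_dict)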
import tensorflow as tf
from tensorflow.python.keras.layers import Flatten, Dense, Conv2D, Dropout, BatchNormalization
from abc import ABC, abstractmethod
import numpy as np
import warnings
def dqn_mask_loss(batch_data, y_pred):
# The target is defined only for the action that was taken during the replay, hence the loss is computed based
# only on this action's output
batch_actions = tf.dtypes.cast(batch_data[:, 1], tf.int32)
batch_true_q_values = batch_data[:, 0]
mask = tf.one_hot(batch_actions, depth=y_pred.shape[1], dtype=tf.bool, on_value=True, off_value=False)
batch_predicted_q_values = tf.boolean_mask(y_pred, mask)
return tf.keras.losses.Huber()(batch_true_q_values, batch_predicted_q_values)
class _Net(ABC):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(_Net, self).__init__()
self.n_actions = n_actions
self.trainable = trainable
self.encoding = encoding
self.n_players = n_players
self.input_shape = self.get_input_shape_from_encoding(input_shape, self.encoding, self.n_players)
self.model = self.init_model()
@staticmethod
def get_input_shape_from_encoding(input_shape, encoding, n_players):
if encoding == '2d':
if len(input_shape) == 2:
return input_shape
else:
raise ValueError("Encoding is '2d' but len(input_shape) != 2")
        elif encoding == '3d':
            if len(input_shape) == 2:
                warnings.warn("Encoding is '3d', but len(input_shape) == 2")
                new_input_shape = input_shape[0], input_shape[1], n_players
                warnings.warn("Adding third dimension from n_players, new input_shape={}".format(new_input_shape))
                return new_input_shape
            # already 3-dimensional: use the given shape as-is (previously fell through and returned None)
            return input_shape
@abstractmethod
def init_model(self):
raise NotImplementedError
@staticmethod
def process_input(x, encoding, n_players):
if encoding == '3d' and len(x.shape) != 4:
processed_input = np.zeros((x.shape[0], x.shape[1], x.shape[2], n_players))
for player_id in [1, 2]:
processed_input[:, :, :, player_id-1][np.nonzero(x==player_id)] = 1
return processed_input
else:
return x
class CFDense(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(CFDense, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Flatten(input_shape=self.input_shape))
model.add(Dense(24, activation='relu', trainable=self.trainable))
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='Adam', metrics=['accuracy'])
return model
class CFDense2(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(CFDense2, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Flatten(input_shape=self.input_shape))
model.add(Dense(84, activation='relu', trainable=self.trainable))
model.add(Dense(168, activation='relu', trainable=self.trainable))
model.add(Dense(64, activation='relu', trainable=self.trainable))
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='RMSprop', metrics=['accuracy'])
return model
class CFConv1(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
if encoding == '2d':
raise ValueError("Cannot instantiate CFConv1 net with encoding '2d'")
super(CFConv1, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Conv2D(3, kernel_size=3, trainable=self.trainable, input_shape=self.input_shape))
model.add(Dropout(0.5, trainable=self.trainable))
model.add(Flatten())
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='Adam', metrics=['accuracy'])
return model
class CFConv2(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
if encoding == '2d':
raise ValueError("Cannot instantiate CFConv2 net with encoding '2d'")
super(CFConv2, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Conv2D(4, kernel_size=3, padding='same', trainable=self.trainable, input_shape=self.input_shape))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Conv2D(16, kernel_size=3, padding='same', trainable=self.trainable))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Conv2D(32, kernel_size=3, padding='same', trainable=self.trainable))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Flatten())
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='RMSprop', metrics=['accuracy'])
return model | src/main/models/nets.py | import tensorflow as tf
from tensorflow.python.keras.layers import Flatten, Dense, Conv2D, Dropout, BatchNormalization
from abc import ABC, abstractmethod
import numpy as np
import warnings
def dqn_mask_loss(batch_data, y_pred):
# The target is defined only for the action that was taken during the replay, hence the loss is computed based
# only on this action's output
batch_actions = tf.dtypes.cast(batch_data[:, 1], tf.int32)
batch_true_q_values = batch_data[:, 0]
mask = tf.one_hot(batch_actions, depth=y_pred.shape[1], dtype=tf.bool, on_value=True, off_value=False)
batch_predicted_q_values = tf.boolean_mask(y_pred, mask)
return tf.keras.losses.Huber()(batch_true_q_values, batch_predicted_q_values)
class _Net(ABC):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(_Net, self).__init__()
self.n_actions = n_actions
self.trainable = trainable
self.encoding = encoding
self.n_players = n_players
self.input_shape = self.get_input_shape_from_encoding(input_shape, self.encoding, self.n_players)
self.model = self.init_model()
@staticmethod
def get_input_shape_from_encoding(input_shape, encoding, n_players):
if encoding == '2d':
if len(input_shape) == 2:
return input_shape
else:
raise ValueError("Encoding is '2d' but len(input_shape) != 2")
        elif encoding == '3d':
            if len(input_shape) == 2:
                warnings.warn("Encoding is '3d', but len(input_shape) == 2")
                new_input_shape = input_shape[0], input_shape[1], n_players
                warnings.warn("Adding third dimension from n_players, new input_shape={}".format(new_input_shape))
                return new_input_shape
            # already 3-dimensional: use the given shape as-is (previously fell through and returned None)
            return input_shape
@abstractmethod
def init_model(self):
raise NotImplementedError
@staticmethod
def process_input(x, encoding, n_players):
if encoding == '3d' and len(x.shape) != 4:
processed_input = np.zeros((x.shape[0], x.shape[1], x.shape[2], n_players))
for player_id in [1, 2]:
processed_input[:, :, :, player_id-1][np.nonzero(x==player_id)] = 1
return processed_input
else:
return x
class CFDense(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(CFDense, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Flatten(input_shape=self.input_shape))
model.add(Dense(24, activation='relu', trainable=self.trainable))
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='Adam', metrics=['accuracy'])
return model
class CFDense2(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
super(CFDense2, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Flatten(input_shape=self.input_shape))
model.add(Dense(84, activation='relu', trainable=self.trainable))
model.add(Dense(168, activation='relu', trainable=self.trainable))
model.add(Dense(64, activation='relu', trainable=self.trainable))
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='RMSprop', metrics=['accuracy'])
return model
class CFConv1(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
if encoding == '2d':
raise ValueError("Cannot instantiate CFConv1 net with encoding '2d'")
super(CFConv1, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Conv2D(3, kernel_size=3, trainable=self.trainable, input_shape=self.input_shape))
model.add(Dropout(0.5, trainable=self.trainable))
model.add(Flatten())
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='Adam', metrics=['accuracy'])
return model
class CFConv2(_Net):
def __init__(self, n_actions, input_shape, trainable, encoding, n_players):
if encoding == '2d':
raise ValueError("Cannot instantiate CFConv2 net with encoding '2d'")
super(CFConv2, self).__init__(n_actions, input_shape, trainable, encoding, n_players)
def init_model(self):
model = tf.keras.Sequential()
model.add(Conv2D(4, kernel_size=3, padding='same', trainable=self.trainable, input_shape=self.input_shape))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Conv2D(16, kernel_size=3, padding='same', trainable=self.trainable))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Conv2D(32, kernel_size=3, padding='same', trainable=self.trainable))
model.add(BatchNormalization(trainable=self.trainable))
model.add(Flatten())
model.add(Dense(self.n_actions, activation='softmax', trainable=self.trainable))
model.compile(loss=dqn_mask_loss, optimizer='RMSprop', metrics=['accuracy'])
return model | 0.90662 | 0.403596 |
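# Editorial usage sketch for the networks above: builds the small CFDense net for a 6x7
# board with 7 actions and runs one forward pass. The Connect-Four-style board size and
# random inputs are assumptions; only the constructor signature comes from the code above.
import numpy as np
from nets import CFDense
net = CFDense(n_actions=7, input_shape=(6, 7), trainable=True, encoding='2d', n_players=2)
boards = np.random.randint(0, 3, size=(4, 6, 7)).astype(np.float32)   # batch of 4 boards
x = CFDense.process_input(boards, encoding='2d', n_players=2)         # no-op for '2d'
q_values = net.model.predict(x)
print(q_values.shape)                                                  # (4, 7)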
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as tfk
import tensorflow.keras.layers as tfkl
import tensorflow.keras.models as tfkm
class GENERICorama(object):
"""Generic panorama generator.
"""
def __init__(self, dataset,
BATCH_SIZE = 64, test_size = 0.25,
latent_dim = 100):
dataset = np.asarray(dataset)
assert len(dataset.shape) == 4
assert dataset.shape[-1] == 3 #3 channels
        self.dimensions = dataset.shape[1:3]
        # spatial dims must be multiples of 4 and at least 8 (previously bare comparisons with no effect)
        assert self.dimensions[0] % 4 == 0
        assert self.dimensions[1] % 4 == 0
        assert self.dimensions[0] >= 8
        assert self.dimensions[1] >= 8
assert type(BATCH_SIZE) == int
self.BATCH_SIZE = BATCH_SIZE
assert type(latent_dim) == int
self.latent_dim = latent_dim
train, test = train_test_split(dataset, test_size = test_size)
self.train_dataset = tf.data.Dataset.from_tensor_slices(
train).batch(self.BATCH_SIZE)
self.test_dataset = tf.data.Dataset.from_tensor_slices(
test).batch(self.BATCH_SIZE)
#Attributes to track loss
        self.BEST_LOSS = 1e99  # start high so the first evaluated loss is saved as the best
self.reset_optimizer()
self.create_model()
def reset_optimizer(self, opt = tfk.optimizers.Adam):
"""Reset the optimizer attached to this generator.
Args:
opt (`tensorflow.keras.optimizers`): default is `Adam`
"""
self.optimizer = opt(1e-4)
return
def save_model(self, epoch, loss, recon, kl, save_path = "./saved_models/"):
"""Write logs and save the model"""
train_summary_writer = tf.summary.create_file_writer(save_path)
with train_summary_writer.as_default():
tf.summary.scalar("Total Loss", loss, step=epoch)
tf.summary.scalar("KL Divergence", kl, step=epoch)
tf.summary.scalar("Reconstruction Loss", recon, step=epoch)
# save model
if loss < self.BEST_LOSS: # pragma: no cover
self.BEST_LOSS = loss
if self.model is not None:
self.model.save(save_path+"BEST_MODEL")
if self.model is not None: # pragma: no cover
self.model.save(save_path)
def create_model(self):
"""Create the generative model.
"""
self.model = None
pass | PanoramAI/generic.py | import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as tfk
import tensorflow.keras.layers as tfkl
import tensorflow.keras.models as tfkm
class GENERICorama(object):
"""Generic panorama generator.
"""
def __init__(self, dataset,
BATCH_SIZE = 64, test_size = 0.25,
latent_dim = 100):
dataset = np.asarray(dataset)
assert len(dataset.shape) == 4
assert dataset.shape[-1] == 3 #3 channels
        self.dimensions = dataset.shape[1:3]
        # spatial dims must be multiples of 4 and at least 8 (previously bare comparisons with no effect)
        assert self.dimensions[0] % 4 == 0
        assert self.dimensions[1] % 4 == 0
        assert self.dimensions[0] >= 8
        assert self.dimensions[1] >= 8
assert type(BATCH_SIZE) == int
self.BATCH_SIZE = BATCH_SIZE
assert type(latent_dim) == int
self.latent_dim = latent_dim
train, test = train_test_split(dataset, test_size = test_size)
self.train_dataset = tf.data.Dataset.from_tensor_slices(
train).batch(self.BATCH_SIZE)
self.test_dataset = tf.data.Dataset.from_tensor_slices(
test).batch(self.BATCH_SIZE)
#Attributes to track loss
        self.BEST_LOSS = 1e99  # start high so the first evaluated loss is saved as the best
self.reset_optimizer()
self.create_model()
def reset_optimizer(self, opt = tfk.optimizers.Adam):
"""Reset the optimizer attached to this generator.
Args:
opt (`tensorflow.keras.optimizers`): default is `Adam`
"""
self.optimizer = opt(1e-4)
return
def save_model(self, epoch, loss, recon, kl, save_path = "./saved_models/"):
"""Write logs and save the model"""
train_summary_writer = tf.summary.create_file_writer(save_path)
with train_summary_writer.as_default():
tf.summary.scalar("Total Loss", loss, step=epoch)
tf.summary.scalar("KL Divergence", kl, step=epoch)
tf.summary.scalar("Reconstruction Loss", recon, step=epoch)
# save model
if loss < self.BEST_LOSS: # pragma: no cover
self.BEST_LOSS = loss
if self.model is not None:
self.model.save(save_path+"BEST_MODEL")
if self.model is not None: # pragma: no cover
self.model.save(save_path)
def create_model(self):
"""Create the generative model.
"""
self.model = None
pass | 0.856932 | 0.671363 |
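# Editorial usage sketch for GENERICorama above: instantiates the base class with a tiny
# random (N, H, W, 3) "panorama" dataset whose spatial dims pass the divisibility checks.
# create_model is a stub here, so self.model stays None; real subclasses override it.
# The array values and sizes are invented for illustration.
import numpy as np
from generic import GENERICorama
fake_panoramas = np.random.rand(32, 16, 64, 3).astype(np.float32)
gen = GENERICorama(fake_panoramas, BATCH_SIZE=8, test_size=0.25, latent_dim=50)
print(gen.dimensions)                  # (16, 64)
for batch in gen.train_dataset.take(1):
    print(batch.shape)                 # (8, 16, 64, 3)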
import pytest
import crummycm as ccm
from example_files.a import flat_a, nested_a, flat_a_pop_exact
ex_config = {
"flat_a_yml_0": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.yml", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_yml_0": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.yml", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_yml_1": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.yml", 1),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH](ends_with='_str')": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_yml_1": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.yml", 1),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH](ends_with='_str')*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH](ends_with='_str')*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_json": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.json", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_json": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.json", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_proto": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.proto", 0),
NotImplementedError,
),
"nested_a_proto": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.proto", 0),
NotImplementedError,
),
"flat_a_xml": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.xml", 0),
NotImplementedError,
),
"nested_a_xml": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.xml", 0),
NotImplementedError,
),
"flat_a_pop_exact_0": (
(
flat_a_pop_exact,
"tests/unit/template/basic/example_files/out_yml/flat_a_pop_exact.yml",
0,
),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"@:[some_num]*": "[ValuePlaceholder](0)*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
}
def call(temp):
raw_dict = ccm.template(temp[0], temp[1], temp[2])
return raw_dict
@pytest.mark.parametrize(
"config,expected", ex_config.values(), ids=list(ex_config.keys())
)
def test_basic_parse(config, expected):
"""test whether the user input can be parsed to a dict"""
if isinstance(expected, dict):
raw_dict = call(config)
assert expected == raw_dict
elif issubclass(expected, ValueError):
with pytest.raises(ValueError):
raw_dict = call(config)
elif issubclass(expected, FileNotFoundError):
with pytest.raises(FileNotFoundError):
raw_dict = call(config)
elif issubclass(expected, TypeError):
with pytest.raises(TypeError):
raw_dict = call(config)
elif issubclass(expected, KeyError):
with pytest.raises(KeyError):
raw_dict = call(config)
elif issubclass(expected, NotImplementedError):
with pytest.raises(NotImplementedError):
raw_dict = call(config)
else:
raise ValueError(f"expected {expected} not accounted for") | tests/unit/template/basic/test_basic_template.py | import pytest
import crummycm as ccm
from example_files.a import flat_a, nested_a, flat_a_pop_exact
ex_config = {
"flat_a_yml_0": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.yml", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_yml_0": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.yml", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_yml_1": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.yml", 1),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH](ends_with='_str')": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_yml_1": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.yml", 1),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH](ends_with='_str')*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH](ends_with='_str')*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_json": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.json", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
"nested_a_json": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.json", 0),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
"nested_md": {
"kd_num": "<class 'int'>[Numeric]",
"[KPH]^*": "[Text]*",
"[KPH]*": "[ValuePlaceholder]*",
"wild_card": "[ValuePlaceholder]*",
},
}
},
),
"flat_a_proto": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.proto", 0),
NotImplementedError,
),
"nested_a_proto": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.proto", 0),
NotImplementedError,
),
"flat_a_xml": (
(flat_a, "tests/unit/template/basic/example_files/out_yml/flat_a.xml", 0),
NotImplementedError,
),
"nested_a_xml": (
(nested_a, "tests/unit/template/basic/example_files/out_yml/nested_a.xml", 0),
NotImplementedError,
),
"flat_a_pop_exact_0": (
(
flat_a_pop_exact,
"tests/unit/template/basic/example_files/out_yml/flat_a_pop_exact.yml",
0,
),
{
"my_mixed": {
"kd_num": "<class 'int'>[Numeric]*",
"[KPH]^": "[Text](DIESEL)*",
"@:[some_num]*": "[ValuePlaceholder](0)*",
"wild_card": "[ValuePlaceholder]*",
}
},
),
}
def call(temp):
raw_dict = ccm.template(temp[0], temp[1], temp[2])
return raw_dict
@pytest.mark.parametrize(
"config,expected", ex_config.values(), ids=list(ex_config.keys())
)
def test_basic_parse(config, expected):
"""test whether the user input can be parsed to a dict"""
if isinstance(expected, dict):
raw_dict = call(config)
assert expected == raw_dict
elif issubclass(expected, ValueError):
with pytest.raises(ValueError):
raw_dict = call(config)
elif issubclass(expected, FileNotFoundError):
with pytest.raises(FileNotFoundError):
raw_dict = call(config)
elif issubclass(expected, TypeError):
with pytest.raises(TypeError):
raw_dict = call(config)
elif issubclass(expected, KeyError):
with pytest.raises(KeyError):
raw_dict = call(config)
elif issubclass(expected, NotImplementedError):
with pytest.raises(NotImplementedError):
raw_dict = call(config)
else:
raise ValueError(f"expected {expected} not accounted for") | 0.407569 | 0.391988 |
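# Editorial sketch: the long if/elif chain in test_basic_parse above can be collapsed,
# since every non-dict expectation is simply an exception class handed to pytest.raises.
# This helper is an illustration of that pattern, not part of the test module.
import pytest
def check_expected(config, expected, call):
    if isinstance(expected, dict):
        assert call(config) == expected
    elif isinstance(expected, type) and issubclass(expected, Exception):
        with pytest.raises(expected):
            call(config)
    else:
        raise ValueError(f"expected {expected} not accounted for")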
import json
def merge_base_poly():
details = []
with open('polyphone_final.json', 'r', encoding='utf-8') as words:
contents = json.load(words)
for single in contents:
details.append(single)
print(len(details))
results = []
with open('char_base.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for poly in char_json:
char = poly['char']
detail = next((item for item in details if item['char'] == char), False)
if detail:
poly['pinyin'] = detail['pinyin']
else:
poly['pinyin'] = [poly['pinyin']]
results.append(poly)
# results.sort(key=lambda x: x['frequency'])
print(len(results))
with open('char_base_poly.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(results, ensure_ascii=False))
def merge_common_base():
details = []
with open('char_common.json', 'r', encoding='utf-8') as words:
contents = json.load(words)
for single in contents:
details.append(single)
# 3500
print(len(details))
results = []
with open('char_base_poly.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for poly in char_json:
char = poly['char']
detail = next((item for item in details if item['char'] == char), False)
if detail:
poly['frequency'] = detail['frequency']
else:
poly['frequency'] = 3
results.append(poly)
# 16146
print(len(results))
with open('char_base_common.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(results, ensure_ascii=False))
def sort_char_base():
results0 = []
results1 = []
results2 = []
results3 = []
with open('char_base_common.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for char in char_json:
freq = char['frequency']
char['strokes'] = int(char['strokes'])
if freq == 0:
results0.append(char)
elif freq == 1:
results1.append(char)
elif freq == 2:
results2.append(char)
else:
results3.append(char)
results0.sort(key=lambda x: x['strokes'])
print(len(results0))
results1.sort(key=lambda x: x['strokes'])
print(len(results1))
results2.sort(key=lambda x: x['strokes'])
print(len(results2))
results3.sort(key=lambda x: x['strokes'])
print(len(results3))
results = []
results.extend(results0)
results.extend(results1)
results.extend(results2)
results.extend(results3)
chars = []
idx = 0
for result in results:
idx = idx + 1
newchar = {'index': idx}
newchar.update(result)
chars.append(newchar)
print(len(results))
with open('char_base_final.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(chars, ensure_ascii=False))
if __name__ == '__main__':
# merge_base_poly()
# merge_common_base()
sort_char_base() | scripts/hanzi_base.py | import json
def merge_base_poly():
details = []
with open('polyphone_final.json', 'r', encoding='utf-8') as words:
contents = json.load(words)
for single in contents:
details.append(single)
print(len(details))
results = []
with open('char_base.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for poly in char_json:
char = poly['char']
detail = next((item for item in details if item['char'] == char), False)
if detail:
poly['pinyin'] = detail['pinyin']
else:
poly['pinyin'] = [poly['pinyin']]
results.append(poly)
# results.sort(key=lambda x: x['frequency'])
print(len(results))
with open('char_base_poly.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(results, ensure_ascii=False))
def merge_common_base():
details = []
with open('char_common.json', 'r', encoding='utf-8') as words:
contents = json.load(words)
for single in contents:
details.append(single)
# 3500
print(len(details))
results = []
with open('char_base_poly.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for poly in char_json:
char = poly['char']
detail = next((item for item in details if item['char'] == char), False)
if detail:
poly['frequency'] = detail['frequency']
else:
poly['frequency'] = 3
results.append(poly)
# 16146
print(len(results))
with open('char_base_common.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(results, ensure_ascii=False))
def sort_char_base():
results0 = []
results1 = []
results2 = []
results3 = []
with open('char_base_common.json', 'r', encoding='utf-8') as common:
char_json = json.load(common)
for char in char_json:
freq = char['frequency']
char['strokes'] = int(char['strokes'])
if freq == 0:
results0.append(char)
elif freq == 1:
results1.append(char)
elif freq == 2:
results2.append(char)
else:
results3.append(char)
results0.sort(key=lambda x: x['strokes'])
print(len(results0))
results1.sort(key=lambda x: x['strokes'])
print(len(results1))
results2.sort(key=lambda x: x['strokes'])
print(len(results2))
results3.sort(key=lambda x: x['strokes'])
print(len(results3))
results = []
results.extend(results0)
results.extend(results1)
results.extend(results2)
results.extend(results3)
chars = []
idx = 0
for result in results:
idx = idx + 1
newchar = {'index': idx}
newchar.update(result)
chars.append(newchar)
print(len(results))
with open('char_base_final.json', 'w', encoding='utf-8') as poly:
poly.write(json.dumps(chars, ensure_ascii=False))
if __name__ == '__main__':
# merge_base_poly()
# merge_common_base()
sort_char_base() | 0.114814 | 0.101679 |
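# Editorial usage sketch for hanzi_base.py above: the merge helpers read and write fixed
# JSON file names in the working directory. This writes two tiny input files and runs
# merge_base_poly(); the characters and pinyin are made-up examples, and only the keys the
# script actually uses ('char', 'pinyin') are assumed meaningful.
import json
from hanzi_base import merge_base_poly
with open('polyphone_final.json', 'w', encoding='utf-8') as f:
    json.dump([{'char': '行', 'pinyin': ['xíng', 'háng']}], f, ensure_ascii=False)
with open('char_base.json', 'w', encoding='utf-8') as f:
    json.dump([{'char': '行', 'pinyin': 'xíng'}, {'char': '好', 'pinyin': 'hǎo'}], f, ensure_ascii=False)
merge_base_poly()            # writes char_base_poly.json with pinyin lists merged in
with open('char_base_poly.json', encoding='utf-8') as f:
    print(json.load(f))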
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, logging
from pprint import pprint
from utils import config as cfg
if cfg.ROOT_DIR.startswith('/home'):
import torch
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # set tensorflow logger to WARNING level
import tensorflow as tf
import torch
class BaseSolver(object):
root_logger = logging.getLogger('solver')
def logger(self, suffix):
return self.root_logger.getChild(suffix)
def clear_folder(self):
"""clear weight and log dir"""
logger = self.logger('clear_folder')
for f in os.listdir(self.log_dir):
logger.warning('Deleted log file ' + f)
os.remove(os.path.join(self.log_dir, f))
for f in os.listdir(self.weight_dir):
logger.warning('Deleted weight file ' + f)
os.remove(os.path.join(self.weight_dir, f))
def snapshot(self, sess, iter, filenames = None):
"""save checkpoint"""
if not os.path.exists(self.weight_dir):
os.makedirs(self.weight_dir)
if filenames is None:
filename = 'snapshot_epoch_{}.ckpt'.format(iter)
else:
filename = filenames
pth = os.path.join(self.weight_dir, filename)
self.saver.save(sess, pth)
self.logger('snapshot').info('Wrote snapshot to: {}'.format(filename))
def initialize(self, sess):
"""weight initialization"""
logger = self.logger('initialize')
if self.trained_weight is None:
sess.run(tf.global_variables_initializer())
else:
sess.run(tf.global_variables_initializer())
logger.info('Restoring whole model snapshots from {:s}'.format(self.trained_weight))
saver_restore = tf.train.Saver()
saver_restore.restore(sess, self.trained_weight)
def set_lr_decay(self, global_step):
if self.args.lr_decay_type == 'no':
lr = self.args.lr
elif self.args.lr_decay_type == 'exp':
decay_stepsize = len(self.train_dataloader)*self.args.lr_decay_step
lr = tf.train.exponential_decay(
self.args.lr,
global_step,
decay_stepsize,
self.args.lr_decay_rate,
staircase=True)
elif self.args.lr_decay_type == 'cos':
decay_stepsize = len(self.train_dataloader)*self.args.lr_decay_step
lr = tf.train.cosine_decay_restarts(
self.args.lr,
global_step,
decay_stepsize,
t_mul=2.0,
m_mul=0.8,
alpha=0.1
            )
        else:
            raise ValueError('Unsupported lr decay type: {}'.format(self.args.lr_decay_type))
        return lr
    def set_optimizer(self, lr):
        logger = self.logger('set_optimizer')
if self.args.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
elif self.args.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(lr, 0.9)
logger.info('Using momentum optimizer')
elif self.args.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(lr)
logger.info('Using Adam optimizer')
elif self.args.optimizer == 'adamw':
optimizer = tf.contrib.opt.AdamWOptimizer(5e-5, lr)
logger.info('Using AdamW optimizer')
        elif self.args.optimizer == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(lr)
            logger.info('Using RMSProp optimizer')
        else:
            raise ValueError('Unsupported optimizer: {}'.format(self.args.optimizer))
return optimizer | utils/base_solver.py | 0.529993 | 0.075312 |
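BaseSolver only selects the learning-rate schedule and optimizer; the training op itself is expected to come from a subclass. A minimal sketch of how that wiring could look in TF1 graph mode (the loss tensor, the args namespace and train_dataloader are assumptions, not part of the file above):

# Hypothetical subclass; assumes self.args and self.train_dataloader are set up elsewhere
import tensorflow as tf

class MySolver(BaseSolver):
    def build_train_op(self, loss):
        global_step = tf.train.get_or_create_global_step()
        lr = self.set_lr_decay(global_step)      # constant float or decayed tensor
        optimizer = self.set_optimizer(lr)       # chosen from self.args.optimizer
        return optimizer.minimize(loss, global_step=global_step)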
from time import sleep
def coolCafe():
print("Welcome to Cathy's Café!")
for key in cafemenu_options.keys():
print(key, '--', cafemenu_options[key])
runCafeOptions()
cafemenu_options = {
1: "Coffee",
2: "Cake",
3: "Tea",
4: "Exit",
}
def coffee():
print("One hot cup of coffee coming up!")
def progress(percent=0, width=30):
# The number of hashes to show is based on the percent passed in. The
# number of blanks is whatever space is left after.
hashes = width * percent // 100
blanks = width - hashes
print('\r[', hashes * '#', blanks * ' ', ']', f' {percent:.0f}%', sep='',
end='', flush=True)
print('This will take a moment')
for i in range(101):
progress(i)
sleep(0.1)
# Newline so command prompt isn't on the same line
print()
print("Your coffee is ready! Enjoy!")
exit()
def cake():
print("I'll let you bargain for the cake. How much do you want to pay?")
x = 0
y = "cake"
print("You have: {}".format(x))
print("Tracy has: {}".format(y))
x = int(input("Your offer:"))
if x < 25:
print("No way. Goodbye")
exit()
else:
temp = x
x = y
y = temp
print("You get: {}".format(x))
print("Tracy gets: {}".format(y), "dollars")
exit()
def tea():
print("")
def runCafeOptions():
while True:
try:
option = int(input("What would you like to get?"))
if option == 1:
coffee()
elif option == 2:
cake()
elif option == 3:
tea()
# Exit menu
elif option == 4:
print('Exiting! Thank you! Good Bye...')
exit() # exit() ends the whole program, which also stops the infinite while loop
else:
print('Invalid option. Please enter a number between 1 and 4.')
except ValueError:
print('Invalid input. Please enter an integer input.') | tech_talks/cafe.py | 0.19544 | 0.223441 |
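The carriage-return progress bar buried inside coffee() is the one broadly reusable idea in this script; pulled out on its own it looks like this (function and variable names here are mine, not from the original):

# Standalone sketch of the single-line progress bar pattern used above
from time import sleep

def progress(percent, width=30):
    filled = width * percent // 100
    bar = '#' * filled + ' ' * (width - filled)
    print(f'\r[{bar}] {percent:.0f}%', end='', flush=True)

for pct in range(101):
    progress(pct)
    sleep(0.01)
print()  # move to a fresh line once the bar is full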
import redis
from rq import Queue, Connection
from flask import render_template, Blueprint, jsonify, request, current_app
from project.server.main.tasks import create_task_classify, create_task_calibrate
main_blueprint = Blueprint("main", __name__,)
from project.server.main.logger import get_logger
logger = get_logger(__name__)
@main_blueprint.route("/", methods=["GET"])
def home():
return render_template("main/home.html")
@main_blueprint.route("/classify", methods=["POST"])
def run_task_classify():
args = request.get_json(force=True)
with Connection(redis.from_url(current_app.config["REDIS_URL"])):
q = Queue("tagger", default_timeout=21600)
task = q.enqueue(create_task_classify, args)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@main_blueprint.route("/classify_one", methods=["POST"])
def run_task_classify_one():
args = request.get_json(force=True)
response_object = create_task_classify(args)
return jsonify(response_object), 202
@main_blueprint.route("/calibrate", methods=["POST"])
def run_task_calibrate():
args = request.get_json(force=True)
logger.debug(args)
with Connection(redis.from_url(current_app.config["REDIS_URL"])):
q = Queue("tagger", default_timeout=216000)
task = q.enqueue(create_task_calibrate, args)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@main_blueprint.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
with Connection(redis.from_url(current_app.config["REDIS_URL"])):
q = Queue("tagger")
task = q.fetch_job(task_id)
if task:
response_object = {
"status": "success",
"data": {
"task_id": task.get_id(),
"task_status": task.get_status(),
"task_result": task.result,
},
}
else:
response_object = {"status": "error"}
return jsonify(response_object) | project/server/main/views.py | 0.219923 | 0.09122 |
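From a client's point of view, the flow implied by these routes is: POST the payload, read data.task_id from the 202 response, then poll /tasks/<task_id> until the job completes. A hedged sketch using requests (the base URL and payload keys are placeholders, not documented by the service):

# Hypothetical client; adjust BASE and the payload to the real deployment
import time
import requests

BASE = 'http://localhost:5000'
resp = requests.post(f'{BASE}/classify', json={'texts': ['example document']})
task_id = resp.json()['data']['task_id']

while True:
    status = requests.get(f'{BASE}/tasks/{task_id}').json()
    if status.get('data', {}).get('task_status') in ('finished', 'failed'):
        print(status['data']['task_result'])
        break
    time.sleep(2)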
import numpy as np
import matplotlib.pyplot as plt  # used by the plot helpers below but missing from the original imports
np.seterr(all='ignore')
# np.set_printoptions(threshold=sys.maxsize)
class Power(object):
"""
Container for power spectra for each component, with any shape
Attributes
----------
c11 : :class:`~numpy.ndarray`
Power spectral density for component 1 (any shape)
c22 : :class:`~numpy.ndarray`
Power spectral density for component 2 (any shape)
cZZ : :class:`~numpy.ndarray`
Power spectral density for component Z (any shape)
cPP : :class:`~numpy.ndarray`
Power spectral density for component P (any shape)
"""
def __init__(self, c11=None, c22=None, cZZ=None, cPP=None):
self.c11 = c11
self.c22 = c22
self.cZZ = cZZ
self.cPP = cPP
@staticmethod
def plot_one(f, pp, name='', fig=None, fig_grid=(1, 1), plot_spot=(0, 0),
xlabel=True, ylabel=True):
"""Plot one cross-power spectra"""
if not fig:
fig = plt.gcf()
# Plot amplitude
ax = plt.subplot2grid((3*fig_grid[0], 1*fig_grid[1]),
(3*plot_spot[0]+0, plot_spot[1]+0),
rowspan=2)
ax.loglog(f, np.abs(pp))
        ax.set_ylim(0, 1)
        if ylabel:
            ax.set_ylabel(f'{name} PSD')
        # Plot phase
        ax = plt.subplot2grid((3*fig_grid[0], 1*fig_grid[1]),
                              (3*plot_spot[0]+2, plot_spot[1]+0))
        ax.semilogx(f, np.degrees(np.angle(pp)))
        ax.set_ylim(-180, 180)
if ylabel:
ax.set_ylabel('Phase(deg)')
if xlabel:
ax.set_xlabel('Frequency (Hz)')
def plot(self, f, fig=None):
"""
Plot all power spectra
Grid = Z1 Z2 ZP
12 1P
2P
"""
if not fig:
fig = plt.gcf()
        if self.c11 is not None:
            self.plot_one(f, self.c11, '1', fig, (2, 2), (0, 0), xlabel=False)
        if self.c22 is not None:
            self.plot_one(f, self.c22, '2', fig, (2, 2), (0, 1), xlabel=False,
                          ylabel=False)
        if self.cZZ is not None:
            self.plot_one(f, self.cZZ, '3', fig, (2, 2), (1, 0))
        if self.cPP is not None:
            self.plot_one(f, self.cPP, '4', fig, (2, 2), (1, 1), ylabel=False)
plt.show()
class Cross(object):
"""
Container for cross-power spectra for each component pairs, with any shape
Attributes
----------
c12 : :class:`~numpy.ndarray`
Cross-power spectral density for components 1 and 2 (any shape)
c1Z : :class:`~numpy.ndarray`
Cross-power spectral density for components 1 and Z (any shape)
c1P : :class:`~numpy.ndarray`
Cross-power spectral density for components 1 and P (any shape)
c2Z : :class:`~numpy.ndarray`
Cross-power spectral density for components 2 and Z (any shape)
c2P : :class:`~numpy.ndarray`
Cross-power spectral density for components 2 and P (any shape)
cZP : :class:`~numpy.ndarray`
Cross-power spectral density for components Z and P (any shape)
"""
def __init__(self, c12=None, c1Z=None, c1P=None, c2Z=None, c2P=None,
cZP=None):
self.c12 = c12
self.c1Z = c1Z
self.c1P = c1P
self.c2Z = c2Z
self.c2P = c2P
self.cZP = cZP
@staticmethod
    def plot_one(f, cp, name='', fig=None, fig_grid=(1, 1), plot_spot=(0, 0),
                 xlabel=True, ylabel=True):
        """Plot one cross-power spectrum"""
if not fig:
fig = plt.gcf()
# Plot amplitude
ax = plt.subplot2grid((3*fig_grid[0], 1*fig_grid[1]),
(3*plot_spot[0]+0, plot_spot[1]+0),
rowspan=2)
ax.semilogx(f, np.abs(cp))
        ax.set_ylim(0, 1)
if ylabel:
ax.set_ylabel(f'{name} cross-spectra')
# Plot phase
ax = plt.subplot2grid((3*fig_grid[0], 1*fig_grid[1]),
(3*plot_spot[0]+2, plot_spot[1]+0))
ax.semilogx(f, np.degrees(np.angle(cp)))
        ax.set_ylim(-180, 180)
if ylabel:
ax.set_ylabel('Phase(deg)')
if xlabel:
ax.set_xlabel('Frequency (Hz)')
def plot(self, f, fig=None):
"""
Plot all cross-power spectra
Grid = Z1 Z2 ZP
12 1P
2P
"""
if not fig:
fig = plt.gcf()
        if self.c1Z is not None:
            self.plot_one(f, self.c1Z, 'Z-1', fig, (3, 3), (0, 0))
        if self.c2Z is not None:
            self.plot_one(f, self.c2Z, 'Z-2', fig, (3, 3), (0, 1),
                          xlabel=False, ylabel=False)
        if self.cZP is not None:
            self.plot_one(f, self.cZP, 'Z-P', fig, (3, 3), (0, 2),
                          xlabel=False, ylabel=False)
        if self.c12 is not None:
            self.plot_one(f, self.c12, '1-2', fig, (3, 3), (1, 1))
        if self.c1P is not None:
            self.plot_one(f, self.c1P, '1-P', fig, (3, 3), (1, 2),
                          xlabel=False, ylabel=False)
        if self.c2P is not None:
            self.plot_one(f, self.c2P, '2-P', fig, (3, 3), (2, 2))
plt.show()
class Rotation(object):
"""
Container for rotated spectra, with any shape
Attributes
----------
cHH : :class:`~numpy.ndarray`
Power spectral density for rotated horizontal component H (any shape)
cHZ : :class:`~numpy.ndarray`
Cross-power spectral density for components H and Z (any shape)
cHP : :class:`~numpy.ndarray`
Cross-power spectral density for components H and P (any shape)
coh : :class:`~numpy.ndarray`
Coherence between horizontal components
ph : :class:`~numpy.ndarray`
Phase of cross-power spectrum between horizontal components
tilt : float
Angle (azimuth) of tilt axis
coh_value : float
Maximum coherence
phase_value : float
Phase at maximum coherence
direc : :class:`~numpy.ndarray`
Directions for which the coherence is calculated
"""
def __init__(self, cHH=None, cHZ=None, cHP=None, coh=None, ph=None,
tilt=None, coh_value=None, phase_value=None, direc=None):
self.cHH = cHH
self.cHZ = cHZ
self.cHP = cHP
self.coh = coh
self.ph = ph
self.tilt = tilt
self.coh_value = coh_value
self.phase_value = phase_value
self.direc = direc | obstools/atacr/classes/containers.py | 0.840259 | 0.615983 |
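These containers simply hold whatever spectra you hand them; as a minimal sketch (not the package's own processing chain), one-sided spectra for a Power instance could be built from raw traces like this:

# Hypothetical construction of a Power container from synthetic traces
import numpy as np

fs = 5.0                       # sampling rate in Hz, arbitrary for the example
n = 2048
win = np.hanning(n)
tr1, tr2, trZ, trP = (np.random.randn(n) for _ in range(4))

def psd(x):
    ft = np.fft.rfft(x * win)
    return np.abs(ft) ** 2 / (fs * n)   # crude one-sided PSD estimate

f = np.fft.rfftfreq(n, d=1.0 / fs)
power = Power(c11=psd(tr1), c22=psd(tr2), cZZ=psd(trZ), cPP=psd(trP))
# power.plot(f) then draws the 2x2 grid of amplitude/phase panels defined above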
from flask import request
import os
from flask_wtf import FlaskForm
from wtforms import TextField, BooleanField, TextAreaField, SubmitField
import pandas as pd
from flask_mail import Mail, Message
import secrets
import json
import pandas as pd
import numpy as np
from utils import get_data_utils as get_data_utils
from utils import visualize_data_utils as visualize_data_utils
class ContactForm(FlaskForm):
name = TextField("Name")
email = TextField("Email")
subject = TextField("Subject")
message = TextAreaField("Message")
submit = SubmitField("Send")
def get_skill_content():
'''
Function to get the skills from the database
Args: lang = str; specifies language selected by user
Returns: skill_list
'''
skill_dict = {
"Python": [["Science Stack",5,'NumPy, pandas, SciPy, scikit learn, cvxpy, sklearn,HyperOpt'],
["Data Visualization",4,'D3, Plotly, flask, bootstrap'],
["Deployment",4,'PyInstaller, Docker, GCP, Bash'],
["OOP and Procedural",3,'Modeling, Inheritance, Class, jupyter notebooks'],
["Other Languages",4,'R, MATLAB, javascript, html, Spark, Hadoop']],
"ETL": [["Data Wrangling",5,'PCA, Standardization, Normalization'],
["Pipeline Structure",5,'Source, Target, Organization, Optimization'],
["SQL",4,'sqlite , NoSQL, PostgresSQL'],
["API",4,'REST, request, HTTP'],
["FTP",5,'bulk transport']],
"Analytics": [["Probability Distributions",4,'Bernoulli, Binomial, Exponential, Geometric, Memoryless, Normal, Poisson, Weibull'],
['Design of Experiment',5,'A/B testing, ANOVA, Factorial, Multi-armed bandit, Blocking, Balanced'],
["Data",4,'Attribute, Categorical, Feature, PCA, Predictor, Quantitative, Scaling, Structured/Unstructured, Time Series'],
["Variable Selection",4,'Backward Elimination, Forward Selection, Elastic Net, Overfitting, Ridge Regression, Stepwise Regression, Lasso Regression'],
["Model Quality",4,'AIC, BIC, Confusion Matrices, k-fold cross-validation, MLE']],
"Data Science": [["Unsupervised Machine Learning",4,'Clustering (kmeans), Deep Learning, Neural Network (CNN & DNN)'],
["Supervised Machine Learning",4,'Classification (KNN,SVM),Regression'],
["Regression",4,'AUC, ROC, R-Squared, Bayesian, Box-Cox, CART, Classification Tree, Linear, Logistic, KNN regression, Spline Regression'],
["Time Series Models",4,'ARIMA, Seasonality, Exponential Smoothing, GARCH, Holt-Winters, Moving Average, Trend, Cycles'],
["Deterministic Optimization",4,'Convex/Concave, Greedy algorithm,Integer program,NP hard,Louvain algorithm,HyperOpt']],
}
return skill_dict
def send_me_email(app,name,email,subject,message):
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = secrets.MAIL_USERNAME
app.config['MAIL_PASSWORD'] = <PASSWORD>
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
msg = Message(subject, sender = email, recipients = ['<EMAIL>'])
msg.body = message + "\nSender's Name: " + name + "\nSender's e-mail: " + email
mail.send(msg)
thanks_response = "Thanks for connecting "+str(name)+"!"
return thanks_response
def send_user_email(app,name,email,subject,message):
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = secrets.MAIL_USERNAME
app.config['MAIL_PASSWORD'] = <PASSWORD>.MAIL_PASSWORD
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
msg = Message(subject, sender = email, recipients = [email])
msg.body = "Thanks for connecting, " + name +"!"
mail.send(msg)
def get_portfolio_content():
'''
Function to get the portfolio projects from the database
Args: lang = str; specifies language selected by user
Returns: zipped
'''
#db_row = check_language('portfolio', lang)
db_row = []
# instantiate a list to save all projects in
project_list = []
# iterate through all the projects in the database
for project in db_row:
one_project = []
# add the title, description, skills and image name
one_project.extend(project[1:5])
# instantiate list for links
links = []
if project[6] != 'NaN':
links.append(["Blog Post", project[6]])
if project[5] != 'NaN':
links.append(["Code", project[5]])
one_project.append(links)
# assign single project to entire project_list
project_list.append(one_project)
# create list of lists that contains pairs of projects
if len(project_list) % 2 == 0:
pass
else:
project_list.append(['placeholder'])
iterator = iter(project_list)
zipped = zip(iterator, iterator)
return zipped
def get_garmin_demo_data():
data_path = 'static/demo_data/Month.csv'
# reads in data path of csv to dataframe
df = pd.read_csv(data_path)
# subset and rename cols
df = get_data_utils.rename_cols(df)
# remove units from df
df = get_data_utils.remove_units(df)
# convert astype for each column to appropriate type
df = get_data_utils.convert_type(df)
df = df.drop(columns = ['Time_Period'])
# combo data
#df_combo = visualize_data_utils.create_combo_chart(df)
#df_HR_cadence = visualize_data_utils.create_combo_HR_cadence(df)
#df_speed_distance = visualize_data_utils.create_combo_speed_distance(df)
df_combo_avg_distance = visualize_data_utils.create_combo_chart_Avgerage_Distance(df)
return df
def get_network_graph_data():
data_path = 'static/demo_data/board_games.csv'
df = pd.read_csv(data_path)
print(df)
ls = df.values.tolist()
print(ls)
for i in ls:
print(i,',')
df_json = df.to_json()
print(df_json)
return df
def load_JSON(file_path):
with open(file_path, 'r') as file:
data = file.read()
return data | helper.py | 0.522202 | 0.202759 |
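The two mail helpers above pull credentials from a local secrets module; a common alternative (shown purely as a sketch, not what this project does) is to read them from environment variables so no password lives in the repository:

# Hypothetical environment-based Flask-Mail configuration; variable names are my own
import os

def configure_mail(app):
    app.config['MAIL_SERVER'] = 'smtp.gmail.com'
    app.config['MAIL_PORT'] = 465
    app.config['MAIL_USE_TLS'] = False
    app.config['MAIL_USE_SSL'] = True
    app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
    app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')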
import lief
import pathlib
from utils import get_sample
def test_exports_trie():
target = lief.parse(get_sample('MachO/MachO64_x86-64_binary_exports-trie-LLVM.bin'))
assert target.has_dyld_info
exports = target.dyld_info.exports
assert len(exports) == 6
assert exports[0].address == 0
assert exports[0].symbol.name == "_malloc"
assert exports[1].address == 0
assert exports[1].symbol.name == "_myfree"
assert exports[2].address == 0xf70
assert exports[2].symbol.name == "_myWeak"
assert exports[3].address == 0x1018
assert exports[3].symbol.name == "_myTLV"
assert exports[4].address == 0x12345678
assert exports[4].symbol.name == "_myAbs"
assert exports[5].address == 0xf60
assert exports[5].symbol.name == "_foo"
def test_bind():
target = lief.parse(get_sample('MachO/MachO64_x86-64_binary_bind-LLVM.bin'))
assert target.has_dyld_info
bindings = target.dyld_info.bindings
assert len(bindings) == 7
assert bindings[0].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[0].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[0].address == 0x1028
assert bindings[0].symbol.name == "_any"
assert bindings[0].segment.name == "__DATA"
assert bindings[0].library_ordinal == -2
assert bindings[1].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[1].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[1].address == 0x1020
assert bindings[1].symbol.name == "_fromApp"
assert bindings[1].segment.name == "__DATA"
assert bindings[1].library_ordinal == -1
assert bindings[2].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[2].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[2].address == 0x1018
assert bindings[2].symbol.name == "_myfunc"
assert bindings[2].segment.name == "__DATA"
assert bindings[2].library_ordinal == 0
assert bindings[3].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[3].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[3].address == 0x1000
assert bindings[3].symbol.name == "_foo"
assert bindings[3].segment.name == "__DATA"
assert bindings[3].library.name == "libfoo.dylib"
assert bindings[4].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[4].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[4].address == 0x1008
assert bindings[4].symbol.name == "_bar"
assert bindings[4].segment.name == "__DATA"
assert bindings[4].library.name == "libbar.dylib"
assert bindings[5].binding_class == lief.MachO.BINDING_CLASS.STANDARD
assert bindings[5].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[5].address == 0x1010
assert bindings[5].symbol.name == "_malloc"
assert bindings[5].segment.name == "__DATA"
assert bindings[5].library.name == "/usr/lib/libSystem.B.dylib"
# From Weak bind
assert bindings[6].binding_class == lief.MachO.BINDING_CLASS.WEAK
assert bindings[6].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[6].address == 0x1000
assert bindings[6].symbol.name == "_foo"
assert bindings[6].segment.name == "__DATA"
def test_lazy_bind():
target = lief.parse(get_sample('MachO/MachO64_x86-64_binary_lazy-bind-LLVM.bin'))
assert target.has_dyld_info
bindings = list(target.dyld_info.bindings)[1:] # Skip the 1st one (Standard one)
assert len(bindings) == 3
assert bindings[0].binding_class == lief.MachO.BINDING_CLASS.LAZY
assert bindings[0].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[0].address == 0x100001010
assert bindings[0].symbol.name == "_foo"
assert bindings[0].segment.name == "__DATA"
assert bindings[0].library.name == "libfoo.dylib"
assert bindings[1].binding_class == lief.MachO.BINDING_CLASS.LAZY
assert bindings[1].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[1].address == 0x100001018
assert bindings[1].symbol.name == "_bar"
assert bindings[1].segment.name == "__DATA"
assert bindings[1].library.name == "libbar.dylib"
assert bindings[2].binding_class == lief.MachO.BINDING_CLASS.LAZY
assert bindings[2].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[2].address == 0x100001020
assert bindings[2].symbol.name == "_malloc"
assert bindings[2].segment.name == "__DATA"
assert bindings[2].library.name == "/usr/lib/libSystem.B.dylib"
def test_rebases():
target = lief.parse(get_sample('MachO/MachO64_x86-64_binary_rebase-LLVM.bin'))
assert target.has_dyld_info
relocations = target.relocations
assert len(relocations) == 10
assert relocations[0].address == 0x00001010
assert not relocations[0].pc_relative
assert relocations[0].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[0].section.name == "__data"
assert relocations[0].segment.name == "__DATA"
assert relocations[1].address == 0x00001028
assert not relocations[1].pc_relative
assert relocations[1].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[1].section.name == "__data"
assert relocations[1].segment.name == "__DATA"
assert relocations[2].address == 0x00001030
assert not relocations[2].pc_relative
assert relocations[2].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[2].section.name == "__data"
assert relocations[2].segment.name == "__DATA"
assert relocations[3].address == 0x00001038
assert not relocations[3].pc_relative
assert relocations[3].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[3].section.name == "__data"
assert relocations[3].segment.name == "__DATA"
assert relocations[4].address == 0x00001040
assert not relocations[4].pc_relative
assert relocations[4].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[4].section.name == "__data"
assert relocations[4].segment.name == "__DATA"
assert relocations[5].address == 0x00001258
assert not relocations[5].pc_relative
assert relocations[5].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[5].section.name == "__data"
assert relocations[5].segment.name == "__DATA"
assert relocations[6].address == 0x00001278
assert not relocations[6].pc_relative
assert relocations[6].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[6].section.name == "__mystuff"
assert relocations[6].segment.name == "__DATA"
assert relocations[7].address == 0x00001288
assert not relocations[7].pc_relative
assert relocations[7].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[7].section.name == "__mystuff"
assert relocations[7].segment.name == "__DATA"
assert relocations[8].address == 0x00001298
assert not relocations[8].pc_relative
assert relocations[8].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[8].section.name == "__mystuff"
assert relocations[8].segment.name == "__DATA"
assert relocations[9].address == 0x000012A8
assert not relocations[9].pc_relative
assert relocations[9].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[9].section.name == "__mystuff"
assert relocations[9].segment.name == "__DATA"
def test_threaded_opcodes(tmp_path):
bin_path = pathlib.Path(get_sample('MachO/FatMachO64_x86-64_arm64_binary_ls.bin'))
target = lief.MachO.parse(bin_path.as_posix())
target = target.take(lief.MachO.CPU_TYPES.ARM64)
assert target.has_dyld_info
relocations = target.relocations
bindings = target.dyld_info.bindings
assert len(relocations) == 39
assert len(bindings) == 82
assert relocations[38].address == 0x10000c008
assert not relocations[38].pc_relative
assert relocations[38].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[38].section.name == "__data"
assert relocations[38].segment.name == "__DATA"
assert bindings[81].binding_class == lief.MachO.BINDING_CLASS.THREADED
assert bindings[81].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[81].address == 0x100008288
assert bindings[81].symbol.name == "_optind"
assert bindings[81].segment.name == "__DATA_CONST"
assert bindings[81].library.name == "/usr/lib/libSystem.B.dylib"
output_path = f"{tmp_path}/{bin_path.name}"
lief.logging.set_level(lief.logging.LOGGING_LEVEL.DEBUG)
target.write(output_path)
lief.logging.set_level(lief.logging.LOGGING_LEVEL.INFO)
print(output_path)
fat_written_target = lief.MachO.parse(output_path)
written_target = fat_written_target.take(lief.MachO.CPU_TYPES.ARM64)
for r in written_target.relocations:
print(r)
relocations = written_target.relocations
bindings = written_target.dyld_info.bindings
checked, err = lief.MachO.check_layout(written_target)
assert checked, err
assert len(relocations) == 39
assert len(bindings) == 82
assert relocations[38].address == 0x10000c008
assert not relocations[38].pc_relative
assert relocations[38].type == int(lief.MachO.REBASE_TYPES.POINTER)
assert relocations[38].section.name == "__data"
assert relocations[38].segment.name == "__DATA"
assert bindings[81].binding_class == lief.MachO.BINDING_CLASS.THREADED
assert bindings[81].binding_type == lief.MachO.BIND_TYPES.POINTER
assert bindings[81].address == 0x100008288
assert bindings[81].symbol.name == "_optind"
assert bindings[81].segment.name == "__DATA_CONST"
assert bindings[81].library.name == "/usr/lib/libSystem.B.dylib" | tests/macho/test_dyld.py | 0.587588 | 0.573678 |
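Outside the test suite, the same accessors work for a quick look at any Mach-O on disk; a short sketch (the path is a placeholder):

# Hypothetical one-off inspection script reusing the accessors exercised by the tests
import lief

binary = lief.parse('/path/to/some/macho.bin')   # placeholder path
if binary.has_dyld_info:
    for binding in binary.dyld_info.bindings:
        print(hex(binding.address), binding.segment.name, binding.symbol.name)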
from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth import login,authenticate
from django.contrib.auth.decorators import login_required
from .models import Profile,NeighbourHood,Post,Business
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .forms import PostForm,UpdateProfileForm,NeighbourHoodForm,BusinessForm,SignupForm
@login_required(login_url='/accounts/login')
def index(request):
posts = Post.objects.all()
posts = posts[::-1]
print(posts,"nnnnnnnnnnnnnn")
return render(request,'main/index.html',{"posts":posts})
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=password)
login(request, user)
return redirect('index')
else:
form = SignupForm()
return render(request, 'registration/registration_form.html', {'form': form})
def home(request):
all_hoods = NeighbourHood.objects.all()
all_hoods=all_hoods[::-1]
context={
'all_hoods':all_hoods
}
return render(request,'main/home.html',context)
def create_hood(request):
if request.method =="POST":
form = NeighbourHoodForm(request.POST,request.FILES)
if form.is_valid():
hood = form.save(commit=False)
hood.admin = request.user.profile
hood.save()
return redirect('home')
else:
form = NeighbourHoodForm()
return render(request,'main/newhood.html',{'form':form})
def one_hood(request,id):
hood = NeighbourHood.objects.get(id = id)
buss = Business.objects.filter(neighbourhood=hood)
posts = Post.objects.filter(hood=hood)
posts = posts[::-1]
if request.method == "POST":
form = BusinessForm(request.POST)
if form.is_valid():
busin_form = form.save(commit=False)
busin_form.neighbourhood = hood
busin_form.user = request.user.profile
busin_form.save()
return redirect('single-hood', hood.id)
else:
form = BusinessForm()
context ={
'hood':hood,
'business':buss,
'posts':posts,
'form':form,
}
return render(request,'main/single_hood.html',context)
def hood_members(request, hood_id):
hood = NeighbourHood.objects.get(id=hood_id)
members = Profile.objects.filter(neighbourhood = hood)
return render(request,'main/members.html',{'members':members})
def create_posts(request,hood_id):
hood = NeighbourHood.objects.get(id=hood_id)
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.hood =hood
post.user = request.user.profile
post.save()
return redirect('single-hood',hood.id)
else:
form =PostForm
return render(request,'main/post.html',{'form':form})
def join_hood(request,id):
neighbourhood = get_object_or_404(NeighbourHood,id=id)
request.user.profile.neighbourhood=neighbourhood
request.user.profile.save()
return redirect('home')
def leave_hood(request,id):
hood = get_object_or_404(NeighbourHood,id=id)
request.user.profile.neighbourhood=None
request.user.profile.save()
return redirect('home')
def profile(request,username):
return render(request,'profile/prof.html')
def edit_profile(request,username):
user = User.objects.get(username=username)
if request.method == "POST":
form = UpdateProfileForm(request.POST,request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
return redirect('profile',user.username)
else:
form = UpdateProfileForm(instance=request.user.profile)
return render(request,'profile/editprof.html',{'form':form})
def search_business(request):
if request.method == 'GET':
name = request.GET.get("title")
results = Business.objects.filter(name__icontains=name).all()
print(results)
        message = f'{name}'
params = {
'results': results,
'message': message
}
return render(request, 'results.html', params)
else:
message = "You haven't searched for any Business category"
return render(request, "results.html") | hood/views.py | from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth import login,authenticate
from django.contrib.auth.decorators import login_required
from .models import Profile,NeighbourHood,Post,Business
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .forms import PostForm,UpdateProfileForm,NeighbourHoodForm,BusinessForm,SignupForm
@login_required(login_url='/accounts/login')
def index(request):
posts = Post.objects.all()
posts = posts[::-1]
print(posts,"nnnnnnnnnnnnnn")
return render(request,'main/index.html',{"posts":posts})
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=password)
login(request, user)
return redirect('index')
else:
form = SignupForm()
return render(request, 'registration/registration_form.html', {'form': form})
def home(request):
all_hoods = NeighbourHood.objects.all()
all_hoods=all_hoods[::-1]
context={
'all_hoods':all_hoods
}
return render(request,'main/home.html',context)
def create_hood(request):
if request.method =="POST":
form = NeighbourHoodForm(request.POST,request.FILES)
if form.is_valid():
hood = form.save(commit=False)
hood.admin = request.user.profile
hood.save()
return redirect('home')
else:
form = NeighbourHoodForm()
return render(request,'main/newhood.html',{'form':form})
def one_hood(request,id):
hood = NeighbourHood.objects.get(id = id)
buss = Business.objects.filter(neighbourhood=hood)
posts = Post.objects.filter(hood=hood)
posts = posts[::-1]
if request.method == "POST":
form = BusinessForm(request.POST)
if form.is_valid():
busin_form = form.save(commit=False)
busin_form.neighbourhood = hood
busin_form.user = request.user.profile
busin_form.save()
return redirect('single-hood', hood.id)
else:
form = BusinessForm()
context ={
'hood':hood,
'business':buss,
'posts':posts,
'form':form,
}
return render(request,'main/single_hood.html',context)
def hood_members(request, hood_id):
hood = NeighbourHood.objects.get(id=hood_id)
members = Profile.objects.filter(neighbourhood = hood)
return render(request,'main/members.html',{'members':members})
def create_posts(request,hood_id):
hood = NeighbourHood.objects.get(id=hood_id)
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.hood =hood
post.user = request.user.profile
post.save()
return redirect('single-hood',hood.id)
else:
form =PostForm
return render(request,'main/post.html',{'form':form})
def join_hood(request,id):
neighbourhood = get_object_or_404(NeighbourHood,id=id)
request.user.profile.neighbourhood=neighbourhood
request.user.profile.save()
return redirect('home')
def leave_hood(request,id):
hood = get_object_or_404(NeighbourHood,id=id)
request.user.profile.neighbourhood=None
request.user.profile.save()
return redirect('home')
def profile(request,username):
return render(request,'profile/prof.html')
def edit_profile(request,username):
user = User.objects.get(username=username)
if request.method == "POST":
form = UpdateProfileForm(request.POST,request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
return redirect('profile',user.username)
else:
form = UpdateProfileForm(instance=request.user.profile)
return render(request,'profile/editprof.html',{'form':form})
def search_business(request):
if request.method == 'GET':
name = request.GET.get("title")
results = Business.objects.filter(name__icontains=name).all()
print(results)
message = f'{name}'
params = {
'results': results,
'message': message
}
return render(request, 'results.html', params)
else:
message = "You haven't searched for any Business category"
return render(request, "results.html") | 0.31279 | 0.056055 |
from datetime import date
from django.test import TestCase
from django.core.exceptions import ValidationError
from ..date import SplitDateWidget, SplitDateField
class SplitDateWidgetTests(TestCase):
def test_render_assigns_ids_and_labels(self):
widget = SplitDateWidget()
content = widget.render('boop', None, {'id': 'blarg'})
self.assertRegexpMatches(content, 'id="blarg_0"')
self.assertRegexpMatches(content, 'id="blarg_1"')
self.assertRegexpMatches(content, 'id="blarg_2"')
self.assertRegexpMatches(content, 'for="blarg_0"')
self.assertRegexpMatches(content, 'for="blarg_1"')
self.assertRegexpMatches(content, 'for="blarg_2"')
def test_render_assigns_names(self):
widget = SplitDateWidget()
content = widget.render('boop', None)
self.assertRegexpMatches(content, 'name="boop_0"')
self.assertRegexpMatches(content, 'name="boop_1"')
self.assertRegexpMatches(content, 'name="boop_2"')
def test_render_assigns_hint_id_and_aria_describedby(self):
widget = SplitDateWidget()
content = widget.render('boop', None, {'id': 'foo'})
self.assertRegexpMatches(content, 'id="foo_hint"')
self.assertRegexpMatches(content, 'aria-describedby="foo_hint"')
def test_render_takes_value_as_list(self):
widget = SplitDateWidget()
content = widget.render('boop', [2006, 7, 29])
self.assertRegexpMatches(content, 'value="2006"')
self.assertRegexpMatches(content, 'value="7"')
self.assertRegexpMatches(content, 'value="29"')
def test_render_takes_value_as_date(self):
widget = SplitDateWidget()
content = widget.render('boop', date(2005, 6, 28))
self.assertRegexpMatches(content, 'value="2005"')
self.assertRegexpMatches(content, 'value="6"')
self.assertRegexpMatches(content, 'value="28"')
def test_render_does_not_raise_exception_on_empty_lists(self):
widget = SplitDateWidget()
content = widget.render('boop', [])
self.assertRegexpMatches(content, 'value=""')
def test_decompress_works_with_dates(self):
widget = SplitDateWidget()
self.assertEqual(widget.decompress(date(2005, 6, 28)), [2005, 6, 28])
def test_decompress_works_with_none(self):
widget = SplitDateWidget()
self.assertEqual(widget.decompress(None), [None, None, None])
class SplitDateFieldTests(TestCase):
def test_compress_returns_date_for_valid_dates(self):
field = SplitDateField()
self.assertEqual(field.compress([2005, 6, 28]), date(2005, 6, 28))
def test_compress_raises_validation_errors_for_invalid_dates(self):
field = SplitDateField()
with self.assertRaisesRegexp(
ValidationError,
'Invalid date: day is out of range for month.'
):
field.compress([2001, 2, 31])
def test_compress_returns_none_when_data_list_is_falsy(self):
field = SplitDateField()
self.assertEqual(field.compress(None), None)
self.assertEqual(field.compress([]), None) | frontend/tests/test_date.py | from datetime import date
from django.test import TestCase
from django.core.exceptions import ValidationError
from ..date import SplitDateWidget, SplitDateField
class SplitDateWidgetTests(TestCase):
def test_render_assigns_ids_and_labels(self):
widget = SplitDateWidget()
content = widget.render('boop', None, {'id': 'blarg'})
self.assertRegexpMatches(content, 'id="blarg_0"')
self.assertRegexpMatches(content, 'id="blarg_1"')
self.assertRegexpMatches(content, 'id="blarg_2"')
self.assertRegexpMatches(content, 'for="blarg_0"')
self.assertRegexpMatches(content, 'for="blarg_1"')
self.assertRegexpMatches(content, 'for="blarg_2"')
def test_render_assigns_names(self):
widget = SplitDateWidget()
content = widget.render('boop', None)
self.assertRegexpMatches(content, 'name="boop_0"')
self.assertRegexpMatches(content, 'name="boop_1"')
self.assertRegexpMatches(content, 'name="boop_2"')
def test_render_assigns_hint_id_and_aria_describedby(self):
widget = SplitDateWidget()
content = widget.render('boop', None, {'id': 'foo'})
self.assertRegexpMatches(content, 'id="foo_hint"')
self.assertRegexpMatches(content, 'aria-describedby="foo_hint"')
def test_render_takes_value_as_list(self):
widget = SplitDateWidget()
content = widget.render('boop', [2006, 7, 29])
self.assertRegexpMatches(content, 'value="2006"')
self.assertRegexpMatches(content, 'value="7"')
self.assertRegexpMatches(content, 'value="29"')
def test_render_takes_value_as_date(self):
widget = SplitDateWidget()
content = widget.render('boop', date(2005, 6, 28))
self.assertRegexpMatches(content, 'value="2005"')
self.assertRegexpMatches(content, 'value="6"')
self.assertRegexpMatches(content, 'value="28"')
def test_render_does_not_raise_exception_on_empty_lists(self):
widget = SplitDateWidget()
content = widget.render('boop', [])
self.assertRegexpMatches(content, 'value=""')
def test_decompress_works_with_dates(self):
widget = SplitDateWidget()
self.assertEqual(widget.decompress(date(2005, 6, 28)), [2005, 6, 28])
def test_decompress_works_with_none(self):
widget = SplitDateWidget()
self.assertEqual(widget.decompress(None), [None, None, None])
class SplitDateFieldTests(TestCase):
def test_compress_returns_date_for_valid_dates(self):
field = SplitDateField()
self.assertEqual(field.compress([2005, 6, 28]), date(2005, 6, 28))
def test_compress_raises_validation_errors_for_invalid_dates(self):
field = SplitDateField()
with self.assertRaisesRegexp(
ValidationError,
'Invalid date: day is out of range for month.'
):
field.compress([2001, 2, 31])
def test_compress_returns_none_when_data_list_is_falsy(self):
field = SplitDateField()
self.assertEqual(field.compress(None), None)
self.assertEqual(field.compress([]), None) | 0.707506 | 0.533276 |
import typing as ty
import numpy as np
from .dataset_adapters import Dataset
from .kernel_specs import (
AdditiveKernelSpec,
KernelSpec,
BaseKernelSpec,
GenericKernelSpec,
PeriodicKernelSpec,
PeriodicNoConstKernelSpec,
ConstraintBounds as CB,
ProductKernelSpec,
TopLevelKernelSpec,
)
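# Heuristic bounds used to constrain base-kernel hyperparameters from the training data.
# Only the periodic ("PER") kernel currently consumes these values (see
# kernel_proto_constrained_with_data below); the "RBF" and "LIN" entries are placeholders.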
default_constraint_heuristics = {
"PER": {
"min_periods": 5,
"min_data_points_per_period": 5,
"max_length_scale_as_mult_of_max_period": 5,
"min_length_scale_as_mult_of_min_period": 0.5,
},
"RBF": {},
"LIN": {},
}
def kernel_proto_constrained_with_data(
kernel: GenericKernelSpec, d: Dataset, heuristics=None
) -> GenericKernelSpec:
heuristics = heuristics or default_constraint_heuristics
if isinstance(kernel, PeriodicNoConstKernelSpec) or isinstance(
kernel, PeriodicKernelSpec
):
min_x_diff = np.diff(d.train_x.flatten()).min()
periodicity_min = heuristics["PER"]["min_data_points_per_period"] * min_x_diff
x_range = d.train_x.max() - d.train_x.min()
periodicity_max = x_range / heuristics["PER"]["min_periods"]
length_scale_min = (
periodicity_min
* heuristics["PER"]["min_length_scale_as_mult_of_min_period"]
)
length_scale_max = (
periodicity_max
* heuristics["PER"]["max_length_scale_as_mult_of_max_period"]
)
period_bounds = CB(periodicity_min, periodicity_max)
length_scale_bounds = CB(length_scale_min, length_scale_max)
return ty.cast(GenericKernelSpec, kernel).clone_update(
{
"period_bounds": period_bounds,
"length_scale_bounds": length_scale_bounds,
"period": period_bounds.clamp(kernel.period),
"length_scale": length_scale_bounds.clamp(kernel.length_scale),
}
)
else:
return kernel
def update_kernel_protos_constrained_with_data(
kernels: list[GenericKernelSpec], d: Dataset, heuristics=None
) -> list[GenericKernelSpec]:
return [kernel_proto_constrained_with_data(k, d, heuristics) for k in kernels]
T = ty.TypeVar("T")
def set_constraints_on_spec(
spec: T, constrained_base_kernels: list[BaseKernelSpec]
) -> T:
CBK = constrained_base_kernels
if (
isinstance(spec, TopLevelKernelSpec)
or isinstance(spec, AdditiveKernelSpec)
or isinstance(spec, ProductKernelSpec)
):
operands = [set_constraints_on_spec(subspec, CBK) for subspec in spec.operands]
return spec.clone_update({"operands": operands})
else:
base_kernel = list(filter(lambda x: type(x) == type(spec), CBK))[0]
if isinstance(spec, PeriodicNoConstKernelSpec) or isinstance(
spec, PeriodicKernelSpec
):
# FIXME: type handling here sucks
base_kernel = ty.cast(
PeriodicKernelSpec,
base_kernel,
)
return spec.clone_update(
{
"period_bounds": base_kernel.period_bounds,
"length_scale_bounds": base_kernel.length_scale_bounds,
}
)
else:
return spec | autostat/constraints.py | import typing as ty
import numpy as np
from .dataset_adapters import Dataset
from .kernel_specs import (
AdditiveKernelSpec,
KernelSpec,
BaseKernelSpec,
GenericKernelSpec,
PeriodicKernelSpec,
PeriodicNoConstKernelSpec,
ConstraintBounds as CB,
ProductKernelSpec,
TopLevelKernelSpec,
)
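# Heuristic bounds used to constrain base-kernel hyperparameters from the training data.
# Only the periodic ("PER") kernel currently consumes these values (see
# kernel_proto_constrained_with_data below); the "RBF" and "LIN" entries are placeholders.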
default_constraint_heuristics = {
"PER": {
"min_periods": 5,
"min_data_points_per_period": 5,
"max_length_scale_as_mult_of_max_period": 5,
"min_length_scale_as_mult_of_min_period": 0.5,
},
"RBF": {},
"LIN": {},
}
def kernel_proto_constrained_with_data(
kernel: GenericKernelSpec, d: Dataset, heuristics=None
) -> GenericKernelSpec:
heuristics = heuristics or default_constraint_heuristics
if isinstance(kernel, PeriodicNoConstKernelSpec) or isinstance(
kernel, PeriodicKernelSpec
):
min_x_diff = np.diff(d.train_x.flatten()).min()
periodicity_min = heuristics["PER"]["min_data_points_per_period"] * min_x_diff
x_range = d.train_x.max() - d.train_x.min()
periodicity_max = x_range / heuristics["PER"]["min_periods"]
length_scale_min = (
periodicity_min
* heuristics["PER"]["min_length_scale_as_mult_of_min_period"]
)
length_scale_max = (
periodicity_max
* heuristics["PER"]["max_length_scale_as_mult_of_max_period"]
)
period_bounds = CB(periodicity_min, periodicity_max)
length_scale_bounds = CB(length_scale_min, length_scale_max)
return ty.cast(GenericKernelSpec, kernel).clone_update(
{
"period_bounds": period_bounds,
"length_scale_bounds": length_scale_bounds,
"period": period_bounds.clamp(kernel.period),
"length_scale": length_scale_bounds.clamp(kernel.length_scale),
}
)
else:
return kernel
def update_kernel_protos_constrained_with_data(
kernels: list[GenericKernelSpec], d: Dataset, heuristics=None
) -> list[GenericKernelSpec]:
return [kernel_proto_constrained_with_data(k, d, heuristics) for k in kernels]
T = ty.TypeVar("T")
def set_constraints_on_spec(
spec: T, constrained_base_kernels: list[BaseKernelSpec]
) -> T:
CBK = constrained_base_kernels
if (
isinstance(spec, TopLevelKernelSpec)
or isinstance(spec, AdditiveKernelSpec)
or isinstance(spec, ProductKernelSpec)
):
operands = [set_constraints_on_spec(subspec, CBK) for subspec in spec.operands]
return spec.clone_update({"operands": operands})
else:
base_kernel = list(filter(lambda x: type(x) == type(spec), CBK))[0]
if isinstance(spec, PeriodicNoConstKernelSpec) or isinstance(
spec, PeriodicKernelSpec
):
# FIXME: type handling here sucks
base_kernel = ty.cast(
PeriodicKernelSpec,
base_kernel,
)
return spec.clone_update(
{
"period_bounds": base_kernel.period_bounds,
"length_scale_bounds": base_kernel.length_scale_bounds,
}
)
else:
return spec | 0.545528 | 0.41947 |
import yfinance as yf
import pandas as pd
import datetime as dt
from pandas_datareader import data as pdr
import util as util
yf.pdr_override()
start = dt.datetime.now() - dt.timedelta(days=365)
now = dt.datetime.now()
index_change_dict = {}
def get_relative_strength(stock, index, data = None):
try:
if data is None:
stock_data = pdr.get_data_yahoo(stock, start, now)
else:
stock_data = data
stock_old = stock_data["Adj Close"][0]
stock_now = stock_data["Adj Close"][-1]
stock_change = util.get_percent_change(stock_now, stock_old)
if (index in index_change_dict):
index_change = index_change_dict[index]
else:
index_data = pdr.get_data_yahoo(index, start, now)
index_old = index_data["Adj Close"][0]
index_now = index_data["Adj Close"][-1]
index_change = util.get_percent_change(index_now, index_old)
index_change_dict[index] = index_change
return round(stock_change/index_change * 100, 2)
except Exception as e:
print("No data on " + stock)
"""
Average True Range (ATR) is a technical indicator that measures market volatility,
typically derived from a moving average of a series of ATRs.
The maximum of:
- The current high less the current low
- The absolute value of the current high less the previous close
- The absolute value of the current low less the previous close
"""
def get_average_true_range(stock):
try:
sum = 0
days = 14
df = pdr.get_data_yahoo(stock, start, now)
for i in range(1, days):
currHigh = df["High"][-i]
currLow = df["Low"][-i]
prevClose = df["Adj Close"][-i-1]
sum += max(currHigh - currLow, abs(currHigh - prevClose), abs(currLow - prevClose))
return round(sum/days, 2)
except Exception as e:
print("No data on " + stock)
def get_resistance_level(stock, level):
try:
df = pdr.get_data_yahoo(stock, start, now)
high = df["High"][-1]
low = df["Low"][-1]
close = df["Adj Close"][-1]
pivot = (high + low + close)/3
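# Floor-trader style pivot levels: R1..R4 are derived from the pivot point and the previous
# high/low via the mapping below; get_support_level mirrors this for the S1..S4 supports.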
switch = {
1: (2 * pivot) - low,
2: pivot - low + high,
3: high + 2 * (pivot - low),
4: high + 3 * (pivot - low)
}
return switch.get(level)
except Exception as e:
print("No data on " + stock)
def get_support_level(stock, level):
try:
df = pdr.get_data_yahoo(stock, start, now)
high = df["High"][-1]
low = df["Low"][-1]
close = df["Adj Close"][-1]
pivot = (high + low + close)/3
switch = {
1: 2 * pivot - high,
2: pivot - high + low,
3: low - 2 * (high - pivot),
4: low - 3 * (high - pivot)
}
return switch.get(level)
except Exception as e:
print("No data on " + stock) | indicators.py | import yfinance as yf
import pandas as pd
import datetime as dt
from pandas_datareader import data as pdr
import util as util
yf.pdr_override()
start = dt.datetime.now() - dt.timedelta(days=365)
now = dt.datetime.now()
index_change_dict = {}
def get_relative_strength(stock, index, data = None):
try:
if data is None:
stock_data = pdr.get_data_yahoo(stock, start, now)
else:
stock_data = data
stock_old = stock_data["Adj Close"][0]
stock_now = stock_data["Adj Close"][-1]
stock_change = util.get_percent_change(stock_now, stock_old)
if (index in index_change_dict):
index_change = index_change_dict[index]
else:
index_data = pdr.get_data_yahoo(index, start, now)
index_old = index_data["Adj Close"][0]
index_now = index_data["Adj Close"][-1]
index_change = util.get_percent_change(index_now, index_old)
index_change_dict[index] = index_change
return round(stock_change/index_change * 100, 2)
except Exception as e:
print("No data on " + stock)
"""
Average True Range (ATR) is a technical indicator that measures market volatility,
typically derived from a moving average of a series of ATRs.
The maximum of:
- The current high less the current low
- The absolute value of the current high less the previous close
- The absolute value of the current low less the previous close
"""
def get_average_true_range(stock):
try:
sum = 0
days = 14
df = pdr.get_data_yahoo(stock, start, now)
for i in range(1, days):
currHigh = df["High"][-i]
currLow = df["Low"][-i]
prevClose = df["Adj Close"][-i-1]
sum += max(currHigh - currLow, abs(currHigh - prevClose), abs(currLow - prevClose))
return round(sum/days, 2)
except Exception as e:
print("No data on " + stock)
def get_resistance_level(stock, level):
try:
df = pdr.get_data_yahoo(stock, start, now)
high = df["High"][-1]
low = df["Low"][-1]
close = df["Adj Close"][-1]
pivot = (high + low + close)/3
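# Floor-trader style pivot levels: R1..R4 are derived from the pivot point and the previous
# high/low via the mapping below; get_support_level mirrors this for the S1..S4 supports.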
switch = {
1: (2 * pivot) - low,
2: pivot - low + high,
3: high + 2 * (pivot - low),
4: high + 3 * (pivot - low)
}
return switch.get(level)
except Exception as e:
print("No data on " + stock)
def get_support_level(stock, level):
try:
df = pdr.get_data_yahoo(stock, start, now)
high = df["High"][-1]
low = df["Low"][-1]
close = df["Adj Close"][-1]
pivot = (high + low + close)/3
switch = {
1: 2 * pivot - high,
2: pivot - high + low,
3: low - 2 * (high - pivot),
4: low - 3 * (high - pivot)
}
return switch.get(level)
except Exception as e:
print("No data on " + stock) | 0.355663 | 0.419529 |
from naoth.LogReader import LogReader
from naoth.LogReader import Parser
from matplotlib import pyplot
import numpy
class XABSLSymbols:
def __init__(self):
self.values = {}
self.decimalIdToName = {}
self.booleanIdToName = {}
self.enumIdToName = {}
class BehaviorParser(Parser):
def __init__(self):
Parser.__init__(self)
self.symbols = XABSLSymbols()
self.options = []
self.current_options = {}
def parseOption(self, o):
if o.type == 0: # Option
optionComplete = self.options[o.option.id]
self.current_options[optionComplete.name] = {
'time': o.option.timeOfExecution,
'state': optionComplete.states[o.option.activeState],
'stateTime': o.option.stateTime
}
for so in o.option.activeSubActions:
self.parseOption(so)
def parse(self, name, data):
self.current_options = {}
if name == 'BehaviorStateComplete':
message = Parser.parse(self, name, data)
# process options
self.options = message.options
# process symbols
for s in message.inputSymbolList.decimal:
self.symbols.values[s.name] = s.value
self.symbols.decimalIdToName[s.id] = s.name
for s in message.inputSymbolList.boolean:
self.symbols.values[s.name] = s.value
self.symbols.booleanIdToName[s.id] = s.name
for s in message.inputSymbolList.enumerated:
self.symbols.values[s.name] = s.value
self.symbols.enumIdToName[s.id] = s.name
return self.symbols.values, self.current_options
elif name == 'BehaviorStateSparse':
message = Parser.parse(self, name, data)
symbols_values = self.symbols.values.copy()
# process active options
for o in message.activeRootActions:
self.parseOption(o)
# process symbols
for s in message.inputSymbolList.decimal:
name = self.symbols.decimalIdToName[s.id]
symbols_values[name] = s.value
for s in message.inputSymbolList.boolean:
name = self.symbols.booleanIdToName[s.id]
symbols_values[name] = s.value
for s in message.inputSymbolList.enumerated:
name = self.symbols.enumIdToName[s.id]
symbols_values[name] = s.value
return symbols_values, self.current_options
else:
return Parser.parse(self, name, data)
def behavior(frame):
try:
if "BehaviorStateComplete" in frame.messages:
m, o = frame["BehaviorStateComplete"]
else:
m, o = frame["BehaviorStateSparse"]
return [m["robot_pose.x"], m["robot_pose.y"], m["fall_down_state"]]
except KeyError as k:
raise StopIteration
if __name__ == "__main__":
parser = BehaviorParser()
fileName = "./game.log"
log = LogReader(fileName, parser) # , filter=headYaw)
# we want only the frames which contain BehaviorState
b = [behavior(f) for f in log if "BehaviorStateComplete" in f.messages or "BehaviorStateSparse" in f.messages]
upright = list(filter(lambda m: m[2] == 1, b))
fall = list(filter(lambda m: m[2] != 1, b))
print("step 2")
du = list(zip(*upright))
df = list(zip(*fall))
pyplot.plot(du[0], du[1], '.')
pyplot.plot(df[0], df[1], 'o')
pyplot.ylabel('y')
pyplot.xlabel('x')
pyplot.show() | Utils/py/MotionAnalysis/BehaviorParser.py | from naoth.LogReader import LogReader
from naoth.LogReader import Parser
from matplotlib import pyplot
import numpy
class XABSLSymbols:
def __init__(self):
self.values = {}
self.decimalIdToName = {}
self.booleanIdToName = {}
self.enumIdToName = {}
class BehaviorParser(Parser):
def __init__(self):
Parser.__init__(self)
self.symbols = XABSLSymbols()
self.options = []
self.current_options = {}
def parseOption(self, o):
if o.type == 0: # Option
optionComplete = self.options[o.option.id]
self.current_options[optionComplete.name] = {
'time': o.option.timeOfExecution,
'state': optionComplete.states[o.option.activeState],
'stateTime': o.option.stateTime
}
for so in o.option.activeSubActions:
self.parseOption(so)
def parse(self, name, data):
self.current_options = {}
if name == 'BehaviorStateComplete':
message = Parser.parse(self, name, data)
# process options
self.options = message.options
# process symbols
for s in message.inputSymbolList.decimal:
self.symbols.values[s.name] = s.value
self.symbols.decimalIdToName[s.id] = s.name
for s in message.inputSymbolList.boolean:
self.symbols.values[s.name] = s.value
self.symbols.booleanIdToName[s.id] = s.name
for s in message.inputSymbolList.enumerated:
self.symbols.values[s.name] = s.value
self.symbols.enumIdToName[s.id] = s.name
return self.symbols.values, self.current_options
elif name == 'BehaviorStateSparse':
message = Parser.parse(self, name, data)
symbols_values = self.symbols.values.copy()
# process active options
for o in message.activeRootActions:
self.parseOption(o)
# process symbols
for s in message.inputSymbolList.decimal:
name = self.symbols.decimalIdToName[s.id]
symbols_values[name] = s.value
for s in message.inputSymbolList.boolean:
name = self.symbols.booleanIdToName[s.id]
symbols_values[name] = s.value
for s in message.inputSymbolList.enumerated:
name = self.symbols.enumIdToName[s.id]
symbols_values[name] = s.value
return symbols_values, self.current_options
else:
return Parser.parse(self, name, data)
def behavior(frame):
try:
if "BehaviorStateComplete" in frame.messages:
m, o = frame["BehaviorStateComplete"]
else:
m, o = frame["BehaviorStateSparse"]
return [m["robot_pose.x"], m["robot_pose.y"], m["fall_down_state"]]
except KeyError as k:
raise StopIteration
if __name__ == "__main__":
parser = BehaviorParser()
fileName = "./game.log"
log = LogReader(fileName, parser) # , filter=headYaw)
# we want only the frames which contain BehaviorState
b = [behavior(f) for f in log if "BehaviorStateComplete" in f.messages or "BehaviorStateSparse" in f.messages]
upright = list(filter(lambda m: m[2] == 1, b))
fall = list(filter(lambda m: m[2] != 1, b))
print("step 2")
du = list(zip(*upright))
df = list(zip(*fall))
pyplot.plot(du[0], du[1], '.')
pyplot.plot(df[0], df[1], 'o')
pyplot.ylabel('y')
pyplot.xlabel('x')
pyplot.show() | 0.438785 | 0.29 |
from starlette.requests import Request
from starlette.responses import JSONResponse
from .dataaccess import employeeda
from .permissions import Role
async def get_employees(request: Request):
employees = await employeeda.get_employees()
for e in employees:
e['role'] = Role(e['role']).name
return JSONResponse({'employees': employees})
async def edit_employee(request: Request):
username = request.path_params.get('username')
current_user, modified_user = await employeeda.get_employees(
[request.user.display_name, username])
body = await request.json()
updates = {}
if 'username' in body and body['username'] != modified_user['username']:
return JSONResponse({'Message': 'Cannot edit username'}, status_code=400)
if 'name' in body and body['name'] != modified_user['name']:
if current_user['username'] == modified_user['username']:
# updating self
updates['name'] = body['name']
elif current_user['role'] != 1:
return JSONResponse({'Message': 'Cannot edit other users'}, status_code=403)
if 'email' in body and body['email'] != modified_user['email']:
if current_user['username'] == modified_user['username']:
# updating self
updates['email'] = body['email']
elif current_user['role'] != 1:
return JSONResponse({'Message': 'Cannot edit other users'}, status_code=403)
if 'role' in body and body['role'] != Role(modified_user['role']).name:
# changing role
new_role = Role[body['role']]
if modified_user['role'] > current_user['role'] and new_role.value >= current_user['role']:
# only allowed if a lesser role going to an equal or lesser role
updates['role'] = new_role.value
else:
return JSONResponse({'Message': 'Cannot modify someone of '
'greater permissions than yourself'}, status_code=403)
result = await employeeda.modify_employee(username, **updates)
return JSONResponse(result)
async def check_employee(user: str):
""" Check that the user exists """
email = user
username, _ = user.split('@')
name = username.replace('.', ' ').title()
exists = await employeeda.get_employee_usernames((username,))
if exists:
return username
usernames = await employeeda.get_employee_usernames()
if not usernames:
# No users yet, this user gets to be an admin!
role = Role.admin
else:
role = Role.dev
await employeeda.add_employee(
username,
role_id=role.value,
name=name,
email=email)
return username | tmeister/employees.py | from starlette.requests import Request
from starlette.responses import JSONResponse
from .dataaccess import employeeda
from .permissions import Role
async def get_employees(request: Request):
employees = await employeeda.get_employees()
for e in employees:
e['role'] = Role(e['role']).name
return JSONResponse({'employees': employees})
async def edit_employee(request: Request):
username = request.path_params.get('username')
current_user, modified_user = await employeeda.get_employees(
[request.user.display_name, username])
body = await request.json()
updates = {}
if 'username' in body and body['username'] != modified_user['username']:
return JSONResponse({'Message': 'Cannot edit username'}, status_code=400)
if 'name' in body and body['name'] != modified_user['name']:
if current_user['username'] == modified_user['username']:
# updating self
updates['name'] = body['name']
elif current_user['role'] != 1:
return JSONResponse({'Message': 'Cannot edit other users'}, status_code=403)
if 'email' in body and body['email'] != modified_user['email']:
if current_user['username'] == modified_user['username']:
# updating self
updates['email'] = body['email']
elif current_user['role'] != 1:
return JSONResponse({'Message': 'Cannot edit other users'}, status_code=403)
if 'role' in body and body['role'] != Role(modified_user['role']).name:
# changing role
new_role = Role[body['role']]
if modified_user['role'] > current_user['role'] and new_role.value >= current_user['role']:
# only allowed if a lesser role going to an equal or lesser role
updates['role'] = new_role.value
else:
return JSONResponse({'Message': 'Cannot modify someone of '
'greater permissions than yourself'}, status_code=403)
result = await employeeda.modify_employee(username, **updates)
return JSONResponse(result)
async def check_employee(user: str):
""" Check that the user exists """
email = user
username, _ = user.split('@')
name = username.replace('.', ' ').title()
exists = await employeeda.get_employee_usernames((username,))
if exists:
return username
usernames = await employeeda.get_employee_usernames()
if not usernames:
# No users yet, this user gets to be an admin!
role = Role.admin
else:
role = Role.dev
await employeeda.add_employee(
username,
role_id=role.value,
name=name,
email=email)
return username | 0.38341 | 0.083367 |
import time
import numpy as np
import argparse
import sys
sys.path.append("../../")
import grpc
from grpc_ps import ps_service_pb2_grpc
from grpc_ps.client import ps_client
# algorithm setting
NUM_EPOCHS = 10
NUM_BATCHES = 1
MODEL_NAME = "w.b"
LEARNING_RATE = 0.1
def handler(event, context):
start_time = time.time()
worker_index = event['rank']
num_workers = event['num_workers']
host = event['host']
port = event['port']
size = event['size']
print('number of workers = {}'.format(num_workers))
print('worker index = {}'.format(worker_index))
print("host = {}".format(host))
print("port = {}".format(port))
print("size = {}".format(size))
channel = grpc.insecure_channel("{}:{}".format(host, port), options=[
('grpc.max_send_message_length', 128 * 1024 * 1024),
('grpc.max_receive_message_length', 128 * 1024 * 1024)])
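# gRPC's default receive limit (4 MiB) is raised to 128 MiB here, presumably so that an entire
# model or gradient vector fits in a single pull_model / push_grad call.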
stub = ps_service_pb2_grpc.ParameterServerStub(channel)
# ping
ps_client.ping(stub)
print("create and ping thrift server >>> HOST = {}, PORT = {}".format(host, port))
# register model
ps_client.register_model(stub, MODEL_NAME, num_workers, worker_index, size)
ps_client.exist_model(stub, MODEL_NAME)
print("register and check model >>> name = {}, length = {}".format(MODEL_NAME, size))
# Training the Model
train_start = time.time()
iter_counter = 0
for epoch in range(NUM_EPOCHS):
epoch_start = time.time()
for batch_index in range(NUM_BATCHES):
print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
batch_start = time.time()
loss = 0.0
# pull latest model
ps_client.can_pull(stub, MODEL_NAME, iter_counter, worker_index)
pull_start = time.time()
latest_model = ps_client.pull_model(stub, MODEL_NAME, iter_counter, worker_index)
pull_time = time.time() - pull_start
# push gradient to PS
w_b_grad = np.random.rand(1, size).astype(np.double).flatten()
ps_client.can_push(stub, MODEL_NAME, iter_counter, worker_index)
push_start = time.time()
ps_client.push_grad(stub, MODEL_NAME, w_b_grad, LEARNING_RATE, iter_counter, worker_index)
push_time = time.time() - push_start
ps_client.can_pull(stub, MODEL_NAME, iter_counter + 1, worker_index) # sync all workers
print('Epoch: [%d/%d], Step: [%d/%d] >>> Time: %.4f, Loss: %.4f, epoch cost %.4f, '
'batch cost %.4f s: pull model cost %.4f s, push update cost %.4f s'
% (epoch + 1, NUM_EPOCHS, batch_index, NUM_BATCHES,
time.time() - train_start, loss, time.time() - epoch_start,
time.time() - batch_start, pull_time, push_time))
iter_counter += 1
end_time = time.time()
print("Elapsed time = {} s".format(end_time - start_time)) | grpc_ps/test/ps_client_test_handler.py | import time
import numpy as np
import argparse
import sys
sys.path.append("../../")
import grpc
from grpc_ps import ps_service_pb2_grpc
from grpc_ps.client import ps_client
# algorithm setting
NUM_EPOCHS = 10
NUM_BATCHES = 1
MODEL_NAME = "w.b"
LEARNING_RATE = 0.1
def handler(event, context):
start_time = time.time()
worker_index = event['rank']
num_workers = event['num_workers']
host = event['host']
port = event['port']
size = event['size']
print('number of workers = {}'.format(num_workers))
print('worker index = {}'.format(worker_index))
print("host = {}".format(host))
print("port = {}".format(port))
print("size = {}".format(size))
channel = grpc.insecure_channel("{}:{}".format(host, port), options=[
('grpc.max_send_message_length', 128 * 1024 * 1024),
('grpc.max_receive_message_length', 128 * 1024 * 1024)])
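# gRPC's default receive limit (4 MiB) is raised to 128 MiB here, presumably so that an entire
# model or gradient vector fits in a single pull_model / push_grad call.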
stub = ps_service_pb2_grpc.ParameterServerStub(channel)
# ping
ps_client.ping(stub)
print("create and ping thrift server >>> HOST = {}, PORT = {}".format(host, port))
# register model
ps_client.register_model(stub, MODEL_NAME, num_workers, worker_index, size)
ps_client.exist_model(stub, MODEL_NAME)
print("register and check model >>> name = {}, length = {}".format(MODEL_NAME, size))
# Training the Model
train_start = time.time()
iter_counter = 0
for epoch in range(NUM_EPOCHS):
epoch_start = time.time()
for batch_index in range(NUM_BATCHES):
print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
batch_start = time.time()
loss = 0.0
# pull latest model
ps_client.can_pull(stub, MODEL_NAME, iter_counter, worker_index)
pull_start = time.time()
latest_model = ps_client.pull_model(stub, MODEL_NAME, iter_counter, worker_index)
pull_time = time.time() - pull_start
# push gradient to PS
w_b_grad = np.random.rand(1, size).astype(np.double).flatten()
ps_client.can_push(stub, MODEL_NAME, iter_counter, worker_index)
push_start = time.time()
ps_client.push_grad(stub, MODEL_NAME, w_b_grad, LEARNING_RATE, iter_counter, worker_index)
push_time = time.time() - push_start
ps_client.can_pull(stub, MODEL_NAME, iter_counter + 1, worker_index) # sync all workers
print('Epoch: [%d/%d], Step: [%d/%d] >>> Time: %.4f, Loss: %.4f, epoch cost %.4f, '
'batch cost %.4f s: pull model cost %.4f s, push update cost %.4f s'
% (epoch + 1, NUM_EPOCHS, batch_index, NUM_BATCHES,
time.time() - train_start, loss, time.time() - epoch_start,
time.time() - batch_start, pull_time, push_time))
iter_counter += 1
end_time = time.time()
print("Elapsed time = {} s".format(end_time - start_time)) | 0.277865 | 0.090374 |
from __future__ import annotations
import argparse
import io
from enum import Enum
from pathlib import Path
from typing import Type, Any
from opentrons_hardware.drivers.can_bus import (
MessageId,
FunctionCode,
NodeId,
)
class block:
"""C block generator."""
def __init__(self, output: io.StringIO, start: str, terminate: str) -> None:
"""Construct a code block context manager.
Args:
output: the buffer in which to write
start: the text that begins the block
terminate: the text that ends the block
"""
self._output = output
self._start = start
self._terminate = terminate
def __enter__(self) -> block:
"""Enter the context manager."""
self._output.write(self._start)
return self
def __exit__(self, *exc: Any) -> None:
"""Exit the context manager."""
self._output.write(self._terminate)
def run(file: Path) -> None:
"""Entry point for script."""
with io.StringIO() as output:
generate(output)
output_string = output.getvalue()
file.write_text(output_string)
print(output_string)
def generate(output: io.StringIO) -> None:
"""Generate source code into output."""
output.write("/********************************************\n")
output.write("* This is a generated file. Do not modify. *\n")
output.write("********************************************/\n")
output.write("#pragma once\n\n")
with block(
output=output,
start="namespace can_ids {\n\n",
terminate="} // namespace can_ids\n\n",
):
write_enum(FunctionCode, output)
write_enum(MessageId, output)
write_enum(NodeId, output)
def write_enum(e: Type[Enum], output: io.StringIO) -> None:
"""Generate enum class from enumeration."""
output.write(f"/** {e.__doc__} */\n")
with block(
output=output, start=f"enum class {e.__name__} {{\n", terminate="};\n\n"
):
for i in e:
output.write(f" {i.name} = 0x{i.value:x},\n")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="Generate a C++ header file defining CANBUS constants."
)
parser.add_argument(
"--target",
type=str,
required=True,
help="path of header file to generate",
)
args = parser.parse_args()
run(Path(args.target))
if __name__ == "__main__":
main() | hardware/opentrons_hardware/scripts/generate_header.py | from __future__ import annotations
import argparse
import io
from enum import Enum
from pathlib import Path
from typing import Type, Any
from opentrons_hardware.drivers.can_bus import (
MessageId,
FunctionCode,
NodeId,
)
class block:
"""C block generator."""
def __init__(self, output: io.StringIO, start: str, terminate: str) -> None:
"""Construct a code block context manager.
Args:
output: the buffer in which to write
start: the text that begins the block
terminate: the text that ends the block
"""
self._output = output
self._start = start
self._terminate = terminate
def __enter__(self) -> block:
"""Enter the context manager."""
self._output.write(self._start)
return self
def __exit__(self, *exc: Any) -> None:
"""Exit the context manager."""
self._output.write(self._terminate)
def run(file: Path) -> None:
"""Entry point for script."""
with io.StringIO() as output:
generate(output)
output_string = output.getvalue()
file.write_text(output_string)
print(output_string)
def generate(output: io.StringIO) -> None:
"""Generate source code into output."""
output.write("/********************************************\n")
output.write("* This is a generated file. Do not modify. *\n")
output.write("********************************************/\n")
output.write("#pragma once\n\n")
with block(
output=output,
start="namespace can_ids {\n\n",
terminate="} // namespace can_ids\n\n",
):
write_enum(FunctionCode, output)
write_enum(MessageId, output)
write_enum(NodeId, output)
def write_enum(e: Type[Enum], output: io.StringIO) -> None:
"""Generate enum class from enumeration."""
output.write(f"/** {e.__doc__} */\n")
with block(
output=output, start=f"enum class {e.__name__} {{\n", terminate="};\n\n"
):
for i in e:
output.write(f" {i.name} = 0x{i.value:x},\n")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="Generate a C++ header file defining CANBUS constants."
)
parser.add_argument(
"--target",
type=str,
required=True,
help="path of header file to generate",
)
args = parser.parse_args()
run(Path(args.target))
if __name__ == "__main__":
main() | 0.873363 | 0.227888 |
import random
import linecache
import vk_api
import requests
from bs4 import BeautifulSoup
import time
from vk_api import VkUpload
import configparser
import logging
import os
from datetime import datetime
def get_files(path):
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
for f in files:
if not f.startswith('.'):
yield f
def get_mal_picture():
"""
:return: Anime picture URL from MyAnimeList.net, number of attempts to find it,
and anime's ID on MAL
"""
attempts = 0
while True:
attempts += 1
mal_id = str(random.randint(1, 40000))
result = requests.get('https://myanimelist.net/anime/' + mal_id + '/a/pics')
page = result.text
soup = BeautifulSoup(page, 'html.parser')
try:
img_src = soup.find('a', class_='js-picture-gallery')['href']
except (AttributeError, TypeError):
img_src = 404
if img_src != 404:
return img_src, attempts, mal_id
else:
time.sleep(1) # Wait a second before starting a new search
def get_vndb_picture():
"""
:return: VN picture URL from vndb, number of attempts to find it,
and vndb's ID
"""
attempts = 0
while True:
attempts += 1
vndb_id = str(random.randint(1, 26400))
result = requests.get('https://vndb.org/v' + vndb_id)
page = result.text
soup = BeautifulSoup(page, 'html.parser')
try:
img_src = soup.find('div', class_='imghover--visible').img['src']
except (AttributeError, TypeError):
img_src = 404
if img_src != 404:
return img_src, attempts, vndb_id
else:
time.sleep(1) # Wait a second before starting a new search
def get_verse(filepath, min_len):
"""
:param filepath: Path to txt-file (e.g. /home/Documents/file.txt or file.txt)
:param min_len: Minimum line length
:return: A random text line (exclude ones with ':', '=' etc. last character)
"""
if filepath == '':
return '', 0
else:
attempts = 0
lines = sum(1 for line in open(filepath))
while True:
attempts += 1
line = linecache.getline(filepath, random.randint(2, lines))
line = line.rstrip()
last_sym = line[-1:]
if last_sym not in (",", ":", "=", "-") and len(line) > min_len:
return line, attempts
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
filename='events.log',
datefmt='%d-%m-%Y %H:%M:%S',
level=logging.DEBUG)
scope = 'wall,photos'
# Reading config file
config = configparser.ConfigParser()
config.read('config.ini')
login = config['Auth']['Login']
password = config['Auth']['Password']
app_id = config['Auth']['App_ID']
txt_file = config['Post']['TxtFile']
min_length = config['Post']['LineMinimumLength']
post_interval = config['Post']['PostInterval']
attach_photo = config['Post']['AttachPhoto']
owner_id = config['Post']['OwnerID']
photo_source = config['Post']['PhotoSource']
photo_location = config['Post']['PhotoLocation']
random_line = config['Post']['RandomLine']
if app_id == '':
print('Specify app id in config.ini')
quit()
if (photo_source == 'local' or photo_source == 'rand-local') and photo_location == '':
print('Specify your photo location in config.ini')
quit()
if owner_id == '':
owner_id = None
else:
owner_id = int(owner_id)
current_position = 0
while True:
session = requests.Session()
vk_session = vk_api.VkApi(login=login, password=password,
app_id=int(app_id), scope=scope)
try:
vk_session.auth()
except vk_api.AuthError as error_msg:
print(error_msg)
logging.error(error_msg)
return
vk = vk_session.get_api()
upload = VkUpload(vk_session)
attachments = []
if post_interval == '':
# post a message in random interval between 1 and 10800 seconds
post_interval = random.randint(1, 10800)
logging.info('Random interval = true')
if attach_photo == 'yes':
# Loading a picture
if photo_source == 'mal':
image_url, p_attempts, mal_id = get_mal_picture()
image = session.get(image_url, stream=True)
photo = upload.photo_wall(photos=image.raw)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
logging.info('Attempts to find a picture: %s', str(p_attempts))
logging.info('MAL ID: %s', str(mal_id))
if photo_source == 'vndb':
image_url, p_attempts, vn_id = get_vndb_picture()
image = session.get(image_url, stream=True)
photo = upload.photo_wall(photos=image.raw)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
logging.info('Attempts to find a picture: %s', str(p_attempts))
logging.info('VNDB ID: %s', str(vn_id))
if photo_source == 'rand-local':
files = list(get_files(photo_location))
image = random.choice(files)
image = photo_location + '\\' + image
photo = upload.photo_wall(photos=image)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
if photo_source == 'local':
files = list(get_files(photo_location))
if current_position >= len(files):
current_position = 0
current_position += 1
image = photo_location + '\\' + files[current_position-1]
photo = upload.photo_wall(photos=image)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
text, v_attempts = get_verse(txt_file, int(min_length))
vk.wall.post(attachment=','.join(attachments), message=text, owner_id=owner_id)
logging.info('Sent text: "%s"', text)
logging.info('Attempts to find a text: %s', str(v_attempts))
print('Message sent')
timestamp = int(time.time())
value = datetime.fromtimestamp(timestamp + int(post_interval))
next_message = value.strftime('%H:%M:%S')
print('Next message in %s seconds (%s)' % (post_interval, next_message))
time.sleep(int(post_interval))
if __name__ == '__main__':
main() | autoposter.py | import random
import linecache
import vk_api
import requests
from bs4 import BeautifulSoup
import time
from vk_api import VkUpload
import configparser
import logging
import os
from datetime import datetime
def get_files(path):
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
for f in files:
if not f.startswith('.'):
yield f
def get_mal_picture():
"""
:return: Anime picture URL from MyAnimeList.net, number of attempts to find it,
and anime's ID on MAL
"""
attempts = 0
while True:
attempts += 1
mal_id = str(random.randint(1, 40000))
result = requests.get('https://myanimelist.net/anime/' + mal_id + '/a/pics')
page = result.text
soup = BeautifulSoup(page, 'html.parser')
try:
img_src = soup.find('a', class_='js-picture-gallery')['href']
except (AttributeError, TypeError):
img_src = 404
if img_src != 404:
return img_src, attempts, mal_id
else:
time.sleep(1) # Wait a second before starting a new search
def get_vndb_picture():
"""
:return: VN picture URL from vndb, number of attempts to find it,
and vndb's ID
"""
attempts = 0
while True:
attempts += 1
vndb_id = str(random.randint(1, 26400))
result = requests.get('https://vndb.org/v' + vndb_id)
page = result.text
soup = BeautifulSoup(page, 'html.parser')
try:
img_src = soup.find('div', class_='imghover--visible').img['src']
except (AttributeError, TypeError):
img_src = 404
if img_src != 404:
return img_src, attempts, vndb_id
else:
time.sleep(1) # Wait a second before starting a new search
def get_verse(filepath, min_len):
"""
:param filepath: Path to txt-file (e.g. /home/Documents/file.txt or file.txt)
:param min_len: Minimum line length
:return: A random text line (exclude ones with ':', '=' etc. last character)
"""
if filepath == '':
return '', 0
else:
attempts = 0
lines = sum(1 for line in open(filepath))
while True:
attempts += 1
line = linecache.getline(filepath, random.randint(2, lines))
line = line.rstrip()
last_sym = line[-1:]
if last_sym not in (",", ":", "=", "-") and len(line) > min_len:
return line, attempts
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
filename='events.log',
datefmt='%d-%m-%Y %H:%M:%S',
level=logging.DEBUG)
scope = 'wall,photos'
# Reading config file
config = configparser.ConfigParser()
config.read('config.ini')
login = config['Auth']['Login']
password = config['Auth']['Password']
app_id = config['Auth']['App_ID']
txt_file = config['Post']['TxtFile']
min_length = config['Post']['LineMinimumLength']
post_interval = config['Post']['PostInterval']
attach_photo = config['Post']['AttachPhoto']
owner_id = config['Post']['OwnerID']
photo_source = config['Post']['PhotoSource']
photo_location = config['Post']['PhotoLocation']
random_line = config['Post']['RandomLine']
if app_id == '':
print('Specify app id in config.ini')
quit()
if (photo_source == 'local' or photo_source == 'rand-local') and photo_location == '':
print('Specify your photo location in config.ini')
quit()
if owner_id == '':
owner_id = None
else:
owner_id = int(owner_id)
current_position = 0
while True:
session = requests.Session()
vk_session = vk_api.VkApi(login=login, password=password,
app_id=int(app_id), scope=scope)
try:
vk_session.auth()
except vk_api.AuthError as error_msg:
print(error_msg)
logging.error(error_msg)
return
vk = vk_session.get_api()
upload = VkUpload(vk_session)
attachments = []
if post_interval == '':
# post a message in random interval between 1 and 10800 seconds
post_interval = random.randint(1, 10800)
logging.info('Random interval = true')
if attach_photo == 'yes':
# Loading a picture
if photo_source == 'mal':
image_url, p_attempts, mal_id = get_mal_picture()
image = session.get(image_url, stream=True)
photo = upload.photo_wall(photos=image.raw)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
logging.info('Attempts to find a picture: %s', str(p_attempts))
logging.info('MAL ID: %s', str(mal_id))
if photo_source == 'vndb':
image_url, p_attempts, vn_id = get_vndb_picture()
image = session.get(image_url, stream=True)
photo = upload.photo_wall(photos=image.raw)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
logging.info('Attempts to find a picture: %s', str(p_attempts))
logging.info('VNDB ID: %s', str(vn_id))
if photo_source == 'rand-local':
files = list(get_files(photo_location))
image = random.choice(files)
image = photo_location + '\\' + image
photo = upload.photo_wall(photos=image)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
if photo_source == 'local':
files = list(get_files(photo_location))
if current_position >= len(files):
current_position = 0
current_position += 1
image = photo_location + '\\' + files[current_position-1]
photo = upload.photo_wall(photos=image)[0]
attachments.append(
'photo{}_{}'.format(photo['owner_id'], photo['id'])
)
text, v_attempts = get_verse(txt_file, int(min_length))
vk.wall.post(attachment=','.join(attachments), message=text, owner_id=owner_id)
logging.info('Sent text: "%s"', text)
logging.info('Attempts to find a text: %s', str(v_attempts))
print('Message sent')
timestamp = int(time.time())
value = datetime.fromtimestamp(timestamp + int(post_interval))
next_message = value.strftime('%H:%M:%S')
print('Next message in %s seconds (%s)' % (post_interval, next_message))
time.sleep(int(post_interval))
if __name__ == '__main__':
main() | 0.146667 | 0.061565 |
import configparser
import datetime
import numpy
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from general_utils.config import config_util
from general_utils.logging import logger
from general_utils.mysql_management.mysql_tables import stock_data_table
from stock_data_analysis_module.data_processing_module.data_retrieval_module import ranged_data_retriever
from stock_data_analysis_module.indicators import moving_average
from stock_data_analysis_module.indicators import bollinger_band
from stock_data_analysis_module.indicators import stochastic_oscillator
_ENABLED_CONFIG_ID = "enabled"
def _standardize_price_data(price_data):
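# Note: this scales each value as (x - min) / max, i.e. shifted by the series minimum and divided
# by its maximum; it differs from the average-price-relative scaling used in
# generate_prediction_data (and described in generate_data's docstring).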
ret_data = numpy.copy(price_data)
ret_data = ret_data.flatten()
max_price = numpy.max(ret_data)
min_price = numpy.min(ret_data)
for i in range(len(ret_data)):
ret_data[i] = (ret_data[i]-min_price)/max_price
return ret_data.reshape(price_data.shape)
class IndicatorBlockProvider(data_provider_registry.DataProviderBase):
"""Data Provider that will provide data constructed using stock indicators normally used by stock traders
Details on these indicators can be found in the modules of the indicators package.
Additionally, this provider provides support for configurable parameters through the configuration file. These
parameters are listed in the Configurable Parameters section.
Configurable Parameters:
enable: Whether this provider is enabled for consumers to receive data from.
"""
def generate_prediction_data(self, *args, **kwargs):
"""Generates data for a Consumer wanting to make predictions about the next day's state.
This method is identical to generate_data for all but the return values. As such, for arguments
and further details, see generate_data.
Returns:
List[Tuple[str, numpy.ndarray, float, float]]. Broken down, for every stock, there is a tuple
containing the ticker, the data block generated, the average price, and the average volume.
The average price and volume is to allow for the original magnitudes of the prices and volumes to
be reconstructed should the predictions require it.
For a breakdown of the rows in the data block, see generate_data's documentation in the Returns section.
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
avg_high = numpy.average(high)
avg_low = numpy.average(low)
avg_close = numpy.average(close)
avg_price = ((avg_high * len(high)) + (avg_low * len(high)) + (avg_close * len(high))) / (len(high) * 3)
avg_vol = numpy.average(volume)
std_high = [(high[i] - avg_price) / avg_price
for i in range(len(high))]
std_low = [(low[i] - avg_price) / avg_price
for i in range(len(high))]
std_close = [(close[i] - avg_price) / avg_price
for i in range(len(high))]
volume = [(volume[i] - avg_vol) / avg_vol
for i in range(len(volume))]
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append((ticker, data_block, avg_price, avg_vol))
return ret_blocks
def write_default_configuration(self, section: "configparser.SectionProxy"):
"""Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.
"""
section[_ENABLED_CONFIG_ID] = "True"
def load_configuration(self, parser: "configparser.ConfigParser"):
"""Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.
"""
section = config_util.create_type_section(parser, self)
if not parser.has_option(section.name, _ENABLED_CONFIG_ID):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(
data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID, self)
def generate_data(self, *args, **kwargs):
"""Generates data using stock indicators over a set period of time
Generates blocks (numpy arrays) of data using indicators that are used by normal stock traders.
These include bollinger bands, simple moving average and the stochastic oscillator.
The types of data that get fed into these algorithms come from the high, low, closing, and volume columns
of the data tables in the database. Additionally, these values are standardized to allow algorithms to draw
conclusions based off the relative change in the stock, and not be blinded by the magnitude of the prices or
volumes.
This standardization process is performed by calculating the average price across the highs, lows, and closing
prices of the stock, then every element in each of the lists is updated according to the following equation
(assume that price is the high, low, or closing price being modified):
(price - avg_price) / avg_price
The same process is also performed on the volume data.
Additionally, consumers are required to pass in a positional argument through *args, and may pass in
keyword arguments. These are covered in the Arguments section below
Arguments:
*args:
Only one positional argument is required.
data_block_length: int This controls how many columns will
be present in the return data block. As a note the data block will always have 8 rows.
**kwargs:
Several keyword arguments are supported.
sma_period: int Controls how many days are considered in the calculation of the simple moving average.
For a given day x, the previous x-sma_period days will be used
bollinger_band_stdev: int Controls how many standard deviations will be used in the calculation
of the bollinger bands
bollinger_band_period: int Controls how many days will be used in the calculation of the bollinger
bands.
oscillator_period: int Controls the number of days used in the calculation of the stochastic oscillator
Returns:
Numpy.ndarray object with three dimensions. This is effectively a 3D matrix of data blocks, where each
data block will have 8 rows and data_block_length columns.
Each data block row corresponds to one data type or calculated indicator values, are listed below:
0: high price
1: low price
2: closing price
3: volume
4: simple moving average (SMA)
5: upper bollinger band
6: lower bollinger band
7: stochastic oscillator
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
std_high = _standardize_price_data(high)
std_close = _standardize_price_data(close)
std_low = _standardize_price_data(low)
volume = _standardize_price_data(volume)
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append(data_block)
return numpy.array(ret_blocks, dtype=numpy.float32)
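# Hedged usage sketch (assumes a consumer requesting 30-column blocks; the keyword values
# shown are illustrative, not requirements):
#
#     provider = IndicatorBlockProvider()
#     blocks = provider.generate_data(30, sma_period=20, oscillator_period=14)
#     # blocks.shape == (number_of_tickers_with_enough_history, 8, 30)
#     sma_row = blocks[0][4]  # row 4 of each block holds the simple moving average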
def __init__(self):
"""Initializes IndicatorBlockProvider and registers the instance with the global DataProviderRegistry
"""
super(IndicatorBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self)
self.default_kwargs = {
"sma_period": 50,
"bollinger_band_stdev": 2,
"bollinger_band_period": 20,
"oscillator_period": 17
}
provider = IndicatorBlockProvider() | src/data_providing_module/data_providers/indicator_block_provider.py | import configparser
import datetime
import numpy
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from general_utils.config import config_util
from general_utils.logging import logger
from general_utils.mysql_management.mysql_tables import stock_data_table
from stock_data_analysis_module.data_processing_module.data_retrieval_module import ranged_data_retriever
from stock_data_analysis_module.indicators import moving_average
from stock_data_analysis_module.indicators import bollinger_band
from stock_data_analysis_module.indicators import stochastic_oscillator
_ENABLED_CONFIG_ID = "enabled"
def _standardize_price_data(price_data):
ret_data = numpy.copy(price_data)
ret_data = ret_data.flatten()
max_price = numpy.max(ret_data)
min_price = numpy.min(ret_data)
for i in range(len(ret_data)):
ret_data[i] = (ret_data[i]-min_price)/max_price
return ret_data.reshape(price_data.shape)
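# Worked example: _standardize_price_data(numpy.array([10., 20., 30.])) scales each element
# as (x - min) / max, i.e. (x - 10) / 30, giving roughly [0.0, 0.333, 0.667]. Note that this
# differs from the (price - avg_price) / avg_price formula described in generate_data's
# docstring; generate_prediction_data applies that average-based formula inline instead.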
class IndicatorBlockProvider(data_provider_registry.DataProviderBase):
"""Data Provider that will provide data constructed using stock indicators normally used by stock traders
Details on these indicators can be found in the modules of the indicators package.
Additionally, this provider provides support for configurable parameters through the configuration file. These
parameters are listed in the Configurable Parameters section.
Configurable Parameters:
enabled: Whether this provider is enabled for consumers to receive data from.
"""
def generate_prediction_data(self, *args, **kwargs):
"""Generates data for a Consumer wanting to make predictions about the next day's state.
This method is identical to generate_data for all but the return values. As such, for arguments
and further details, see generate_data.
Returns:
List[Tuple[str, numpy.ndarray, float, float]]. Broken down, for every stock, there is a tuple
containing the ticker, the data block generated, the average price, and the average volume.
The average price and volume are included so that the original magnitudes of the prices and volumes
can be reconstructed should the predictions require it.
For a breakdown of the rows in the data block, see generate_data's documentation in the Returns section.
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
avg_high = numpy.average(high)
avg_low = numpy.average(low)
avg_close = numpy.average(close)
avg_price = ((avg_high * len(high)) + (avg_low * len(high)) + (avg_close * len(high))) / (len(high) * 3)
avg_vol = numpy.average(volume)
std_high = [(high[i] - avg_price) / avg_price
for i in range(len(high))]
std_low = [(low[i] - avg_price) / avg_price
for i in range(len(high))]
std_close = [(close[i] - avg_price) / avg_price
for i in range(len(high))]
volume = [(volume[i] - avg_vol) / avg_vol
for i in range(len(volume))]
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append((ticker, data_block, avg_price, avg_vol))
return ret_blocks
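# Hedged sketch of reconstructing original magnitudes from a prediction tuple (the loop and
# variable names are illustrative):
#
#     for ticker, block, avg_price, avg_vol in provider.generate_prediction_data(30):
#         close_prices = block[2] * avg_price + avg_price   # inverse of (price - avg) / avg
#         volumes = block[3] * avg_vol + avg_vol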
def write_default_configuration(self, section: "configparser.SectionProxy"):
"""Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.
"""
section[_ENABLED_CONFIG_ID] = "True"
def load_configuration(self, parser: "configparser.ConfigParser"):
"""Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.
"""
section = config_util.create_type_section(parser, self)
if not parser.has_option(section.name, _ENABLED_CONFIG_ID):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(
data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID, self)
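# The only configurable option read here is "enabled". A minimal sketch of the expected
# section (the section name comes from config_util.create_type_section and is shown here
# only as a placeholder):
#
#     [IndicatorBlockProvider]
#     enabled = True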
def generate_data(self, *args, **kwargs):
"""Generates data using stock indicators over a set period of time
Generates blocks (numpy arrays) of data using indicators that are used by normal stock traders.
These include bollinger bands, simple moving average and the stochastic oscillator.
The types of data that get fed into these algorithms come from the high, low, closing, and volume columns
of the data tables in the database. Additionally, these values are standardized to allow algorithms to draw
conclusions based on the relative change in the stock, and not be blinded by the magnitude of the prices or
volumes.
This standardization process is performed by calculating the average price across the highs, lows, and closing
prices of the stock, then every element in each of the lists is updated according to the following equation
(assume that price is the high, low, or closing price being modified):
(price - avg_price) / avg_price
The same process is also performed on the volume data.
Additionally, consumers are required to pass in a positional argument through *args, and may pass in
keyword arguments. These are covered in the Arguments section below
Arguments:
*args:
Only one positional argument is required.
data_block_length: int This controls how many columns will
be present in the return data block. As a note the data block will always have 8 rows.
**kwargs:
Several keyword arguments are supported.
sma_period: int Controls how many days are considered in the calculation of the simple moving average.
For a given day x, the previous sma_period days of data (days x-sma_period through x) are used.
bollinger_band_stdev: int Controls how many standard deviations will be used in the calculation
of the bollinger bands
bollinger_band_period: int Controls how many days will be used in the calculation of the bollinger
bands.
oscillator_period: int Controls the number of days used in the calculation of the stochastic oscillator
Returns:
Numpy.ndarray object with three dimensions. This is effectively a 3D matrix of data blocks, where each
data block will have 8 rows and data_block_length columns.
Each data block row corresponds to one data type or calculated indicator value, as listed below:
0: high price
1: low price
2: closing price
3: volume
4: simple moving average (SMA)
5: upper bollinger band
6: lower bollinger band
7: stochastic oscillator
"""
if len(args) < 1:
raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
data_block_length = args[0]
max_additional_period = 0
for key, value in self.default_kwargs.items():
if key not in kwargs:
kwargs[key] = self.default_kwargs[key]
if key.endswith("period") and value > max_additional_period:
max_additional_period = value
padded_data_block_length = max_additional_period + data_block_length
start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
data_retriever = ranged_data_retriever.RangedDataRetriever(
[
stock_data_table.HIGH_PRICE_COLUMN_NAME,
stock_data_table.LOW_PRICE_COLUMN_NAME,
stock_data_table.CLOSING_PRICE_COLUMN_NAME,
stock_data_table.VOLUME_COLUMN_NAME
],
start_date,
end_date)
ret_blocks = []
for ticker, sources in data_retriever.data_sources.items():
ticker_data = data_retriever.retrieveData(ticker, sources[0])
ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
high = ticker_data[:, 0]
low = ticker_data[:, 1]
close = ticker_data[:, 2]
volume = ticker_data[:, 3]
# high, low, close, volume = ticker_data # unpack manually
std_high = _standardize_price_data(high)
std_close = _standardize_price_data(close)
std_low = _standardize_price_data(low)
volume = _standardize_price_data(volume)
if len(std_high) < padded_data_block_length:
len_warning = (
"Could not process %s into an indicator block, "
"needed %d days of trading data but received %d" %
(ticker, padded_data_block_length, len(std_high))
)
logger.logger.log(logger.WARNING, len_warning)
continue
sma = moving_average.SMA(std_close, kwargs['sma_period'])
sma = sma[-data_block_length:]
boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
smoothing_period=kwargs["bollinger_band_period"],
standard_deviations=kwargs["bollinger_band_stdev"]
)
oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
low, kwargs['oscillator_period'])
oscillator = oscillator[-data_block_length:]
oscillator /= 100
data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
data_block[0] = std_high[-data_block_length:]
data_block[1] = std_low[-data_block_length:]
data_block[2] = std_close[-data_block_length:]
data_block[3] = volume[-data_block_length:]
data_block[4] = sma
data_block[5] = boll_band[0][-data_block_length:]
data_block[6] = boll_band[1][-data_block_length:]
data_block[7] = oscillator
ret_blocks.append(data_block)
return numpy.array(ret_blocks, dtype=numpy.float32)
def __init__(self):
"""Initializes IndicatorBlockProvider and registers the instance with the global DataProviderRegistry
"""
super(IndicatorBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self)
self.default_kwargs = {
"sma_period": 50,
"bollinger_band_stdev": 2,
"bollinger_band_period": 20,
"oscillator_period": 17
}
provider = IndicatorBlockProvider() | 0.542136 | 0.329823 |
from kqueen.kubeapi import KubernetesAPI
from kubernetes.client.rest import ApiException
from pprint import pprint as print
import pytest
import yaml
import kubernetes
def fake_raise(exc):
def fn(self, *args, **kwargs):
raise exc
return fn
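# fake_raise builds a stand-in method: whatever arguments it is called with, it raises the
# given exception. It is used below to make the kubernetes client methods fail on demand,
# e.g. monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_node', fake_raise(ApiException)).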
class TestKubeApi:
def test_missing_cluster_param(self):
with pytest.raises(ValueError, match='Missing parameter cluster'):
KubernetesAPI()
def test_get_api_client(self, cluster):
api = KubernetesAPI(cluster=cluster)
api_client = api.get_api_client()
print(api_client)
def test_init(self, cluster):
cluster.save()
api = KubernetesAPI(cluster=cluster)
assert hasattr(api, 'cluster')
def test_version(self, cluster):
api = KubernetesAPI(cluster=cluster)
version = api.get_version()
print(version)
assert isinstance(version, dict)
assert 'git_version' in version
assert 'platform' in version
def test_list_nodes(self, cluster):
api = KubernetesAPI(cluster=cluster)
nodes = api.list_nodes()
assert isinstance(nodes, list)
@pytest.mark.parametrize('method_name', [
'list_nodes',
'list_pods',
'list_pods_by_node',
'count_pods_by_node',
'resources_by_node',
'list_services',
'list_deployments',
])
def test_raise_apiexception(self, cluster, monkeypatch, method_name):
# monkeypatch all kubernetes-client resources used
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_node', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_pod_for_all_namespaces', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_service_for_all_namespaces', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.ExtensionsV1beta1Api, 'list_deployment_for_all_namespaces', fake_raise(ApiException))
api = KubernetesAPI(cluster=cluster)
method = getattr(api, method_name)
with pytest.raises(ApiException):
method()
def test_pod_list(self, cluster):
api = KubernetesAPI(cluster=cluster)
pods = api.list_pods()
assert isinstance(pods, list)
def test_list_pods_by_node(self, cluster):
api = KubernetesAPI(cluster=cluster)
pods = api.list_pods_by_node()
assert isinstance(pods, dict)
def test_list_services(self, cluster):
api = KubernetesAPI(cluster=cluster)
services = api.list_services()
assert isinstance(services, list)
def test_extract_service_addon(self, cluster):
service = {
'metadata': {
'annotations': {
'kqueen/name': 'Addon name',
'kqueen/icon': 'http://icon',
'kqueen/link': 'http://link',
'other': 'other annotation',
}
}
}
api = KubernetesAPI(cluster=cluster)
extracted = api._extract_annotation(service)
assert extracted['name'] == 'Addon name'
assert extracted['icon'] == 'http://icon'
assert 'other' not in extracted
def test_list_deployments(self, cluster):
api = KubernetesAPI(cluster=cluster)
deployments = api.list_deployments()
assert isinstance(deployments, list)
def test_resource_by_node(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.resources_by_node()
assert isinstance(resources, dict)
def test_resource_by_node_faked(self, cluster, monkeypatch):
def fake_list_pods(self):
with open('kqueen/fixtures/testdata_list_pods_by_node.yml', 'r') as stream:
data_loaded = yaml.safe_load(stream)
return data_loaded
monkeypatch.setattr(KubernetesAPI, 'list_pods_by_node', fake_list_pods)
api = KubernetesAPI(cluster=cluster)
resources = api.resources_by_node()
req = {
'minion1': {
'limits': {'cpu': 5.0, 'memory': 2147483648.0},
'requests': {'cpu': 1.1, 'memory': 512102400.0}
}
}
print(resources)
assert resources == req
@pytest.mark.usefixtures('cluster')
class TestVolumes:
def test_persistent_volumes(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.list_persistent_volumes()
assert isinstance(resources, list)
def test_persistent_volume_claims(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.list_persistent_volume_claims()
assert isinstance(resources, list) | kqueen/tests/test_kubeapi.py | from kqueen.kubeapi import KubernetesAPI
from kubernetes.client.rest import ApiException
from pprint import pprint as print
import pytest
import yaml
import kubernetes
def fake_raise(exc):
def fn(self, *args, **kwargs):
raise exc
return fn
class TestKubeApi:
def test_missing_cluster_param(self):
with pytest.raises(ValueError, match='Missing parameter cluster'):
KubernetesAPI()
def test_get_api_client(self, cluster):
api = KubernetesAPI(cluster=cluster)
api_client = api.get_api_client()
print(api_client)
def test_init(self, cluster):
cluster.save()
api = KubernetesAPI(cluster=cluster)
assert hasattr(api, 'cluster')
def test_version(self, cluster):
api = KubernetesAPI(cluster=cluster)
version = api.get_version()
print(version)
assert isinstance(version, dict)
assert 'git_version' in version
assert 'platform' in version
def test_list_nodes(self, cluster):
api = KubernetesAPI(cluster=cluster)
nodes = api.list_nodes()
assert isinstance(nodes, list)
@pytest.mark.parametrize('method_name', [
'list_nodes',
'list_pods',
'list_pods_by_node',
'count_pods_by_node',
'resources_by_node',
'list_services',
'list_deployments',
])
def test_raise_apiexception(self, cluster, monkeypatch, method_name):
# monkeypatch all kubernetes-client resources used
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_node', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_pod_for_all_namespaces', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.CoreV1Api, 'list_service_for_all_namespaces', fake_raise(ApiException))
monkeypatch.setattr(kubernetes.client.ExtensionsV1beta1Api, 'list_deployment_for_all_namespaces', fake_raise(ApiException))
api = KubernetesAPI(cluster=cluster)
method = getattr(api, method_name)
with pytest.raises(ApiException):
method()
def test_pod_list(self, cluster):
api = KubernetesAPI(cluster=cluster)
pods = api.list_pods()
assert isinstance(pods, list)
def test_list_pods_by_node(self, cluster):
api = KubernetesAPI(cluster=cluster)
pods = api.list_pods_by_node()
assert isinstance(pods, dict)
def test_list_services(self, cluster):
api = KubernetesAPI(cluster=cluster)
services = api.list_services()
assert isinstance(services, list)
def test_extract_service_addon(self, cluster):
service = {
'metadata': {
'annotations': {
'kqueen/name': 'Addon name',
'kqueen/icon': 'http://icon',
'kqueen/link': 'http://link',
'other': 'other annotation',
}
}
}
api = KubernetesAPI(cluster=cluster)
extracted = api._extract_annotation(service)
assert extracted['name'] == 'Addon name'
assert extracted['icon'] == 'http://icon'
assert 'other' not in extracted
def test_list_deployments(self, cluster):
api = KubernetesAPI(cluster=cluster)
deployments = api.list_deployments()
assert isinstance(deployments, list)
def test_resource_by_node(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.resources_by_node()
assert isinstance(resources, dict)
def test_resource_by_node_faked(self, cluster, monkeypatch):
def fake_list_pods(self):
with open('kqueen/fixtures/testdata_list_pods_by_node.yml', 'r') as stream:
data_loaded = yaml.safe_load(stream)
return data_loaded
monkeypatch.setattr(KubernetesAPI, 'list_pods_by_node', fake_list_pods)
api = KubernetesAPI(cluster=cluster)
resources = api.resources_by_node()
req = {
'minion1': {
'limits': {'cpu': 5.0, 'memory': 2147483648.0},
'requests': {'cpu': 1.1, 'memory': 512102400.0}
}
}
print(resources)
assert resources == req
@pytest.mark.usefixtures('cluster')
class TestVolumes:
def test_persistent_volumes(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.list_persistent_volumes()
assert isinstance(resources, list)
def test_persistent_volume_claims(self, cluster):
api = KubernetesAPI(cluster=cluster)
resources = api.list_persistent_volume_claims()
assert isinstance(resources, list) | 0.692642 | 0.29931 |
import sys
from PyQt5.QtCore import *
class WorkerSignals(QObject):
"""PyQt signals custom class"""
program_finished = pyqtSignal()
program_error = pyqtSignal(BaseException)
result = pyqtSignal(object)
def __init__(self) -> None:
super().__init__()
class LongWorker(QRunnable):
"""
Worker thread
Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
"""
# Shared at class level: every LongWorker instance emits through this same WorkerSignals object
signals = WorkerSignals()
def __init__(self, func=None, *args, **kwargs) -> None:
super().__init__()
self.func = func
self.args = args
self.kwargs = kwargs
def set_params(self, func, *args, **kwargs) -> None:
self.func = func
self.args = args
self.kwargs = kwargs
@pyqtSlot()
def run(self) -> None:
"""Run method of Worker class. Tries to execute a given function and emits a signal"""
try:
# Unpacking both args and kwargs covers every combination, including when either is empty
output = self.func(*self.args, **self.kwargs)
self.signals.program_finished.emit()
except Exception as error:
self.signals.program_error.emit(error)
else:
self.signals.result.emit(output)
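# Hedged usage sketch (a minimal example, assuming a running Qt application and a slot named
# handle_result are already set up; the function and argument names are illustrative):
#
#     pool = QThreadPool.globalInstance()
#     worker = LongWorker(some_long_function, arg1, key=value)
#     worker.signals.result.connect(handle_result)
#     pool.start(worker)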
class EmittingStream(QObject):
"""Custom class that catches sys.stdout info and gives it back to a function"""
textWritten = pyqtSignal(str)
def write(self, text) -> None:
self.textWritten.emit(str(text))
def __del__(self) -> None:
sys.stdout = sys.__stdout__ | AppVoor/resources/frontend_scripts/parallel.py | import sys
from PyQt5.QtCore import *
class WorkerSignals(QObject):
"""PyQt signals custom class"""
program_finished = pyqtSignal()
program_error = pyqtSignal(BaseException)
result = pyqtSignal(object)
def __init__(self) -> None:
super().__init__()
class LongWorker(QRunnable):
"""
Worker thread
Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
"""
# Shared at class level: every LongWorker instance emits through this same WorkerSignals object
signals = WorkerSignals()
def __init__(self, func=None, *args, **kwargs) -> None:
super().__init__()
self.func = func
self.args = args
self.kwargs = kwargs
def set_params(self, func, *args, **kwargs) -> None:
self.func = func
self.args = args
self.kwargs = kwargs
@pyqtSlot()
def run(self) -> None:
"""Run method of Worker class. Tries to execute a given function and emits a signal"""
try:
# Unpacking both args and kwargs covers every combination, including when either is empty
output = self.func(*self.args, **self.kwargs)
self.signals.program_finished.emit()
except Exception as error:
self.signals.program_error.emit(error)
else:
self.signals.result.emit(output)
class EmittingStream(QObject):
"""Custom class that catches sys.stdout info and gives it back to a function"""
textWritten = pyqtSignal(str)
def write(self, text) -> None:
self.textWritten.emit(str(text))
def __del__(self) -> None:
sys.stdout = sys.__stdout__ | 0.371023 | 0.21034 |
from metagraph import translator
from metagraph.plugins import has_scipy, has_networkx, has_grblas, has_pandas
import numpy as np
if has_scipy:
import scipy.sparse as ss
from .types import ScipyEdgeMap, ScipyEdgeSet, ScipyGraph
@translator
def edgemap_to_edgeset(x: ScipyEdgeMap, **props) -> ScipyEdgeSet:
aprops = ScipyEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})
data = x.value.copy()
# Force all values to be 1's to indicate no weights
data.data = np.ones_like(data.data)
return ScipyEdgeSet(data, x.node_list, aprops=aprops)
if has_scipy and has_networkx:
import networkx as nx
from ..networkx.types import NetworkXGraph
@translator
def graph_from_networkx(x: NetworkXGraph, **props) -> ScipyGraph:
aprops = NetworkXGraph.Type.compute_abstract_properties(
x, {"node_type", "edge_type", "node_dtype", "edge_dtype", "is_directed"}
)
node_list = list(sorted(x.value.nodes()))
node_vals = None
if aprops["node_type"] == "map":
node_vals = np.array(
[x.value.nodes[n].get(x.node_weight_label) for n in node_list]
)
weight = x.edge_weight_label if aprops["edge_type"] == "map" else None
m = nx.convert_matrix.to_scipy_sparse_matrix(
x.value, nodelist=node_list, weight=weight, dtype=aprops["edge_dtype"]
)
return ScipyGraph(m, node_list, node_vals, aprops=aprops)
if has_scipy and has_grblas:
import scipy.sparse as ss
from ..graphblas.types import (
GrblasMatrixType,
GrblasGraph,
GrblasEdgeSet,
GrblasEdgeMap,
dtype_grblas_to_mg,
find_active_nodes,
)
@translator
def edgeset_from_graphblas(x: GrblasEdgeSet, **props) -> ScipyEdgeSet:
aprops = GrblasEdgeSet.Type.compute_abstract_properties(x, {"is_directed"})
active_nodes = find_active_nodes(x.value)
gm = x.value[active_nodes, active_nodes].new()
rows, cols, _ = gm.to_values()
sm = ss.coo_matrix(
(np.ones_like(rows), (rows, cols)), shape=gm.shape, dtype=bool
)
return ScipyEdgeSet(sm, node_list=active_nodes, aprops=aprops)
@translator
def edgemap_from_graphblas(x: GrblasEdgeMap, **props) -> ScipyEdgeMap:
aprops = GrblasEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})
active_nodes = find_active_nodes(x.value)
gm = x.value[active_nodes, active_nodes].new()
rows, cols, vals = gm.to_values()
sm = ss.coo_matrix(
(vals, (rows, cols)),
dtype=dtype_grblas_to_mg[x.value.dtype.name],
shape=gm.shape,
)
return ScipyEdgeMap(sm, node_list=active_nodes, aprops=aprops)
@translator
def graph_from_graphblas(x: GrblasGraph, **props) -> ScipyGraph:
aprops = GrblasGraph.Type.compute_abstract_properties(
x, {"node_type", "edge_type", "node_dtype", "edge_dtype", "is_directed"}
)
node_list, node_vals = x.nodes.to_values()
if aprops["node_type"] == "set":
node_vals = None
size = len(node_list)
compressed = x.value[node_list, node_list].new()
rows, cols, vals = compressed.to_values()
if aprops["edge_type"] == "map":
dtype = dtype_grblas_to_mg[x.value.dtype.name]
matrix = ss.coo_matrix(
(vals, (rows, cols)), shape=(size, size), dtype=dtype
)
elif aprops["edge_type"] == "set":
ones = np.ones_like(rows)
matrix = ss.coo_matrix((ones, (rows, cols)), shape=(size, size), dtype=bool)
else: # pragma: no cover
raise TypeError(f"Cannot translate with edge_type={aprops['edge_type']}")
return ScipyGraph(matrix, node_list, node_vals, aprops=aprops)
if has_scipy and has_pandas:
import pandas as pd
from ..pandas.types import PandasEdgeMap, PandasEdgeSet
@translator
def edgemap_from_pandas(x: PandasEdgeMap, **props) -> ScipyEdgeMap:
is_directed = x.is_directed
node_list = pd.unique(x.value[[x.src_label, x.dst_label]].values.ravel("K"))
node_list.sort()
num_nodes = len(node_list)
id2pos = dict(map(reversed, enumerate(node_list)))
get_id_pos = lambda node_id: id2pos[node_id]
source_positions = x.value[x.src_label].map(get_id_pos)
target_positions = x.value[x.dst_label].map(get_id_pos)
weights = x.value[x.weight_label]
# Mirror each edge (skipping self-loops, which already appear once) so the matrix is symmetric
if not is_directed:
nonself = source_positions != target_positions
source_positions, target_positions = (
pd.concat([source_positions, target_positions[nonself]]),
pd.concat([target_positions, source_positions[nonself]]),
)
weights = pd.concat([weights, weights[nonself]])
matrix = ss.coo_matrix(
(weights, (source_positions, target_positions)),
shape=(num_nodes, num_nodes),
)
return ScipyEdgeMap(matrix, node_list, aprops={"is_directed": is_directed})
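# The undirected branch above mirrors (src, dst) pairs so the COO matrix comes out symmetric.
# A minimal standalone sketch of the same idea using only scipy/numpy (toy data, not part of
# the translator API):
#
#     import numpy as np, scipy.sparse as sp
#     rows, cols, w = np.array([0, 1]), np.array([1, 1]), np.array([2.0, 5.0])
#     nonself = rows != cols
#     rows2 = np.concatenate([rows, cols[nonself]])
#     cols2 = np.concatenate([cols, rows[nonself]])
#     w2 = np.concatenate([w, w[nonself]])
#     m = sp.coo_matrix((w2, (rows2, cols2)), shape=(2, 2))  # (0,1) and (1,0) both hold 2.0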
@translator
def edgeset_from_pandas(x: PandasEdgeSet, **props) -> ScipyEdgeSet:
is_directed = x.is_directed
node_list = pd.unique(x.value[[x.src_label, x.dst_label]].values.ravel("K"))
node_list.sort()
num_nodes = len(node_list)
id2pos = dict(map(reversed, enumerate(node_list)))
get_id_pos = lambda node_id: id2pos[node_id]
source_positions = x.value[x.src_label].map(get_id_pos)
target_positions = x.value[x.dst_label].map(get_id_pos)
# Mirror each edge (skipping self-loops, which already appear once) so the matrix is symmetric
if not is_directed:
nonself = source_positions != target_positions
source_positions, target_positions = (
pd.concat([source_positions, target_positions[nonself]]),
pd.concat([target_positions, source_positions[nonself]]),
)
matrix = ss.coo_matrix(
(np.ones(len(source_positions)), (source_positions, target_positions)),
shape=(num_nodes, num_nodes),
)
return ScipyEdgeSet(matrix, node_list, aprops={"is_directed": is_directed}) | metagraph/plugins/scipy/translators.py | from metagraph import translator
from metagraph.plugins import has_scipy, has_networkx, has_grblas, has_pandas
import numpy as np
if has_scipy:
import scipy.sparse as ss
from .types import ScipyEdgeMap, ScipyEdgeSet, ScipyGraph
@translator
def edgemap_to_edgeset(x: ScipyEdgeMap, **props) -> ScipyEdgeSet:
aprops = ScipyEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})
data = x.value.copy()
# Force all values to be 1's to indicate no weights
data.data = np.ones_like(data.data)
return ScipyEdgeSet(data, x.node_list, aprops=aprops)
if has_scipy and has_networkx:
import networkx as nx
from ..networkx.types import NetworkXGraph
@translator
def graph_from_networkx(x: NetworkXGraph, **props) -> ScipyGraph:
aprops = NetworkXGraph.Type.compute_abstract_properties(
x, {"node_type", "edge_type", "node_dtype", "edge_dtype", "is_directed"}
)
node_list = list(sorted(x.value.nodes()))
node_vals = None
if aprops["node_type"] == "map":
node_vals = np.array(
[x.value.nodes[n].get(x.node_weight_label) for n in node_list]
)
weight = x.edge_weight_label if aprops["edge_type"] == "map" else None
m = nx.convert_matrix.to_scipy_sparse_matrix(
x.value, nodelist=node_list, weight=weight, dtype=aprops["edge_dtype"]
)
return ScipyGraph(m, node_list, node_vals, aprops=aprops)
if has_scipy and has_grblas:
import scipy.sparse as ss
from ..graphblas.types import (
GrblasMatrixType,
GrblasGraph,
GrblasEdgeSet,
GrblasEdgeMap,
dtype_grblas_to_mg,
find_active_nodes,
)
@translator
def edgeset_from_graphblas(x: GrblasEdgeSet, **props) -> ScipyEdgeSet:
aprops = GrblasEdgeSet.Type.compute_abstract_properties(x, {"is_directed"})
active_nodes = find_active_nodes(x.value)
gm = x.value[active_nodes, active_nodes].new()
rows, cols, _ = gm.to_values()
sm = ss.coo_matrix(
(np.ones_like(rows), (rows, cols)), shape=gm.shape, dtype=bool
)
return ScipyEdgeSet(sm, node_list=active_nodes, aprops=aprops)
@translator
def edgemap_from_graphblas(x: GrblasEdgeMap, **props) -> ScipyEdgeMap:
aprops = GrblasEdgeMap.Type.compute_abstract_properties(x, {"is_directed"})
active_nodes = find_active_nodes(x.value)
gm = x.value[active_nodes, active_nodes].new()
rows, cols, vals = gm.to_values()
sm = ss.coo_matrix(
(vals, (rows, cols)),
dtype=dtype_grblas_to_mg[x.value.dtype.name],
shape=gm.shape,
)
return ScipyEdgeMap(sm, node_list=active_nodes, aprops=aprops)
@translator
def graph_from_graphblas(x: GrblasGraph, **props) -> ScipyGraph:
aprops = GrblasGraph.Type.compute_abstract_properties(
x, {"node_type", "edge_type", "node_dtype", "edge_dtype", "is_directed"}
)
node_list, node_vals = x.nodes.to_values()
if aprops["node_type"] == "set":
node_vals = None
size = len(node_list)
compressed = x.value[node_list, node_list].new()
rows, cols, vals = compressed.to_values()
if aprops["edge_type"] == "map":
dtype = dtype_grblas_to_mg[x.value.dtype.name]
matrix = ss.coo_matrix(
(vals, (rows, cols)), shape=(size, size), dtype=dtype
)
elif aprops["edge_type"] == "set":
ones = np.ones_like(rows)
matrix = ss.coo_matrix((ones, (rows, cols)), shape=(size, size), dtype=bool)
else: # pragma: no cover
raise TypeError(f"Cannot translate with edge_type={aprops['edge_type']}")
return ScipyGraph(matrix, node_list, node_vals, aprops=aprops)
if has_scipy and has_pandas:
import pandas as pd
from ..pandas.types import PandasEdgeMap, PandasEdgeSet
@translator
def edgemap_from_pandas(x: PandasEdgeMap, **props) -> ScipyEdgeMap:
is_directed = x.is_directed
node_list = pd.unique(x.value[[x.src_label, x.dst_label]].values.ravel("K"))
node_list.sort()
num_nodes = len(node_list)
id2pos = dict(map(reversed, enumerate(node_list)))
get_id_pos = lambda node_id: id2pos[node_id]
source_positions = x.value[x.src_label].map(get_id_pos)
target_positions = x.value[x.dst_label].map(get_id_pos)
weights = x.value[x.weight_label]
# Mirror each edge (skipping self-loops, which already appear once) so the matrix is symmetric
if not is_directed:
nonself = source_positions != target_positions
source_positions, target_positions = (
pd.concat([source_positions, target_positions[nonself]]),
pd.concat([target_positions, source_positions[nonself]]),
)
weights = pd.concat([weights, weights[nonself]])
matrix = ss.coo_matrix(
(weights, (source_positions, target_positions)),
shape=(num_nodes, num_nodes),
)
return ScipyEdgeMap(matrix, node_list, aprops={"is_directed": is_directed})
@translator
def edgeset_from_pandas(x: PandasEdgeSet, **props) -> ScipyEdgeSet:
is_directed = x.is_directed
node_list = pd.unique(x.value[[x.src_label, x.dst_label]].values.ravel("K"))
node_list.sort()
num_nodes = len(node_list)
id2pos = dict(map(reversed, enumerate(node_list)))
get_id_pos = lambda node_id: id2pos[node_id]
source_positions = x.value[x.src_label].map(get_id_pos)
target_positions = x.value[x.dst_label].map(get_id_pos)
# Mirror each edge (skipping self-loops, which already appear once) so the matrix is symmetric
if not is_directed:
nonself = source_positions != target_positions
source_positions, target_positions = (
pd.concat([source_positions, target_positions[nonself]]),
pd.concat([target_positions, source_positions[nonself]]),
)
matrix = ss.coo_matrix(
(np.ones(len(source_positions)), (source_positions, target_positions)),
shape=(num_nodes, num_nodes),
)
return ScipyEdgeSet(matrix, node_list, aprops={"is_directed": is_directed}) | 0.583441 | 0.394376 |
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
AveragePooling2D,
ZeroPadding2D,
GlobalAveragePooling2D,
)
from tensorflow.keras.layers import (
Flatten,
Dense,
Dropout,
BatchNormalization,
Activation,
Convolution2D,
)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate
from tensorflow.keras import optimizers, regularizers
from tensorflow.keras.initializers import he_normal
import settings
USE_BN = True
LRN2D_NORM = True
DROPOUT = 0.2
CONCAT_AXIS = 3
WEIGHT_DECAY = 1e-4
DATA_FORMAT = "channels_last"
def conv_block(
x, nb_filter, nb_row, nb_col, border_mode="same", subsample=(1, 1), bias=False
):
"""
x = Convolution2D(
nb_filter,
nb_row,
nb_col,
subsample=subsample,
border_mode=border_mode,
bias=bias,
init="he_normal",
dim_ordering="tf",
W_regularizer=regularizers.l2(weight_decay),
)(x)
"""
x = Conv2D(
nb_filter,
(nb_row, nb_col),
strides=subsample,
padding=border_mode,
use_bias=bias,
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
)(x)
x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
x = Activation("relu")(x)
return x
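# Example: conv_block(x, 64, 1, 7) builds a 64-filter 1x7 convolution followed by batch
# normalization and ReLU; the inception_module2 pathways below use it for the factorized
# 1x7 / 7x1 convolutions.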
def inception_module1(
x,
params,
concat_axis,
padding="same",
data_format=DATA_FORMAT,
use_bias=True,
kernel_initializer="he_normal",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
lrn2d_norm=LRN2D_NORM,
weight_decay=WEIGHT_DECAY,
):
(branch1, branch2, branch3, branch4) = params
if weight_decay:
kernel_regularizer = regularizers.l2(weight_decay)
bias_regularizer = regularizers.l2(weight_decay)
else:
kernel_regularizer = None
bias_regularizer = None
# 1x1
pathway1 = Conv2D(
filters=branch1[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
# 1x1->3x3
pathway2 = Conv2D(
filters=branch2[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2 = Conv2D(
filters=branch2[1],
kernel_size=(3, 3),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway2)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
# 1x1->3x3+3x3
pathway3 = Conv2D(
filters=branch3[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
pathway3 = Conv2D(
filters=branch3[1],
kernel_size=(3, 3),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway3)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
pathway3 = Conv2D(
filters=branch3[1],
kernel_size=(3, 3),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway3)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
# 3x3->1x1
pathway4 = AveragePooling2D(
pool_size=(3, 3), strides=1, padding=padding, data_format=DATA_FORMAT
)(x)
pathway4 = Conv2D(
filters=branch4[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway4)
pathway4 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway4)
)
return concatenate([pathway1, pathway2, pathway3, pathway4], axis=concat_axis)
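# The module output is the channel-wise concatenation of the four pathways, so its depth is
# the sum of the final filter counts. For example, params [(64,), (48, 64), (64, 96), (32,)]
# yields 64 + 64 + 96 + 32 = 256 channels, matching the "# 3a 256" annotation in create_model.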
def inception_reduce1(
x,
params,
concat_axis,
padding="same",
data_format=DATA_FORMAT,
use_bias=True,
kernel_initializer="he_normal",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
lrn2d_norm=LRN2D_NORM,
weight_decay=WEIGHT_DECAY,
):
(branch1, branch2) = params
if weight_decay:
kernel_regularizer = regularizers.l2(weight_decay)
bias_regularizer = regularizers.l2(weight_decay)
else:
kernel_regularizer = None
bias_regularizer = None
# 3x3, stride 2
pathway1 = Conv2D(
filters=branch1[0],
kernel_size=(3, 3),
strides=2,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
# 1x1->3x3+3x3
pathway2 = Conv2D(
filters=branch2[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2 = Conv2D(
filters=branch2[1],
kernel_size=(3, 3),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway2)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2 = Conv2D(
filters=branch2[1],
kernel_size=(3, 3),
strides=2,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway2)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
# 3x3 max pool, stride 2
pathway3 = MaxPooling2D(
pool_size=(3, 3), strides=2, padding=padding, data_format=DATA_FORMAT
)(x)
return concatenate([pathway1, pathway2, pathway3], axis=concat_axis)
def inception_module2(
x,
params,
concat_axis,
padding="same",
data_format=DATA_FORMAT,
use_bias=True,
kernel_initializer="he_normal",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
lrn2d_norm=LRN2D_NORM,
weight_decay=WEIGHT_DECAY,
):
(branch1, branch2, branch3, branch4) = params
if weight_decay:
kernel_regularizer = regularizers.l2(weight_decay)
bias_regularizer = regularizers.l2(weight_decay)
else:
kernel_regularizer = None
bias_regularizer = None
# 1x1
pathway1 = Conv2D(
filters=branch1[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
# 1x1->1x7->7x1
pathway2 = Conv2D(
filters=branch2[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2 = conv_block(pathway2, branch2[1], 1, 7)
pathway2 = conv_block(pathway2, branch2[2], 7, 1)
# 1x1->7x1->1x7->7x1->1x7
pathway3 = Conv2D(
filters=branch3[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
pathway3 = conv_block(pathway3, branch3[1], 7, 1)
pathway3 = conv_block(pathway3, branch3[2], 1, 7)
pathway3 = conv_block(pathway3, branch3[3], 7, 1)
pathway3 = conv_block(pathway3, branch3[4], 1, 7)
# 3x3->1x1
pathway4 = AveragePooling2D(
pool_size=(3, 3), strides=1, padding=padding, data_format=DATA_FORMAT
)(x)
pathway4 = Conv2D(
filters=branch4[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway4)
pathway4 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway4)
)
return concatenate([pathway1, pathway2, pathway3, pathway4], axis=concat_axis)
def inception_reduce2(
x,
params,
concat_axis,
padding="same",
data_format=DATA_FORMAT,
use_bias=True,
kernel_initializer="he_normal",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
lrn2d_norm=LRN2D_NORM,
weight_decay=WEIGHT_DECAY,
):
(branch1, branch2) = params
if weight_decay:
kernel_regularizer = regularizers.l2(weight_decay)
bias_regularizer = regularizers.l2(weight_decay)
else:
kernel_regularizer = None
bias_regularizer = None
# 1x1->3x3
pathway1 = Conv2D(
filters=branch1[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
pathway1 = Conv2D(
filters=branch1[1],
kernel_size=(3, 3),
strides=2,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway1)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
# 1x1->1x7->7x1->3x3
pathway2 = Conv2D(
filters=branch2[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2 = conv_block(pathway2, branch2[1], 1, 7)
pathway2 = conv_block(pathway2, branch2[2], 7, 1)
pathway2 = Conv2D(
filters=branch2[3],
kernel_size=(3, 3),
strides=2,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway2)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
# 3x3 max pool, stride 2
pathway3 = MaxPooling2D(
pool_size=(3, 3), strides=2, padding=padding, data_format=DATA_FORMAT
)(x)
return concatenate([pathway1, pathway2, pathway3], axis=concat_axis)
def inception_module3(
x,
params,
concat_axis,
padding="same",
data_format=DATA_FORMAT,
use_bias=True,
kernel_initializer="he_normal",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
lrn2d_norm=LRN2D_NORM,
weight_decay=WEIGHT_DECAY,
):
(branch1, branch2, branch3, branch4) = params
if weight_decay:
kernel_regularizer = regularizers.l2(weight_decay)
bias_regularizer = regularizers.l2(weight_decay)
else:
kernel_regularizer = None
bias_regularizer = None
# 1x1
pathway1 = Conv2D(
filters=branch1[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway1 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway1)
)
# 1x1->1x3+3x1
pathway2 = Conv2D(
filters=branch2[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway2 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway2)
)
pathway2_1 = conv_block(pathway2, branch2[1], 1, 3)
pathway2_2 = conv_block(pathway2, branch2[2], 3, 1)
# 1x1->3x3->1x3+3x1
pathway3 = Conv2D(
filters=branch3[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(x)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
pathway3 = Conv2D(
filters=branch3[1],
kernel_size=(3, 3),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway3)
pathway3 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway3)
)
pathway3_1 = conv_block(pathway3, branch3[2], 1, 3)
pathway3_2 = conv_block(pathway3, branch3[3], 3, 1)
# 3x3->1x1
pathway4 = AveragePooling2D(
pool_size=(3, 3), strides=1, padding=padding, data_format=DATA_FORMAT
)(x)
pathway4 = Conv2D(
filters=branch4[0],
kernel_size=(1, 1),
strides=1,
padding=padding,
data_format=data_format,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
)(pathway4)
pathway4 = Activation("relu")(
BatchNormalization(momentum=0.9, epsilon=1e-5)(pathway4)
)
return concatenate(
[pathway1, pathway2_1, pathway2_2, pathway3_1, pathway3_2, pathway4],
axis=concat_axis,
)
def create_model(img_input):
x = Conv2D(
192,
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
)(img_input)
x = inception_module1(
x, params=[(64,), (48, 64), (64, 96), (32,)], concat_axis=CONCAT_AXIS
) # 3a 256
x = inception_module1(
x, params=[(64,), (48, 64), (64, 96), (64,)], concat_axis=CONCAT_AXIS
) # 3b 288
x = inception_module1(
x, params=[(64,), (48, 64), (64, 96), (64,)], concat_axis=CONCAT_AXIS
) # 3c 288
x = inception_reduce1(x, params=[(384,), (64, 96)], concat_axis=CONCAT_AXIS) # 768
x = inception_module2(
x,
params=[(192,), (128, 128, 192), (128, 128, 128, 128, 192), (192,)],
concat_axis=CONCAT_AXIS,
) # 4a 768
x = inception_module2(
x,
params=[(192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)],
concat_axis=CONCAT_AXIS,
) # 4b 768
x = inception_module2(
x,
params=[(192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)],
concat_axis=CONCAT_AXIS,
) # 4c 768
x = inception_module2(
x,
params=[(192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)],
concat_axis=CONCAT_AXIS,
) # 4d 768
x = inception_module2(
x,
params=[(192,), (192, 192, 192), (192, 192, 192, 192, 192), (192,)],
concat_axis=CONCAT_AXIS,
) # 4e 768
x = inception_reduce2(
x, params=[(192, 320), (192, 192, 192, 192)], concat_axis=CONCAT_AXIS
) # 1280
x = inception_module3(
x,
params=[(320,), (384, 384, 384), (448, 384, 384, 384), (192,)],
concat_axis=CONCAT_AXIS,
) # 4e 2048
x = inception_module3(
x,
params=[(320,), (384, 384, 384), (448, 384, 384, 384), (192,)],
concat_axis=CONCAT_AXIS,
) # 4e 2048
x = GlobalAveragePooling2D()(x)
x = Dropout(DROPOUT)(x)
x = Dense(
settings.NUM_CLASSES,
activation=None,
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
)(x)
return x
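# The final Dense layer above has activation=None, so create_model returns raw logits. A
# hedged compile sketch (the optimizer and loss choices are illustrative, not prescribed by
# this file):
#
#     model = get_model()
#     model.compile(
#         optimizer=optimizers.SGD(learning_rate=0.1, momentum=0.9, nesterov=True),
#         loss=keras.losses.CategoricalCrossentropy(from_logits=True),
#         metrics=["accuracy"],
#     )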
def get_model():
img_input = Input(shape=settings.IMG_SHAPE)
output = create_model(img_input)
model = Model(img_input, output)
return model | nets/inception_v3.py | 0.898093 | 0.555435 |
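# Usage sketch for the model defined above, assuming the same `settings`
# module (IMG_SHAPE, NUM_CLASSES) is importable; the optimizer and loss
# choices are illustrative placeholders. Because the final Dense layer has
# activation=None, the loss is built with from_logits=True.
import tensorflow as tf
from nets.inception_v3 import get_model

model = get_model()
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.summary()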
import argparse
import contextlib
import os
import sys
import path
import schema
import ui
import tbump.config
from tbump.file_bumper import FileBumper
from tbump.git_bumper import GitBumper
TBUMP_VERSION = "1.0.0"
@contextlib.contextmanager
def bump_git(git_bumper, new_version, dry_run=False):
git_bumper.check_state(new_version)
yield
git_bumper.bump(new_version, dry_run=dry_run)
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("new_version")
parser.add_argument("-C", "--cwd", dest="working_dir")
parser.add_argument("--non-interactive", dest="interactive", action="store_false")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--version", action="version", version=TBUMP_VERSION)
args = parser.parse_args(args=args)
interactive = args.interactive
working_dir = args.working_dir
new_version = args.new_version
dry_run = args.dry_run
if working_dir:
os.chdir(working_dir)
try:
config = tbump.config.parse(path.Path("tbump.toml"))
except IOError as io_error:
ui.fatal("Could not read config file:", io_error)
except Exception as e:
ui.fatal("Invalid config:", e)
bumping_message = [
"Bumping from",
ui.reset, ui.bold, config.current_version,
ui.reset, "to",
ui.reset, ui.bold, new_version
]
if dry_run:
bumping_message.extend([ui.reset, ui.brown, "(dry run)"])
ui.info_1(*bumping_message)
working_path = path.Path.getcwd()
git_bumper = GitBumper(working_path)
git_bumper.set_config(config)
file_bumper = FileBumper(working_path)
file_bumper.set_config(config)
with bump_git(git_bumper, new_version, dry_run=dry_run):
changes = file_bumper.compute_changes(new_version)
file_bumper.apply_changes(changes, dry_run=dry_run)
if interactive and not dry_run:
push_ok = ui.ask_yes_no("OK to push", default=False)
if push_ok:
git_bumper.push(new_version) | tbump/main.py | 0.128088 | 0.082994 |
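# Illustrative programmatic call of the CLI entry point above, using only the
# flags defined by its argparse parser; the project path and version string
# are placeholders.
from tbump.main import main

main(["-C", "/path/to/project", "--non-interactive", "--dry-run", "1.2.42"])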
from collections import OrderedDict
from typing import List, Dict
import mysql.connector
from mysql.connector.errors import DatabaseError, ProgrammingError
from slugify import slugify
from wwdtm.panelist import utility
#region Retrieval Functions
def retrieve_all(database_connection: mysql.connector.connect) -> List[Dict]:
"""Returns a list of OrderedDicts containing panelist details for
all panelists
Arguments:
database_connection (mysql.connector.connect)
"""
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT panelistid, panelist, panelistslug, "
"panelistgender "
"FROM ww_panelists "
"WHERE panelistslug != 'multiple' "
"ORDER BY panelist ASC;")
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
panelists = []
for row in result:
panelist = OrderedDict()
panelist["id"] = row["panelistid"]
panelist["name"] = row["panelist"]
if row["panelistslug"]:
panelist["slug"] = row["panelistslug"]
else:
panelist["slug"] = slugify(panelist["name"])
panelist["gender"] = row["panelistgender"]
panelists.append(panelist)
return panelists
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_all_ids(database_connection: mysql.connector.connect
) -> List[int]:
"""Return a list of all panelist IDs, sorted by panelist names
Arguments:
database_connection (mysql.connector.connect)
"""
try:
cursor = database_connection.cursor()
query = ("SELECT panelistid FROM ww_panelists "
"WHERE panelistslug != 'multiple' "
"ORDER BY panelist ASC;")
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
panelists = []
for row in result:
panelists.append(row[0])
return panelists
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False) -> Dict:
"""Returns an OrderedDict with panelist information based on the
requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT panelist, panelistgender, panelistslug "
"FROM ww_panelists "
"WHERE panelistid = %s;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchone()
cursor.close()
if result:
panelist_dict = OrderedDict()
panelist_dict["id"] = panelist_id
panelist_dict["name"] = result["panelist"]
if result["panelistslug"]:
panelist_dict["slug"] = result["panelistslug"]
else:
panelist_dict["slug"] = slugify(panelist_dict["name"])
panelist_dict["gender"] = result["panelistgender"]
return panelist_dict
return None
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect) -> Dict:
"""Returns an OrderedDict with panelist information based on the
requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if panelist_id:
return retrieve_by_id(panelist_id, database_connection, True)
return None
def retrieve_scores_grouped_list_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False
) -> Dict:
"""Returns an OrderedDict containing two lists, one with panelist
scores and one with corresponding number of instances a panelist
has scored that amount, for the requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT MIN(pm.panelistscore) AS min, "
"MAX(pm.panelistscore) AS max "
"FROM ww_showpnlmap pm;")
cursor.execute(query)
result = cursor.fetchone()
if not result:
return None
min_score = result["min"]
max_score = result["max"]
scores = OrderedDict()
for score in range(min_score, max_score + 1):
scores[score] = 0
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT pm.panelistscore AS score, "
"COUNT(pm.panelistscore) AS score_count "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"GROUP BY pm.panelistscore "
"ORDER BY pm.panelistscore ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
for row in result:
scores[row["score"]] = row["score_count"]
scores_list = OrderedDict()
scores_list["score"] = list(scores.keys())
scores_list["count"] = list(scores.values())
return scores_list
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_scores_grouped_list_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect
) -> Dict:
"""Returns an OrderedDict containing two lists, one with panelist
scores and one with corresponding number of instances a panelist
has scored that amount, for the requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if not panelist_id:
return None
return retrieve_scores_grouped_list_by_id(panelist_id,
database_connection,
pre_validated_id=True)
def retrieve_scores_grouped_ordered_pair_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False
) -> List[tuple]:
"""Returns an list of tuples containing a score and the
corresponding number of instances a panelist has scored that amount
for the requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT MIN(pm.panelistscore) AS min, "
"MAX(pm.panelistscore) AS max "
"FROM ww_showpnlmap pm;")
cursor.execute(query)
result = cursor.fetchone()
if not result:
return None
min_score = result["min"]
max_score = result["max"]
scores = OrderedDict()
for score in range(min_score, max_score + 1):
scores[score] = 0
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT pm.panelistscore AS score, "
"COUNT(pm.panelistscore) AS score_count "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"GROUP BY pm.panelistscore "
"ORDER BY pm.panelistscore ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
for row in result:
scores[row["score"]] = row["score_count"]
return list(scores.items())
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_scores_grouped_ordered_pair_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect
) -> List[tuple]:
"""Returns an list of tuples containing a score and the
corresponding number of instances a panelist has scored that amount
for the requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if not panelist_id:
return None
return retrieve_scores_grouped_ordered_pair_by_id(panelist_id,
database_connection,
pre_validated_id=True)
def retrieve_scores_list_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False) -> Dict:
"""Returns an OrderedDict containing two lists, one with show dates
and one with corresponding scores for the requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate, pm.panelistscore "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"ORDER BY s.showdate ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
show_list = []
score_list = []
for shows in result:
show_list.append(shows["showdate"].isoformat())
score_list.append(shows["panelistscore"])
scores = OrderedDict()
scores["shows"] = show_list
scores["scores"] = score_list
return scores
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_scores_list_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect
) -> Dict:
"""Returns an OrderedDict containing two lists, one with show dates
and one with corresponding scores for the requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if not panelist_id:
return None
return retrieve_scores_list_by_id(panelist_id,
database_connection,
pre_validated_id=True)
def retrieve_scores_ordered_pair_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False
) -> List[tuple]:
"""Returns an list of tuples containing a show date and the
corresponding score for the requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
try:
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate, pm.panelistscore "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistid = %s "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"ORDER BY s.showdate ASC;")
cursor.execute(query, (panelist_id,))
result = cursor.fetchall()
cursor.close()
if not result:
return None
scores = []
for show in result:
show_date = show["showdate"].isoformat()
score = show["panelistscore"]
scores.append((show_date, score))
return scores
except ProgrammingError as err:
raise ProgrammingError("Unable to query the database") from err
except DatabaseError as err:
raise DatabaseError("Unexpected database error") from err
def retrieve_scores_ordered_pair_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect
) -> List[tuple]:
"""Returns an list of tuples containing a show date and the
corresponding score for the requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if not panelist_id:
return None
return retrieve_scores_ordered_pair_by_id(panelist_id,
database_connection,
pre_validated_id=True)
def retrieve_yearly_appearances_by_id(panelist_id: int,
database_connection: mysql.connector.connect,
pre_validated_id: bool = False) -> Dict:
"""Returns an OrderedDict containing a list of years and the
corresponding number of appearances the panelist has made for the
requested panelist ID
Arguments:
panelist_id (int)
database_connection (mysql.connector.connect)
pre_validated_id (bool): Flag whether or not the panelist ID
has been validated
"""
if not pre_validated_id:
if not utility.validate_id(panelist_id, database_connection):
return None
years = OrderedDict()
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT DISTINCT YEAR(s.showdate) AS year FROM ww_shows s "
"ORDER BY YEAR(s.showdate) ASC")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
for row in result:
years[row["year"]] = 0
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"JOIN ww_panelists p ON p.panelistid = pm.panelistid "
"WHERE pm.panelistid = %s AND s.bestof = 0 "
"AND s.repeatshowid IS NULL "
"GROUP BY p.panelist, YEAR(s.showdate) "
"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
cursor.execute(query, (panelist_id, ))
result = cursor.fetchall()
cursor.close()
if not result:
return None
for row in result:
years[row["year"]] = row["count"]
return years
def retrieve_yearly_appearances_by_slug(panelist_slug: str,
database_connection: mysql.connector.connect
) -> Dict:
"""Returns an OrderedDict containing a list of years and the
corresponding number of appearances the panelist has made for the
requested panelist slug
Arguments:
panelist_slug (str)
database_connection (mysql.connector.connect)
"""
panelist_id = utility.convert_slug_to_id(panelist_slug,
database_connection)
if panelist_id:
return retrieve_yearly_appearances_by_id(panelist_id,
database_connection,
True)
return None
#endregion | wwdtm/panelist/info.py | 0.745769 | 0.27133 |
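# Illustrative use of the retrieval functions above; the connection parameters
# and the panelist slug are placeholders, and mysql.connector.connect() is the
# standard MySQL Connector/Python API.
import mysql.connector
from wwdtm.panelist import info

database_connection = mysql.connector.connect(
    host="localhost", database="wwdtm", user="wwdtm", password="changeme"
)
panelist = info.retrieve_by_slug("example-panelist", database_connection)
if panelist:
    yearly = info.retrieve_yearly_appearances_by_id(
        panelist["id"], database_connection, pre_validated_id=True
    )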
import numpy as np
import matplotlib.pyplot as plt
filename1 = './init_field_hit.dat'
filename2 = './init_field_hit_2.dat'
dataIn1 = np.loadtxt(filename1,dtype=np.double)
dataIn2 = np.loadtxt(filename2,dtype=np.double)
N = 129
U1 = np.empty((N-1,N-1,N-1),dtype=np.double)
V1 = np.empty((N-1,N-1,N-1),dtype=np.double)
W1 = np.empty((N-1,N-1,N-1),dtype=np.double)
U2 = np.empty((N-1,N-1,N-1),dtype=np.double)
V2 = np.empty((N-1,N-1,N-1),dtype=np.double)
W2 = np.empty((N-1,N-1,N-1),dtype=np.double)
dx = 2*np.pi/N
dy = 2*np.pi/N
dz = 2*np.pi/N
for k in range(0,N-1):
for j in range(0,N-1):
for i in range(0,N-1):
ii = k*N*N + j*N + i
U1[i,j,k] = dataIn1[ii,3]
V1[i,j,k] = dataIn1[ii,4]
W1[i,j,k] = dataIn1[ii,5]
U2[i,j,k] = dataIn2[ii,3]
V2[i,j,k] = dataIn2[ii,4]
W2[i,j,k] = dataIn2[ii,5]
#%%
uprime1 = 0
uprime2 = 0
q1 = 0
q2 = 0
for k in range(0,N-1):
for j in range(0,N-1):
for i in range(0,N-1):
uprime1 += (U1[i,j,k]**2 + V1[i,j,k]**2 + W1[i,j,k]**2)/3
q1 += (U1[i,j,k]**2 + V1[i,j,k]**2 + W1[i,j,k]**2)
uprime2 += (U2[i,j,k]**2 + V2[i,j,k]**2 + W2[i,j,k]**2)/3
q2 += (U2[i,j,k]**2 + V2[i,j,k]**2 + W2[i,j,k]**2)
uprime1 = uprime1/(N-1)/(N-1)/(N-1)
q1 = q1/(N-1)/(N-1)/(N-1)
uprime1 = np.sqrt(uprime1)
q1 = np.sqrt(q1)
uprime2 = uprime2/(N-1)/(N-1)/(N-1)
q2 = q2/(N-1)/(N-1)/(N-1)
uprime2 = np.sqrt(uprime2)
q2 = np.sqrt(q2)
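#Vectorized cross-check of the RMS values computed in the loops above (same
#quantities, no Python loops); the pattern for the second field is identical.
uprime1_check = np.sqrt(np.mean((U1**2 + V1**2 + W1**2)/3))
q1_check = np.sqrt(np.mean(U1**2 + V1**2 + W1**2))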
#%%
uprimeGoal = 1
U1 = U1*uprimeGoal/uprime1
V1 = V1*uprimeGoal/uprime1
W1 = W1*uprimeGoal/uprime1
U2 = U2*uprimeGoal/uprime2
V2 = V2*uprimeGoal/uprime2
W2 = W2*uprimeGoal/uprime2
#%%
#Need to get the source field for the Poisson eqn.
#these are rough approximations of the velocity derivatives; 2nd order central should be enough
S = np.empty((N-1,N-1,N-1),dtype=np.double)
Ux = np.empty((N-1,N-1,N-1),dtype=np.double)
Uy = np.empty((N-1,N-1,N-1),dtype=np.double)
Uz = np.empty((N-1,N-1,N-1),dtype=np.double)
Vx = np.empty((N-1,N-1,N-1),dtype=np.double)
Vy = np.empty((N-1,N-1,N-1),dtype=np.double)
Vz = np.empty((N-1,N-1,N-1),dtype=np.double)
Wx = np.empty((N-1,N-1,N-1),dtype=np.double)
Wy = np.empty((N-1,N-1,N-1),dtype=np.double)
Wz = np.empty((N-1,N-1,N-1),dtype=np.double)
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Ux[0,j,k] = (U1[1,j,k] - U1[-1,j,k])/(2*dx)
Vx[0,j,k] = (V1[1,j,k] - V1[-1,j,k])/(2*dx)
Wx[0,j,k] = (W1[1,j,k] - W1[-1,j,k])/(2*dx)
elif i==(N-2):
Ux[-1,j,k] = (U1[0,j,k] - U1[-2,j,k])/(2*dx)
Vx[-1,j,k] = (V1[0,j,k] - V1[-2,j,k])/(2*dx)
Wx[-1,j,k] = (W1[0,j,k] - W1[-2,j,k])/(2*dx)
else:
Ux[i,j,k] = (U1[i+1,j,k] - U1[i-1,j,k])/(2*dx)
Vx[i,j,k] = (V1[i+1,j,k] - V1[i-1,j,k])/(2*dx)
Wx[i,j,k] = (W1[i+1,j,k] - W1[i-1,j,k])/(2*dx)
if j==0:
Uy[i,0,k] = (U1[i,1,k] - U1[i,-1,k])/(2*dx)
Vy[i,0,k] = (V1[i,1,k] - V1[i,-1,k])/(2*dx)
Wy[i,0,k] = (W1[i,1,k] - W1[i,-1,k])/(2*dx)
elif j==(N-2):
Uy[i,-1,k] = (U1[i,0,k] - U1[i,-2,k])/(2*dx)
Vy[i,-1,k] = (V1[i,0,k] - V1[i,-2,k])/(2*dx)
Wy[i,-1,k] = (W1[i,0,k] - W1[i,-2,k])/(2*dx)
else:
Uy[i,j,k] = (U1[i,j+1,k] - U1[i,j-1,k])/(2*dx)
Vy[i,j,k] = (V1[i,j+1,k] - V1[i,j-1,k])/(2*dx)
Wy[i,j,k] = (W1[i,j+1,k] - W1[i,j-1,k])/(2*dx)
if k==0:
Uz[i,j,0] = (U1[i,j,1] - U1[i,j,-1])/(2*dx)
Vz[i,j,0] = (V1[i,j,1] - V1[i,j,-1])/(2*dx)
Wz[i,j,0] = (W1[i,j,1] - W1[i,j,-1])/(2*dx)
elif k==(N-2):
Uz[i,j,-1] = (U1[i,j,0] - U1[i,j,-2])/(2*dx)
Vz[i,j,-1] = (V1[i,j,0] - V1[i,j,-2])/(2*dx)
Wz[i,j,-1] = (W1[i,j,0] - W1[i,j,-2])/(2*dx)
else:
Uz[i,j,k] = (U1[i,j,k+1] - U1[i,j,k-1])/(2*dx)
Vz[i,j,k] = (V1[i,j,k+1] - V1[i,j,k-1])/(2*dx)
Wz[i,j,k] = (W1[i,j,k+1] - W1[i,j,k-1])/(2*dx)
S = -(Ux*Ux + Vy*Vy + Wz*Wz + 2*Uy*Vx + 2*Vz*Wy + 2*Uz*Wx)
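#The loop above applies periodic 2nd order central differences by handling the
#wrap-around planes explicitly. A vectorized equivalent (sketch, shown for Ux
#only; the other derivatives follow the same pattern along the matching axis):
Ux_roll = (np.roll(U1, -1, axis=0) - np.roll(U1, 1, axis=0))/(2*dx)
assert np.allclose(Ux_roll, Ux)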
#%%
ptilde1 = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde2 = np.empty((N-1,N-1,N-1),dtype=np.double)
ppadtemp = np.empty((N+1,N+1,N+1),dtype=np.double)
r = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde1[:,:,:] = 0.0
ptilde2[:,:,:] = 0.0
r[:,:,:] = 0.0
omega = 2 / ( 1 + np.sin(np.pi/(N)) )  # optimal SOR relaxation factor; not used below, the update is plain Jacobi
for kk in range(0,2000):
ppadtemp[:,:,:] = 0.0
ppadtemp[1:-1,1:-1, 1:-1] = ptilde1
ppadtemp[0 ,1:-1, 1:-1] = ptilde1[-1,:,:]
ppadtemp[-1 ,1:-1, 1:-1] = ptilde1[ 0,:,:]
ppadtemp[1:-1,0 , 1:-1] = ptilde1[:,-1,:]
ppadtemp[1:-1,-1 , 1:-1] = ptilde1[:, 0,:]
ppadtemp[1:-1,1:-1, 0] = ptilde1[:,:,-1]
ppadtemp[1:-1,1:-1, -1] = ptilde1[:,:, 0]
r[:,:,:] = 0.0
r -= S*dx*dx
#r[i,j,k] -= 0 #6*ptilde1[i,j,k]
r[:,:,:] += ppadtemp[0:-2,1:-1,1:-1]
r[:,:,:] += ppadtemp[2: ,1:-1,1:-1]
r[:,:,:] += ppadtemp[1:-1,0:-2,1:-1]
r[:,:,:] += ppadtemp[1:-1,2: ,1:-1]
r[:,:,:] += ppadtemp[1:-1,1:-1,0:-2]
r[:,:,:] += ppadtemp[1:-1,1:-1,2:]
ptilde2 = (1/6)*r
res_norm = np.sum((ptilde2-ptilde1)**2)
ptilde1 = ptilde2
print(kk)
print(res_norm)
if res_norm < 0.001:
break
p1 = ptilde1
#%%
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Ux[0,j,k] = (U2[1,j,k] - U2[-1,j,k])/(2*dx)
Vx[0,j,k] = (V2[1,j,k] - V2[-1,j,k])/(2*dx)
Wx[0,j,k] = (W2[1,j,k] - W2[-1,j,k])/(2*dx)
elif i==(N-2):
Ux[-1,j,k] = (U2[0,j,k] - U2[-2,j,k])/(2*dx)
Vx[-1,j,k] = (V2[0,j,k] - V2[-2,j,k])/(2*dx)
Wx[-1,j,k] = (W2[0,j,k] - W2[-2,j,k])/(2*dx)
else:
Ux[i,j,k] = (U2[i+1,j,k] - U2[i-1,j,k])/(2*dx)
Vx[i,j,k] = (V2[i+1,j,k] - V2[i-1,j,k])/(2*dx)
Wx[i,j,k] = (W2[i+1,j,k] - W2[i-1,j,k])/(2*dx)
if j==0:
Uy[i,0,k] = (U2[i,1,k] - U2[i,-1,k])/(2*dx)
Vy[i,0,k] = (V2[i,1,k] - V2[i,-1,k])/(2*dx)
Wy[i,0,k] = (W2[i,1,k] - W2[i,-1,k])/(2*dx)
elif j==(N-2):
Uy[i,-1,k] = (U2[i,0,k] - U2[i,-2,k])/(2*dx)
Vy[i,-1,k] = (V2[i,0,k] - V2[i,-2,k])/(2*dx)
Wy[i,-1,k] = (W2[i,0,k] - W2[i,-2,k])/(2*dx)
else:
Uy[i,j,k] = (U2[i,j+1,k] - U2[i,j-1,k])/(2*dx)
Vy[i,j,k] = (V2[i,j+1,k] - V2[i,j-1,k])/(2*dx)
Wy[i,j,k] = (W2[i,j+1,k] - W2[i,j-1,k])/(2*dx)
if k==0:
Uz[i,j,0] = (U2[i,j,1] - U2[i,j,-1])/(2*dx)
Vz[i,j,0] = (V2[i,j,1] - V2[i,j,-1])/(2*dx)
Wz[i,j,0] = (W2[i,j,1] - W2[i,j,-1])/(2*dx)
elif k==(N-2):
Uz[i,j,-1] = (U2[i,j,0] - U2[i,j,-2])/(2*dx)
Vz[i,j,-1] = (V2[i,j,0] - V2[i,j,-2])/(2*dx)
Wz[i,j,-1] = (W2[i,j,0] - W2[i,j,-2])/(2*dx)
else:
Uz[i,j,k] = (U2[i,j,k+1] - U2[i,j,k-1])/(2*dx)
Vz[i,j,k] = (V2[i,j,k+1] - V2[i,j,k-1])/(2*dx)
Wz[i,j,k] = (W2[i,j,k+1] - W2[i,j,k-1])/(2*dx)
S = -(Ux*Ux + Vy*Vy + Wz*Wz + 2*Uy*Vx + 2*Vz*Wy + 2*Uz*Wx)
#%%
ptilde1 = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde2 = np.empty((N-1,N-1,N-1),dtype=np.double)
ppadtemp = np.empty((N+1,N+1,N+1),dtype=np.double)
r = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde1[:,:,:] = 0.0
ptilde2[:,:,:] = 0.0
r[:,:,:] = 0.0
omega = 2 / ( 1 + np.sin(np.pi/(N)) )  # optimal SOR relaxation factor; not used below, the update is plain Jacobi
for kk in range(0,2000):
ppadtemp[:,:,:] = 0.0
ppadtemp[1:-1,1:-1, 1:-1] = ptilde1
ppadtemp[0 ,1:-1, 1:-1] = ptilde1[-1,:,:]
ppadtemp[-1 ,1:-1, 1:-1] = ptilde1[ 0,:,:]
ppadtemp[1:-1,0 , 1:-1] = ptilde1[:,-1,:]
ppadtemp[1:-1,-1 , 1:-1] = ptilde1[:, 0,:]
ppadtemp[1:-1,1:-1, 0] = ptilde1[:,:,-1]
ppadtemp[1:-1,1:-1, -1] = ptilde1[:,:, 0]
r[:,:,:] = 0.0
r -= S*dx*dx
#r[i,j,k] -= 0 #6*ptilde1[i,j,k]
r[:,:,:] += ppadtemp[0:-2,1:-1,1:-1]
r[:,:,:] += ppadtemp[2: ,1:-1,1:-1]
r[:,:,:] += ppadtemp[1:-1,0:-2,1:-1]
r[:,:,:] += ppadtemp[1:-1,2: ,1:-1]
r[:,:,:] += ppadtemp[1:-1,1:-1,0:-2]
r[:,:,:] += ppadtemp[1:-1,1:-1,2:]
ptilde2 = (1/6)*r
res_norm = np.sum((ptilde2-ptilde1)**2)
ptilde1 = ptilde2
print(kk)
print(res_norm)
if res_norm < 0.001:
break
p2 = ptilde1
#%%
Pxx = np.empty((N-1,N-1,N-1),dtype=np.double)
Pyy = np.empty((N-1,N-1,N-1),dtype=np.double)
Pzz = np.empty((N-1,N-1,N-1),dtype=np.double)
PS = np.empty((N-1,N-1,N-1),dtype=np.double)
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Pxx[0,j,k] = (ptilde1[1,j,k] -2*ptilde1[0,j,k] + ptilde1[-1,j,k])/(dx*dx)
elif i==(N-2):
Pxx[-1,j,k] = (ptilde1[0,j,k] -2*ptilde1[-1,j,k] + ptilde1[-2,j,k])/(dx*dx)
else:
Pxx[i,j,k] = (ptilde1[i+1,j,k] -2*ptilde1[i,j,k] + ptilde1[i-1,j,k])/(dx*dx)
if j==0:
Pyy[i,0,k] = (ptilde1[i,1,k] -2*ptilde1[i,0,k] + ptilde1[i,-1,k])/(dx*dx)
elif j==(N-2):
Pyy[i,-1,k] = (ptilde1[i,0,k] -2*ptilde1[i,-1,k] + ptilde1[i,-2,k])/(dx*dx)
else:
Pyy[i,j,k] = (ptilde1[i,j+1,k] -2*ptilde1[i,j,k] + ptilde1[i,j-1,k])/(dx*dx)
if k==0:
Pzz[i,j,0] = (ptilde1[i,j,1] -2*ptilde1[i,j,0] + ptilde1[i,j,-1])/(dx*dx)
elif k==(N-2):
Pzz[i,j,-1] = (ptilde1[i,j,0] -2*ptilde1[i,j,-1] + ptilde1[i,j,-2])/(dx*dx)
else:
Pzz[i,j,k] = (ptilde1[i,j,k+1] -2*ptilde1[i,j,k] + ptilde1[i,j,k-1])/(dx*dx)
PS = Pxx + Pyy + Pzz
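#PS is the discrete Laplacian of the solved field (ptilde1, i.e. p2 at this
#point), so comparing it against the source S gives a direct residual for the
#Jacobi solve; a small value indicates the iteration converged.
poisson_residual = np.max(np.abs(PS - S))
print('max Poisson residual:', poisson_residual)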
#%%
#Chop the domain into six chunks
#Chunk1
U1a = U1[:,:,0:42]
V1a = V1[:,:,0:42]
W1a = W1[:,:,0:42]
P1a = p1[:,:,0:42]
#Chunk2
U2a = U1[:,:,42:84]
V2a = V1[:,:,42:84]
W2a = W1[:,:,42:84]
P2a = p1[:,:,42:84]
#Chunk3
U3a = U1[:,:,84:126]
V3a = V1[:,:,84:126]
W3a = W1[:,:,84:126]
P3a = p1[:,:,84:126]
#Chunk4
U4a = U2[:,:,0:42]
V4a = V2[:,:,0:42]
W4a = W2[:,:,0:42]
P4a = p2[:,:,0:42]
#Chunk5
U5a = U2[:,:,42:84]
V5a = V2[:,:,42:84]
W5a = W2[:,:,42:84]
P5a = p2[:,:,42:84]
#Chunk6
U6a = U2[:,:,84:126]
V6a = V2[:,:,84:126]
W6a = W2[:,:,84:126]
P6a = p2[:,:,84:126]
totalX = 512
currentX = 128*6
totalOverlap = currentX - totalX
#%%
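# Assemble a 512-cell-long streamwise field: the interior of each chunk is copied directly
# and the 42-cell overlap regions between neighbouring chunks are cross-faded below.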
Ufinal = np.empty((totalX,N-1,42),dtype=np.double)
Vfinal = np.empty((totalX,N-1,42),dtype=np.double)
Wfinal = np.empty((totalX,N-1,42),dtype=np.double)
Pfinal = np.empty((totalX,N-1,42),dtype=np.double)
#%%
Ufinal[42:86,:,:] = U1a[42:86,:,:]
Vfinal[42:86,:,:] = V1a[42:86,:,:]
Wfinal[42:86,:,:] = W1a[42:86,:,:]
Pfinal[42:86,:,:] = P1a[42:86,:,:]
Ufinal[128:172,:,:] = U2a[42:86,:,:]
Vfinal[128:172,:,:] = V2a[42:86,:,:]
Wfinal[128:172,:,:] = W2a[42:86,:,:]
Pfinal[128:172,:,:] = P2a[42:86,:,:]
Ufinal[214:258,:,:] = U3a[42:86,:,:]
Vfinal[214:258,:,:] = V3a[42:86,:,:]
Wfinal[214:258,:,:] = W3a[42:86,:,:]
Pfinal[214:258,:,:] = P3a[42:86,:,:]
Ufinal[300:344,:,:] = U4a[42:86,:,:]
Vfinal[300:344,:,:] = V4a[42:86,:,:]
Wfinal[300:344,:,:] = W4a[42:86,:,:]
Pfinal[300:344,:,:] = P4a[42:86,:,:]
Ufinal[386:430,:,:] = U5a[42:86,:,:]
Vfinal[386:430,:,:] = V5a[42:86,:,:]
Wfinal[386:430,:,:] = W5a[42:86,:,:]
Pfinal[386:430,:,:] = P5a[42:86,:,:]
Ufinal[472:512,:,:] = U6a[42:82,:,:]
Vfinal[472:512,:,:] = V6a[42:82,:,:]
Wfinal[472:512,:,:] = W6a[42:82,:,:]
Pfinal[472:512,:,:] = P6a[42:82,:,:]
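# Cross-fade each 42-cell overlap with cos/sin weights (theta runs from 0 to pi/2),
# so one chunk fades out while the next fades in across the seam.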
for i in range(0,42):
#beta = 1 - np.cos((np.pi/2.0)*float(i)/41.0)
beta = float(i)/41.0
theta = (np.pi/2.0)*beta
Ufinal[i,:,:] = np.cos(theta)*U6a[82+i,:,:] + np.sin(theta)*U1a[i,:,:]
Vfinal[i,:,:] = np.cos(theta)*V6a[82+i,:,:] + np.sin(theta)*V1a[i,:,:]
Wfinal[i,:,:] = np.cos(theta)*W6a[82+i,:,:] + np.sin(theta)*W1a[i,:,:]
Pfinal[i,:,:] = np.cos(theta)*P6a[82+i,:,:] + np.sin(theta)*P1a[i,:,:]
Ufinal[86+i,:,:] = np.cos(theta)*U1a[86+i,:,:] + np.sin(theta)*U2a[i,:,:]
Vfinal[86+i,:,:] = np.cos(theta)*V1a[86+i,:,:] + np.sin(theta)*V2a[i,:,:]
Wfinal[86+i,:,:] = np.cos(theta)*W1a[86+i,:,:] + np.sin(theta)*W2a[i,:,:]
Pfinal[86+i,:,:] = np.cos(theta)*P1a[86+i,:,:] + np.sin(theta)*P2a[i,:,:]
Ufinal[172+i,:,:] = np.cos(theta)*U2a[86+i,:,:] + np.sin(theta)*U3a[i,:,:]
Vfinal[172+i,:,:] = np.cos(theta)*V2a[86+i,:,:] + np.sin(theta)*V3a[i,:,:]
Wfinal[172+i,:,:] = np.cos(theta)*W2a[86+i,:,:] + np.sin(theta)*W3a[i,:,:]
Pfinal[172+i,:,:] = np.cos(theta)*P2a[86+i,:,:] + np.sin(theta)*P3a[i,:,:]
Ufinal[258+i,:,:] = np.cos(theta)*U3a[86+i,:,:] + np.sin(theta)*U4a[i,:,:]
Vfinal[258+i,:,:] = np.cos(theta)*V3a[86+i,:,:] + np.sin(theta)*V4a[i,:,:]
Wfinal[258+i,:,:] = np.cos(theta)*W3a[86+i,:,:] + np.sin(theta)*W4a[i,:,:]
Pfinal[258+i,:,:] = np.cos(theta)*P3a[86+i,:,:] + np.sin(theta)*P4a[i,:,:]
Ufinal[344+i,:,:] = np.cos(theta)*U4a[86+i,:,:] + np.sin(theta)*U5a[i,:,:]
Vfinal[344+i,:,:] = np.cos(theta)*V4a[86+i,:,:] + np.sin(theta)*V5a[i,:,:]
Wfinal[344+i,:,:] = np.cos(theta)*W4a[86+i,:,:] + np.sin(theta)*W5a[i,:,:]
Pfinal[344+i,:,:] = np.cos(theta)*P4a[86+i,:,:] + np.sin(theta)*P5a[i,:,:]
Ufinal[430+i,:,:] = np.cos(theta)*U5a[86+i,:,:] + np.sin(theta)*U6a[i,:,:]
Vfinal[430+i,:,:] = np.cos(theta)*V5a[86+i,:,:] + np.sin(theta)*V6a[i,:,:]
Wfinal[430+i,:,:] = np.cos(theta)*W5a[86+i,:,:] + np.sin(theta)*W6a[i,:,:]
Pfinal[430+i,:,:] = np.cos(theta)*P5a[86+i,:,:] + np.sin(theta)*P6a[i,:,:]
#%%
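# Write the blended fields one value per line, with i varying fastest, then j, then k.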
f = open('U_uprime1_N128_k8_512x128x42.dat','w')
g = open('V_uprime1_N128_k8_512x128x42.dat','w')
h = open('W_uprime1_N128_k8_512x128x42.dat','w')
pp = open('P_uprime1_N128_k8_512x128x42.dat','w')
for k in range(0,42):
for j in range(0,128):
for i in range(0,512):
f.write("".join([str(Ufinal[i,j,k]), "\n"]))
g.write("".join([str(Vfinal[i,j,k]), "\n"]))
h.write("".join([str(Wfinal[i,j,k]), "\n"]))
pp.write("".join([str(Pfinal[i,j,k]), "\n"]))
f.close()
g.close()
h.close()
pp.close() | MiscTools/loadinithit_withpressure_largedomain.py | import numpy as np
import matplotlib.pyplot as plt
filename1 = './init_field_hit.dat'
filename2 = './init_field_hit_2.dat'
dataIn1 = np.loadtxt(filename1,dtype=np.double)
dataIn2 = np.loadtxt(filename2,dtype=np.double)
N = 129
U1 = np.empty((N-1,N-1,N-1),dtype=np.double)
V1 = np.empty((N-1,N-1,N-1),dtype=np.double)
W1 = np.empty((N-1,N-1,N-1),dtype=np.double)
U2 = np.empty((N-1,N-1,N-1),dtype=np.double)
V2 = np.empty((N-1,N-1,N-1),dtype=np.double)
W2 = np.empty((N-1,N-1,N-1),dtype=np.double)
dx = 2*np.pi/N
dy = 2*np.pi/N
dz = 2*np.pi/N
for k in range(0,N-1):
for j in range(0,N-1):
for i in range(0,N-1):
ii = k*N*N + j*N + i
U1[i,j,k] = dataIn1[ii,3]
V1[i,j,k] = dataIn1[ii,4]
W1[i,j,k] = dataIn1[ii,5]
U2[i,j,k] = dataIn2[ii,3]
V2[i,j,k] = dataIn2[ii,4]
W2[i,j,k] = dataIn2[ii,5]
#%%
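# Compute the rms velocity u' (per component) and q (total) of each field over the box.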
uprime1 = 0
uprime2 = 0
q1 = 0
q2 = 0
for k in range(0,N-1):
for j in range(0,N-1):
for i in range(0,N-1):
uprime1 += (U1[i,j,k]**2 + V1[i,j,k]**2 + W1[i,j,k]**2)/3
q1 += (U1[i,j,k]**2 + V1[i,j,k]**2 + W1[i,j,k]**2)
uprime2 += (U2[i,j,k]**2 + V2[i,j,k]**2 + W2[i,j,k]**2)/3
q2 += (U2[i,j,k]**2 + V2[i,j,k]**2 + W2[i,j,k]**2)
uprime1 = uprime1/(N-1)/(N-1)/(N-1)
q1 = q1/(N-1)/(N-1)/(N-1)
uprime1 = np.sqrt(uprime1)
q1 = np.sqrt(q1)
uprime2 = uprime2/(N-1)/(N-1)/(N-1)
q2 = q2/(N-1)/(N-1)/(N-1)
uprime2 = np.sqrt(uprime2)
q2 = np.sqrt(q2)
#%%
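# Rescale both fields so that u' matches uprimeGoal.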
uprimeGoal = 1
U1 = U1*uprimeGoal/uprime1
V1 = V1*uprimeGoal/uprime1
W1 = W1*uprimeGoal/uprime1
U2 = U2*uprimeGoal/uprime2
V2 = V2*uprimeGoal/uprime2
W2 = W2*uprimeGoal/uprime2
#%%
#Need the source field for the pressure Poisson equation.
#These are rough derivative estimates of the fields; 2nd-order central differences should be enough
S = np.empty((N-1,N-1,N-1),dtype=np.double)
Ux = np.empty((N-1,N-1,N-1),dtype=np.double)
Uy = np.empty((N-1,N-1,N-1),dtype=np.double)
Uz = np.empty((N-1,N-1,N-1),dtype=np.double)
Vx = np.empty((N-1,N-1,N-1),dtype=np.double)
Vy = np.empty((N-1,N-1,N-1),dtype=np.double)
Vz = np.empty((N-1,N-1,N-1),dtype=np.double)
Wx = np.empty((N-1,N-1,N-1),dtype=np.double)
Wy = np.empty((N-1,N-1,N-1),dtype=np.double)
Wz = np.empty((N-1,N-1,N-1),dtype=np.double)
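# Velocity gradients by 2nd-order central differences, wrapping indices at the
# domain edges to enforce periodicity.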
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Ux[0,j,k] = (U1[1,j,k] - U1[-1,j,k])/(2*dx)
Vx[0,j,k] = (V1[1,j,k] - V1[-1,j,k])/(2*dx)
Wx[0,j,k] = (W1[1,j,k] - W1[-1,j,k])/(2*dx)
elif i==(N-2):
Ux[-1,j,k] = (U1[0,j,k] - U1[-2,j,k])/(2*dx)
Vx[-1,j,k] = (V1[0,j,k] - V1[-2,j,k])/(2*dx)
Wx[-1,j,k] = (W1[0,j,k] - W1[-2,j,k])/(2*dx)
else:
Ux[i,j,k] = (U1[i+1,j,k] - U1[i-1,j,k])/(2*dx)
Vx[i,j,k] = (V1[i+1,j,k] - V1[i-1,j,k])/(2*dx)
Wx[i,j,k] = (W1[i+1,j,k] - W1[i-1,j,k])/(2*dx)
if j==0:
Uy[i,0,k] = (U1[i,1,k] - U1[i,-1,k])/(2*dx)
Vy[i,0,k] = (V1[i,1,k] - V1[i,-1,k])/(2*dx)
Wy[i,0,k] = (W1[i,1,k] - W1[i,-1,k])/(2*dx)
elif j==(N-2):
Uy[i,-1,k] = (U1[i,0,k] - U1[i,-2,k])/(2*dx)
Vy[i,-1,k] = (V1[i,0,k] - V1[i,-2,k])/(2*dx)
Wy[i,-1,k] = (W1[i,0,k] - W1[i,-2,k])/(2*dx)
else:
Uy[i,j,k] = (U1[i,j+1,k] - U1[i,j-1,k])/(2*dx)
Vy[i,j,k] = (V1[i,j+1,k] - V1[i,j-1,k])/(2*dx)
Wy[i,j,k] = (W1[i,j+1,k] - W1[i,j-1,k])/(2*dx)
if k==0:
Uz[i,j,0] = (U1[i,j,1] - U1[i,j,-1])/(2*dx)
Vz[i,j,0] = (V1[i,j,1] - V1[i,j,-1])/(2*dx)
Wz[i,j,0] = (W1[i,j,1] - W1[i,j,-1])/(2*dx)
elif k==(N-2):
Uz[i,j,-1] = (U1[i,j,0] - U1[i,j,-2])/(2*dx)
Vz[i,j,-1] = (V1[i,j,0] - V1[i,j,-2])/(2*dx)
Wz[i,j,-1] = (W1[i,j,0] - W1[i,j,-2])/(2*dx)
else:
Uz[i,j,k] = (U1[i,j,k+1] - U1[i,j,k-1])/(2*dx)
Vz[i,j,k] = (V1[i,j,k+1] - V1[i,j,k-1])/(2*dx)
Wz[i,j,k] = (W1[i,j,k+1] - W1[i,j,k-1])/(2*dx)
S = -(Ux*Ux + Vy*Vy + Wz*Wz + 2*Uy*Vx + 2*Vz*Wy + 2*Uz*Wx)
#%%
ptilde1 = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde2 = np.empty((N-1,N-1,N-1),dtype=np.double)
ppadtemp = np.empty((N+1,N+1,N+1),dtype=np.double)
r = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde1[:,:,:] = 0.0
ptilde2[:,:,:] = 0.0
r[:,:,:] = 0.0
omega = 2 / ( 1 + np.sin(np.pi/(N)) )
for kk in range(0,2000):
ppadtemp[:,:,:] = 0.0
ppadtemp[1:-1,1:-1, 1:-1] = ptilde1
ppadtemp[0 ,1:-1, 1:-1] = ptilde1[-1,:,:]
ppadtemp[-1 ,1:-1, 1:-1] = ptilde1[ 0,:,:]
ppadtemp[1:-1,0 , 1:-1] = ptilde1[:,-1,:]
ppadtemp[1:-1,-1 , 1:-1] = ptilde1[:, 0,:]
ppadtemp[1:-1,1:-1, 0] = ptilde1[:,:,-1]
ppadtemp[1:-1,1:-1, -1] = ptilde1[:,:, 0]
r[:,:,:] = 0.0
r -= S*dx*dx
#r[i,j,k] -= 0 #6*ptilde1[i,j,k]
r[:,:,:] += ppadtemp[0:-2,1:-1,1:-1]
r[:,:,:] += ppadtemp[2: ,1:-1,1:-1]
r[:,:,:] += ppadtemp[1:-1,0:-2,1:-1]
r[:,:,:] += ppadtemp[1:-1,2: ,1:-1]
r[:,:,:] += ppadtemp[1:-1,1:-1,0:-2]
r[:,:,:] += ppadtemp[1:-1,1:-1,2:]
ptilde2 = (1/6)*r
res_norm = np.sum((ptilde2-ptilde1)**2)
ptilde1 = ptilde2
print(kk)
print(res_norm)
if res_norm < 0.001:
break
p1 = ptilde1
#%%
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Ux[0,j,k] = (U2[1,j,k] - U2[-1,j,k])/(2*dx)
Vx[0,j,k] = (V2[1,j,k] - V2[-1,j,k])/(2*dx)
Wx[0,j,k] = (W2[1,j,k] - W2[-1,j,k])/(2*dx)
elif i==(N-2):
Ux[-1,j,k] = (U2[0,j,k] - U2[-2,j,k])/(2*dx)
Vx[-1,j,k] = (V2[0,j,k] - V2[-2,j,k])/(2*dx)
Wx[-1,j,k] = (W2[0,j,k] - W2[-2,j,k])/(2*dx)
else:
Ux[i,j,k] = (U2[i+1,j,k] - U2[i-1,j,k])/(2*dx)
Vx[i,j,k] = (V2[i+1,j,k] - V2[i-1,j,k])/(2*dx)
Wx[i,j,k] = (W2[i+1,j,k] - W2[i-1,j,k])/(2*dx)
if j==0:
Uy[i,0,k] = (U2[i,1,k] - U2[i,-1,k])/(2*dx)
Vy[i,0,k] = (V2[i,1,k] - V2[i,-1,k])/(2*dx)
Wy[i,0,k] = (W2[i,1,k] - W2[i,-1,k])/(2*dx)
elif j==(N-2):
Uy[i,-1,k] = (U2[i,0,k] - U2[i,-2,k])/(2*dx)
Vy[i,-1,k] = (V2[i,0,k] - V2[i,-2,k])/(2*dx)
Wy[i,-1,k] = (W2[i,0,k] - W2[i,-2,k])/(2*dx)
else:
Uy[i,j,k] = (U2[i,j+1,k] - U2[i,j-1,k])/(2*dx)
Vy[i,j,k] = (V2[i,j+1,k] - V2[i,j-1,k])/(2*dx)
Wy[i,j,k] = (W2[i,j+1,k] - W2[i,j-1,k])/(2*dx)
if k==0:
Uz[i,j,0] = (U2[i,j,1] - U2[i,j,-1])/(2*dx)
Vz[i,j,0] = (V2[i,j,1] - V2[i,j,-1])/(2*dx)
Wz[i,j,0] = (W2[i,j,1] - W2[i,j,-1])/(2*dx)
elif k==(N-2):
Uz[i,j,-1] = (U2[i,j,0] - U2[i,j,-2])/(2*dx)
Vz[i,j,-1] = (V2[i,j,0] - V2[i,j,-2])/(2*dx)
Wz[i,j,-1] = (W2[i,j,0] - W2[i,j,-2])/(2*dx)
else:
Uz[i,j,k] = (U2[i,j,k+1] - U2[i,j,k-1])/(2*dx)
Vz[i,j,k] = (V2[i,j,k+1] - V2[i,j,k-1])/(2*dx)
Wz[i,j,k] = (W2[i,j,k+1] - W2[i,j,k-1])/(2*dx)
S = -(Ux*Ux + Vy*Vy + Wz*Wz + 2*Uy*Vx + 2*Vz*Wy + 2*Uz*Wx)
#%%
ptilde1 = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde2 = np.empty((N-1,N-1,N-1),dtype=np.double)
ppadtemp = np.empty((N+1,N+1,N+1),dtype=np.double)
r = np.empty((N-1,N-1,N-1),dtype=np.double)
ptilde1[:,:,:] = 0.0
ptilde2[:,:,:] = 0.0
r[:,:,:] = 0.0
omega = 2 / ( 1 + np.sin(np.pi/(N)) )
for kk in range(0,2000):
ppadtemp[:,:,:] = 0.0
ppadtemp[1:-1,1:-1, 1:-1] = ptilde1
ppadtemp[0 ,1:-1, 1:-1] = ptilde1[-1,:,:]
ppadtemp[-1 ,1:-1, 1:-1] = ptilde1[ 0,:,:]
ppadtemp[1:-1,0 , 1:-1] = ptilde1[:,-1,:]
ppadtemp[1:-1,-1 , 1:-1] = ptilde1[:, 0,:]
ppadtemp[1:-1,1:-1, 0] = ptilde1[:,:,-1]
ppadtemp[1:-1,1:-1, -1] = ptilde1[:,:, 0]
r[:,:,:] = 0.0
r -= S*dx*dx
#r[i,j,k] -= 0 #6*ptilde1[i,j,k]
r[:,:,:] += ppadtemp[0:-2,1:-1,1:-1]
r[:,:,:] += ppadtemp[2: ,1:-1,1:-1]
r[:,:,:] += ppadtemp[1:-1,0:-2,1:-1]
r[:,:,:] += ppadtemp[1:-1,2: ,1:-1]
r[:,:,:] += ppadtemp[1:-1,1:-1,0:-2]
r[:,:,:] += ppadtemp[1:-1,1:-1,2:]
ptilde2 = (1/6)*r
res_norm = np.sum((ptilde2-ptilde1)**2)
ptilde1 = ptilde2
print(kk)
print(res_norm)
if res_norm < 0.001:
break
p2 = ptilde1
#%%
Pxx = np.empty((N-1,N-1,N-1),dtype=np.double)
Pyy = np.empty((N-1,N-1,N-1),dtype=np.double)
Pzz = np.empty((N-1,N-1,N-1),dtype=np.double)
PS = np.empty((N-1,N-1,N-1),dtype=np.double)
for k in range(0,N-1):
for j in range(0, N-1):
for i in range(0, N-1):
if i==0:
Pxx[0,j,k] = (ptilde1[1,j,k] -2*ptilde1[0,j,k] + ptilde1[-1,j,k])/(dx*dx)
elif i==(N-2):
Pxx[-1,j,k] = (ptilde1[0,j,k] -2*ptilde1[-1,j,k] + ptilde1[-2,j,k])/(dx*dx)
else:
Pxx[i,j,k] = (ptilde1[i+1,j,k] -2*ptilde1[i,j,k] + ptilde1[i-1,j,k])/(dx*dx)
if j==0:
Pyy[i,0,k] = (ptilde1[i,1,k] -2*ptilde1[i,0,k] + ptilde1[i,-1,k])/(dx*dx)
elif j==(N-2):
Pyy[i,-1,k] = (ptilde1[i,0,k] -2*ptilde1[i,-1,k] + ptilde1[i,-2,k])/(dx*dx)
else:
Pyy[i,j,k] = (ptilde1[i,j+1,k] -2*ptilde1[i,j,k] + ptilde1[i,j-1,k])/(dx*dx)
if k==0:
Pzz[i,j,0] = (ptilde1[i,j,1] -2*ptilde1[i,j,0] + ptilde1[i,j,-1])/(dx*dx)
elif k==(N-2):
Pzz[i,j,-1] = (ptilde1[i,j,0] -2*ptilde1[i,j,-1] + ptilde1[i,j,-2])/(dx*dx)
else:
Pzz[i,j,k] = (ptilde1[i,j,k+1] -2*ptilde1[i,j,k] + ptilde1[i,j,k-1])/(dx*dx)
PS = Pxx + Pyy + Pzz
#%%
#Chop each field into three 42-cell-thick chunks along the last axis (six chunks in total across the two fields)
#Chunk1
U1a = U1[:,:,0:42]
V1a = V1[:,:,0:42]
W1a = W1[:,:,0:42]
P1a = p1[:,:,0:42]
#Chunk2
U2a = U1[:,:,42:84]
V2a = V1[:,:,42:84]
W2a = W1[:,:,42:84]
P2a = p1[:,:,42:84]
#Chunk3
U3a = U1[:,:,84:126]
V3a = V1[:,:,84:126]
W3a = W1[:,:,84:126]
P3a = p1[:,:,84:126]
#Chunk4
U4a = U2[:,:,0:42]
V4a = V2[:,:,0:42]
W4a = W2[:,:,0:42]
P4a = p2[:,:,0:42]
#Chunk5
U5a = U2[:,:,42:84]
V5a = V2[:,:,42:84]
W5a = W2[:,:,42:84]
P5a = p2[:,:,42:84]
#Chunk6
U6a = U2[:,:,84:126]
V6a = V2[:,:,84:126]
W6a = W2[:,:,84:126]
P6a = p2[:,:,84:126]
totalX = 512
currentX = 128*6
totalOverlap = currentX - totalX
#%%
Ufinal = np.empty((totalX,N-1,42),dtype=np.double)
Vfinal = np.empty((totalX,N-1,42),dtype=np.double)
Wfinal = np.empty((totalX,N-1,42),dtype=np.double)
Pfinal = np.empty((totalX,N-1,42),dtype=np.double)
#%%
Ufinal[42:86,:,:] = U1a[42:86,:,:]
Vfinal[42:86,:,:] = V1a[42:86,:,:]
Wfinal[42:86,:,:] = W1a[42:86,:,:]
Pfinal[42:86,:,:] = P1a[42:86,:,:]
Ufinal[128:172,:,:] = U2a[42:86,:,:]
Vfinal[128:172,:,:] = V2a[42:86,:,:]
Wfinal[128:172,:,:] = W2a[42:86,:,:]
Pfinal[128:172,:,:] = P2a[42:86,:,:]
Ufinal[214:258,:,:] = U3a[42:86,:,:]
Vfinal[214:258,:,:] = V3a[42:86,:,:]
Wfinal[214:258,:,:] = W3a[42:86,:,:]
Pfinal[214:258,:,:] = P3a[42:86,:,:]
Ufinal[300:344,:,:] = U4a[42:86,:,:]
Vfinal[300:344,:,:] = V4a[42:86,:,:]
Wfinal[300:344,:,:] = W4a[42:86,:,:]
Pfinal[300:344,:,:] = P4a[42:86,:,:]
Ufinal[386:430,:,:] = U5a[42:86,:,:]
Vfinal[386:430,:,:] = V5a[42:86,:,:]
Wfinal[386:430,:,:] = W5a[42:86,:,:]
Pfinal[386:430,:,:] = P5a[42:86,:,:]
Ufinal[472:512,:,:] = U6a[42:82,:,:]
Vfinal[472:512,:,:] = V6a[42:82,:,:]
Wfinal[472:512,:,:] = W6a[42:82,:,:]
Pfinal[472:512,:,:] = P6a[42:82,:,:]
for i in range(0,42):
#beta = 1 - np.cos((np.pi/2.0)*float(i)/41.0)
beta = float(i)/41.0
theta = (np.pi/2.0)*beta
Ufinal[i,:,:] = np.cos(theta)*U6a[82+i,:,:] + np.sin(theta)*U1a[i,:,:]
Vfinal[i,:,:] = np.cos(theta)*V6a[82+i,:,:] + np.sin(theta)*V1a[i,:,:]
Wfinal[i,:,:] = np.cos(theta)*W6a[82+i,:,:] + np.sin(theta)*W1a[i,:,:]
Pfinal[i,:,:] = np.cos(theta)*P6a[82+i,:,:] + np.sin(theta)*P1a[i,:,:]
Ufinal[86+i,:,:] = np.cos(theta)*U1a[86+i,:,:] + np.sin(theta)*U2a[i,:,:]
Vfinal[86+i,:,:] = np.cos(theta)*V1a[86+i,:,:] + np.sin(theta)*V2a[i,:,:]
Wfinal[86+i,:,:] = np.cos(theta)*W1a[86+i,:,:] + np.sin(theta)*W2a[i,:,:]
Pfinal[86+i,:,:] = np.cos(theta)*P1a[86+i,:,:] + np.sin(theta)*P2a[i,:,:]
Ufinal[172+i,:,:] = np.cos(theta)*U2a[86+i,:,:] + np.sin(theta)*U3a[i,:,:]
Vfinal[172+i,:,:] = np.cos(theta)*V2a[86+i,:,:] + np.sin(theta)*V3a[i,:,:]
Wfinal[172+i,:,:] = np.cos(theta)*W2a[86+i,:,:] + np.sin(theta)*W3a[i,:,:]
Pfinal[172+i,:,:] = np.cos(theta)*P2a[86+i,:,:] + np.sin(theta)*P3a[i,:,:]
Ufinal[258+i,:,:] = np.cos(theta)*U3a[86+i,:,:] + np.sin(theta)*U4a[i,:,:]
Vfinal[258+i,:,:] = np.cos(theta)*V3a[86+i,:,:] + np.sin(theta)*V4a[i,:,:]
Wfinal[258+i,:,:] = np.cos(theta)*W3a[86+i,:,:] + np.sin(theta)*W4a[i,:,:]
Pfinal[258+i,:,:] = np.cos(theta)*P3a[86+i,:,:] + np.sin(theta)*P4a[i,:,:]
Ufinal[344+i,:,:] = np.cos(theta)*U4a[86+i,:,:] + np.sin(theta)*U5a[i,:,:]
Vfinal[344+i,:,:] = np.cos(theta)*V4a[86+i,:,:] + np.sin(theta)*V5a[i,:,:]
Wfinal[344+i,:,:] = np.cos(theta)*W4a[86+i,:,:] + np.sin(theta)*W5a[i,:,:]
Pfinal[344+i,:,:] = np.cos(theta)*P4a[86+i,:,:] + np.sin(theta)*P5a[i,:,:]
Ufinal[430+i,:,:] = np.cos(theta)*U5a[86+i,:,:] + np.sin(theta)*U6a[i,:,:]
Vfinal[430+i,:,:] = np.cos(theta)*V5a[86+i,:,:] + np.sin(theta)*V6a[i,:,:]
Wfinal[430+i,:,:] = np.cos(theta)*W5a[86+i,:,:] + np.sin(theta)*W6a[i,:,:]
Pfinal[430+i,:,:] = np.cos(theta)*P5a[86+i,:,:] + np.sin(theta)*P6a[i,:,:]
#%%
f = open('U_uprime1_N128_k8_512x128x42.dat','w')
g = open('V_uprime1_N128_k8_512x128x42.dat','w')
h = open('W_uprime1_N128_k8_512x128x42.dat','w')
pp = open('P_uprime1_N128_k8_512x128x42.dat','w')
for k in range(0,42):
for j in range(0,128):
for i in range(0,512):
f.write("".join([str(Ufinal[i,j,k]), "\n"]))
g.write("".join([str(Vfinal[i,j,k]), "\n"]))
h.write("".join([str(Wfinal[i,j,k]), "\n"]))
pp.write("".join([str(Pfinal[i,j,k]), "\n"]))
f.close()
g.close()
h.close()
pp.close() | 0.042712 | 0.328893 |
from argparse import ArgumentParser
from glob import glob
import logging
from collections import OrderedDict
import json
from ssl import CERT_NONE, create_default_context
from parsedmarc import IMAPError, get_dmarc_reports_from_inbox, \
parse_report_file, elastic, kafkaclient, splunk, save_output, \
watch_inbox, email_results, SMTPError, ParserError, __version__
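# Command-line front end: parse local report files and/or an IMAP inbox, then optionally
# ship the results to Elasticsearch, Splunk HEC and/or Kafka, save them, and email them.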
logger = logging.getLogger("parsedmarc")
def _main():
"""Called when the module is executed"""
def process_reports(reports_):
output_str = "{0}\n".format(json.dumps(reports_,
ensure_ascii=False,
indent=2))
if not args.silent:
print(output_str)
if args.kafka_hosts:
try:
kafka_client = kafkaclient.KafkaClient(args.kafka_hosts)
except Exception as error_:
logger.error("Kafka Error: {0}".format(error_.__str__()))
if args.save_aggregate:
for report in reports_["aggregate_reports"]:
try:
if args.elasticsearch_host:
elastic.save_aggregate_report_to_elasticsearch(
report, index=es_aggregate_index)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if args.kafka_hosts:
kafka_client.save_aggregate_reports_to_kafka(
report, kafka_aggregate_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if args.hec:
try:
aggregate_reports_ = reports_["aggregate_reports"]
if len(aggregate_reports_) > 0:
hec_client.save_aggregate_reports_to_splunk(
aggregate_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
if args.save_forensic:
for report in reports_["forensic_reports"]:
try:
if args.elasticsearch_host:
elastic.save_forensic_report_to_elasticsearch(
report, index=es_forensic_index)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if args.kafka_hosts:
kafka_client.save_forensic_reports_to_kafka(
report, kafka_forensic_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if args.hec:
try:
forensic_reports_ = reports_["forensic_reports"]
if len(forensic_reports_) > 0:
hec_client.save_forensic_reports_to_splunk(
forensic_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
arg_parser = ArgumentParser(description="Parses DMARC reports")
arg_parser.add_argument("file_path", nargs="*",
help="one or more paths to aggregate or forensic "
"report files or emails")
strip_attachment_help = "Remove attachment payloads from forensic " \
"report output"
arg_parser.add_argument("--strip-attachment-payloads",
help=strip_attachment_help, action="store_true")
arg_parser.add_argument("-o", "--output",
help="Write output files to the given directory")
arg_parser.add_argument("-n", "--nameservers", nargs="+",
help="nameservers to query "
"(Default is Cloudflare's nameservers)")
arg_parser.add_argument("-t", "--timeout",
help="number of seconds to wait for an answer "
"from DNS (Default: 2.0)",
type=float,
default=6.0)
arg_parser.add_argument("-H", "--host", help="IMAP hostname or IP address")
arg_parser.add_argument("-u", "--user", help="IMAP user")
arg_parser.add_argument("-p", "--password", help="IMAP password")
arg_parser.add_argument("--imap-port", default=None, help="IMAP port")
arg_parser.add_argument("--imap-skip-certificate-verification",
action="store_true",
default=False,
help="Skip certificate verification for IMAP")
arg_parser.add_argument("--imap-no-ssl", action="store_true",
default=False,
help="Do not use SSL/TLS when connecting to IMAP")
arg_parser.add_argument("-r", "--reports-folder", default="INBOX",
help="The IMAP folder containing the reports\n"
"(Default: INBOX)")
arg_parser.add_argument("-a", "--archive-folder",
help="Specifies the IMAP folder to move "
"messages to after processing them\n"
"(Default: Archive)",
default="Archive")
arg_parser.add_argument("-d", "--delete",
help="Delete the reports after processing them",
action="store_true", default=False)
arg_parser.add_argument("-E", "--elasticsearch-host", nargs="*",
help="One or more Elasticsearch "
"hostnames or URLs to use (e.g. "
"localhost:9200)")
arg_parser.add_argument("--elasticsearch-index-prefix",
help="Prefix to add in front of the "
"dmarc_aggregate and dmarc_forensic "
"Elasticsearch index names, joined by _")
arg_parser.add_argument("--elasticsearch-index-suffix",
help="Append this suffix to the "
"dmarc_aggregate and dmarc_forensic "
"Elasticsearch index names, joined by _")
arg_parser.add_argument("--hec", help="URL to a Splunk HTTP Event "
"Collector (HEC)")
arg_parser.add_argument("--hec-token", help="The authorization token for "
"a Splunk "
"HTTP Event Collector (HEC)")
arg_parser.add_argument("--hec-index", help="The index to use when "
"sending events to the "
"Splunk HTTP Event Collector "
"(HEC)")
arg_parser.add_argument("--hec-skip-certificate-verification",
action="store_true",
default=False,
help="Skip certificate verification for Splunk "
"HEC")
arg_parser.add_argument("-K", "--kafka-hosts", nargs="*",
help="A list of one or more Kafka hostnames"
" or URLs")
arg_parser.add_argument("--kafka-aggregate-topic",
help="The Kafka topic to publish aggregate "
"reports to (Default: dmarc_aggregate)",
default="dmarc_aggregate")
arg_parser.add_argument("--kafka-forensic_topic",
help="The Kafka topic to publish forensic reports"
" to (Default: dmarc_forensic)",
default="dmarc_forensic")
arg_parser.add_argument("--save-aggregate", action="store_true",
default=False,
help="Save aggregate reports to search indexes")
arg_parser.add_argument("--save-forensic", action="store_true",
default=False,
help="Save forensic reports to search indexes")
arg_parser.add_argument("-O", "--outgoing-host",
help="Email the results using this host")
arg_parser.add_argument("-U", "--outgoing-user",
help="Email the results using this user")
arg_parser.add_argument("-P", "--outgoing-password",
help="Email the results using this password")
arg_parser.add_argument("--outgoing-port",
help="Email the results using this port")
arg_parser.add_argument("--outgoing-ssl",
help="Use SSL/TLS instead of STARTTLS (more "
"secure, and required by some providers, "
"like Gmail)")
arg_parser.add_argument("-F", "--outgoing-from",
help="Email the results using this from address")
arg_parser.add_argument("-T", "--outgoing-to", nargs="+",
help="Email the results to these addresses")
arg_parser.add_argument("-S", "--outgoing-subject",
help="Email the results using this subject")
arg_parser.add_argument("-A", "--outgoing-attachment",
help="Email the results using this filename")
arg_parser.add_argument("-M", "--outgoing-message",
help="Email the results using this message")
arg_parser.add_argument("-w", "--watch", action="store_true",
help="Use an IMAP IDLE connection to process "
"reports as they arrive in the inbox")
arg_parser.add_argument("--test",
help="Do not move or delete IMAP messages",
action="store_true", default=False)
arg_parser.add_argument("-s", "--silent", action="store_true",
help="Only print errors and warnings")
arg_parser.add_argument("--debug", action="store_true",
help="Print debugging information")
arg_parser.add_argument("-v", "--version", action="version",
version=__version__)
aggregate_reports = []
forensic_reports = []
args = arg_parser.parse_args()
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.WARNING)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
if args.host is None and len(args.file_path) == 0:
arg_parser.print_help()
exit(1)
es_aggregate_index = "dmarc_aggregate"
es_forensic_index = "dmarc_forensic"
if args.elasticsearch_index_prefix:
prefix = args.elasticsearch_index_prefix
es_aggregate_index = "{0}_{1}".format(prefix, es_aggregate_index)
es_forensic_index = "{0}_{1}".format(prefix, es_forensic_index)
if args.elasticsearch_index_suffix:
suffix = args.elasticsearch_index_suffix
es_aggregate_index = "{0}_{1}".format(es_aggregate_index, suffix)
es_forensic_index = "{0}_{1}".format(es_forensic_index, suffix)
if args.save_aggregate or args.save_forensic:
if (args.elasticsearch_host is None and args.hec is None
and args.kafka_hosts is None):
args.elasticsearch_host = ["localhost:9200"]
try:
if args.elasticsearch_host:
elastic.set_hosts(args.elasticsearch_host)
elastic.create_indexes([es_aggregate_index, es_forensic_index])
except elastic.ElasticsearchError as error:
logger.error("Elasticsearch Error: {0}".format(error.__str__()))
exit(1)
if args.hec:
if args.hec_token is None or args.hec_index is None:
logger.error("HEC token and HEC index are required when "
"using HEC URL")
exit(1)
verify = True
if args.hec_skip_certificate_verification:
verify = False
hec_client = splunk.HECClient(args.hec, args.hec_token,
args.hec_index,
verify=verify)
kafka_aggregate_topic = args.kafka_aggregate_topic
kafka_forensic_topic = args.kafka_forensic_topic
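# Expand any shell-style globs in the positional arguments and de-duplicate the paths.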
file_paths = []
for file_path in args.file_path:
file_paths += glob(file_path)
file_paths = list(set(file_paths))
for file_path in file_paths:
try:
sa = args.strip_attachment_payloads
file_results = parse_report_file(file_path,
nameservers=args.nameservers,
timeout=args.timeout,
strip_attachment_payloads=sa)
if file_results["report_type"] == "aggregate":
aggregate_reports.append(file_results["report"])
elif file_results["report_type"] == "forensic":
forensic_reports.append(file_results["report"])
except ParserError as error:
logger.error("Failed to parse {0} - {1}".format(file_path,
error))
if args.host:
try:
if args.user is None or args.password is None:
logger.error("user and password must be specified if"
"host is specified")
rf = args.reports_folder
af = args.archive_folder
ns = args.nameservers
sa = args.strip_attachment_payloads
ssl = True
ssl_context = None
if args.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if args.imap_no_ssl:
ssl = False
reports = get_dmarc_reports_from_inbox(host=args.host,
port=args.imap_port,
ssl=ssl,
ssl_context=ssl_context,
user=args.user,
password=args.password,
reports_folder=rf,
archive_folder=af,
delete=args.delete,
nameservers=ns,
test=args.test,
strip_attachment_payloads=sa
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if args.output:
save_output(results, output_directory=args.output)
process_reports(results)
if args.outgoing_host:
if args.outgoing_from is None or args.outgoing_to is None:
logger.error("--outgoing-from and --outgoing-to must "
"be provided if --outgoing-host is used")
exit(1)
try:
email_results(results, args.outgoing_host, args.outgoing_from,
args.outgoing_to, use_ssl=args.outgoing_ssl,
user=args.outgoing_user,
password=args.outgoing_password,
subject=args.outgoing_subject)
except SMTPError as error:
logger.error("SMTP Error: {0}".format(error.__str__()))
exit(1)
if args.host and args.watch:
logger.info("Watching for email - Quit with ctrl-c")
ssl = True
ssl_context = None
if args.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if args.imap_no_ssl:
ssl = False
try:
sa = args.strip_attachment_payloads
watch_inbox(args.host, args.user, args.password, process_reports,
port=args.imap_port, ssl=ssl, ssl_context=ssl_context,
reports_folder=args.reports_folder,
archive_folder=args.archive_folder, delete=args.delete,
test=args.test, nameservers=args.nameservers,
dns_timeout=args.timeout, strip_attachment_payloads=sa)
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
if __name__ == "__main__":
_main() | parsedmarc/cli.py | from argparse import ArgumentParser
from glob import glob
import logging
from collections import OrderedDict
import json
from ssl import CERT_NONE, create_default_context
from parsedmarc import IMAPError, get_dmarc_reports_from_inbox, \
parse_report_file, elastic, kafkaclient, splunk, save_output, \
watch_inbox, email_results, SMTPError, ParserError, __version__
logger = logging.getLogger("parsedmarc")
def _main():
"""Called when the module is executed"""
def process_reports(reports_):
output_str = "{0}\n".format(json.dumps(reports_,
ensure_ascii=False,
indent=2))
if not args.silent:
print(output_str)
if args.kafka_hosts:
try:
kafka_client = kafkaclient.KafkaClient(args.kafka_hosts)
except Exception as error_:
logger.error("Kafka Error: {0}".format(error_.__str__()))
if args.save_aggregate:
for report in reports_["aggregate_reports"]:
try:
if args.elasticsearch_host:
elastic.save_aggregate_report_to_elasticsearch(
report, index=es_aggregate_index)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if args.kafka_hosts:
kafka_client.save_aggregate_reports_to_kafka(
report, kafka_aggregate_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if args.hec:
try:
aggregate_reports_ = reports_["aggregate_reports"]
if len(aggregate_reports_) > 0:
hec_client.save_aggregate_reports_to_splunk(
aggregate_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
if args.save_forensic:
for report in reports_["forensic_reports"]:
try:
if args.elasticsearch_host:
elastic.save_forensic_report_to_elasticsearch(
report, index=es_forensic_index)
except elastic.AlreadySaved as warning:
logger.warning(warning.__str__())
except elastic.ElasticsearchError as error_:
logger.error("Elasticsearch Error: {0}".format(
error_.__str__()))
try:
if args.kafka_hosts:
kafka_client.save_forensic_reports_to_kafka(
report, kafka_forensic_topic)
except Exception as error_:
logger.error("Kafka Error: {0}".format(
error_.__str__()))
if args.hec:
try:
forensic_reports_ = reports_["forensic_reports"]
if len(forensic_reports_) > 0:
hec_client.save_forensic_reports_to_splunk(
forensic_reports_)
except splunk.SplunkError as e:
logger.error("Splunk HEC error: {0}".format(e.__str__()))
arg_parser = ArgumentParser(description="Parses DMARC reports")
arg_parser.add_argument("file_path", nargs="*",
help="one or more paths to aggregate or forensic "
"report files or emails")
strip_attachment_help = "Remove attachment payloads from forensic " \
"report output"
arg_parser.add_argument("--strip-attachment-payloads",
help=strip_attachment_help, action="store_true")
arg_parser.add_argument("-o", "--output",
help="Write output files to the given directory")
arg_parser.add_argument("-n", "--nameservers", nargs="+",
help="nameservers to query "
"(Default is Cloudflare's nameservers)")
arg_parser.add_argument("-t", "--timeout",
help="number of seconds to wait for an answer "
"from DNS (Default: 2.0)",
type=float,
default=6.0)
arg_parser.add_argument("-H", "--host", help="IMAP hostname or IP address")
arg_parser.add_argument("-u", "--user", help="IMAP user")
arg_parser.add_argument("-p", "--password", help="IMAP password")
arg_parser.add_argument("--imap-port", default=None, help="IMAP port")
arg_parser.add_argument("--imap-skip-certificate-verification",
action="store_true",
default=False,
help="Skip certificate verification for IMAP")
arg_parser.add_argument("--imap-no-ssl", action="store_true",
default=False,
help="Do not use SSL/TLS when connecting to IMAP")
arg_parser.add_argument("-r", "--reports-folder", default="INBOX",
help="The IMAP folder containing the reports\n"
"(Default: INBOX)")
arg_parser.add_argument("-a", "--archive-folder",
help="Specifies the IMAP folder to move "
"messages to after processing them\n"
"(Default: Archive)",
default="Archive")
arg_parser.add_argument("-d", "--delete",
help="Delete the reports after processing them",
action="store_true", default=False)
arg_parser.add_argument("-E", "--elasticsearch-host", nargs="*",
help="One or more Elasticsearch "
"hostnames or URLs to use (e.g. "
"localhost:9200)")
arg_parser.add_argument("--elasticsearch-index-prefix",
help="Prefix to add in front of the "
"dmarc_aggregate and dmarc_forensic "
"Elasticsearch index names, joined by _")
arg_parser.add_argument("--elasticsearch-index-suffix",
help="Append this suffix to the "
"dmarc_aggregate and dmarc_forensic "
"Elasticsearch index names, joined by _")
arg_parser.add_argument("--hec", help="URL to a Splunk HTTP Event "
"Collector (HEC)")
arg_parser.add_argument("--hec-token", help="The authorization token for "
"a Splunk "
"HTTP Event Collector (HEC)")
arg_parser.add_argument("--hec-index", help="The index to use when "
"sending events to the "
"Splunk HTTP Event Collector "
"(HEC)")
arg_parser.add_argument("--hec-skip-certificate-verification",
action="store_true",
default=False,
help="Skip certificate verification for Splunk "
"HEC")
arg_parser.add_argument("-K", "--kafka-hosts", nargs="*",
help="A list of one or more Kafka hostnames"
" or URLs")
arg_parser.add_argument("--kafka-aggregate-topic",
help="The Kafka topic to publish aggregate "
"reports to (Default: dmarc_aggregate)",
default="dmarc_aggregate")
arg_parser.add_argument("--kafka-forensic_topic",
help="The Kafka topic to publish forensic reports"
" to (Default: dmarc_forensic)",
default="dmarc_forensic")
arg_parser.add_argument("--save-aggregate", action="store_true",
default=False,
help="Save aggregate reports to search indexes")
arg_parser.add_argument("--save-forensic", action="store_true",
default=False,
help="Save forensic reports to search indexes")
arg_parser.add_argument("-O", "--outgoing-host",
help="Email the results using this host")
arg_parser.add_argument("-U", "--outgoing-user",
help="Email the results using this user")
arg_parser.add_argument("-P", "--outgoing-password",
help="Email the results using this password")
arg_parser.add_argument("--outgoing-port",
help="Email the results using this port")
arg_parser.add_argument("--outgoing-ssl",
help="Use SSL/TLS instead of STARTTLS (more "
"secure, and required by some providers, "
"like Gmail)")
arg_parser.add_argument("-F", "--outgoing-from",
help="Email the results using this from address")
arg_parser.add_argument("-T", "--outgoing-to", nargs="+",
help="Email the results to these addresses")
arg_parser.add_argument("-S", "--outgoing-subject",
help="Email the results using this subject")
arg_parser.add_argument("-A", "--outgoing-attachment",
help="Email the results using this filename")
arg_parser.add_argument("-M", "--outgoing-message",
help="Email the results using this message")
arg_parser.add_argument("-w", "--watch", action="store_true",
help="Use an IMAP IDLE connection to process "
"reports as they arrive in the inbox")
arg_parser.add_argument("--test",
help="Do not move or delete IMAP messages",
action="store_true", default=False)
arg_parser.add_argument("-s", "--silent", action="store_true",
help="Only print errors and warnings")
arg_parser.add_argument("--debug", action="store_true",
help="Print debugging information")
arg_parser.add_argument("-v", "--version", action="version",
version=__version__)
aggregate_reports = []
forensic_reports = []
args = arg_parser.parse_args()
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.WARNING)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logger.setLevel(logging.DEBUG)
if args.host is None and len(args.file_path) == 0:
arg_parser.print_help()
exit(1)
es_aggregate_index = "dmarc_aggregate"
es_forensic_index = "dmarc_forensic"
if args.elasticsearch_index_prefix:
prefix = args.elasticsearch_index_prefix
es_aggregate_index = "{0}_{1}".format(prefix, es_aggregate_index)
es_forensic_index = "{0}_{1}".format(prefix, es_forensic_index)
if args.elasticsearch_index_suffix:
suffix = args.elasticsearch_index_suffix
es_aggregate_index = "{0}_{1}".format(es_aggregate_index, suffix)
es_forensic_index = "{0}_{1}".format(es_forensic_index, suffix)
if args.save_aggregate or args.save_forensic:
if (args.elasticsearch_host is None and args.hec is None
and args.kafka_hosts is None):
args.elasticsearch_host = ["localhost:9200"]
try:
if args.elasticsearch_host:
elastic.set_hosts(args.elasticsearch_host)
elastic.create_indexes([es_aggregate_index, es_forensic_index])
except elastic.ElasticsearchError as error:
logger.error("Elasticsearch Error: {0}".format(error.__str__()))
exit(1)
if args.hec:
if args.hec_token is None or args.hec_index is None:
logger.error("HEC token and HEC index are required when "
"using HEC URL")
exit(1)
verify = True
if args.hec_skip_certificate_verification:
verify = False
hec_client = splunk.HECClient(args.hec, args.hec_token,
args.hec_index,
verify=verify)
kafka_aggregate_topic = args.kafka_aggregate_topic
kafka_forensic_topic = args.kafka_forensic_topic
file_paths = []
for file_path in args.file_path:
file_paths += glob(file_path)
file_paths = list(set(file_paths))
for file_path in file_paths:
try:
sa = args.strip_attachment_payloads
file_results = parse_report_file(file_path,
nameservers=args.nameservers,
timeout=args.timeout,
strip_attachment_payloads=sa)
if file_results["report_type"] == "aggregate":
aggregate_reports.append(file_results["report"])
elif file_results["report_type"] == "forensic":
forensic_reports.append(file_results["report"])
except ParserError as error:
logger.error("Failed to parse {0} - {1}".format(file_path,
error))
if args.host:
try:
if args.user is None or args.password is None:
logger.error("user and password must be specified if"
"host is specified")
rf = args.reports_folder
af = args.archive_folder
ns = args.nameservers
sa = args.strip_attachment_payloads
ssl = True
ssl_context = None
if args.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if args.imap_no_ssl:
ssl = False
reports = get_dmarc_reports_from_inbox(host=args.host,
port=args.imap_port,
ssl=ssl,
ssl_context=ssl_context,
user=args.user,
password=args.password,
reports_folder=rf,
archive_folder=af,
delete=args.delete,
nameservers=ns,
test=args.test,
strip_attachment_payloads=sa
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
results = OrderedDict([("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports)])
if args.output:
save_output(results, output_directory=args.output)
process_reports(results)
if args.outgoing_host:
if args.outgoing_from is None or args.outgoing_to is None:
logger.error("--outgoing-from and --outgoing-to must "
"be provided if --outgoing-host is used")
exit(1)
try:
email_results(results, args.outgoing_host, args.outgoing_from,
args.outgoing_to, use_ssl=args.outgoing_ssl,
user=args.outgoing_user,
password=args.outgoing_password,
subject=args.outgoing_subject)
except SMTPError as error:
logger.error("SMTP Error: {0}".format(error.__str__()))
exit(1)
if args.host and args.watch:
logger.info("Watching for email - Quit with ctrl-c")
ssl = True
ssl_context = None
if args.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
ssl_context = create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = CERT_NONE
if args.imap_no_ssl:
ssl = False
try:
sa = args.strip_attachment_payloads
watch_inbox(args.host, args.user, args.password, process_reports,
port=args.imap_port, ssl=ssl, ssl_context=ssl_context,
reports_folder=args.reports_folder,
archive_folder=args.archive_folder, delete=args.delete,
test=args.test, nameservers=args.nameservers,
dns_timeout=args.timeout, strip_attachment_payloads=sa)
except IMAPError as error:
logger.error("IMAP Error: {0}".format(error.__str__()))
exit(1)
if __name__ == "__main__":
_main() | 0.386763 | 0.089177 |
import re
from troposphere import (
AWS_REGION,
AWS_ACCOUNT_ID
)
from troposphere import (
ImportValue,
Parameter,
GetAtt,
Sub,
Ref
)
from troposphere.iam import (
Role
)
from troposphere.s3 import Bucket
from troposphere.awslambda import Function
from troposphere.kms import (
Key, Alias
)
from ozone.filters.regexes import (
S3_ARN_PREFIX, S3_NAME, S3_ARN,
IAM_ROLE_NAME, IAM_ROLE_ARN,
LAMBDA_NAME, LAMBDA_ARN,
LAMBDA_LAYER_VERSION, LAMBDA_LAYER_ARN,
KMS_KEY_ARN, KMS_KEY_ID,
KMS_ALIAS, KMS_ALIAS_ARN
)
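# Helpers that normalise plain names, existing ARNs, troposphere resources/Parameters and
# intrinsic functions (Ref/Sub/GetAtt/ImportValue) into ARNs usable in templates.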
def s3_bucket(bucket, any_object=False):
"""
Args:
bucket: represents the bucket object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
the full ARN string if only the bucket name is given
the full ARN unchanged if it already matches the S3 bucket ARN pattern
"""
arn_pat = re.compile(S3_ARN)
name_pat = re.compile(S3_NAME)
if isinstance(bucket, (ImportValue, GetAtt, Sub, Ref)):
return bucket
elif isinstance(bucket, Parameter):
if any_object:
return Sub(f'arn:aws:s3:::${{{bucket.title}}}/*')
else:
return Sub(f'arn:aws:s3:::${{{bucket.title}}}')
elif isinstance(bucket, Bucket):
return GetAtt(bucket, 'Arn')
elif isinstance(bucket, str):
if arn_pat.match(bucket):
return bucket
elif name_pat.match(bucket):
if any_object:
return f'{S3_ARN_PREFIX}{bucket}/*'
else:
return f'{S3_ARN_PREFIX}{bucket}'
else:
raise ValueError('The S3 ARN must follow', S3_ARN)
else:
raise ValueError(
'The S3 ARN must be computed with a function or follow the pattern',
S3_ARN
)
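# For example (a hypothetical call, assuming S3_NAME matches plain bucket names):
# s3_bucket('my-bucket', any_object=True) -> 'arn:aws:s3:::my-bucket/*'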
def iam_role(role):
"""
Args:
role: represents the role object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
a Sub building the full ARN if only the role name is given
the full ARN unchanged if it already matches the IAM role ARN pattern
"""
arn_pattern = re.compile(IAM_ROLE_ARN)
name_pattern = re.compile(IAM_ROLE_NAME)
if isinstance(role, str):
if name_pattern.match(role):
role_arn = Sub(f'arn:aws:iam::${{AWS::AccountId}}:role/{role}')
elif role.startswith('arn:aws:iam::') and arn_pattern.match(role):
role_arn = role
else:
raise ValueError(
'Role ARN must follow either the name or full arn patterns',
IAM_ROLE_NAME,
IAM_ROLE_ARN
)
elif isinstance(role, (Parameter, Role)):
role_arn = GetAtt(role, 'Arn')
elif isinstance(role, (GetAtt, Sub, Ref, ImportValue)):
role_arn = role
else:
raise TypeError('role expected to be of type', str, ImportValue, Role, Sub, GetAtt, Ref)
return role_arn
def lambda_function(function):
"""
Args:
function: represents the function object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
a Sub building the full ARN if only the function name is given
the full ARN unchanged if it already matches the Lambda function ARN pattern
"""
arn_pattern = re.compile(LAMBDA_ARN)
name_pattern = re.compile(LAMBDA_NAME)
if isinstance(function, str):
if name_pattern.match(function):
function_arn = Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:function:{function}')
elif function.startswith('arn:aws:lambda:') and arn_pattern.match(function):
function_arn = function
else:
raise ValueError(
'Function ARN must follow either the name or full arn patterns',
LAMBDA_NAME,
LAMBDA_ARN
)
elif isinstance(function, (Parameter, Function)):
function_arn = GetAtt(function, 'Arn')
elif isinstance(function, (ImportValue, GetAtt, Sub, Ref)):
function_arn = function
else:
raise TypeError('Function expected to be of type', str, Role, Sub, GetAtt, Ref, ImportValue)
return function_arn
def lambda_layer(layer):
"""
Args:
layer: represents the layer object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
a Sub building the full ARN if only the layer name and version are given
the full ARN unchanged if it already matches the Lambda layer ARN pattern
"""
arn_pattern = re.compile(LAMBDA_LAYER_ARN)
version_pattern = re.compile(LAMBDA_LAYER_VERSION)
if isinstance(layer, (GetAtt, Ref, Sub, ImportValue)):
return layer
elif isinstance(layer, str):
if arn_pattern.match(layer):
return layer
elif version_pattern.match(layer):
return Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:layer:{layer}')
else:
raise ValueError(
"Layer ARN expected of format"
f"{LAMBDA_LAYER_ARN} or {LAMBDA_LAYER_VERSION}"
)
else:
raise ValueError(
'Layer does not comply with any of the supported Lambda layer patterns'
)
def kms_key(key):
"""
Args:
key: represents the key object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
a Sub building the full ARN if only the key ID is given
the full ARN unchanged if it already matches the KMS key ARN pattern
"""
arn_pattern = re.compile(KMS_KEY_ARN)
id_pattern = re.compile(KMS_KEY_ID)
if isinstance(key, (Ref, Sub, ImportValue, GetAtt)):
return key
if isinstance(key, (Parameter, Key)):
return GetAtt(key, 'Arn')
if isinstance(key, str):
if arn_pattern.match(key):
return key
if id_pattern.match(key):
return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:key/{key}')
else:
raise ValueError('Key does not match pattern', KMS_KEY_ARN, KMS_KEY_ID)
def kms_alias(alias):
"""
Args:
alias: represents the alias object, or a function
Returns:
the value untouched if it is one of the supported intrinsic functions
a Sub building the full ARN if only the alias name is given
the full ARN unchanged if it already matches the KMS alias ARN pattern
"""
arn_pattern = re.compile(KMS_ALIAS_ARN)
alias_pattern = re.compile(KMS_ALIAS)
if isinstance(alias, (Ref, Sub, ImportValue, GetAtt)):
return alias
if isinstance(alias, (Parameter, Alias)):
return GetAtt(alias, 'Arn')
if isinstance(alias, str):
if arn_pattern.match(alias):
return alias
if alias_pattern.match(alias):
return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:{alias}')
else:
raise ValueError('Alias does not match pattern', alias, KMS_ALIAS, KMS_ALIAS_ARN) | ozone/filters/arns.py | import re
from troposphere import (
AWS_REGION,
AWS_ACCOUNT_ID
)
from troposphere import (
ImportValue,
Parameter,
GetAtt,
Sub,
Ref
)
from troposphere.iam import (
Role
)
from troposphere.s3 import Bucket
from troposphere.awslambda import Function
from troposphere.kms import (
Key, Alias
)
from ozone.filters.regexes import (
S3_ARN_PREFIX, S3_NAME, S3_ARN,
IAM_ROLE_NAME, IAM_ROLE_ARN,
LAMBDA_NAME, LAMBDA_ARN,
LAMBDA_LAYER_VERSION, LAMBDA_LAYER_ARN,
KMS_KEY_ARN, KMS_KEY_ID,
KMS_ALIAS, KMS_ALIAS_ARN
)
def s3_bucket(bucket, any_object=False):
"""
Args:
bucket: represents the bucket object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the bucket name is given
full ARN if full ARN is given and match S3 bucket ARN pattern
"""
arn_pat = re.compile(S3_ARN)
name_pat = re.compile(S3_NAME)
if isinstance(bucket, (ImportValue, GetAtt, Sub, Ref)):
return bucket
elif isinstance(bucket, Parameter):
if any_object:
return Sub(f'arn:aws:s3:::${{{bucket.title}}}/*')
else:
return Sub(f'arn:aws:s3:::${{{bucket.title}}}')
elif isinstance(bucket, Bucket):
return GetAtt(bucket, 'Arn')
elif isinstance(bucket, str):
if arn_pat.match(bucket):
return bucket
elif name_pat.match(bucket):
if any_object:
return f'{S3_ARN_PREFIX}{bucket}/*'
else:
return f'{S3_ARN_PREFIX}{bucket}'
else:
raise ValueError('The S3 ARN must follow', S3_ARN)
else:
raise ValueError(
'The S3 ARN must be computed with a function or follow the pattern',
S3_ARN
)
def iam_role(role):
"""
Args:
role: represents the role object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the role name is given
full ARN if full ARN is given and match IAM role ARN pattern
"""
arn_pattern = re.compile(IAM_ROLE_ARN)
name_pattern = re.compile(IAM_ROLE_NAME)
if isinstance(role, str):
if name_pattern.match(role):
role_arn = Sub(f'arn:aws:iam::${{AWS::AccountId}}:role/{role}')
elif role.startswith('arn:aws:iam::') and arn_pattern.match(role):
role_arn = role
else:
raise ValueError(
'Role ARN must follow either the name or full arn patterns',
IAM_ROLE_NAME,
IAM_ROLE_ARN
)
elif isinstance(role, (Parameter, Role)):
role_arn = GetAtt(role, 'Arn')
elif isinstance(role, (GetAtt, Sub, Ref, ImportValue)):
role_arn = role
else:
raise TypeError('role expected to be of type', str, ImportValue, Role, Sub, GetAtt, Ref)
return role_arn
def lambda_function(function):
"""
Args:
function: represents the function object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the function name is given
full ARN if full ARN is given and match function ARN pattern
"""
arn_pattern = re.compile(LAMBDA_ARN)
name_pattern = re.compile(LAMBDA_NAME)
if isinstance(function, str):
if name_pattern.match(function):
function_arn = Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:function:{function}')
elif function.startswith('arn:aws:lambda:') and arn_pattern.match(function):
function_arn = function
else:
raise ValueError(
'Function ARN must follow either the name or full arn patterns',
LAMBDA_NAME,
LAMBDA_ARN
)
elif isinstance(function, (Parameter, Function)):
function_arn = GetAtt(function, 'Arn')
elif isinstance(function, (ImportValue, GetAtt, Sub, Ref)):
function_arn = function
else:
raise TypeError('Function expected to be of type', str, Role, Sub, GetAtt, Ref, ImportValue)
return function_arn
def lambda_layer(layer):
"""
Args:
layer: represents the layer object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the layer name is given
full ARN if full ARN is given and match Lambda layer ARN pattern
"""
arn_pattern = re.compile(LAMBDA_LAYER_ARN)
version_pattern = re.compile(LAMBDA_LAYER_VERSION)
if isinstance(layer, (GetAtt, Ref, Sub, ImportValue)):
return layer
elif isinstance(layer, str):
if arn_pattern.match(layer):
return layer
elif version_pattern.match(layer):
return Sub(f'arn:aws:lambda:${{AWS::Region}}:${{AWS::AccountId}}:layer:{layer}')
else:
raise ValueError(
"Layer ARN expected of format"
f"{LAMBDA_LAYER_ARN} or {LAMBDA_LAYER_VERSION}"
)
else:
raise ValueError(
'Layer does not comply to any required patterns of Functions'
)
def kms_key(key):
"""
Args:
key: represents the key object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the key name is given
full ARN if full ARN is given and match KMS key ARN pattern
"""
arn_pattern = re.compile(KMS_KEY_ARN)
id_pattern = re.compile(KMS_KEY_ID)
if isinstance(key, (Ref, Sub, ImportValue, GetAtt)):
return key
if isinstance(key, (Parameter, Key)):
return GetAtt(key, 'Arn')
if isinstance(key, str):
if arn_pattern.match(key):
return key
if id_pattern.match(key):
return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:key/{key}')
else:
raise ValueError('Key does not match pattern', KMS_KEY_ARN, KMS_KEY_ID)
def kms_alias(alias):
"""
Args:
alias: represents the alias object, or a function
Returns:
untouched if one of the functions supported
string of the full ARN if the alias name is given
full ARN if full ARN is given and match KMS Key alias ARN pattern
"""
arn_pattern = re.compile(KMS_ALIAS_ARN)
alias_pattern = re.compile(KMS_ALIAS)
if isinstance(alias, (Ref, Sub, ImportValue, GetAtt)):
return alias
if isinstance(alias, (Parameter, Alias)):
return GetAtt(alias, 'Arn')
if isinstance(alias, str):
if arn_pattern.match(alias):
return alias
if alias_pattern.match(alias):
return Sub(f'arn:aws:kms:${{AWS::Region}}:${{AWS::AccountId}}:{alias}')
else:
raise ValueError('Alias does not match pattern', alias, KMS_ALIAS, KMS_ALIAS_ARN) | 0.631026 | 0.25326 |
# todo: daemonize?
# todo: kickass idea: make all timers use one thread that will sleep smartly
# to send all events correctly.
import threading
import time
import wx
from python_toolbox.wx_tools.timing import cute_base_timer
wxEVT_THREAD_TIMER = wx.NewEventType()
EVT_THREAD_TIMER = wx.PyEventBinder(wxEVT_THREAD_TIMER, 1)
'''Event saying that a `ThreadTimer` has fired.'''
class ThreadTimer(cute_base_timer.CuteBaseTimer):
'''
A timer for a wxPython app which runs on a different thread.
This solved a problem of wxPython timers being late when the program is
busy.
'''
n = 0
'''The number of created thread timers.'''
_EventHandlerGrokker__event_code = EVT_THREAD_TIMER
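# Name-mangled class attribute, presumably read by python_toolbox's event-handler grokker
# so handlers can be bound to this timer's event automatically.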
def __init__(self, parent):
'''
Construct the ThreadTimer.
`parent` is the parent window.
'''
cute_base_timer.CuteBaseTimer.__init__(self, parent)
self.parent = parent
'''The parent window.'''
ThreadTimer.n += 1
self.wx_id = wx.NewId()
'''The ID of this timer, given by wxPython.'''
self.__init_thread()
self.alive = False
'''Flag saying whether this timer is running.'''
def __init_thread(self):
'''Create the thread.'''
thread_name = ''.join(('Thread used by ThreadTimer no. ', str(self.n)))
self.thread = Thread(self, name=thread_name)
# Overwriting previous thread, so it'll get garbage-collected,
# hopefully
def start(self, interval):
'''Start the timer.'''
if self.alive:
self.stop()
self.interval = interval
self.alive = True
self.thread.start()
def stop(self):
'''Stop the timer.'''
self.alive = False
self.thread.retired = True
self.__init_thread()
# Crutch for compatibility with wx.Timer:
Start = start
Stop = stop
def GetId(self):
'''Get the wx ID of this timer.'''
return self.wx_id
class Thread(threading.Thread):
'''Thread used as a timer for wxPython programs.'''
def __init__(self, parent, name):
threading.Thread.__init__(self, name=name)
self.parent = parent
self.retired = False
def run(self):
'''Run the thread. Internal function.'''
interval_in_seconds = self.parent.interval / 1000.0
def sleep():
time.sleep(interval_in_seconds)
sleep()
try:
while self.parent.alive is True and self.retired is False:
event = wx.PyEvent(self.parent.wx_id)
event.SetEventType(wxEVT_THREAD_TIMER)
wx.PostEvent(self.parent.parent, event)
sleep()
except:
return # Just so it wouldn't raise an error when `wx` is shutting
# down | python_toolbox/wx_tools/timing/thread_timer.py |
# todo: daemonize?
# todo: kickass idea: make all timers use one thread that will sleep smartly
# to send all events correctly.
import threading
import time
import wx
from python_toolbox.wx_tools.timing import cute_base_timer
wxEVT_THREAD_TIMER = wx.NewEventType()
EVT_THREAD_TIMER = wx.PyEventBinder(wxEVT_THREAD_TIMER, 1)
'''Event saying that a `ThreadTimer` has fired.'''
class ThreadTimer(cute_base_timer.CuteBaseTimer):
'''
A timer for a wxPython app which runs on a different thread.
This solved a problem of wxPython timers being late when the program is
busy.
'''
n = 0
'''The number of created thread timers.'''
_EventHandlerGrokker__event_code = EVT_THREAD_TIMER
def __init__(self, parent):
'''
Construct the ThreadTimer.
`parent` is the parent window.
'''
cute_base_timer.CuteBaseTimer.__init__(self, parent)
self.parent = parent
'''The parent window.'''
ThreadTimer.n += 1
self.wx_id = wx.NewId()
'''The ID of this timer, given by wxPython.'''
self.__init_thread()
self.alive = False
'''Flag saying whether this timer is running.'''
def __init_thread(self):
'''Create the thread.'''
thread_name = ''.join(('Thread used by ThreadTimer no. ', str(self.n)))
self.thread = Thread(self, name=thread_name)
# Overwriting previous thread, so it'll get garbage-collected,
# hopefully
def start(self, interval):
'''Start the timer.'''
if self.alive:
self.stop()
self.interval = interval
self.alive = True
self.thread.start()
def stop(self):
'''Stop the timer.'''
self.alive = False
self.thread.retired = True
self.__init_thread()
# Crutch for compatibility with wx.Timer:
Start = start
Stop = stop
def GetId(self):
'''Get the wx ID of this timer.'''
return self.wx_id
class Thread(threading.Thread):
'''Thread used as a timer for wxPython programs.'''
def __init__(self, parent, name):
threading.Thread.__init__(self, name=name)
self.parent = parent
self.retired = False
def run(self):
'''Run the thread. Internal function.'''
interval_in_seconds = self.parent.interval / 1000.0
def sleep():
time.sleep(interval_in_seconds)
sleep()
try:
while self.parent.alive is True and self.retired is False:
event = wx.PyEvent(self.parent.wx_id)
event.SetEventType(wxEVT_THREAD_TIMER)
wx.PostEvent(self.parent.parent, event)
sleep()
except:
return # Just so it wouldn't raise an error when `wx` is shutting
# down | 0.277865 | 0.129926 |
__author__ = '<NAME>'
__date__ = '2021-11-07'
__copyright__ = '(C) 2021, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterFile,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.dip import Interpolar
import os
import numpy as np
from qgis.PyQt.QtGui import QIcon
class GetPointValue(QgsProcessingAlgorithm):
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Translate to Portuguese: arg[0] - English (translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return GetPointValue()
def name(self):
return 'getpointvalue'
def displayName(self):
return self.tr('Estimate point value from Raster', 'Estimar valor de ponto a partir de Raster')
def group(self):
return self.tr('Raster')
def groupId(self):
return 'raster'
def tags(self):
return self.tr('sampling,sample,amostra,pegar,get,interpolate,interpolar,bilinear,cell').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))
txt_en = 'This tool estimates the value of the points from Raster, making the proper interpolation of the nearest pixels (cells).'
txt_pt = 'Esta ferramenta estima o valor dos pontos a partir de Raster, fazendo a devida interpolação dos pixels (células) mais próximos.'
figure = 'images/tutorial/raster_getpointvalue.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<p align="right">
<b>'''+self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
INPUT = 'INPUT'
BAND = 'BAND'
POINTS = 'POINTS'
RESAMPLING = 'RESAMPLING'
PREFIX = 'PREFIX'
OUTPUT = 'OUTPUT'
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT,
self.tr('Input Raster', 'Raster de entrada'),
[QgsProcessing.TypeRaster]
)
)
self.addParameter(
QgsProcessingParameterBand(
self.BAND,
self.tr('Band number', 'Número da banda'),
parentLayerParameterName=self.INPUT,
)
)
self.addParameter(
QgsProcessingParameterFeatureSource(
self.POINTS,
self.tr('Point Vector Layer', 'Camada Vetorial de Pontos'),
[QgsProcessing.TypeVectorPoint]
)
)
opcoes = [self.tr('Nearest','Vizinho mais próximo'),
self.tr('Bilinear'),
self.tr('Bicubic','Bicúbica')
]
self.addParameter(
QgsProcessingParameterEnum(
self.RESAMPLING,
self.tr('Interpolation method', 'Método de Interpolação'),
options = opcoes,
defaultValue= 1
)
)
self.addParameter(
QgsProcessingParameterString(
self.PREFIX,
self.tr('Output column prefix', 'Prefixo da coluna de saída'),
defaultValue = self.tr('sample_', 'amostra_')
)
)
# output
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Points with interpolated value from raster', 'Pontos com valor interpolado do Raster')
)
)
def processAlgorithm(self, parameters, context, feedback):
RasterIN = self.parameterAsRasterLayer(
parameters,
self.INPUT,
context
)
if RasterIN is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
n_banda = self.parameterAsInt(
parameters,
self.BAND,
context
)
if n_banda is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.BAND))
pontos = self.parameterAsSource(
parameters,
self.POINTS,
context
)
if pontos is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.POINTS))
reamostragem = self.parameterAsEnum(
parameters,
self.RESAMPLING,
context
)
reamostragem = ['nearest','bilinear','bicubic'][reamostragem]
prefixo = self.parameterAsString(
parameters,
self.PREFIX,
context
)
# Output layer
Fields = pontos.fields()
CRS = pontos.sourceCrs()
Fields.append(QgsField(prefixo + self.tr('value', 'valor'), QVariant.Double))
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
QgsWkbTypes.Point,
CRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
# Open the raster
feedback.pushInfo(self.tr('Opening raster file...', 'Abrindo arquivo Raster...'))
image = gdal.Open(RasterIN.dataProvider().dataSourceUri())
SRC = QgsCoordinateReferenceSystem(image.GetProjection())
ulx, xres, xskew, uly, yskew, yres = image.GetGeoTransform()
cols = image.RasterXSize
rows = image.RasterYSize
#n_bands = image.RasterCount
GDT = image.GetRasterBand(1).DataType
banda = image.GetRasterBand(n_banda).ReadAsArray()
valor_nulo = image.GetRasterBand(1).GetNoDataValue()
if not valor_nulo:
valor_nulo = 0
origem = (ulx, uly)
xres = abs(xres)
yres = abs(yres)
# Check CRS
if not SRC == CRS:
raise QgsProcessingException(self.tr('The raster layer and the homologous point vector layer must have the same CRS!', 'A camada raster e a camada vetorial de pontos homólogos devem ter o mesmo SRC!'))
# Compute the interpolated value for each point
Percent = 100.0/pontos.featureCount() if pontos.featureCount()>0 else 0
newfeat = QgsFeature(Fields)
for index, feat in enumerate(pontos.getFeatures()):
geom = feat.geometry()
att = feat.attributes()
if geom.isMultipart():
pnts = geom.asMultiPoint()
for pnt in pnts:
X, Y = pnt.x(), pnt.y()
valor = Interpolar(X, Y,
banda,
origem,
xres,
yres,
reamostragem,
valor_nulo)
newfeat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(X, Y)))
newfeat.setAttributes(att + [valor])
sink.addFeature(newfeat, QgsFeatureSink.FastInsert)
else:
pnt = geom.asPoint()
X, Y = pnt.x(), pnt.y()
valor = Interpolar(X, Y,
banda,
origem,
xres,
yres,
reamostragem,
valor_nulo)
newfeat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(X, Y)))
newfeat.setAttributes(att + [valor])
sink.addFeature(newfeat, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int((index+1) * Percent))
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
return {'output': self.OUTPUT} | processing_provider/Rast_getPointValue.py | __author__ = '<NAME>'
__date__ = '2021-11-07'
__copyright__ = '(C) 2021, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterFile,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.dip import Interpolar
import os
import numpy as np
from qgis.PyQt.QtGui import QIcon
class GetPointValue(QgsProcessingAlgorithm):
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Translate to Portuguese: arg[0] - English (translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return GetPointValue()
def name(self):
return 'getpointvalue'
def displayName(self):
return self.tr('Estimate point value from Raster', 'Estimar valor de ponto a partir de Raster')
def group(self):
return self.tr('Raster')
def groupId(self):
return 'raster'
def tags(self):
return self.tr('sampling,sample,amostra,pegar,get,interpolate,interpolar,bilinear,cell').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))
txt_en = 'This tool estimates the value of the points from Raster, making the proper interpolation of the nearest pixels (cells).'
txt_pt = 'Esta ferramenta estima o valor dos pontos a partir de Raster, fazendo a devida interpolação dos pixels (células) mais próximos.'
figure = 'images/tutorial/raster_getpointvalue.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<p align="right">
<b>'''+self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
INPUT = 'INPUT'
BAND = 'BAND'
POINTS = 'POINTS'
RESAMPLING = 'RESAMPLING'
PREFIX = 'PREFIX'
OUTPUT = 'OUTPUT'
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT,
self.tr('Input Raster', 'Raster de entrada'),
[QgsProcessing.TypeRaster]
)
)
self.addParameter(
QgsProcessingParameterBand(
self.BAND,
self.tr('Band number', 'Número da banda'),
parentLayerParameterName=self.INPUT,
)
)
self.addParameter(
QgsProcessingParameterFeatureSource(
self.POINTS,
self.tr('Point Vector Layer', 'Camada Vetorial de Pontos'),
[QgsProcessing.TypeVectorPoint]
)
)
opcoes = [self.tr('Nearest','Vizinho mais próximo'),
self.tr('Bilinear'),
self.tr('Bicubic','Bicúbica')
]
self.addParameter(
QgsProcessingParameterEnum(
self.RESAMPLING,
self.tr('Interpolation method', 'Método de Interpolação'),
options = opcoes,
defaultValue= 1
)
)
self.addParameter(
QgsProcessingParameterString(
self.PREFIX,
self.tr('Output column prefix', 'Prefixo da coluna de saída'),
defaultValue = self.tr('sample_', 'amostra_')
)
)
# output
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Points with interpolated value from raster', 'Pontos com valor interpolado do Raster')
)
)
def processAlgorithm(self, parameters, context, feedback):
RasterIN = self.parameterAsRasterLayer(
parameters,
self.INPUT,
context
)
if RasterIN is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
n_banda = self.parameterAsInt(
parameters,
self.BAND,
context
)
if n_banda is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.BAND))
pontos = self.parameterAsSource(
parameters,
self.POINTS,
context
)
if pontos is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.POINTS))
reamostragem = self.parameterAsEnum(
parameters,
self.RESAMPLING,
context
)
reamostragem = ['nearest','bilinear','bicubic'][reamostragem]
prefixo = self.parameterAsString(
parameters,
self.PREFIX,
context
)
# Output layer
Fields = pontos.fields()
CRS = pontos.sourceCrs()
Fields.append(QgsField(prefixo + self.tr('value', 'valor'), QVariant.Double))
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
QgsWkbTypes.Point,
CRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
# Open the raster
feedback.pushInfo(self.tr('Opening raster file...', 'Abrindo arquivo Raster...'))
image = gdal.Open(RasterIN.dataProvider().dataSourceUri())
SRC = QgsCoordinateReferenceSystem(image.GetProjection())
ulx, xres, xskew, uly, yskew, yres = image.GetGeoTransform()
cols = image.RasterXSize
rows = image.RasterYSize
#n_bands = image.RasterCount
GDT = image.GetRasterBand(1).DataType
banda = image.GetRasterBand(n_banda).ReadAsArray()
valor_nulo = image.GetRasterBand(1).GetNoDataValue()
if not valor_nulo:
valor_nulo = 0
origem = (ulx, uly)
xres = abs(xres)
yres = abs(yres)
# Check CRS
if not SRC == CRS:
raise QgsProcessingException(self.tr('The raster layer and the homologous point vector layer must have the same CRS!', 'A camada raster e a camada vetorial de pontos homólogos devem ter o mesmo SRC!'))
# Compute the interpolated value for each point
Percent = 100.0/pontos.featureCount() if pontos.featureCount()>0 else 0
newfeat = QgsFeature(Fields)
for index, feat in enumerate(pontos.getFeatures()):
geom = feat.geometry()
att = feat.attributes()
if geom.isMultipart():
pnts = geom.asMultiPoint()
for pnt in pnts:
X, Y = pnt.x(), pnt.y()
valor = Interpolar(X, Y,
banda,
origem,
xres,
yres,
reamostragem,
valor_nulo)
newfeat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(X, Y)))
newfeat.setAttributes(att + [valor])
sink.addFeature(newfeat, QgsFeatureSink.FastInsert)
else:
pnt = geom.asPoint()
X, Y = pnt.x(), pnt.y()
valor = Interpolar(X, Y,
banda,
origem,
xres,
yres,
reamostragem,
valor_nulo)
newfeat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(X, Y)))
newfeat.setAttributes(att + [valor])
sink.addFeature(newfeat, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int((index+1) * Percent))
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
return {'output': self.OUTPUT} | 0.492432 | 0.185947 |
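The algorithm above delegates the actual sampling to Interpolar (from lftools.geocapt.dip), whose implementation is not part of this row. The sketch below is an assumption-laden illustration of the bilinear case only, mirroring the call signature used above; it is not the plugin's real Interpolar.

import numpy as np

def bilinear_sample(X, Y, band, origin, xres, yres, nodata):
    # band: 2D array from ReadAsArray(); origin: (ulx, uly); xres/yres positive.
    ulx, uly = origin
    # Fractional pixel coordinates of the point, measured from pixel centres.
    col = (X - ulx) / xres - 0.5
    row = (uly - Y) / yres - 0.5
    c0, r0 = int(np.floor(col)), int(np.floor(row))
    if r0 < 0 or c0 < 0 or r0 + 1 >= band.shape[0] or c0 + 1 >= band.shape[1]:
        return nodata
    window = band[r0:r0 + 2, c0:c0 + 2].astype(float)
    if np.any(window == nodata):
        return nodata
    dc, dr = col - c0, row - r0
    top = window[0, 0] * (1 - dc) + window[0, 1] * dc
    bottom = window[1, 0] * (1 - dc) + window[1, 1] * dc
    return top * (1 - dr) + bottom * dr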
import numpy as np
def speed(u, v):
return np.sqrt(u ** 2 + v ** 2)
def day_night_split(solzen: np.ndarray) -> tuple:
"""
solar zenith angle (degrees, 0->180; daytime if < 85)
:param solzen: array of solar zenith angles
:return: tuple of index arrays for the daytime and nighttime elements
Reference
------
.. [#] AIRS/AMSU/HSB Version 5 Level 1B Product User Guide(P10)
"""
return np.where(solzen < 85), np.where(solzen >= 85)
def dpres1d(pressure: np.ndarray or list, bot_p: float, top_p: float) -> np.ndarray:
"""
Compute the pressure thickness of each layer in a constant-pressure-level system.
:param pressure: sequence of pressure levels
:param bot_p: bottom pressure bounding the thickness calculation
:param top_p: top pressure bounding the thickness calculation
:return: layer thicknesses, one per input pressure level
"""
dp = np.full(np.shape(pressure), np.nan)
len_p = len(pressure)
lev_start_idx = 0
lev_last_idx = len_p - 1
if pressure[1] > pressure[0]:
tmp_p = pressure
else:
tmp_p = pressure[::-1]
if top_p <= tmp_p[0] and bot_p >= tmp_p[-1]:
dp[0] = (tmp_p[0] + tmp_p[1]) * 0.5 - top_p
for lev_idx in range(1, len_p - 1):
dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[lev_idx - 1]) * 0.5
dp[len_p - 1] = bot_p - (tmp_p[len_p - 1] + tmp_p[len_p - 2]) * 0.5
else:
for lev_start_idx in range(len_p - 1, 0, -1):
if (tmp_p[lev_start_idx - 1] + tmp_p[lev_start_idx]) / 2 < top_p:
break
for lev_last_idx in range(len_p - 1):
if (tmp_p[lev_last_idx + 1] + tmp_p[lev_last_idx]) / 2 > bot_p:
break
if lev_start_idx == lev_last_idx:
dp[lev_start_idx] = bot_p - top_p
elif lev_start_idx < lev_last_idx:
dp[lev_start_idx] = (tmp_p[lev_start_idx] + tmp_p[
lev_start_idx + 1]) * 0.5 - top_p
for lev_idx in range(lev_start_idx + 1, lev_last_idx - 1):
dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[
lev_idx - 1]) * 0.5
dp[lev_last_idx] = bot_p - (
tmp_p[lev_start_idx] + tmp_p[lev_start_idx + 1]) * 0.5
return dp
def dbe1(dep, curt_mag, dis, delta):
"""One-dimensional Dynamic Balance Equation.
:param dep: The depth of water
:param curt_mag: Tidal current
:param dis: The distance between the two stations
:param delta: Time Step
:return: The terms of the dynamic balance equation
:rtype: tuple
"""
time_len = np.size(curt_mag[0])
# Pressure Gradient
p_grad = 9.80665 * (dep[0][:] - dep[1][:]) / dis[0]
# Local Acceleration
local_acc = np.zeros(time_len)
for i in np.arange(1, time_len - 1):
local_acc[i] = (curt_mag[1][i + 1] - curt_mag[1][i - 1]) / (delta * 2)
# Advection Acceleration
adv_acc = np.zeros(time_len)
for i in np.arange(time_len):
adv_acc[i] = curt_mag[1][i] * (curt_mag[0][i] - curt_mag[1][i]) / dis[1]
# Bottom Friction
bf = local_acc + adv_acc + p_grad
return p_grad, local_acc, adv_acc, bf | esep/physics/base.py | import numpy as np
def speed(u, v):
return np.sqrt(u ** 2 + v ** 2)
def day_night_split(solzen: np.ndarray) -> tuple:
"""
solar zenith angle (degrees, 0->180; daytime if < 85)
:param solzen: array of solar zenith angles
:return: tuple of index arrays for the daytime and nighttime elements
Reference
------
.. [#] AIRS/AMSU/HSB Version 5 Level 1B Product User Guide(P10)
"""
return np.where(solzen < 85), np.where(solzen >= 85)
def dpres1d(pressure: np.ndarray or list, bot_p: float, top_p: float) -> np.ndarray:
"""
Compute the pressure thickness of each layer in a constant-pressure-level system.
:param pressure: sequence of pressure levels
:param bot_p: bottom pressure bounding the thickness calculation
:param top_p: top pressure bounding the thickness calculation
:return: layer thicknesses, one per input pressure level
"""
dp = np.full(np.shape(pressure), np.nan)
len_p = len(pressure)
lev_start_idx = 0
lev_last_idx = len_p - 1
if pressure[1] > pressure[0]:
tmp_p = pressure
else:
tmp_p = pressure[::-1]
if top_p <= tmp_p[0] and bot_p >= tmp_p[-1]:
dp[0] = (tmp_p[0] + tmp_p[1]) * 0.5 - top_p
for lev_idx in range(1, len_p - 1):
dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[lev_idx - 1]) * 0.5
dp[len_p - 1] = bot_p - (tmp_p[len_p - 1] + tmp_p[len_p - 2]) * 0.5
else:
for lev_start_idx in range(len_p - 1, 0, -1):
if (tmp_p[lev_start_idx - 1] + tmp_p[lev_start_idx]) / 2 < top_p:
break
for lev_last_idx in range(len_p - 1):
if (tmp_p[lev_last_idx + 1] + tmp_p[lev_last_idx]) / 2 > bot_p:
break
if lev_start_idx == lev_last_idx:
dp[lev_start_idx] = bot_p - top_p
elif lev_start_idx < lev_last_idx:
dp[lev_start_idx] = (tmp_p[lev_start_idx] + tmp_p[
lev_start_idx + 1]) * 0.5 - top_p
for lev_idx in range(lev_start_idx + 1, lev_last_idx - 1):
dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[
lev_idx - 1]) * 0.5
dp[lev_last_idx] = bot_p - (
tmp_p[lev_start_idx] + tmp_p[lev_start_idx + 1]) * 0.5
return dp
def dbe1(dep, curt_mag, dis, delta):
"""One-dimensional Dynamic Balance Equation.
:param dep: The depth of water
:param curt_mag: Tidal current
:param dis: The distance between the two stations
:param delta: Time Step
:return: The terms of the dynamic balance equation
:rtype: tuple
"""
time_len = np.size(curt_mag[0])
# Pressure Gradient
p_grad = 9.80665 * (dep[0][:] - dep[1][:]) / dis[0]
# Local Acceleration
local_acc = np.zeros(time_len)
for i in np.arange(1, time_len - 1):
local_acc[i] = (curt_mag[1][i + 1] - curt_mag[1][i - 1]) / (delta * 2)
# Advection Acceleration
adv_acc = np.zeros(time_len)
for i in np.arange(time_len):
adv_acc[i] = curt_mag[1][i] * (curt_mag[0][i] - curt_mag[1][i]) / dis[1]
# Bottom Friction
bf = local_acc + adv_acc + p_grad
return p_grad, local_acc, adv_acc, bf | 0.599837 | 0.734548 |
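A quick sanity check of dpres1d's first branch: when all levels fall inside [top_p, bot_p], the layer thicknesses must add up to bot_p - top_p. The pressure values below are illustrative (hPa) and assume the dpres1d defined above is in scope.

import numpy as np

pressure = [100.0, 300.0, 500.0, 700.0, 900.0]
dp = dpres1d(pressure, bot_p=1000.0, top_p=50.0)
# dp -> [150., 200., 200., 200., 200.]; the sum is 950 = 1000 - 50
assert np.isclose(np.sum(dp), 1000.0 - 50.0)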
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'schedule_file',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('year', sa.SmallInteger),
sa.Column('semester', sa.SmallInteger),
sa.Column('institute', sa.String(128)),
sa.Column('grade', sa.String(1)),
sa.Column('course', sa.SmallInteger),
sa.Column('category', sa.String(16)),
sa.Column('file_path', sa.String, nullable=False),
)
op.create_table(
'group',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'room',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'teacher',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'period',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('file_id',
sa.Integer,
sa.ForeignKey('schedule_file.id', ondelete='CASCADE'),
nullable=False),
sa.Column('day', sa.SmallInteger, nullable=False),
sa.Column('number', sa.SmallInteger, nullable=False),
sa.Column('even', sa.SmallInteger, nullable=False),
sa.Column('name', sa.String),
sa.Column('category', sa.String),
sa.Column('group_id',
sa.Integer,
sa.ForeignKey('group.id', ondelete='CASCADE'),
nullable=False),
sa.Column('room_id',
sa.Integer,
sa.ForeignKey('room.id', ondelete='CASCADE')),
sa.Column('teacher_id',
sa.Integer,
sa.ForeignKey('teacher.id', ondelete='CASCADE')),
)
def downgrade():
op.drop_table('period')
op.drop_table('group')
op.drop_table('room')
op.drop_table('teacher')
op.drop_table('schedule_file') | alembic/versions/6312e2ecbbd6_init.py | from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'schedule_file',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('year', sa.SmallInteger),
sa.Column('semester', sa.SmallInteger),
sa.Column('institute', sa.String(128)),
sa.Column('grade', sa.String(1)),
sa.Column('course', sa.SmallInteger),
sa.Column('category', sa.String(16)),
sa.Column('file_path', sa.String, nullable=False),
)
op.create_table(
'group',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'room',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'teacher',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String, unique=True),
)
op.create_table(
'period',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('file_id',
sa.Integer,
sa.ForeignKey('schedule_file.id', ondelete='CASCADE'),
nullable=False),
sa.Column('day', sa.SmallInteger, nullable=False),
sa.Column('number', sa.SmallInteger, nullable=False),
sa.Column('even', sa.SmallInteger, nullable=False),
sa.Column('name', sa.String),
sa.Column('category', sa.String),
sa.Column('group_id',
sa.Integer,
sa.ForeignKey('group.id', ondelete='CASCADE'),
nullable=False),
sa.Column('room_id',
sa.Integer,
sa.ForeignKey('room.id', ondelete='CASCADE')),
sa.Column('teacher_id',
sa.Integer,
sa.ForeignKey('teacher.id', ondelete='CASCADE')),
)
def downgrade():
op.drop_table('period')
op.drop_table('group')
op.drop_table('room')
op.drop_table('teacher')
op.drop_table('schedule_file') | 0.364099 | 0.141519 |
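A sketch of applying and rolling back this revision through Alembic's Python API rather than the CLI. The alembic.ini path is an assumption about the project layout.

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')
command.upgrade(cfg, 'head')      # runs upgrade(): creates the five tables
command.downgrade(cfg, 'base')    # runs downgrade(): drops them, 'period' (the child table) first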
import math, random
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Backend.Kernels.Costs import ctcLoss, ctcLossTest
from PuzzleLib.Cost.Cost import Cost
class CTC(Cost):
def __init__(self, blank, vocabsize=None, normalized=False):
super().__init__()
self.normalized = normalized
if vocabsize is not None:
assert 0 <= blank <= vocabsize
self.vocabsize = vocabsize
self.blank = blank
def calcGrad(self, pred, target):
data, datalen = pred
labels, lengths = target
self.devErr.fill(0.0)
_, grad = ctcLoss(data, datalen, labels, lengths, self.blank, error=self.devErr, normalized=self.normalized)
return grad
def calcError(self, scores, labels):
self.accumErr += self.devErr
def calcVal(self, pred, target):
raise NotImplementedError()
def checkDataShape(self, pred, target):
data, datalen = pred
labels, lengths = target
assert datalen.dtype == labels.dtype and labels.dtype == lengths.dtype and lengths.dtype == np.int32
assert datalen.shape[0] == lengths.shape[0] and lengths.shape[0] == data.shape[1]
if self.vocabsize is not None:
assert data.shape[2] == self.vocabsize
def checkValDataShape(self, pred, target):
pass
def getBatchsize(self, pred):
return pred[0].shape[1]
def unittest():
smallTest()
mediumTest()
randomTest()
def smallTest():
hostData = np.array([[[0.1, 0.6, 0.1, 0.1, 0.1]], [[0.1, 0.1, 0.6, 0.1, 0.1]]], dtype=np.float32)
data = gpuarray.to_gpu(hostData)
datalen = gpuarray.to_gpu(np.array([2], dtype=np.int32))
labels = gpuarray.to_gpu(np.array([1, 2], dtype=np.int32))
lengths = np.array([2], dtype=np.int32)
ctc = CTC(blank=4, vocabsize=5, normalized=True)
error, grad = ctc([data, datalen], [labels, lengths])
hostScore = hostData[0, 0, 1] * hostData[1, 0, 2]
assert np.isclose(math.exp(-error), hostScore)
def mediumTest():
hostData = np.array([
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508]],
[[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549]],
[[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456]],
[[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345]],
[[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
], dtype=np.float32)
data = gpuarray.to_gpu(hostData)
datalen = gpuarray.to_gpu(np.array([5, 5], dtype=np.int32))
labels = gpuarray.to_gpu(np.array([
0, 1, 2, 1, 0,
0, 1, 1, 0
], dtype=np.int32))
lengths = np.array([5, 4], dtype=np.int32)
hostGrad = -np.array([
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508]],
[[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549]],
[[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544]],
[[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345]],
[[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
], dtype=np.float32)
ctc = CTC(vocabsize=6, blank=5, normalized=True)
error, grad = ctc([data, datalen], [labels, lengths])
hostScore = np.empty((2, ), dtype=np.float32)
hostScore[0] = -math.log(
hostData[0, 0, 0] * hostData[1, 0, 1] * hostData[2, 0, 2] * hostData[3, 0, 1] * hostData[4, 0, 0]
)
hostScore[1] = 5.42262
hostError = np.mean(hostScore)
assert np.isclose(hostError, error)
assert np.allclose(hostGrad, grad.get())
def randomTest():
times, batchsize, vocabsize = 20, 3, 6
hostData, hostDataLen, hostLabels, lengths = createData(times, batchsize, vocabsize)
data, datalen, labels = gpuarray.to_gpu(hostData), gpuarray.to_gpu(hostDataLen), gpuarray.to_gpu(hostLabels)
blank = 0
ctc = CTC(blank=0, vocabsize=vocabsize)
error, grad = ctc([data, datalen], [labels, lengths])
hostError, hostGrad, _ = ctcLossTest(hostData, hostDataLen, hostLabels, lengths, blank)
assert np.isclose(hostError / batchsize, error)
assert np.allclose(hostGrad, grad.get(), atol=1e-5)
def createData(times, batchsize, vocabsize):
data = np.random.randn(times, batchsize, vocabsize).astype(np.float32)
datalen = np.array([times] * batchsize, dtype=np.int32)
lengths = np.array([random.randint(a=times // 4, b=times // 2 - 1) for _ in range(batchsize)], dtype=np.int32)
labels = np.concatenate([
np.array([random.randint(a=1, b=vocabsize - 1) for _ in range(lengths[b])], dtype=np.int32)
for b in range(batchsize)
])
return data, datalen, labels, lengths
if __name__ == "__main__":
unittest() | Cost/CTC.py | import math, random
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Backend.Kernels.Costs import ctcLoss, ctcLossTest
from PuzzleLib.Cost.Cost import Cost
class CTC(Cost):
def __init__(self, blank, vocabsize=None, normalized=False):
super().__init__()
self.normalized = normalized
if vocabsize is not None:
assert 0 <= blank <= vocabsize
self.vocabsize = vocabsize
self.blank = blank
def calcGrad(self, pred, target):
data, datalen = pred
labels, lengths = target
self.devErr.fill(0.0)
_, grad = ctcLoss(data, datalen, labels, lengths, self.blank, error=self.devErr, normalized=self.normalized)
return grad
def calcError(self, scores, labels):
self.accumErr += self.devErr
def calcVal(self, pred, target):
raise NotImplementedError()
def checkDataShape(self, pred, target):
data, datalen = pred
labels, lengths = target
assert datalen.dtype == labels.dtype and labels.dtype == lengths.dtype and lengths.dtype == np.int32
assert datalen.shape[0] == lengths.shape[0] and lengths.shape[0] == data.shape[1]
if self.vocabsize is not None:
assert data.shape[2] == self.vocabsize
def checkValDataShape(self, pred, target):
pass
def getBatchsize(self, pred):
return pred[0].shape[1]
def unittest():
smallTest()
mediumTest()
randomTest()
def smallTest():
hostData = np.array([[[0.1, 0.6, 0.1, 0.1, 0.1]], [[0.1, 0.1, 0.6, 0.1, 0.1]]], dtype=np.float32)
data = gpuarray.to_gpu(hostData)
datalen = gpuarray.to_gpu(np.array([2], dtype=np.int32))
labels = gpuarray.to_gpu(np.array([1, 2], dtype=np.int32))
lengths = np.array([2], dtype=np.int32)
ctc = CTC(blank=4, vocabsize=5, normalized=True)
error, grad = ctc([data, datalen], [labels, lengths])
hostScore = hostData[0, 0, 1] * hostData[1, 0, 2]
assert np.isclose(math.exp(-error), hostScore)
def mediumTest():
hostData = np.array([
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508]],
[[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549]],
[[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456]],
[[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345]],
[[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
], dtype=np.float32)
data = gpuarray.to_gpu(hostData)
datalen = gpuarray.to_gpu(np.array([5, 5], dtype=np.int32))
labels = gpuarray.to_gpu(np.array([
0, 1, 2, 1, 0,
0, 1, 1, 0
], dtype=np.int32))
lengths = np.array([5, 4], dtype=np.int32)
hostGrad = -np.array([
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508]],
[[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549]],
[[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544]],
[[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345]],
[[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]
], dtype=np.float32)
ctc = CTC(vocabsize=6, blank=5, normalized=True)
error, grad = ctc([data, datalen], [labels, lengths])
hostScore = np.empty((2, ), dtype=np.float32)
hostScore[0] = -math.log(
hostData[0, 0, 0] * hostData[1, 0, 1] * hostData[2, 0, 2] * hostData[3, 0, 1] * hostData[4, 0, 0]
)
hostScore[1] = 5.42262
hostError = np.mean(hostScore)
assert np.isclose(hostError, error)
assert np.allclose(hostGrad, grad.get())
def randomTest():
times, batchsize, vocabsize = 20, 3, 6
hostData, hostDataLen, hostLabels, lengths = createData(times, batchsize, vocabsize)
data, datalen, labels = gpuarray.to_gpu(hostData), gpuarray.to_gpu(hostDataLen), gpuarray.to_gpu(hostLabels)
blank = 0
ctc = CTC(blank=0, vocabsize=vocabsize)
error, grad = ctc([data, datalen], [labels, lengths])
hostError, hostGrad, _ = ctcLossTest(hostData, hostDataLen, hostLabels, lengths, blank)
assert np.isclose(hostError / batchsize, error)
assert np.allclose(hostGrad, grad.get(), atol=1e-5)
def createData(times, batchsize, vocabsize):
data = np.random.randn(times, batchsize, vocabsize).astype(np.float32)
datalen = np.array([times] * batchsize, dtype=np.int32)
lengths = np.array([random.randint(a=times // 4, b=times // 2 - 1) for _ in range(batchsize)], dtype=np.int32)
labels = np.concatenate([
np.array([random.randint(a=1, b=vocabsize - 1) for _ in range(lengths[b])], dtype=np.int32)
for b in range(batchsize)
])
return data, datalen, labels, lengths
if __name__ == "__main__":
unittest() | 0.350421 | 0.53777 |
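The value behind smallTest's assertion, worked out by hand: with two timesteps and the two-symbol label [1, 2], the only valid CTC alignment is (1, 2) (no blank fits), so the sequence probability is 0.6 * 0.6 = 0.36 and the normalized loss is -ln 0.36 ≈ 1.0217.

import math

score = 0.6 * 0.6              # p(t0 emits '1') * p(t1 emits '2')
loss = -math.log(score)        # ~1.0217
assert math.isclose(math.exp(-loss), 0.36)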
from datetime import datetime
import os
from sqlalchemy import Column, DateTime, String, BigInteger, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError, InvalidRequestError
POSTGRES_ENVIRON_KEY = 'DATABASE_URL'
Base = declarative_base()
track_artists = Table('t_track_artists',
Base.metadata,
Column('track_id', String, ForeignKey('t_track.track_id')),
Column('artist_id', String, ForeignKey('t_artist.artist_id')))
album_artists = Table('t_album_artists',
Base.metadata,
Column('album_id', String, ForeignKey('t_album.album_id')),
Column('artist_id', String, ForeignKey('t_artist.artist_id')))
class Artist(Base):
# Meta
__tablename__ = 't_artist'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
artist_id = Column(String, primary_key=True)
artist_data = Column(JSON, nullable=False)
class Album(Base):
# Meta
__tablename__ = 't_album'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
album_id = Column(String, primary_key=True)
album_data = Column(JSON, nullable=False)
# Relationship
artists = relationship('Artist', secondary=album_artists)
tracks = relationship('Track')
class Track(Base):
# Meta
__tablename__ = 't_track'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
track_id = Column(String, primary_key=True, index=True)
album_id = Column(String, ForeignKey('t_album.album_id'), index=True)
track_data = Column(JSON, nullable=False)
audio_feature_data = Column(JSON)
# Relationships
plays = relationship('Play', back_populates='track')
album = relationship('Album', back_populates='tracks')
artists = relationship('Artist', secondary=track_artists)
class Play(Base):
# Meta
__tablename__ = 't_play'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
played_at_utc_timestamp = Column(BigInteger, primary_key=True)
played_at_utc = Column(DateTime, nullable=False)
played_at_cet = Column(DateTime, nullable=False)
day = Column(Integer, nullable=False)
month = Column(Integer, nullable=False)
year = Column(Integer, nullable=False)
hour = Column(Integer, nullable=False)
minute = Column(Integer, nullable=False)
second = Column(Integer, nullable=False)
day_of_week = Column(Integer, nullable=False) # Monday: 0, Sunday: 6
week_of_year = Column(Integer, nullable=False)
track_id = Column(String, ForeignKey('t_track.track_id'), index=True)
user_name = Column(String, nullable=False)
# Relationship
track = relationship('Track', back_populates='plays')
class PostgreSQLConnection(object):
def __init__(self):
if POSTGRES_ENVIRON_KEY in os.environ:
self.engine = create_engine(os.environ[POSTGRES_ENVIRON_KEY])
else:
import settings
self.engine = create_engine(settings.POSTGRES_CONNECTION_STRING)
self.session = sessionmaker(autoflush=False)(bind=self.engine)
def drop_db(self):
Base.metadata.drop_all(bind=self.engine)
def create_db(self):
Base.metadata.create_all(bind=self.engine)
def save_instance(self, instance):
try:
self.session.add(instance)
self.session.commit()
except IntegrityError as e:
self.session.rollback()
except InvalidRequestError as e:
self.session.rollback()
def save_play(self, play):
try:
self.session.add(play)
self.session.commit()
print("* Track \"{}\" (played at {}) saved.".format(play.track.track_data['name'], play.played_at_cet))
except IntegrityError as e:
self.session.rollback()
except InvalidRequestError as e:
self.session.rollback() | extract/models.py | from datetime import datetime
import os
from sqlalchemy import Column, DateTime, String, BigInteger, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError, InvalidRequestError
POSTGRES_ENVIRON_KEY = 'DATABASE_URL'
Base = declarative_base()
track_artists = Table('t_track_artists',
Base.metadata,
Column('track_id', String, ForeignKey('t_track.track_id')),
Column('artist_id', String, ForeignKey('t_artist.artist_id')))
album_artists = Table('t_album_artists',
Base.metadata,
Column('album_id', String, ForeignKey('t_album.album_id')),
Column('artist_id', String, ForeignKey('t_artist.artist_id')))
class Artist(Base):
# Meta
__tablename__ = 't_artist'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
artist_id = Column(String, primary_key=True)
artist_data = Column(JSON, nullable=False)
class Album(Base):
# Meta
__tablename__ = 't_album'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
album_id = Column(String, primary_key=True)
album_data = Column(JSON, nullable=False)
# Relationship
artists = relationship('Artist', secondary=album_artists)
tracks = relationship('Track')
class Track(Base):
# Meta
__tablename__ = 't_track'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
track_id = Column(String, primary_key=True, index=True)
album_id = Column(String, ForeignKey('t_album.album_id'), index=True)
track_data = Column(JSON, nullable=False)
audio_feature_data = Column(JSON)
# Relationships
plays = relationship('Play', back_populates='track')
album = relationship('Album', back_populates='tracks')
artists = relationship('Artist', secondary=track_artists)
class Play(Base):
# Meta
__tablename__ = 't_play'
created_at_utc = Column(DateTime, default=datetime.utcnow)
# Payload
played_at_utc_timestamp = Column(BigInteger, primary_key=True)
played_at_utc = Column(DateTime, nullable=False)
played_at_cet = Column(DateTime, nullable=False)
day = Column(Integer, nullable=False)
month = Column(Integer, nullable=False)
year = Column(Integer, nullable=False)
hour = Column(Integer, nullable=False)
minute = Column(Integer, nullable=False)
second = Column(Integer, nullable=False)
day_of_week = Column(Integer, nullable=False) # Monday: 0, Sunday: 6
week_of_year = Column(Integer, nullable=False)
track_id = Column(String, ForeignKey('t_track.track_id'), index=True)
user_name = Column(String, nullable=False)
# Relationship
track = relationship('Track', back_populates='plays')
class PostgreSQLConnection(object):
def __init__(self):
if POSTGRES_ENVIRON_KEY in os.environ:
self.engine = create_engine(os.environ[POSTGRES_ENVIRON_KEY])
else:
import settings
self.engine = create_engine(settings.POSTGRES_CONNECTION_STRING)
self.session = sessionmaker(autoflush=False)(bind=self.engine)
def drop_db(self):
Base.metadata.drop_all(bind=self.engine)
def create_db(self):
Base.metadata.create_all(bind=self.engine)
def save_instance(self, instance):
try:
self.session.add(instance)
self.session.commit()
except IntegrityError as e:
self.session.rollback()
except InvalidRequestError as e:
self.session.rollback()
def save_play(self, play):
try:
self.session.add(play)
self.session.commit()
print("* Track \"{}\" (played at {}) saved.".format(play.track.track_data['name'], play.played_at_cet))
except IntegrityError as e:
self.session.rollback()
except InvalidRequestError as e:
self.session.rollback() | 0.599837 | 0.138753 |
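A usage sketch for the connection wrapper above: create the schema and store one artist. It assumes DATABASE_URL (or settings.POSTGRES_CONNECTION_STRING) points at a reachable PostgreSQL database; the artist payload is made-up illustration data.

conn = PostgreSQLConnection()
conn.create_db()                                   # create all tables if missing
artist = Artist(artist_id='artist-123',
                artist_data={'name': 'Example Artist'})
conn.save_instance(artist)                         # commits, rolls back on conflict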
import numpy as np
from frbpoppy.log import pprint
from frbpoppy.number_density import NumberDensity
from frbpoppy.population import Population
import frbpoppy.distributions as dis
import frbpoppy.galacticops as go
import frbpoppy.precalc as pc
class CosmicPopulation(Population):
"""Generate a cosmic FRB population."""
def __init__(self,
n_gen,
days=1,
name='cosmic',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=100,
dm_host_sigma=200,
dm_igm_index=1000,
dm_igm_sigma=None,
dm_mw_model='ne2001',
emission_range=[10e6, 10e9],
lum_range=[1e40, 1e45],
lum_index=0,
lum_function = 'schechter',
n_model='sfr',
alpha=-1.5,
w_model='lognormal',
w_range=[0.1, 10],
w_mu=0.1,
w_sigma=0.5,
si_mu=-1.4,
si_sigma=1.,
z_max=2.5,
generate=True):
"""Generate a popuation of FRBs.
Args:
n_gen (int): Number of FRB sources/sky/time to generate.
days (float): Number of days over which FRBs are generated.
name (str): Population name.
H_0 (float): Hubble constant.
W_m (float): Density parameter Ω_m.
W_v (float): Cosmological constant Ω_Λ.
dm_host_model (str): Dispersion measure host model. Options are
'gaussian' or 'lognormal'.
dm_host_mu (float): Mean dispersion measure host [pc/cm^3].
dm_host_sigma (float): Deviation dispersion measure host [pc/cm^3].
dm_igm_index (float): Dispersion measure slope for IGM [pc/cm^3].
dm_igm_sigma (float): Scatter around dm_igm. Defaults 0.2*slope*z
dm_mw_model (str): Dispersion measure model for the Milky Way.
Options are 'ne2001' or 'zero'.
emission_range (list): The frequency range [Hz] between which FRB
sources should emit the given bolometric luminosity.
lum_range (list): Bolometric luminosity (distance) range [erg/s].
lum_index (float): Power law index.
lum_function (str): Luminosity function, 'schechter' or 'powerlaw'.
n_model (str): Number density model. Either 'vol_co', 'sfr' or
'smd'.
alpha (float): Desired logN/logS of perfectly detected population.
w_model (str): Pulse width model, 'lognormal' or 'uniform'.
w_range (list): Pulse width range [ms].
w_mu (float): Mean pulse width [ms].
w_sigma (float): Deviation pulse width [ms].
si_mu (float): Mean spectral index.
si_sigma (float): Standard deviation spectral index.
z_max (float): Maximum redshift.
generate (bool): Whether to create a population
Returns:
Population: Population of FRBs.
"""
# Set up population
Population.__init__(self)
self.alpha = alpha
self.dm_host_model = dm_host_model
self.dm_host_mu = dm_host_mu
self.dm_host_sigma = dm_host_sigma
self.dm_igm_index = dm_igm_index
self.dm_igm_sigma = dm_igm_sigma
self.dm_mw_model = dm_mw_model
self.f_max = emission_range[1]
self.f_min = emission_range[0]
self.H_0 = H_0
self.lum_max = lum_range[1]
self.lum_min = lum_range[0]
self.lum_pow = lum_index
self.lum_function = lum_function
self.name = name
self.n_gen = int(n_gen)
self.n_model = n_model
self.si_mu = si_mu
self.si_sigma = si_sigma
self.time = days * 86400 # Convert to seconds
self.w_model = w_model
self.w_max = w_range[1]
self.w_min = w_range[0]
self.w_mu = w_mu
self.w_sigma = w_sigma
self.W_m = W_m
self.W_v = W_v
self.z_max = z_max
# Whether to start generating a Cosmic Population
if generate:
self.generate()
def gen_dist(self):
"""Generate distances."""
# Cosmology calculations
r = go.Redshift(self.z_max,
H_0=self.H_0,
W_m=self.W_m,
W_v=self.W_v)
self.dist_co_max = r.dist_co()
self.vol_co_max = r.vol_co()
# Ensure precalculations are done if necessary
pc.DistanceTable(H_0=self.H_0, W_m=self.W_m, W_v=self.W_v)
# Set up number density
n_den = NumberDensity(model=self.n_model,
z_max=self.z_max,
alpha=self.alpha,
H_0=self.H_0,
W_m=self.W_m,
W_v=self.W_v).draw
frbs = self.frbs
# Draw from number density
frbs.z, frbs.dist_co = n_den(self.n_gen)
def gen_direction(self):
"""Generate the direction of frbs."""
frbs = self.frbs
# Keep frb indices
frbs.index = np.arange(self.n_gen)
# Add random directional coordinates
u = np.random.uniform
frbs.ra = u(0, 360, self.n_gen)
frbs.dec = np.rad2deg(np.arccos(u(-1, 1, self.n_gen))) - 90
# Convert to galactic coordinates
frbs.gl, frbs.gb = go.radec_to_lb(frbs.ra, frbs.dec, frac=True)
def gen_gal_coords(self):
"""Generate galactic coordinates."""
frbs = self.frbs
# Get the proper distance
dist_pr = frbs.dist_co/(1+frbs.z)
# Convert into galactic coordinates
frbs.gx, frbs.gy, frbs.gz = go.lb_to_xyz(frbs.gl, frbs.gb, dist_pr)
def gen_dm_host(self):
"""Generate dm host contributions."""
frbs = self.frbs
# Dispersion measure of the host (Tendulkar)
if self.dm_host_model == 'gaussian':
frbs.dm_host = dis.trunc_norm(self.dm_host_mu,
self.dm_host_sigma,
self.n_gen).astype(np.float32)
elif self.dm_host_model == 'lognormal':
frbs.dm_host = np.random.lognormal(self.dm_host_mu,
self.dm_host_sigma,
self.n_gen).astype(np.float32)
frbs.dm_host = frbs.dm_host / (1 + frbs.z)
def gen_dm(self):
"""Generate dispersion measures."""
frbs = self.frbs
# Dispersion measure of the Milky Way
if self.dm_mw_model == 'ne2001':
frbs.dm_mw = pc.NE2001Table().lookup(frbs.gl, frbs.gb)
elif self.dm_mw_model == 'zero':
frbs.dm_mw = np.zeros_like(frbs.z)
# Dispersion measure of the intergalactic medium
frbs.dm_igm = go.ioka_dm_igm(frbs.z,
slope=self.dm_igm_index,
sigma=self.dm_igm_sigma)
# Dispersion measure of the host (Tendulkar)
self.gen_dm_host()
# Total dispersion measure
frbs.dm = frbs.dm_mw + frbs.dm_igm + frbs.dm_host
def gen_w(self, shape):
"""Generate pulse widths."""
frbs = self.frbs
# Get a random intrinsic pulse width [ms]
if self.w_model == 'lognormal':
frbs.w_int = np.random.lognormal(self.w_mu, self.w_sigma,
shape).astype(np.float32)
if self.w_model == 'uniform':
frbs.w_int = np.random.uniform(self.w_min, self.w_max,
shape).astype(np.float32)
# Calculate the pulse width upon arrival to Earth
if isinstance(shape, tuple):
frbs.w_arr = frbs.w_int*(1+frbs.z[:, None])
else:
frbs.w_arr = frbs.w_int*(1+frbs.z)
def gen_lum(self, shape):
"""Generate luminosities."""
frbs = self.frbs
# Add bolometric luminosity [erg/s]
if self.lum_function == 'schechter':
frbs.lum_bol = dis.schechter(self.lum_min,
self.lum_max,
self.lum_pow,
shape).astype(np.float64)
elif self.lum_function == 'powerlaw':
frbs.lum_bol = dis.powerlaw(self.lum_min,
self.lum_max,
self.lum_pow,
shape).astype(np.float64)
def gen_si(self, shape):
"""Generate spectral indices."""
frbs = self.frbs
# Add spectral index
frbs.si = np.random.normal(self.si_mu, self.si_sigma,
shape).astype(np.float32)
def generate(self):
"""Generate all manner of intrinsic parameters."""
# Let user know what's happening
pprint(f'Generating {self.name} population')
self.gen_dist()
self.gen_direction()
self.gen_gal_coords()
self.gen_dm()
self.gen_w(self.n_gen)
self.gen_lum(self.n_gen)
self.gen_si(self.n_gen)
pprint(f'Finished generating {self.name} population')
@classmethod
def simple(cls, n, generate=False):
"""Set up a simple, local population."""
pop = cls(n,
days=1,
name='simple',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=0.,
dm_host_sigma=0.,
dm_igm_index=0.,
dm_igm_sigma=None,
dm_mw_model='zero',
emission_range=[10e6, 10e9],
lum_range=[1e38, 1e38],
lum_index=0.,
n_model='vol_co',
alpha=-1.5,
w_model='uniform',
w_range=[10, 10],
w_mu=0.1,
w_sigma=1.,
si_mu=0.,
si_sigma=0.,
z_max=0.01,
generate=generate)
return pop
@classmethod
def complex(cls, n, generate=False):
"""Set up a complex population."""
pop = cls(n,
days=1,
name='complex',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=100,
dm_host_sigma=200,
dm_igm_index=1000,
dm_igm_sigma=None,
dm_mw_model='ne2001',
emission_range=[10e6, 10e9],
lum_range=[1e39, 1e45],
lum_index=0.,
n_model='vol_co',
alpha=-1.5,
w_model='lognormal',
w_range=[1., 1.],
w_mu=0.1,
w_sigma=0.7,
si_mu=-1.4,
si_sigma=1.,
z_max=2.5,
generate=generate)
return pop
if __name__ == '__main__':
# Quick test whether everything seems to be working or not
p = CosmicPopulation(10000)
import matplotlib.pyplot as plt
for arg in p.frbs.__dict__:
print(arg)
values = getattr(p.frbs, arg)
if values is not None:
plt.hist(values, bins=50)
plt.xlabel(arg)
plt.savefig(f'./tests/plots/{arg}.png')
plt.clf() | frbpoppy/cosmic_pop.py | import numpy as np
from frbpoppy.log import pprint
from frbpoppy.number_density import NumberDensity
from frbpoppy.population import Population
import frbpoppy.distributions as dis
import frbpoppy.galacticops as go
import frbpoppy.precalc as pc
class CosmicPopulation(Population):
"""Generate a cosmic FRB population."""
def __init__(self,
n_gen,
days=1,
name='cosmic',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=100,
dm_host_sigma=200,
dm_igm_index=1000,
dm_igm_sigma=None,
dm_mw_model='ne2001',
emission_range=[10e6, 10e9],
lum_range=[1e40, 1e45],
lum_index=0,
lum_function = 'schechter',
n_model='sfr',
alpha=-1.5,
w_model='lognormal',
w_range=[0.1, 10],
w_mu=0.1,
w_sigma=0.5,
si_mu=-1.4,
si_sigma=1.,
z_max=2.5,
generate=True):
"""Generate a popuation of FRBs.
Args:
n_gen (int): Number of FRB sources/sky/time to generate.
days (float): Number of days over which FRBs are generated.
name (str): Population name.
H_0 (float): Hubble constant.
W_m (float): Density parameter Ω_m.
W_v (float): Cosmological constant Ω_Λ.
dm_host_model (str): Dispersion measure host model. Options are
'gaussian' or 'lognormal'.
dm_host_mu (float): Mean dispersion measure host [pc/cm^3].
dm_host_sigma (float): Deviation dispersion measure host [pc/cm^3].
dm_igm_index (float): Dispersion measure slope for IGM [pc/cm^3].
dm_igm_sigma (float): Scatter around dm_igm. Defaults 0.2*slope*z
dm_mw_model (str): Dispersion measure model for the Milky Way.
Options are 'ne2001' or 'zero'.
emission_range (list): The frequency range [Hz] between which FRB
sources should emit the given bolometric luminosity.
lum_range (list): Bolometric luminosity (distance) range [erg/s].
lum_index (float): Power law index.
lum_function (str): Luminosity function, 'schechter' or 'powerlaw'.
n_model (str): Number density model. Either 'vol_co', 'sfr' or
'smd'.
alpha (float): Desired logN/logS of perfectly detected population.
w_model (str): Pulse width model, 'lognormal' or 'uniform'.
w_range (list): Pulse width range [ms].
w_mu (float): Mean pulse width [ms].
w_sigma (float): Deviation pulse width [ms].
si_mu (float): Mean spectral index.
si_sigma (float): Standard deviation spectral index.
z_max (float): Maximum redshift.
generate (bool): Whether to create a population
Returns:
Population: Population of FRBs.
"""
# Set up population
Population.__init__(self)
self.alpha = alpha
self.dm_host_model = dm_host_model
self.dm_host_mu = dm_host_mu
self.dm_host_sigma = dm_host_sigma
self.dm_igm_index = dm_igm_index
self.dm_igm_sigma = dm_igm_sigma
self.dm_mw_model = dm_mw_model
self.f_max = emission_range[1]
self.f_min = emission_range[0]
self.H_0 = H_0
self.lum_max = lum_range[1]
self.lum_min = lum_range[0]
self.lum_pow = lum_index
self.lum_function = lum_function
self.name = name
self.n_gen = int(n_gen)
self.n_model = n_model
self.si_mu = si_mu
self.si_sigma = si_sigma
self.time = days * 86400 # Convert to seconds
self.w_model = w_model
self.w_max = w_range[1]
self.w_min = w_range[0]
self.w_mu = w_mu
self.w_sigma = w_sigma
self.W_m = W_m
self.W_v = W_v
self.z_max = z_max
# Whether to start generating a Cosmic Population
if generate:
self.generate()
def gen_dist(self):
"""Generate distances."""
# Cosmology calculations
r = go.Redshift(self.z_max,
H_0=self.H_0,
W_m=self.W_m,
W_v=self.W_v)
self.dist_co_max = r.dist_co()
self.vol_co_max = r.vol_co()
# Ensure precalculations are done if necessary
pc.DistanceTable(H_0=self.H_0, W_m=self.W_m, W_v=self.W_v)
# Set up number density
n_den = NumberDensity(model=self.n_model,
z_max=self.z_max,
alpha=self.alpha,
H_0=self.H_0,
W_m=self.W_m,
W_v=self.W_v).draw
frbs = self.frbs
# Draw from number density
frbs.z, frbs.dist_co = n_den(self.n_gen)
def gen_direction(self):
"""Generate the direction of frbs."""
frbs = self.frbs
# Keep frb indices
frbs.index = np.arange(self.n_gen)
# Add random directional coordinates
u = np.random.uniform
frbs.ra = u(0, 360, self.n_gen)
frbs.dec = np.rad2deg(np.arccos(u(-1, 1, self.n_gen))) - 90
# Convert to galactic coordinates
frbs.gl, frbs.gb = go.radec_to_lb(frbs.ra, frbs.dec, frac=True)
def gen_gal_coords(self):
"""Generate galactic coordinates."""
frbs = self.frbs
# Get the proper distance
dist_pr = frbs.dist_co/(1+frbs.z)
# Convert into galactic coordinates
frbs.gx, frbs.gy, frbs.gz = go.lb_to_xyz(frbs.gl, frbs.gb, dist_pr)
def gen_dm_host(self):
"""Generate dm host contributions."""
frbs = self.frbs
# Dispersion measure of the host (Tendulkar)
if self.dm_host_model == 'gaussian':
frbs.dm_host = dis.trunc_norm(self.dm_host_mu,
self.dm_host_sigma,
self.n_gen).astype(np.float32)
elif self.dm_host_model == 'lognormal':
frbs.dm_host = np.random.lognormal(self.dm_host_mu,
self.dm_host_sigma,
self.n_gen).astype(np.float32)
frbs.dm_host = frbs.dm_host / (1 + frbs.z)
def gen_dm(self):
"""Generate dispersion measures."""
frbs = self.frbs
# Dispersion measure of the Milky Way
if self.dm_mw_model == 'ne2001':
frbs.dm_mw = pc.NE2001Table().lookup(frbs.gl, frbs.gb)
elif self.dm_mw_model == 'zero':
frbs.dm_mw = np.zeros_like(frbs.z)
# Dispersion measure of the intergalactic medium
frbs.dm_igm = go.ioka_dm_igm(frbs.z,
slope=self.dm_igm_index,
sigma=self.dm_igm_sigma)
# Dispersion measure of the host (Tendulkar)
self.gen_dm_host()
# Total dispersion measure
frbs.dm = frbs.dm_mw + frbs.dm_igm + frbs.dm_host
def gen_w(self, shape):
"""Generate pulse widths."""
frbs = self.frbs
# Get a random intrinsic pulse width [ms]
if self.w_model == 'lognormal':
frbs.w_int = np.random.lognormal(self.w_mu, self.w_sigma,
shape).astype(np.float32)
if self.w_model == 'uniform':
frbs.w_int = np.random.uniform(self.w_min, self.w_max,
shape).astype(np.float32)
# Calculate the pulse width upon arrival to Earth
if isinstance(shape, tuple):
frbs.w_arr = frbs.w_int*(1+frbs.z[:, None])
else:
frbs.w_arr = frbs.w_int*(1+frbs.z)
def gen_lum(self, shape):
"""Generate luminosities."""
frbs = self.frbs
# Add bolometric luminosity [erg/s]
if self.lum_function == 'schechter':
frbs.lum_bol = dis.schechter(self.lum_min,
self.lum_max,
self.lum_pow,
shape).astype(np.float64)
elif self.lum_function == 'powerlaw':
frbs.lum_bol = dis.powerlaw(self.lum_min,
self.lum_max,
self.lum_pow,
shape).astype(np.float64)
def gen_si(self, shape):
"""Generate spectral indices."""
frbs = self.frbs
# Add spectral index
frbs.si = np.random.normal(self.si_mu, self.si_sigma,
shape).astype(np.float32)
def generate(self):
"""Generate all manner of intrinsic parameters."""
# Let user know what's happening
pprint(f'Generating {self.name} population')
self.gen_dist()
self.gen_direction()
self.gen_gal_coords()
self.gen_dm()
self.gen_w(self.n_gen)
self.gen_lum(self.n_gen)
self.gen_si(self.n_gen)
pprint(f'Finished generating {self.name} population')
@classmethod
def simple(cls, n, generate=False):
"""Set up a simple, local population."""
pop = cls(n,
days=1,
name='simple',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=0.,
dm_host_sigma=0.,
dm_igm_index=0.,
dm_igm_sigma=None,
dm_mw_model='zero',
emission_range=[10e6, 10e9],
lum_range=[1e38, 1e38],
lum_index=0.,
n_model='vol_co',
alpha=-1.5,
w_model='uniform',
w_range=[10, 10],
w_mu=0.1,
w_sigma=1.,
si_mu=0.,
si_sigma=0.,
z_max=0.01,
generate=generate)
return pop
@classmethod
def complex(cls, n, generate=False):
"""Set up a complex population."""
pop = cls(n,
days=1,
name='complex',
H_0=67.74,
W_m=0.3089,
W_v=0.6911,
dm_host_model='gaussian',
dm_host_mu=100,
dm_host_sigma=200,
dm_igm_index=1000,
dm_igm_sigma=None,
dm_mw_model='ne2001',
emission_range=[10e6, 10e9],
lum_range=[1e39, 1e45],
lum_index=0.,
n_model='vol_co',
alpha=-1.5,
w_model='lognormal',
w_range=[1., 1.],
w_mu=0.1,
w_sigma=0.7,
si_mu=-1.4,
si_sigma=1.,
z_max=2.5,
generate=generate)
return pop
if __name__ == '__main__':
# Quick test whether everything seems to be working or not
p = CosmicPopulation(10000)
import matplotlib.pyplot as plt
for arg in p.frbs.__dict__:
print(arg)
values = getattr(p.frbs, arg)
if values is not None:
plt.hist(values, bins=50)
plt.xlabel(arg)
plt.savefig(f'./tests/plots/{arg}.png')
plt.clf() | 0.845305 | 0.448426 |
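A usage sketch mirroring the __main__ self-test above: draw the small local population defined by the simple() classmethod and inspect a few of the generated per-source arrays. The sample size and printed fields are illustrative.

pop = CosmicPopulation.simple(1000, generate=True)
print(pop.frbs.z[:5])        # redshifts (all <= z_max = 0.01 for 'simple')
print(pop.frbs.dm[:5])       # total dispersion measures [pc/cm^3]
print(pop.frbs.lum_bol[:5])  # bolometric luminosities [erg/s]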
from __future__ import absolute_import
import os
import mxnet as mx
from mxnet import autograd
from mxnet.gluon import nn
from .rcnn_target import RCNNTargetSampler, RCNNTargetGenerator
from ..rcnn import RCNN2
from ..rpn import RPN
from ...nn.coder import NormalizedBoxCenterDecoder, MultiPerClassDecoder
from easydict import EasyDict as edict
from ..rpn import RPNTargetGenerator
__all__ = ['CascadeRCNN', 'get_cascade_rcnn',
'cascade_rcnn_resnet50_v1b_voc',
'cascade_rcnn_vgg16_voc',
'cascade_rcnn_vgg16_pruned_voc',
'cascade_rcnn_vgg16_pruned_coco']
class CascadeRCNN(RCNN2):
r"""Faster RCNN network.
Parameters
----------
features : gluon.HybridBlock
Base feature extractor before feature pooling layer.
top_features : gluon.HybridBlock
Tail feature extractor after feature pooling layer.
train_patterns : str
Matching pattern for trainable parameters.
scales : iterable of float
The areas of anchor boxes.
We use the following form to compute the shapes of anchors:
.. math::
width_{anchor} = size_{base} \times scale \times \sqrt{ 1 / ratio}
height_{anchor} = size_{base} \times scale \times \sqrt{ratio}
ratios : iterable of float
The aspect ratios of anchor boxes. We expect it to be a list or tuple.
classes : iterable of str
Names of categories, its length is ``num_class``.
roi_mode : str
ROI pooling mode. Currently support 'pool' and 'align'.
roi_size : tuple of int, length 2
(height, width) of the ROI region.
stride : int, default is 16
Feature map stride with respect to original image.
This is usually the ratio between original image size and feature map size.
rpn_channel : int, default is 1024
Channel number used in RPN convolutional layers.
nms_thresh : float, default is 0.3.
Non-maximum suppression threshold. You can specify < 0 or > 1 to disable NMS.
nms_topk : int, default is 400
Apply NMS to top k detection results, use -1 to disable so that every Detection
result is used in NMS.
num_sample : int, default is 128
Number of samples for RCNN targets.
pos_iou_thresh : float, default is 0.5
Proposals whose IOU with a ground-truth box is larger than ``pos_iou_thresh`` are regarded as positive samples.
neg_iou_thresh_high : float, default is 0.5
Proposals whose IOU is smaller than ``neg_iou_thresh_high``
and larger than ``neg_iou_thresh_low`` are regarded as negative samples.
Proposals with IOU between ``neg_iou_thresh_high`` and ``pos_iou_thresh`` are
ignored.
neg_iou_thresh_low : float, default is 0.0
See ``neg_iou_thresh_high``.
pos_ratio : float, default is 0.25
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) are
sampled.
"""
def __init__(self, features, top_features, top_features_2nd,
top_features_3rd, classes,
short=600, max_size=1000, train_patterns=None,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(14, 14), stride=16, clip=None,
rpn_channel=1024, base_size=16, scales=(8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
additional_output=False, **kwargs):
super(CascadeRCNN, self).__init__(
features=features, top_features=top_features,
top_features_2nd=top_features_2nd, top_features_3rd=top_features_3rd,
classes=classes,
short=short, max_size=max_size, train_patterns=train_patterns,
nms_thresh=nms_thresh, nms_topk=nms_topk, post_nms=post_nms,
roi_mode=roi_mode, roi_size=roi_size, stride=stride, clip=clip, **kwargs)
self._max_batch = 1 # currently only support batch size = 1
self._num_sample = num_sample
self._rpn_test_post_nms = rpn_test_post_nms
self._classes = classes
stds_2nd = (.05, .05, .1, .1)
stds_3rd = (.033, .033, .067, .067)
means_2nd = (0., 0., 0., 0.)
self._target_generator = {RCNNTargetGenerator(self.num_class, means_2nd, stds=(.1, .1, .2, .2))}
self._target_generator_2nd = {RCNNTargetGenerator(self.num_class, means_2nd, stds_2nd)}
self._target_generator_3rd = {RCNNTargetGenerator(self.num_class, means_2nd, stds_3rd)}
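# Note (added comment): the per-stage bounding-box target stds shrink from
# (.1, .1, .2, .2) to (.05, .05, .1, .1) to (.033, .033, .067, .067) because
# each stage starts from better proposals, so its regression targets have a
# smaller spread; decoding must use the matching stds (see box_decoder_2nd/3rd).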
self._rpn_target_generator = set([RPNTargetGenerator(
num_sample=256, pos_iou_thresh=0.7,
neg_iou_thresh=0.3, pos_ratio=0.5,
stds=(1., 1., 1., 1.))])
with self.name_scope():
self.rpn = RPN(
channels=rpn_channel, stride=stride, base_size=base_size,
scales=scales, ratios=ratios, alloc_size=alloc_size,
clip=clip, nms_thresh=rpn_nms_thresh, train_pre_nms=rpn_train_pre_nms,
train_post_nms=rpn_train_post_nms, test_pre_nms=rpn_test_pre_nms,
test_post_nms=rpn_test_post_nms, min_size=rpn_min_size)
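# Added comment: the three RCNNTargetSampler instances below implement the
# cascade idea of resampling proposals with progressively stricter IoU
# thresholds (0.5 -> 0.6 -> 0.7), so each stage is trained on the
# higher-quality boxes produced by the previous stage.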
self.sampler = RCNNTargetSampler(
num_image=self._max_batch, num_proposal=rpn_train_post_nms,
num_sample=num_sample, pos_iou_thresh=pos_iou_thresh, pos_iou_thresh_hg=1, pos_ratio=pos_ratio)
self.sampler_2nd = RCNNTargetSampler(
num_image=self._max_batch, num_proposal=self._num_sample,
num_sample=self._num_sample, pos_iou_thresh=0.6, pos_iou_thresh_hg=0.95, pos_ratio=0.25)
self.sampler_3rd = RCNNTargetSampler(
num_image=self._max_batch, num_proposal=self._num_sample,
num_sample=self._num_sample, pos_iou_thresh=0.7, pos_iou_thresh_hg=0.95, pos_ratio=0.25)
self.box_decoder_2nd = NormalizedBoxCenterDecoder(stds=(.05, .05, .1, .1))
self.box_decoder_3rd = NormalizedBoxCenterDecoder(stds=(.033, .033, .067, .067))
@property
def target_generator(self):
"""Returns stored target generator
Returns
-------
mxnet.gluon.HybridBlock
The RCNN target generator
"""
return list(self._target_generator)[0]
@property
def target_generator_2nd(self):
return list(self._target_generator_2nd)[0]
@property
def target_generator_3rd(self):
return list(self._target_generator_3rd)[0]
@property
def rpn_target_generator(self):
return list(self._rpn_target_generator)[0]
def ROIExtraction(self, F, feature, bbox):
roi = self.add_batchid(F, bbox)
# ROI features
if self._roi_mode == 'pool':
pooled_feat = F.ROIPooling(feature, roi, self._roi_size, 1. / self._stride)
elif self._roi_mode == 'align':
pooled_feat = F.contrib.ROIAlign(feature, roi, self._roi_size, 1. / self._stride, sample_ratio=2)
else:
raise ValueError("Invalid roi mode: {}".format(self._roi_mode))
return pooled_feat
def add_batchid(self, F, bbox):
num_roi = self._num_sample if autograd.is_training() else self._rpn_test_post_nms
with autograd.pause():
roi_batchid = F.arange(0, self._max_batch, repeat=num_roi)
# remove batch dim because ROIPooling require 2d input
roi = F.concat(*[roi_batchid.reshape((-1, 1)), bbox.reshape((-1, 4))], dim=-1)
roi = F.stop_gradient(roi)
return roi
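# Added comment: each ROI row produced above has the layout
# [batch_index, xmin, ymin, xmax, ymax], the 2-D format expected by both
# F.ROIPooling and F.contrib.ROIAlign in ROIExtraction().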
def decode_bbox(self, source_bbox, encoded_bbox, stds):
with autograd.pause():
box_decoder = NormalizedBoxCenterDecoder(stds=stds)
roi = box_decoder(encoded_bbox, self.box_to_center(source_bbox))
#roi = roi.reshape((1,-1, 4))
return roi
# pylint: disable=arguments-differ
def hybrid_forward(self, F, x, gt_box=None):
"""Forward Faster-RCNN network.
The behavior during traing and inference is different.
Parameters
----------
x : mxnet.nd.NDArray or mxnet.symbol
The network input tensor.
gt_box : mxnet.nd.NDArray or mxnet.symbol, only required during training
The ground-truth bbox tensor with shape (1, N, 4).
Returns
-------
(ids, scores, bboxes)
During inference, returns final class id, confidence scores, bounding
boxes.
"""
def _split(x, axis, num_outputs, squeeze_axis):
x = F.split(x, axis=axis, num_outputs=num_outputs, squeeze_axis=squeeze_axis)
if isinstance(x, list):
return x
else:
return [x]
feat = self.features(x)
# RPN proposals
if autograd.is_training():
rpn_score, rpn_box, raw_rpn_score, raw_rpn_box, anchors = self.rpn(feat, F.zeros_like(x))
# print(rpn_box.shape)
# rpn_index = F.Custom(rpn_box, op_type='clip_rpn_box')
# index = int(rpn_index.sum().asnumpy())
# rpn_box = rpn_box.slice_axis(axis=1,begin=0,end =index)
# #rpn_box = self.rpn_box_clip(rpn_box)
assert gt_box is not None
rpn_box, samples, matches = self.sampler(rpn_box, gt_box)
else:
_, rpn_box = self.rpn(feat, F.zeros_like(x))
# ROI features (ROI pooling or ROI Align)
num_roi = self._num_sample if autograd.is_training() else self._rpn_test_post_nms
pooled_feat = self.ROIExtraction(F=F, feature=feat, bbox=rpn_box)
top_feat = self.top_features(pooled_feat)
#top_feat = self.global_avg_pool(top_feat)
cls_pred = self.class_predictor(top_feat)
box_pred = self.box_predictor(top_feat)
# cls_pred (B * N, C) -> (B, N, C)
cls_pred = cls_pred.reshape((self._max_batch, num_roi, self.num_class + 1))
# box_pred (B * N, C * 4) -> (B, N, C, 4)
box_pred = box_pred.reshape((self._max_batch, num_roi, 1, 4))
# cascade rcnn: decode first-stage predictions into proposals for the second stage
with autograd.pause():
roi_2nd = self.box_decoder(F.squeeze(box_pred.transpose((0, 2, 1, 3)), axis=1), self.box_to_center(rpn_box))
#roi_2nd = self.decode_bbox(source_bbox=rpn_box, \
# encoded_bbox=F.squeeze(box_pred.transpose((0, 2, 1, 3)), axis=1), stds=(.1, .1, .2, .2))
# roi_2nd_score =
if autograd.is_training():
roi_2nd, samples_2nd, matches_2nd = self.sampler_2nd(roi_2nd, gt_box)
pooled_feat_2nd = self.ROIExtraction(F=F, feature=feat, bbox=roi_2nd)
top_feat_2nd = self.top_features_2nd(pooled_feat_2nd)
cls_pred_2nd = self.class_predictor_2nd(top_feat_2nd)
box_pred_2nd = self.box_predictor_2nd(top_feat_2nd)
# cls_pred (B * N, C) -> (B, N, C)
cls_pred_2nd = cls_pred_2nd.reshape((self._max_batch, num_roi, self.num_class + 1))
# box_pred (B * N, C * 4) -> (B, N, C, 4)
box_pred_2nd = box_pred_2nd.reshape((self._max_batch, num_roi, 1, 4))
# decode rcnn box
with autograd.pause():
roi_3rd = self.box_decoder_2nd(F.squeeze(box_pred_2nd.transpose((0, 2, 1, 3)), axis=1), self.box_to_center(roi_2nd))
#roi_3rd = self.decode_bbox(source_bbox=roi_2nd, \
#encoded_bbox=F.squeeze(box_pred_2nd.transpose((0, 2, 1, 3)), axis=1), stds=(.05, .05, .1, .1))
if autograd.is_training():
roi_3rd, samples_3rd, matches_3rd = self.sampler_3rd(roi_3rd, gt_box)
pooled_feat_3rd = self.ROIExtraction(F=F, feature=feat, bbox=roi_3rd)
top_feat_3rd = self.top_features_3rd(pooled_feat_3rd)
cls_pred_3rd = self.class_predictor_3rd(top_feat_3rd)
box_pred_3rd = self.box_predictor_3rd(top_feat_3rd)
# cls_pred (B * N, C) -> (B, N, C)
cls_pred_3rd = cls_pred_3rd.reshape((self._max_batch, num_roi, self.num_class + 1))
# box_pred (B * N, C * 4) -> (B, N, C, 4)
box_pred_3rd = box_pred_3rd.reshape((self._max_batch, num_roi, 1, 4))
# no need to convert bounding boxes in training, just return
if autograd.is_training():
rpn_result = raw_rpn_score, raw_rpn_box, anchors
cascade_rcnn_result = [ [cls_pred, box_pred, rpn_box, samples, matches ],
[cls_pred_2nd, box_pred_2nd, roi_2nd, samples_2nd, matches_2nd],
[cls_pred_3rd, box_pred_3rd, roi_3rd, samples_3rd, matches_3rd ] ]
return rpn_result, cascade_rcnn_result
# cls_ids (B, N, C), scores (B, N, C)
cls_prob_3rd = F.softmax(cls_pred_3rd, axis=-1)
cls_prob_2nd = F.softmax(cls_pred_2nd, axis=-1)
cls_prob_1st = F.softmax(cls_pred, axis=-1)
# ensemble of the three stages' class probabilities (an unnormalised sum; the
# missing 1/3 factor does not change the ranking used by NMS)
cls_prob_3rd_avg = F.ElementWiseSum(cls_prob_3rd, cls_prob_2nd, cls_prob_1st)
cls_ids, scores = self.cls_decoder(cls_prob_3rd_avg)
# cls_ids, scores (B, N, C) -> (B, C, N) -> (B, C, N, 1)
cls_ids = cls_ids.transpose((0, 2, 1)).reshape((0, 0, 0, 1))
scores = scores.transpose((0, 2, 1)).reshape((0, 0, 0, 1))
# box_pred (B, N, C, 4) -> (B, C, N, 4)
box_pred = box_pred_3rd.transpose((0, 2, 1, 3))
# rpn_boxes (B, N, 4) -> B * (1, N, 4)
rpn_boxes = _split(roi_3rd, axis=0, num_outputs=self._max_batch, squeeze_axis=False)
# cls_ids, scores (B, C, N, 1) -> B * (C, N, 1)
cls_ids = _split(cls_ids, axis=0, num_outputs=self._max_batch, squeeze_axis=True)
scores = _split(scores, axis=0, num_outputs=self._max_batch, squeeze_axis=True)
# box_preds (B, C, N, 4) -> B * (C, N, 4)
box_preds = _split(box_pred, axis=0, num_outputs=self._max_batch, squeeze_axis=True)
# per batch predict, nms, each class has topk outputs
results = []
for rpn_box, cls_id, score, box_pred in zip(rpn_boxes, cls_ids, scores, box_preds):
# box_pred (C, N, 4) rpn_box (1, N, 4) -> bbox (C, N, 4)
bbox = self.box_decoder_3rd(box_pred, self.box_to_center(rpn_box))
bbox = F.repeat(bbox, repeats=self.num_class, axis=0)
# res (C, N, 6)
#print("cls_id:{} score:{} box:{}".format(cls_id.shape,score.shape,bbox.shape))
res = F.concat(*[cls_id, score, bbox], dim=-1)
# res (C, self.nms_topk, 6)
res = F.contrib.box_nms(
res, overlap_thresh=self.nms_thresh, topk=self.nms_topk, valid_thresh=0.0001,
id_index=0, score_index=1, coord_start=2, force_suppress=True)
# res (C * self.nms_topk, 6)
res = res.reshape((-3, 0))
results.append(res)
# result B * (C * topk, 6) -> (B, C * topk, 6)
result = F.stack(*results, axis=0)
ids = F.slice_axis(result, axis=-1, begin=0, end=1)
scores = F.slice_axis(result, axis=-1, begin=1, end=2)
bboxes = F.slice_axis(result, axis=-1, begin=2, end=6)
return ids, scores, bboxes
def get_cascade_rcnn(name, dataset, pretrained=False, ctx=mx.cpu(),
root=os.path.join('~', '.mxnet', 'models'), **kwargs):
r"""Utility function to return faster rcnn networks.
Parameters
----------
name : str
Model name.
dataset : str
The name of dataset.
pretrained : bool, optional, default is False
Load pretrained weights.
ctx : mxnet.Context
Context such as mx.cpu(), mx.gpu(0).
root : str
Model weights storing path.
Returns
-------
mxnet.gluon.HybridBlock
The Cascade R-CNN network.
"""
net = CascadeRCNN(**kwargs)
if pretrained:
from ..model_store import get_model_file
full_name = '_'.join(('cascade_rcnn', name, dataset))
net.load_parameters(get_model_file(full_name, root=root), ctx=ctx)
return net
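# Hedged usage sketch (illustrative, not in the original file):
#   net = cascade_rcnn_vgg16_voc(pretrained_base=True)
#   # after initializing the remaining (non-backbone) parameters and preparing
#   # an input batch, inference returns (ids, scores, bboxes) as described in
#   # CascadeRCNN.hybrid_forward().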
def cascade_rcnn_vgg16_voc(pretrained=False, pretrained_base=True, **kwargs):
r"""Faster RCNN model from the paper
"<NAME>., <NAME>., <NAME>., & <NAME>. (2015). Faster r-cnn: Towards
real-time object detection with region proposal networks"
Parameters
----------
pretrained : bool, optional, default is False
Load pretrained weights.
pretrained_base : bool, optional, default is True
Load pretrained base network, the extra layers are randomized. Note that
if pretrained is `True`, this has no effect.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = cascade_rcnn_vgg16_voc(pretrained=True)
>>> print(model)
"""
from ...data import VOCDetection
classes = VOCDetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = mx.gluon.model_zoo.vision.get_model('vgg16', pretrained=pretrained_base)
features = base_network.features[:30]
top_features = nn.HybridSequential()
top_features_2nd = nn.HybridSequential()
top_features_3rd = nn.HybridSequential()
top_features.add(base_network.features[31])
top_features.add(base_network.features[33])
top_features_2nd.add(base_network.features[31])
top_features_2nd.add(base_network.features[33])
top_features_3rd.add(base_network.features[31])
top_features_3rd.add(base_network.features[33])
#print(top_features)
train_patterns = '|'.join(['.*dense', '.*rpn','.*vgg0_conv(4|5|6|7|8|9|10|11|12)'])
return get_cascade_rcnn(
name='vgg16', dataset='voc', pretrained=pretrained,
features=features, top_features=top_features,
top_features_2nd=top_features_2nd, top_features_3rd=top_features_3rd,
classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(7, 7), stride=16, clip=None,
rpn_channel=512, base_size=16, scales=(8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=20000, rpn_train_post_nms=2000,
rpn_test_pre_nms=5000, rpn_test_post_nms=300, rpn_min_size=5,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
**kwargs)
def cascade_rcnn_vgg16_pruned_coco(pretrained=False, pretrained_base=True, **kwargs):
from .vgg16_pruned import vgg16_pruned
from ...data import COCODetection
classes = COCODetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = vgg16_pruned(pretrained=pretrained_base)
features = base_network.features[:30]
top_features = nn.HybridSequential()
top_features_2nd = nn.HybridSequential()
top_features_3rd = nn.HybridSequential()
top_features.add(base_network.features[31])
top_features.add(base_network.features[33])
top_features_2nd.add(base_network.features[31])
top_features_2nd.add(base_network.features[33])
top_features_3rd.add(base_network.features[31])
top_features_3rd.add(base_network.features[33])
#print(top_features)
train_patterns = '|'.join(['.*dense', '.*rpn','.*vgg0_conv(4|5|6|7|8|9|10|11|12)'])
return get_cascade_rcnn(
name='vgg16_pruned', dataset='coco', pretrained=pretrained,
features=features, top_features=top_features,
top_features_2nd=top_features_2nd, top_features_3rd=top_features_3rd,
classes=classes,
short=800, max_size=1333, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=-1, post_nms=-1,
roi_mode='pspool', roi_size=(7, 7), stride=16, clip=4.42,
rpn_channel=512, base_size=16, scales=(4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=0,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
**kwargs)
def cascade_rcnn_vgg16_pruned_voc(pretrained=False, pretrained_base=True, **kwargs):
from .vgg16_pruned import vgg16_pruned
from ...data import VOCDetection
classes = VOCDetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = vgg16_pruned(pretrained=pretrained_base)
features = base_network.features[:30]
top_features = nn.HybridSequential()
top_features_2nd = nn.HybridSequential()
top_features_3rd = nn.HybridSequential()
top_features.add(base_network.features[31])
top_features.add(base_network.features[33])
top_features_2nd.add(base_network.features[31])
top_features_2nd.add(base_network.features[33])
top_features_3rd.add(base_network.features[31])
top_features_3rd.add(base_network.features[33])
#print(top_features)
train_patterns = '|'.join(['.*dense', '.*rpn','.*vgg0_conv(4|5|6|7|8|9|10|11|12)'])
return get_cascade_rcnn(
name='vgg16_pruned', dataset='voc', pretrained=pretrained,
features=features, top_features=top_features,
top_features_2nd=top_features_2nd, top_features_3rd=top_features_3rd,
classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(7, 7), stride=16, clip=None,
rpn_channel=512, base_size=16, scales=(8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=20000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=5,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
**kwargs)
def cascade_rcnn_resnet50_v1b_voc(pretrained=False, pretrained_base=True, **kwargs):
from ..resnetv1b import resnet50_v1b
from ...data import VOCDetection
classes = VOCDetection.CLASSES
pretrained_base = False if pretrained else pretrained_base
base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False, use_global_stats=True)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
top_features_2nd = nn.HybridSequential()
top_features_3rd = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
top_features_2nd.add(getattr(base_network, layer))
top_features_3rd.add(getattr(base_network, layer))
print("~~~~~~~features~~~~~~~")
print(features)
print("~~~~~~~top_features~~~~~~~")
print(top_features)
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
return get_cascade_rcnn(
name='resnet50_v1b', dataset='voc', pretrained=pretrained,
features=features, top_features=top_features,
top_features_2nd=top_features_2nd, top_features_3rd=top_features_3rd,
classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.3, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(14, 14), stride=16, clip=None,
rpn_channel=512, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=10000, rpn_train_post_nms=1000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
num_sample=192, pos_iou_thresh=0.5, pos_ratio=0.25,
**kwargs) | gluoncv/model_zoo/cascade_rcnn/cascade_rcnn.py | 0.815894 | 0.332148 |
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import framework
from pxr import Usd, UsdGeom, UsdShade, Vt
stage = framework.createWorkStage("fort.usda")
framework.appendLayer(stage, "more_materials.usda")
doorPrim = stage.GetPrimAtPath("/Meshes/Door/Cube_002")
leftTowerPrim = stage.GetPrimAtPath("/Meshes/LeftTower/Cylinder")
leftCanopyPrim = stage.GetPrimAtPath("/Meshes/LeftCanopy/Cone")
mainPrim = stage.GetPrimAtPath("/Meshes/Main/Cube")
outcroppingPrim = stage.GetPrimAtPath("/Meshes/Outcropping/Cube_001")
rightTowerPrim = stage.GetPrimAtPath("/Meshes/RightTower/Cylinder_001")
rightCanopyPrim = stage.GetPrimAtPath("/Meshes/RightCanopy/Cone_001")
blueMaterial = UsdShade.Material(stage.GetPrimAtPath("/Looks/Blue"))
redMaterial = UsdShade.Material(stage.GetPrimAtPath("/Looks/Red"))
greenMaterial = UsdShade.Material(stage.GetPrimAtPath("/Looks/Green"))
yellowMaterial = UsdShade.Material(stage.GetPrimAtPath("/Looks/Yellow"))
rootPrim = stage.GetPrimAtPath("/Meshes")
collections = [
Usd.CollectionAPI.ApplyCollection(rootPrim, "left"),
Usd.CollectionAPI.ApplyCollection(rootPrim, "right"),
Usd.CollectionAPI.ApplyCollection(rootPrim, "centre"),
Usd.CollectionAPI.ApplyCollection(rootPrim, "misc")
]
collections[0].IncludePath("/Meshes/LeftTower/Cylinder")
collections[0].IncludePath("/Meshes/LeftCanopy/Cone")
collections[1].IncludePath("/Meshes/RightTower/Cylinder_001")
collections[1].IncludePath("/Meshes/RightCanopy/Cone_001")
collections[2].IncludePath("/Meshes/Main/Cube")
collections[2].IncludePath("/Meshes/Outcropping/Cube_001")
collections[3].IncludePath("/Meshes/Door/Cube_002")
UsdShade.MaterialBindingAPI(rootPrim).Bind(collections[0], blueMaterial, "left")
UsdShade.MaterialBindingAPI(rootPrim).Bind(collections[1], redMaterial, "right")
UsdShade.MaterialBindingAPI(rootPrim).Bind(collections[2], greenMaterial, "centre")
UsdShade.MaterialBindingAPI(rootPrim).Bind(collections[3], yellowMaterial, "misc")
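# Added comment: each CollectionAPI above groups a set of prim paths under
# /Meshes, and MaterialBindingAPI.Bind(collection, material, bindingName)
# binds one material to every prim in that collection, so the four groups of
# fort parts pick up blue/red/green/yellow without any per-prim binding.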
framework.viewUsdStage(stage)
framework.printWorkStage(stage) | prototypes/fort_collections.py | 0.309754 | 0.117218 |
from __future__ import unicode_literals
import frappe
from six import string_types
import frappe.share
from frappe import _
from frappe.utils import cstr, now_datetime, cint, flt, get_time, get_datetime, get_link_to_form, date_diff, nowdate
from ifitwala_ed.controllers.status_updater import StatusUpdater
class UOMMustBeIntegerError(frappe.ValidationError): pass
class TransactionBase(StatusUpdater):
def validate_posting_time(self):
# set Edit Posting Date and Time to 1 while data import
if frappe.flags.in_import and self.posting_date:
self.set_posting_time = 1
if not getattr(self, 'set_posting_time', None):
now = now_datetime()
self.posting_date = now.strftime('%Y-%m-%d')
self.posting_time = now.strftime('%H:%M:%S.%f')
elif self.posting_time:
try:
get_time(self.posting_time)
except ValueError:
frappe.throw(_('Invalid Posting Time'))
def validate_uom_is_integer(self, uom_field, qty_fields):
validate_uom_is_integer(self, uom_field, qty_fields)
def validate_with_previous_doc(self, ref):
self.exclude_fields = ["conversion_factor", "uom"] if self.get('is_return') else []
for key, val in ref.items():
is_child = val.get("is_child_table")
ref_doc = {}
item_ref_dn = []
for d in self.get_all_children(self.doctype + " Item"):
ref_dn = d.get(val["ref_dn_field"])
if ref_dn:
if is_child:
self.compare_values({key: [ref_dn]}, val["compare_fields"], d)
if ref_dn not in item_ref_dn:
item_ref_dn.append(ref_dn)
elif not val.get("allow_duplicate_prev_row_id"):
frappe.throw(_("Duplicate row {0} with same {1}").format(d.idx, key))
elif ref_dn:
ref_doc.setdefault(key, [])
if ref_dn not in ref_doc[key]:
ref_doc[key].append(ref_dn)
if ref_doc:
self.compare_values(ref_doc, val["compare_fields"])
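# Hedged example (illustrative only, not from this file) of the `ref` mapping
# the method above expects; doctype and field names are placeholders:
# {
#     "Sales Order": {
#         "ref_dn_field": "sales_order",
#         "compare_fields": [["customer", "="], ["company", "="]],
#     },
#     "Sales Order Item": {
#         "ref_dn_field": "so_detail",
#         "compare_fields": [["rate", "="]],
#         "is_child_table": True,
#     },
# }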
def compare_values(self, ref_doc, fields, doc=None):
for reference_doctype, ref_dn_list in ref_doc.items():
for reference_name in ref_dn_list:
prevdoc_values = frappe.db.get_value(reference_doctype, reference_name,
[d[0] for d in fields], as_dict=1)
if not prevdoc_values:
frappe.throw(_("Invalid reference {0} {1}").format(reference_doctype, reference_name))
for field, condition in fields:
if prevdoc_values[field] is not None and field not in self.exclude_fields:
self.validate_value(field, condition, prevdoc_values[field], doc)
def validate_rate_with_reference_doc(self, ref_details):
buying_doctypes = ["Purchase Order", "Purchase Invoice", "Purchase Receipt"]
if self.doctype in buying_doctypes:
action = frappe.db.get_single_value("Buying Settings", "maintain_same_rate_action")
settings_doc = "Buying Settings"
else:
action = frappe.db.get_single_value("Selling Settings", "maintain_same_rate_action")
settings_doc = "Selling Settings"
for ref_dt, ref_dn_field, ref_link_field in ref_details:
for d in self.get("items"):
if d.get(ref_link_field):
ref_rate = frappe.db.get_value(ref_dt + " Item", d.get(ref_link_field), "rate")
if abs(flt(d.rate - ref_rate, d.precision("rate"))) >= .01:
if action == "Stop":
role_allowed_to_override = frappe.db.get_single_value(settings_doc, 'role_to_override_stop_action')
if role_allowed_to_override not in frappe.get_roles():
frappe.throw(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4})").format(
d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate))
else:
frappe.msgprint(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4})").format(
d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate), title=_("Warning"), indicator="orange")
def get_link_filters(self, for_doctype):
if hasattr(self, "prev_link_mapper") and self.prev_link_mapper.get(for_doctype):
fieldname = self.prev_link_mapper[for_doctype]["fieldname"]
# materialise the filter so the truthiness check below works on Python 3
values = tuple(filter(None, (item.as_dict()[fieldname] for item in self.items)))
if values:
ret = {
for_doctype : {
"filters": [[for_doctype, "name", "in", values]]
}
}
else:
ret = None
else:
ret = None
return ret
def delete_events(ref_type, ref_name):
events = frappe.db.sql_list(""" SELECT DISTINCT `tabEvent`.name
FROM `tabEvent`, `tabEvent Participants`
WHERE
`tabEvent`.name = `tabEvent Participants`.parent
and `tabEvent Participants`.reference_doctype = %s
and `tabEvent Participants`.reference_docname = %s
""", (ref_type, ref_name)) or []
if events:
frappe.delete_doc("Event", events, for_reload=True)
def validate_uom_is_integer(doc, uom_field, qty_fields, child_dt=None):
if isinstance(qty_fields, string_types):
qty_fields = [qty_fields]
distinct_uoms = list(set(d.get(uom_field) for d in doc.get_all_children()))
integer_uoms = list(filter(lambda uom: frappe.db.get_value("UOM", uom,
"must_be_whole_number", cache=True) or None, distinct_uoms))
if not integer_uoms:
return
for d in doc.get_all_children(parenttype=child_dt):
if d.get(uom_field) in integer_uoms:
for f in qty_fields:
qty = d.get(f)
if qty:
if abs(cint(qty) - flt(qty)) > 0.0000001:
frappe.throw(_("Row {1}: Quantity ({0}) cannot be a fraction. To allow this, disable '{2}' in UOM {3}.") \
.format(qty, d.idx, frappe.bold(_("Must be Whole Number")), frappe.bold(d.get(uom_field))),
UOMMustBeIntegerError) | ifitwala_ed/utilities/transaction_base.py | 0.349311 | 0.1495 |
from . import HermesTestCase
from .. import models
class PostListViewTestCase(HermesTestCase):
def url(self):
return super(PostListViewTestCase, self).url('hermes_post_list')
def test_context_contains_posts(self):
"""The PostListView Context should contain a QuerySet of all Posts"""
response = self.get(self.url())
expected = list(models.Post.objects.all())
self.assertEqual(expected, list(response.context['posts']))
class CategoryPostListViewTestCase(HermesTestCase):
def url(self, category):
return category.get_absolute_url()
def test_context_contains_posts(self):
"""The CategoryPostListView Context should contain a QuerySet of all
Posts in the given Category
"""
response = self.get(self.url(self.root_category))
expected = list(models.Post.objects.filter(category=self.root_category))
self.assertEqual(expected, list(response.context['posts']))
class ArchivePostListViewTestCase(HermesTestCase):
def url(self, year=None, month=None, day=None):
if year and month and day:
url_name = 'hermes_archive_year_month_day'
kwargs = {'year': year, 'month': month, 'day': day, }
elif year and month:
url_name = 'hermes_archive_year_month'
kwargs = {'year': year, 'month': month, }
else:
url_name = 'hermes_archive_year'
kwargs = {'year': year, }
return super(ArchivePostListViewTestCase, self).url(url_name, **kwargs)
def test_context_contains_posts_by_month_year_day(self):
"""The ArchivePostListView Context should contain a QuerySet of all
Posts on the given month/day/year
"""
response = self.get(self.url(year=2010, month=6, day=10))
expected = list(models.Post.objects.created_on(year=2010, month=6, day=10))
self.assertEqual(expected, list(response.context['posts']))
def test_context_contains_posts_by_month_year(self):
"""The ArchivePostListView Context should contain a QuerySet of all
Posts on the given month/day
"""
response = self.get(self.url(year=2011, month=7))
expected = list(models.Post.objects.created_on(year=2011, month=7))
self.assertEqual(expected, list(response.context['posts']))
def test_context_contains_posts_by_year(self):
"""The ArchivePostListView Context should contain a QuerySet of all
Posts in the given year
"""
response = self.get(self.url(year=2012))
expected = list(models.Post.objects.created_on(year=2012))
self.assertEqual(expected, list(response.context['posts']))
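# Note (added comment): the created_on(year=..., month=..., day=...) calls in
# these tests assume a custom manager method on Post.objects that filters
# posts by creation date at the given granularity; it is not defined in this
# test module.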
class PostDetailViewTestCase(HermesTestCase):
def url(self, post):
return post.get_absolute_url()
def test_context_contains_post(self):
response = self.get(self.url(self.post1))
expected = self.post1
self.assertEqual(expected, response.context['post']) | hermes/tests/test_views.py | 0.68056 | 0.403802 |
import numpy as np
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import shapely.geometry
from shapely.geometry.point import Point
from shapely.geometry.linestring import LineString
from shapely.geometry.polygon import LinearRing, Polygon
import shapely.affinity as affinity
from starr.misc import pairwise
from shapely.ops import unary_union
def plot_line_string(line_string, color='k'):
x = [line_string.coords[0][0], line_string.coords[1][0]]
y = [line_string.coords[0][1], line_string.coords[1][1]]
plt.plot(x,y, lw=3.0, color=color, zorder=10)
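# Return the length of the shortest segment between consecutive coordinates of `polygon`,
# optionally drawing each segment for visual debugging.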
def minimum_edge_length(polygon, plot_segments=False):
min_size = np.inf
#previous_point = Point(polygon.coords[-1])
for ip, point_tuple in enumerate(polygon.coords):
point = Point(point_tuple)
if ip == 0:
previous_point = point
continue
distance = point.distance(previous_point)
if plot_segments:
line_string = LineString([previous_point, point])
plot_line_string(line_string, color='orange')
if distance < min_size:
min_size = distance
previous_point = point
return min_size
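# Union the polygons of every object's geometry component; returns the merged geometry
# and a flag indicating whether the result is a MultiPolygon.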
def make_union(object_list):
all_polygons = []
for obj in object_list:
all_polygons.append(obj.geometry.polygon)
union = unary_union(all_polygons)
if isinstance(union, shapely.geometry.polygon.Polygon):
multi_poly = False
elif isinstance(union, shapely.geometry.multipolygon.MultiPolygon):
multi_poly = True
else:
raise ValueError("unknown result of polygon union")
return union, multi_poly
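# Base class for a simulation object's geometry: keeps a shapely polygon in sync with the
# object's position and rotation and provides intersection/buffer/normal helpers.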
class GeometryComponent(ABC):
def __init__(self):
self._position = np.zeros(2)
self._rotation = 0.0
self.polygon = None
def update(self, simulation_object):
new_pos = simulation_object.position
new_rot = simulation_object.rotation
translation = new_pos-self._position
rotation = new_rot-self._rotation
self.polygon = affinity.translate(self.polygon,
xoff=translation[0],
yoff=translation[1])
self.polygon = affinity.rotate(self.polygon,
rotation)
self._position = np.array(new_pos)
self._rotation = new_rot
@abstractmethod
def create_polygon(self, position, rotation):
pass
def get_regular_grid_ranges(self, origin, spacing, include_edges=False):
raise NotImplementedError("regular grid has not yet "+
"been implemented")
def intersects(self, other):
return self.polygon.intersects(other.polygon)
def intersection(self, other):
return self.polygon.intersection(other.polygon)
def make_buffer(self, thickness):
inner = self.polygon
outer = Polygon(inner.buffer(thickness).exterior)
return outer.difference(inner)
def area(self):
return self.polygon.area
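    # Find the polygon edge closest to the collision point (point-to-segment distance taken
    # as triangle area over base length) and return a unit vector perpendicular to that edge.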
def get_normal(self, collision):
side = 0
min_distance = np.inf
collision_side = None
for p0, p1 in pairwise(self.polygon.exterior.coords):
line = LineString( [p0,p1])
ring = LinearRing( [p0,p1,collision])
poly = Polygon(ring)
distance = poly.area/line.length
if distance < min_distance:
min_distance = distance
collision_side = side
side += 1
points = self.polygon.exterior.coords[collision_side:collision_side+2]
seg_vec = np.array(points[1])-np.array(points[0])
seg_vec = seg_vec/np.linalg.norm(seg_vec)
normal = np.array([-1*seg_vec[1], seg_vec[0]])
return normal
class Circle(GeometryComponent):
def __init__(self, radius):
super().__init__()
self.radius = radius
def create_polygon(self, position, rotation):
circ = Point(position).buffer(1)
circ = affinity.rotate(circ, rotation)
self.polygon = affinity.scale(circ, self.radius, self.radius)
def get_normal(self, collision):
normal_dir = collision-self._position
return normal_dir / np.linalg.norm(normal_dir)
class Rectangle(GeometryComponent):
def __init__(self, side_length_a, side_length_b):
super().__init__()
self.side_length_a = side_length_a
self.side_length_b = side_length_b
def create_polygon(self, position, rotation):
minx = position[0]-self.side_length_a*0.5
miny = position[1]-self.side_length_b*0.5
maxx = position[0]+self.side_length_a*0.5
maxy = position[1]+self.side_length_b*0.5
box = shapely.geometry.box(minx, miny, maxx, maxy)
box = affinity.rotate(box, rotation)
self.polygon = box
def get_regular_grid_ranges(self, origin, spacing):
x0 = -(0.5*self.side_length_a)
y0 = -(0.5*self.side_length_b)
x1 = self.side_length_a*.5
y1 = self.side_length_b*.5
x_steps_left = np.ceil((x0-origin[0]) / spacing)
#x_left = x_steps_left*spacing
x_steps_right = np.floor((x1-origin[0]) / spacing)
#x_right = x_steps_right*spacing
y_steps_down = np.ceil((y0-origin[1]) / spacing)
#y_down = y_steps_down*spacing
y_steps_up = np.floor((y1-origin[1]) / spacing)
return [[x_steps_left, x_steps_right], [y_steps_down, y_steps_up]]
#y_up = y_steps_up*spacing
#x = np.arange(x_left, x_right+spacing, spacing) + origin[0]
#y = np.arange(y_down, y_up+spacing, spacing) + origin[1]
class GeneralPolygon(GeometryComponent):
def __init__(self, polygon):
super().__init__()
self.polygon = polygon
def create_polygon(self, position, rotation):
pass | src/starr/geometry_component.py | import numpy as np
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import shapely.geometry
from shapely.geometry.point import Point
from shapely.geometry.linestring import LineString
from shapely.geometry.polygon import LinearRing, Polygon
import shapely.affinity as affinity
from starr.misc import pairwise
from shapely.ops import unary_union
def plot_line_string(line_string, color='k'):
x = [line_string.coords[0][0], line_string.coords[1][0]]
y = [line_string.coords[0][1], line_string.coords[1][1]]
plt.plot(x,y, lw=3.0, color=color, zorder=10)
def minimum_edge_length(polygon, plot_segments=False):
min_size = np.inf
#previous_point = Point(polygon.coords[-1])
for ip, point_tuple in enumerate(polygon.coords):
point = Point(point_tuple)
if ip == 0:
previous_point = point
continue
distance = point.distance(previous_point)
if plot_segments:
line_string = LineString([previous_point, point])
plot_line_string(line_string, color='orange')
if distance < min_size:
min_size = distance
previous_point = point
return min_size
def make_union(object_list):
all_polygons = []
for obj in object_list:
all_polygons.append(obj.geometry.polygon)
union = unary_union(all_polygons)
if isinstance(union, shapely.geometry.polygon.Polygon):
multi_poly = False
elif isinstance(union, shapely.geometry.multipolygon.MultiPolygon):
multi_poly = True
else:
raise ValueError("unknown result of polygon union")
return union, multi_poly
class GeometryComponent(ABC):
def __init__(self):
self._position = np.zeros(2)
self._rotation = 0.0
self.polygon = None
def update(self, simulation_object):
new_pos = simulation_object.position
new_rot = simulation_object.rotation
translation = new_pos-self._position
rotation = new_rot-self._rotation
self.polygon = affinity.translate(self.polygon,
xoff=translation[0],
yoff=translation[1])
self.polygon = affinity.rotate(self.polygon,
rotation)
self._position = np.array(new_pos)
self._rotation = new_rot
@abstractmethod
def create_polygon(self, position, rotation):
pass
def get_regular_grid_ranges(self, origin, spacing, include_edges=False):
raise NotImplementedError("regular grid has not yet "+
"been implemented")
def intersects(self, other):
return self.polygon.intersects(other.polygon)
def intersection(self, other):
return self.polygon.intersection(other.polygon)
def make_buffer(self, thickness):
inner = self.polygon
outer = Polygon(inner.buffer(thickness).exterior)
return outer.difference(inner)
def area(self):
return self.polygon.area
def get_normal(self, collision):
side = 0
min_distance = np.inf
collision_side = None
for p0, p1 in pairwise(self.polygon.exterior.coords):
line = LineString( [p0,p1])
ring = LinearRing( [p0,p1,collision])
poly = Polygon(ring)
distance = poly.area/line.length
if distance < min_distance:
min_distance = distance
collision_side = side
side += 1
points = self.polygon.exterior.coords[collision_side:collision_side+2]
seg_vec = np.array(points[1])-np.array(points[0])
seg_vec = seg_vec/np.linalg.norm(seg_vec)
normal = np.array([-1*seg_vec[1], seg_vec[0]])
return normal
class Circle(GeometryComponent):
def __init__(self, radius):
super().__init__()
self.radius = radius
def create_polygon(self, position, rotation):
circ = Point(position).buffer(1)
circ = affinity.rotate(circ, rotation)
self.polygon = affinity.scale(circ, self.radius, self.radius)
def get_normal(self, collision):
normal_dir = collision-self._position
return normal_dir / np.linalg.norm(normal_dir)
class Rectangle(GeometryComponent):
def __init__(self, side_length_a, side_length_b):
super().__init__()
self.side_length_a = side_length_a
self.side_length_b = side_length_b
def create_polygon(self, position, rotation):
minx = position[0]-self.side_length_a*0.5
miny = position[1]-self.side_length_b*0.5
maxx = position[0]+self.side_length_a*0.5
maxy = position[1]+self.side_length_b*0.5
box = shapely.geometry.box(minx, miny, maxx, maxy)
box = affinity.rotate(box, rotation)
self.polygon = box
def get_regular_grid_ranges(self, origin, spacing):
x0 = -(0.5*self.side_length_a)
y0 = -(0.5*self.side_length_b)
x1 = self.side_length_a*.5
y1 = self.side_length_b*.5
x_steps_left = np.ceil((x0-origin[0]) / spacing)
#x_left = x_steps_left*spacing
x_steps_right = np.floor((x1-origin[0]) / spacing)
#x_right = x_steps_right*spacing
y_steps_down = np.ceil((y0-origin[1]) / spacing)
#y_down = y_steps_down*spacing
y_steps_up = np.floor((y1-origin[1]) / spacing)
return [[x_steps_left, x_steps_right], [y_steps_down, y_steps_up]]
#y_up = y_steps_up*spacing
#x = np.arange(x_left, x_right+spacing, spacing) + origin[0]
#y = np.arange(y_down, y_up+spacing, spacing) + origin[1]
class GeneralPolygon(GeometryComponent):
def __init__(self, polygon):
super().__init__()
self.polygon = polygon
def create_polygon(self, position, rotation):
pass | 0.737442 | 0.592313 |
from unittest import mock
import pytest
from tulius.core.ckeditor import html_converter
from djfw.wysibb import models
from djfw.wysibb.templatetags import bbcodes
@pytest.mark.parametrize('data,value', [
[ # Check structure support
'aaa<b>d<some_tag>f</some_tag>f<s>fd</s>ff</b>bb',
'aaa[b]dff[s]fd[/s]ff[/b]bb'
],
[ # Test self closing tags and BR tag convert
'<br/><b>df<sometag/>d<br/>f<br/>brb<br/></b>',
'\n[b]dfd\nf\nbrb\n[/b]'
],
[ # Closing tag typo
'<br/><b>df<sometag/>d<br/>f<br/>brb<br/><b/>',
'\n[b]dfd\nf\nbrb\n[b][/b][/b]'
],
[ # Missing closing tag
'1<b>22<s>333</b>',
'1[b]22[s]333[/s][/b]'
],
[ # Close tag without opening one
'1<b>22</s>3</br>33</sometag>4</b></u>',
'1[b]223334[/b]'
],
])
def test_html_convertor(data, value):
assert html_converter.html_to_bb(data) == value
@pytest.mark.parametrize('data,value', [
[ # check ul list
'11<ul>2<li>33</li><li></li><li>5</li></ul>',
'11[list]2[*]33\n[*]\n[*]5\n[/list]'
],
[ # check ol list
'11<ol>2<li>33</li><li></li><li>5</li></ol>',
'11[list=1]2[*]33\n[*]\n[*]5\n[/list]'
],
])
def test_lists(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid colors
'11<font color="someth">23</font>', '1123'
],
[ # check valid number color
'11<font color="#ff00ff">23</font>', '11[color=#ff00ff]23[/color]'
],
[ # check valid text color
'11<font color="red">23</font>', '11[color=red]23[/color]'
],
[ # check empty color
'11<font>23</font>', '1123'
],
])
def test_font(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check empty span
'11<span>23</span>', '1123'
],
[ # check invalid color
'11<span style="color: smth">23</span>', '1123'
],
[ # check invalid size
'11<span style="font-size: 100">23</span>', '1123'
],
[ # check color
'11<span style="color: #ff00ff">23</span>',
'11[color=#ff00ff]23[/color]'
],
[ # check size
'11<span style="font-size: 150%">23</span>',
'11[size=150]23[/size]'
],
[ # check together color and size
'11<span style="color: #ff00ff; font-size: 150%">23</span>',
'11[color=#ff00ff][size=150]23[/size][/color]'
],
])
def test_span(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid a tag
'11<a>23</a>', '1123'
],
[ # check valid tag
'11<a href="tulius.com">23</a>', '11[url=tulius.com]23[/url]'
],
[ # check removing bad chars
'11<a href="tulius.com]bad">23</a>', '11[url=tulius.combad]23[/url]'
],
])
def test_a_tag(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid a tag
'11<img>23</img>', '11'
],
[ # check valid tag
'11<img src="tulius.com" alt="23"/>', '11[img=tulius.com]23[/img]'
],
[ # check removing bad chars
'11<img src="tulius.com]bad"/>', '11[img=tulius.combad][/img]'
],
])
def test_img_tag(data, value):
with mock.patch.object(bbcodes.smiles, 'smile_dict', return_value={}):
assert html_converter.HtmlConverter().convert(data) == value
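# Patch the smile lookups so bbcode rendering sees a single ':angel:' smile without touching the database.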
@pytest.fixture(name='smiles')
def smiles_fixture():
obj = models.Smile(name='angel', text=':angel:')
obj.image.name = 'wysibb/smiles/angel.gif'
smiles = {':angel:': '/media/wysibb/smiles/angel.gif'}
with mock.patch.object(bbcodes.smiles, 'smile_dict', return_value=smiles):
with mock.patch.object(bbcodes.smiles, 'get_list', return_value=[obj]):
yield
def test_smiles(smiles):
original = '<p><img alt=":angel:" src="/media/wysibb/smiles/angel.gif"'\
' style="height:26px; width:27px" title=":angel:" /></p>'
converted = html_converter.html_to_bb(original)
result = bbcodes.bbcode(converted)
assert result == '<img class="sm" src="/media/wysibb/smiles/angel.gif"' \
' title="angel" /><br/>'
def test_special_symbols(smiles):
original = 'Ï'
converted = html_converter.html_to_bb(original)
assert bbcodes.bbcode(converted) == original
def test_paragraph_line_breaks():
original = '<p>1</p>\n\n<p>2</p>\n\n<p>3</p>\n'
assert html_converter.html_to_bb(original) == '1\n2\n3\n' | tests/test_html_converter.py | from unittest import mock
import pytest
from tulius.core.ckeditor import html_converter
from djfw.wysibb import models
from djfw.wysibb.templatetags import bbcodes
@pytest.mark.parametrize('data,value', [
[ # Check structure support
'aaa<b>d<some_tag>f</some_tag>f<s>fd</s>ff</b>bb',
'aaa[b]dff[s]fd[/s]ff[/b]bb'
],
[ # Test self closing tags and BR tag convert
'<br/><b>df<sometag/>d<br/>f<br/>brb<br/></b>',
'\n[b]dfd\nf\nbrb\n[/b]'
],
[ # Closing tag typo
'<br/><b>df<sometag/>d<br/>f<br/>brb<br/><b/>',
'\n[b]dfd\nf\nbrb\n[b][/b][/b]'
],
[ # Missing closing tag
'1<b>22<s>333</b>',
'1[b]22[s]333[/s][/b]'
],
[ # Close tag without opening one
'1<b>22</s>3</br>33</sometag>4</b></u>',
'1[b]223334[/b]'
],
])
def test_html_convertor(data, value):
assert html_converter.html_to_bb(data) == value
@pytest.mark.parametrize('data,value', [
[ # check ul list
'11<ul>2<li>33</li><li></li><li>5</li></ul>',
'11[list]2[*]33\n[*]\n[*]5\n[/list]'
],
[ # check ol list
'11<ol>2<li>33</li><li></li><li>5</li></ol>',
'11[list=1]2[*]33\n[*]\n[*]5\n[/list]'
],
])
def test_lists(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid colors
'11<font color="someth">23</font>', '1123'
],
[ # check valid number color
'11<font color="#ff00ff">23</font>', '11[color=#ff00ff]23[/color]'
],
[ # check valid text color
'11<font color="red">23</font>', '11[color=red]23[/color]'
],
[ # check empty color
'11<font>23</font>', '1123'
],
])
def test_font(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check empty span
'11<span>23</span>', '1123'
],
[ # check invalid color
'11<span style="color: smth">23</span>', '1123'
],
[ # check invalid size
'11<span style="font-size: 100">23</span>', '1123'
],
[ # check color
'11<span style="color: #ff00ff">23</span>',
'11[color=#ff00ff]23[/color]'
],
[ # check size
'11<span style="font-size: 150%">23</span>',
'11[size=150]23[/size]'
],
[ # check together color and size
'11<span style="color: #ff00ff; font-size: 150%">23</span>',
'11[color=#ff00ff][size=150]23[/size][/color]'
],
])
def test_span(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid a tag
'11<a>23</a>', '1123'
],
[ # check valid tag
'11<a href="tulius.com">23</a>', '11[url=tulius.com]23[/url]'
],
[ # check removing bad chars
'11<a href="tulius.com]bad">23</a>', '11[url=tulius.combad]23[/url]'
],
])
def test_a_tag(data, value):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.mark.parametrize('data,value', [
[ # check invalid a tag
'11<img>23</img>', '11'
],
[ # check valid tag
'11<img src="tulius.com" alt="23"/>', '11[img=tulius.com]23[/img]'
],
[ # check removing bad chars
'11<img src="tulius.com]bad"/>', '11[img=tulius.combad][/img]'
],
])
def test_img_tag(data, value):
with mock.patch.object(bbcodes.smiles, 'smile_dict', return_value={}):
assert html_converter.HtmlConverter().convert(data) == value
@pytest.fixture(name='smiles')
def smiles_fixture():
obj = models.Smile(name='angel', text=':angel:')
obj.image.name = 'wysibb/smiles/angel.gif'
smiles = {':angel:': '/media/wysibb/smiles/angel.gif'}
with mock.patch.object(bbcodes.smiles, 'smile_dict', return_value=smiles):
with mock.patch.object(bbcodes.smiles, 'get_list', return_value=[obj]):
yield
def test_smiles(smiles):
original = '<p><img alt=":angel:" src="/media/wysibb/smiles/angel.gif"'\
' style="height:26px; width:27px" title=":angel:" /></p>'
converted = html_converter.html_to_bb(original)
result = bbcodes.bbcode(converted)
assert result == '<img class="sm" src="/media/wysibb/smiles/angel.gif"' \
' title="angel" /><br/>'
def test_special_symbols(smiles):
original = 'Ï'
converted = html_converter.html_to_bb(original)
assert bbcodes.bbcode(converted) == original
def test_paragraph_line_breaks():
original = '<p>1</p>\n\n<p>2</p>\n\n<p>3</p>\n'
assert html_converter.html_to_bb(original) == '1\n2\n3\n' | 0.671255 | 0.576482 |
__version__ = 2.1
__all__ = ['fatal_error', 'print_image', 'plot_image', 'color_palette', 'plot_colorbar', 'apply_mask', 'readimage',
'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'plot_hist', 'image_add',
           'image_subtract', 'erode', 'dilate', 'watershed_segmentation', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab',
'rgb2gray', 'binary_threshold', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor',
'find_objects', 'define_roi', 'roi_objects', 'object_composition', 'analyze_object', 'analyze_bound_horizontal',
'analyze_bound_vertical','analyze_bound', 'analyze_color', 'analyze_NIR_intensity', 'fluor_fvfm', 'print_results', 'resize', 'flip',
'crop_position_mask', 'get_nir', 'adaptive_threshold', 'otsu_auto_threshold', 'report_size_marker_area',
'white_balance', 'triangle_auto_threshold', 'acute_vertex', 'scale_features', 'landmark_reference_pt_dist',
'x_axis_pseudolandmarks', 'y_axis_pseudolandmarks', 'gaussian_blur', 'cluster_contours',
'cluster_contour_splitimg', 'rotate_img', 'rotate','shift_img', 'output_mask', 'auto_crop',
'background_subtraction', 'naive_bayes_classifier', 'acute','distance_transform']
from plantcv.fatal_error import fatal_error
from plantcv.print_image import print_image
from plantcv.plot_image import plot_image
from plantcv.color_palette import color_palette
from plantcv.plot_colorbar import plot_colorbar
from plantcv.apply_mask import apply_mask
from plantcv.readimage import readimage
from plantcv.laplace_filter import laplace_filter
from plantcv.sobel_filter import sobel_filter
from plantcv.scharr_filter import scharr_filter
from plantcv.hist_equalization import hist_equalization
from plantcv.plot_hist import plot_hist
from plantcv.image_add import image_add
from plantcv.image_subtract import image_subtract
from plantcv.erode import erode
from plantcv.dilate import dilate
from plantcv.watershed import watershed_segmentation
from plantcv.rectangle_mask import rectangle_mask
from plantcv.rgb2gray_hsv import rgb2gray_hsv
from plantcv.rgb2gray_lab import rgb2gray_lab
from plantcv.rgb2gray import rgb2gray
from plantcv.binary_threshold import binary_threshold
from plantcv.median_blur import median_blur
from plantcv.fill import fill
from plantcv.invert import invert
from plantcv.logical_and import logical_and
from plantcv.logical_or import logical_or
from plantcv.logical_xor import logical_xor
from plantcv.find_objects import find_objects
from plantcv.define_roi import define_roi
from plantcv.roi_objects import roi_objects
from plantcv.object_composition import object_composition
from plantcv.analyze_object import analyze_object
from plantcv.analyze_bound_horizontal import analyze_bound_horizontal
from plantcv.analyze_bound_vertical import analyze_bound_vertical
from plantcv.analyze_bound import analyze_bound
from plantcv.analyze_color import analyze_color
from plantcv.analyze_NIR_intensity import analyze_NIR_intensity
from plantcv.fluor_fvfm import fluor_fvfm
from plantcv.print_results import print_results
from plantcv.resize import resize
from plantcv.flip import flip
from plantcv.crop_position_mask import crop_position_mask
from plantcv.get_nir import get_nir
from plantcv.adaptive_threshold import adaptive_threshold
from plantcv.otsu_auto_threshold import otsu_auto_threshold
from plantcv.report_size_marker_area import report_size_marker_area
from plantcv.white_balance import white_balance
from plantcv.triangle_auto_threshold import triangle_auto_threshold
from plantcv.acute_vertex import acute_vertex
from plantcv.scale_features import scale_features
from plantcv.landmark_reference_pt_dist import landmark_reference_pt_dist
from plantcv.x_axis_pseudolandmarks import x_axis_pseudolandmarks
from plantcv.y_axis_pseudolandmarks import y_axis_pseudolandmarks
from plantcv.gaussian_blur import gaussian_blur
from plantcv.cluster_contours import cluster_contours
from plantcv.cluster_contour_splitimg import cluster_contour_splitimg
from plantcv.rotate import rotate
from plantcv.rotate_img import rotate_img
from plantcv.shift_img import shift_img
from plantcv.output_mask_ori_img import output_mask
from plantcv.auto_crop import auto_crop
from plantcv.background_subtraction import background_subtraction
from plantcv.naive_bayes_classifier import naive_bayes_classifier
from plantcv.acute import acute
from plantcv.distance_transform import distance_transform
# add new functions to end of lists
class Params:
"""PlantCV parameters class
Keyword arguments/parameters:
device = device number. Used to count steps in the pipeline. (default: 0)
debug = None, print, or plot. Print = save to file, Plot = print to screen. (default: None)
:param device: int
:param debug: str
"""
def __init__(self, device=0, debug=None):
self.device = device
self.debug = debug
# Initialize an instance of the Params class with default values
# params is available when plantcv is imported
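# Illustrative usage (assumed calling convention): import plantcv as pcv; pcv.params.debug = "print"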
params = Params() | plantcv/__init__.py | __version__ = 2.1
__all__ = ['fatal_error', 'print_image', 'plot_image', 'color_palette', 'plot_colorbar', 'apply_mask', 'readimage',
'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'plot_hist', 'image_add',
           'image_subtract', 'erode', 'dilate', 'watershed_segmentation', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab',
'rgb2gray', 'binary_threshold', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor',
'find_objects', 'define_roi', 'roi_objects', 'object_composition', 'analyze_object', 'analyze_bound_horizontal',
'analyze_bound_vertical','analyze_bound', 'analyze_color', 'analyze_NIR_intensity', 'fluor_fvfm', 'print_results', 'resize', 'flip',
'crop_position_mask', 'get_nir', 'adaptive_threshold', 'otsu_auto_threshold', 'report_size_marker_area',
'white_balance', 'triangle_auto_threshold', 'acute_vertex', 'scale_features', 'landmark_reference_pt_dist',
'x_axis_pseudolandmarks', 'y_axis_pseudolandmarks', 'gaussian_blur', 'cluster_contours',
'cluster_contour_splitimg', 'rotate_img', 'rotate','shift_img', 'output_mask', 'auto_crop',
'background_subtraction', 'naive_bayes_classifier', 'acute','distance_transform']
from plantcv.fatal_error import fatal_error
from plantcv.print_image import print_image
from plantcv.plot_image import plot_image
from plantcv.color_palette import color_palette
from plantcv.plot_colorbar import plot_colorbar
from plantcv.apply_mask import apply_mask
from plantcv.readimage import readimage
from plantcv.laplace_filter import laplace_filter
from plantcv.sobel_filter import sobel_filter
from plantcv.scharr_filter import scharr_filter
from plantcv.hist_equalization import hist_equalization
from plantcv.plot_hist import plot_hist
from plantcv.image_add import image_add
from plantcv.image_subtract import image_subtract
from plantcv.erode import erode
from plantcv.dilate import dilate
from plantcv.watershed import watershed_segmentation
from plantcv.rectangle_mask import rectangle_mask
from plantcv.rgb2gray_hsv import rgb2gray_hsv
from plantcv.rgb2gray_lab import rgb2gray_lab
from plantcv.rgb2gray import rgb2gray
from plantcv.binary_threshold import binary_threshold
from plantcv.median_blur import median_blur
from plantcv.fill import fill
from plantcv.invert import invert
from plantcv.logical_and import logical_and
from plantcv.logical_or import logical_or
from plantcv.logical_xor import logical_xor
from plantcv.find_objects import find_objects
from plantcv.define_roi import define_roi
from plantcv.roi_objects import roi_objects
from plantcv.object_composition import object_composition
from plantcv.analyze_object import analyze_object
from plantcv.analyze_bound_horizontal import analyze_bound_horizontal
from plantcv.analyze_bound_vertical import analyze_bound_vertical
from plantcv.analyze_bound import analyze_bound
from plantcv.analyze_color import analyze_color
from plantcv.analyze_NIR_intensity import analyze_NIR_intensity
from plantcv.fluor_fvfm import fluor_fvfm
from plantcv.print_results import print_results
from plantcv.resize import resize
from plantcv.flip import flip
from plantcv.crop_position_mask import crop_position_mask
from plantcv.get_nir import get_nir
from plantcv.adaptive_threshold import adaptive_threshold
from plantcv.otsu_auto_threshold import otsu_auto_threshold
from plantcv.report_size_marker_area import report_size_marker_area
from plantcv.white_balance import white_balance
from plantcv.triangle_auto_threshold import triangle_auto_threshold
from plantcv.acute_vertex import acute_vertex
from plantcv.scale_features import scale_features
from plantcv.landmark_reference_pt_dist import landmark_reference_pt_dist
from plantcv.x_axis_pseudolandmarks import x_axis_pseudolandmarks
from plantcv.y_axis_pseudolandmarks import y_axis_pseudolandmarks
from plantcv.gaussian_blur import gaussian_blur
from plantcv.cluster_contours import cluster_contours
from plantcv.cluster_contour_splitimg import cluster_contour_splitimg
from plantcv.rotate import rotate
from plantcv.rotate_img import rotate_img
from plantcv.shift_img import shift_img
from plantcv.output_mask_ori_img import output_mask
from plantcv.auto_crop import auto_crop
from plantcv.background_subtraction import background_subtraction
from plantcv.naive_bayes_classifier import naive_bayes_classifier
from plantcv.acute import acute
from plantcv.distance_transform import distance_transform
# add new functions to end of lists
class Params:
"""PlantCV parameters class
Keyword arguments/parameters:
device = device number. Used to count steps in the pipeline. (default: 0)
debug = None, print, or plot. Print = save to file, Plot = print to screen. (default: None)
:param device: int
:param debug: str
"""
def __init__(self, device=0, debug=None):
self.device = device
self.debug = debug
# Initialize an instance of the Params class with default values
# params is available when plantcv is imported
params = Params() | 0.678007 | 0.391929 |
def time_validation(time):
if ":" not in time:
return False, "Incorrect time format try hour:min"
hour = time.split(":")[0]
min = time.split(":")[1]
if int(hour) not in range(0, 24):
return False, "Incorrect hour format, hour must be between 0-23"
if int(min) not in range(0, 60):
return False, "Incorrect min format, minutes must be between 00-59"
return True, ""
def deep_time_validation(time):
if not isinstance(time, str) or time.count(":") !=2:
return False, "Incorrect time format try hour:min:seconds"
hour, min, seconds = time.split(":")
if int(hour) not in range(0, 24):
return False, "Incorrect hour format, hour must be between 0-23"
if int(min) not in range(0, 60):
return False, "Incorrect min format, minutes must be between 00-59"
if int(seconds) not in range(0, 60):
return False, "Incorrect seconds format, seconds must be between 00-59"
return True, ""
def date_validation(date):
if not isinstance(date, str) or date.count("-") !=2:
return False, "Incorrect time format try yyyy-mm-dd"
year, month, day = date.split("-")
if int(year) not in range(1970, 2023):
return False, "Incorrect year format, year must be between 1970 and 2022"
if int(month) not in range(0, 13):
return False, "Incorrect month format, month must be between 00 and 12"
if int(day) not in range(0, 32):
return False, "Incorrect day format, day must be between 00-31"
return True, ""
def deep_datetime_validation(datetime):
if " " not in datetime:
return False, "Incorrect datetime format try yyyy-mm-dd hour:min:seconds"
date, time = datetime.split()
valid_date, msg = date_validation(date)
if not valid_date:
return valid_date, msg
return deep_time_validation(time)
def password_validation(password):
if len(password) < 6:
return False, "Password too small, must be at least 6 characters long"
if not any(c.isupper() for c in password):
return False, "Password must contain at least one upper letter"
if not any(c.isdigit() for c in password):
return False, "Password must contain at least one digit"
return True, ""
def boolean_validation(val):
if val.lower() in ['true', '1']:
return True, "true"
if val.lower() in ['0', 'false']:
return True, "false"
return False, "wrong value must be one of: true, false, 0, 1" | SmartSleep/validation.py | def time_validation(time):
if ":" not in time:
return False, "Incorrect time format try hour:min"
hour = time.split(":")[0]
min = time.split(":")[1]
if int(hour) not in range(0, 24):
return False, "Incorrect hour format, hour must be between 0-23"
if int(min) not in range(0, 60):
return False, "Incorrect min format, minutes must be between 00-59"
return True, ""
def deep_time_validation(time):
if not isinstance(time, str) or time.count(":") !=2:
return False, "Incorrect time format try hour:min:seconds"
hour, min, seconds = time.split(":")
if int(hour) not in range(0, 24):
return False, "Incorrect hour format, hour must be between 0-23"
if int(min) not in range(0, 60):
return False, "Incorrect min format, minutes must be between 00-59"
if int(seconds) not in range(0, 60):
return False, "Incorrect seconds format, seconds must be between 00-59"
return True, ""
def date_validation(date):
if not isinstance(date, str) or date.count("-") !=2:
return False, "Incorrect time format try yyyy-mm-dd"
year, month, day = date.split("-")
if int(year) not in range(1970, 2023):
return False, "Incorrect year format, year must be between 1970 and 2022"
if int(month) not in range(0, 13):
return False, "Incorrect month format, month must be between 00 and 12"
if int(day) not in range(0, 32):
return False, "Incorrect day format, day must be between 00-31"
return True, ""
def deep_datetime_validation(datetime):
if " " not in datetime:
return False, "Incorrect datetime format try yyyy-mm-dd hour:min:seconds"
date, time = datetime.split()
valid_date, msg = date_validation(date)
if not valid_date:
return valid_date, msg
return deep_time_validation(time)
def password_validation(password):
if len(password) < 6:
return False, "Password too small, must be at least 6 characters long"
if not any(c.isupper() for c in password):
return False, "Password must contain at least one upper letter"
if not any(c.isdigit() for c in password):
return False, "Password must contain at least one digit"
return True, ""
def boolean_validation(val):
if val.lower() in ['true', '1']:
return True, "true"
if val.lower() in ['0', 'false']:
return True, "false"
return False, "wrong value must be one of: true, false, 0, 1" | 0.476092 | 0.24971 |
import sys, argparse
import socket
import serial
import json
import struct
import numpy as np
import time
import multiprocessing
import matplotlib
matplotlib.use('GTKAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
style.use('ggplot')
import tkinter as tk
from tkinter import ttk
LARGE_FONT= ("Verdana", 12)
class PdoaApp(tk.Tk):
def __init__(self, data_feed=None):
tk.Tk.__init__(self) #, *args, **kwargs
#tk.Tk.iconbitmap(self, default="clienticon.ico")
tk.Tk.wm_title(self, "Pdoa visualisation")
#Create a queue to share data between process
q = multiprocessing.Queue()
#Create and start the datafeed process
self.input_data = multiprocessing.Process(None, input_thread, args=(q, data_feed,))
self.input_data.start()
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
frame = CirPlots(container, self, data_queue=q)
frame.grid(row=0, column=0, sticky="nsew")
frame.tkraise()
def close(self):
print("quitting")
self.input_data.terminate()
self.destroy()
exit(0)
class CirPlots(tk.Frame):
def __init__(self, parent, controller, data_queue=None):
self.idx = 0
self.cir_ymin = -1000
self.cir_ymax = 1000
self.tic = time.time()
self.pause = 0
self.pdoa_field = 'pd'
self.parent = parent;
tk.Frame.__init__(self, parent)
#
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.top_frame = tk.Frame(self, bg='cyan', pady=3)
self.top_frame.grid(row=0, sticky="ew")
self.top_frame.grid_rowconfigure(0, weight=1)
self.top_frame.grid_columnconfigure(1, weight=1)
quit_btn = tk.Button(self.top_frame, text="QUIT", fg="red",
command=controller.close)
quit_btn.grid(row=0, column=0)
reset_btn = tk.Button(self.top_frame, text="RESET", fg="black",
command=lambda: self.resetplot())
reset_btn.grid(row=0, column=1)
pd_toggle_btn = tk.Button(self.top_frame, text="PDSRC", fg="black",
command=lambda: self.toggle_pd_src())
pd_toggle_btn.grid(row=0, column=2)
pause_btn = tk.Button(self.top_frame, text="PAUSE", fg="black",
command=lambda: self.pauseplot())
pause_btn.grid(row=0, column=3)
self.queue_stats = tk.Label(self.top_frame, text="stats", font=("Verdana", 10))
self.queue_stats.grid(row=0, column=4, sticky="w")
self.center_frame = tk.Frame(self, bg='gray2', pady=1)
self.center_frame.grid(row=1, sticky="sw")
self.center_frame.grid_rowconfigure(0, weight=1)
self.center_frame.grid_columnconfigure(1, weight=1)
#self.center_frame.pack(fill=tk.BOTH, expand=True)
self.left_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.left_frame.grid(row=0, column=0, sticky="se")
self.fig = Figure(figsize=(6,6), dpi=100)
self.a0 = self.fig.add_subplot(211)
self.a0.set_xlabel("cir0")
self.a1 = self.fig.add_subplot(212)
self.a1.set_xlabel("cir1")
self.canvas = FigureCanvasTkAgg(self.fig, self.left_frame)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        self.line_0r, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'r')
        self.line_0i, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'b')
        self.line_0a, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'k', linewidth=2.0)
        self.line_0fp, = self.a0.plot([0,0], [-100,100], 'g--', linewidth=2.0)
        self.line_01fp, = self.a0.plot([5,5], [-100,100], 'b--', linewidth=2.0)
        self.line_1r, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'r')
        self.line_1i, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'b')
        self.line_1a, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'k', linewidth=2.0)
self.line_1fp, = self.a1.plot([0,0], [-100,100], 'g--', linewidth=2.0)
toolbar = NavigationToolbar2TkAgg(self.canvas, self.left_frame)
toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# RSSI
self.middle_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.middle_frame.grid(row=0, column=1, sticky="sw")
self.rssi_fig = Figure(figsize=(2,6), dpi=100)
self.rssi_a0 = self.rssi_fig.add_subplot(211)
self.rssi_a1 = self.rssi_fig.add_subplot(212, sharex=self.rssi_a0)
self.rssi_canvas = FigureCanvasTkAgg(self.rssi_fig, self.middle_frame)
self.rssi_canvas.show()
self.rssi_canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.rssi_stats = tk.Label(self.middle_frame, text="RSSI", font=("Verdana", 10))
self.rssi_stats.pack(pady=10,padx=10)
self.rssi_rssi0, = self.rssi_a0.plot(range(-110,75), len(range(-110,75))*[0], 'k', linewidth=1.0)
# PDOA
self.right_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.right_frame.grid(row=0, column=2, sticky="sw")
self.pdoa_fig = Figure(figsize=(4,6), dpi=100)
self.pdoa_ax = self.pdoa_fig.add_subplot(111)
self.pdoa_canvas = FigureCanvasTkAgg(self.pdoa_fig, self.right_frame)
self.pdoa_canvas.show()
self.pdoa_canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.pdoa_stats = tk.Label(self.right_frame, text="stats", font=("Verdana", 10))
self.pdoa_stats.pack(pady=10,padx=10)
self.resetplot()
self.updateplot(data_queue)
def resetplot(self):
self.pause=0
self.history=[]
def toggle_pd_src(self):
if (self.pdoa_field == 'pd'):
self.pdoa_field = 'adj_pd'
else:
self.pdoa_field = 'pd'
self.drawHistogram(None, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
def pauseplot(self):
self.pause=1
self.tic = time.time()
self.idx = 0
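    # Recompute the phase difference when the two channels report different first-path indices:
    # re-sample the later channel's CIR at the earlier first path, subtract the receiver phases
    # and wrap to [-pi, pi). Falls back to the reported 'pd' when the indices already agree;
    # returns None on malformed records.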
def calc_adjusted_pd(self, d):
try:
cir0 = d['cir0']
cir1 = d['cir1']
# Check if they aleady match
fp_idx0 = float(cir0['fp_idx'])
fp_idx1 = float(cir1['fp_idx'])
if (abs(fp_idx0-fp_idx1) < 0.5):
return d['pd']
# Use the first detected LDE
acc_idx0 = int(np.floor(fp_idx0 + 0.5))
acc_idx1 = int(np.floor(fp_idx1 + 0.5))
if (fp_idx0 < fp_idx1):
# Remap cir0's fp_index into cir1
print("0 fp0:{:.1f}, fp1:{:.1f}, adj: {:d}".format(fp_idx0, fp_idx1, acc_idx0 - acc_idx1))
acc_real = float(cir1['real'][8+acc_idx0-acc_idx1])
acc_imag = float(cir1['imag'][8+acc_idx0-acc_idx1])
angle1 = np.arctan2(acc_imag,acc_real)
rcphase1 = float(cir1['rcphase'])
return np.fmod(float(cir0['angle']) - float(cir0['rcphase']) - (angle1 - rcphase1) + 3*np.pi, 2*np.pi) - np.pi;
else:
# Remap cir1's fp_index into cir0
print("1 fp0:{:.1f}, fp1:{:.1f}, adj: {:d}".format(fp_idx0, fp_idx1, acc_idx0 - acc_idx1))
acc_real = float(cir0['real'][8+acc_idx1-acc_idx0])
acc_imag = float(cir0['imag'][8+acc_idx1-acc_idx0])
angle0 = np.arctan2(acc_imag,acc_real)
rcphase0 = float(cir0['rcphase'])
return np.fmod((angle0 - rcphase0) - (float(cir1['angle']) - float(cir1['rcphase'])) + 3*np.pi, 2*np.pi) - np.pi;
except:
return None
pass
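    # Periodic GUI refresh: pull one record from the queue, attach the adjusted phase difference,
    # append it to the bounded history and redraw the CIR traces plus, at a reduced rate, the
    # RSSI and PDoA histograms; reschedules itself with Tk's after().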
def updateplot(self, q):
if self.pause == 1:
self.parent.after(10,self.updateplot,q)
return
try: #Try to check if there is data in the queue
result=q.get_nowait()
except:
self.parent.after(10,self.updateplot,q)
return
if result !='Q':
self.idx += 1
self.queue_stats['text']="Queue: {:3d} Rate:{:6.2f}".format(q.qsize(), self.idx/(time.time()-self.tic))
result['adj_pd'] = self.calc_adjusted_pd(result)
try:
self.history.append(result)
except:
self.history = [result]
# Limit the size of the history to 10000 values
if (len(self.history) > 10000):
self.history = self.history[-10000:]
div = 1
if (q.qsize()>10):
div = 10
if (self.idx%div==0):
fp_idx0 = 0
try:
cir = result['cir0']
                    self.line_0r.set_xdata(range(len(cir['real'])))
                    self.line_0r.set_ydata([float(x) for x in cir['real']])
                    self.line_0i.set_xdata(range(len(cir['imag'])))
                    self.line_0i.set_ydata([float(x) for x in cir['imag']])
ymin = np.min([float(x) for x in cir['real']])
if (ymin < self.cir_ymin and ymin > -65000):
self.cir_ymin = ymin
self.a0.set_ylim([self.cir_ymin, self.cir_ymax])
self.a1.set_ylim([self.cir_ymin, self.cir_ymax])
mag = [np.sqrt(float(x*x)+float(y*y)) for x,y in zip(cir['real'], cir['imag'])]
ymax = np.max(mag)
if (ymax > self.cir_ymax and ymax < 65000):
self.cir_ymax = ymax
self.a0.set_ylim([self.cir_ymin, self.cir_ymax])
self.a1.set_ylim([self.cir_ymin, self.cir_ymax])
                    self.line_0a.set_xdata(range(len(cir['real'])))
self.line_0a.set_ydata(mag)
try:
fp_idx = float(cir['fp_idx'])
fp_idx0 = fp_idx
rcphase = float(cir['rcphase'])
acc_idx = np.floor(fp_idx + 0.5)
acc_adj = fp_idx - acc_idx
self.line_0fp.set_xdata([8+acc_adj, 8+acc_adj])
self.line_0fp.set_ydata([0, 0.9*self.cir_ymax])
self.a0.set_xlabel("cir0 fp_idx:{:.2f} rcph:{:.1f}".format(fp_idx, rcphase*180.0/np.pi))
except:
pass
self.a0.draw_artist(self.line_0r)
self.a0.draw_artist(self.line_0i)
self.a0.draw_artist(self.line_0a)
self.a0.draw_artist(self.line_0fp)
self.canvas.draw()
except:
pass
try:
cir = result['cir1']
                    self.line_1r.set_xdata(range(len(cir['real'])))
                    self.line_1r.set_ydata([float(x) for x in cir['real']])
                    self.line_1i.set_xdata(range(len(cir['imag'])))
                    self.line_1i.set_ydata([float(x) for x in cir['imag']])
                    self.line_1a.set_xdata(range(len(cir['real'])))
mag = [np.sqrt(float(x*x)+float(y*y)) for x,y in zip(cir['real'], cir['imag'])]
self.line_1a.set_ydata(mag)
try:
fp_idx1 = float(cir['fp_idx'])
acc_idx = np.floor(fp_idx1 + 0.5)
acc_adj = fp_idx1 - acc_idx
self.line_1fp.set_xdata([8+acc_adj, 8+acc_adj])
self.line_1fp.set_ydata([0, 0.9*self.cir_ymax])
acc_adj = fp_idx0 - acc_idx
self.line_01fp.set_xdata([8-acc_adj, 8-acc_adj])
self.line_01fp.set_ydata([0, 0.9*self.cir_ymax])
self.a1.set_xlabel("cir1 fp_idx:{:.2f} rcph:{:.1f}".format(fp_idx1, rcphase*180.0/np.pi))
except:
pass
self.a1.draw_artist(self.line_1r)
self.a1.draw_artist(self.line_1i)
self.a1.draw_artist(self.line_1a)
self.canvas.draw()
except:
pass
if (q.qsize()>10):
if (self.idx%100==0):
self.drawHistogram(result, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
if (self.idx%50==0):
#self.drawHistogramMan(result, self.rssi_stats, self.rssi_rssi0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a1, self.rssi_canvas, field='rssi1', max_hist=200)
self.parent.after(0, self.updateplot, q)
else:
if (self.idx%50==0):
self.drawHistogram(result, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
if (self.idx%10==0):
self.drawHistogram(result, self.rssi_stats, self.rssi_a0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a1, self.rssi_canvas, field='rssi1', max_hist=200)
self.parent.after(10, self.updateplot, q)
else:
print('done')
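    # Drop samples more than m standard deviations away from the mean.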
def pdoa_filter(self, data, m=2):
a=np.array(data)
a=a[abs(a - np.mean(a)) < m * np.std(a)]
return a
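    # Histogram one field over the last `max_hist` history entries; phase fields ('pd'/'adj_pd')
    # are converted from radians to degrees and mean/stddev are written to the stats label.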
def drawHistogram(self, d, stats_label, fig_axis, fig_canvas, field='pd', max_hist=None):
n_bins = 64
filter_m = 0
pdata = []
if max_hist == None:
h=self.history
else:
h=self.history[-max_hist:]
for x in h:
if x == None: continue
try:
if (field=='pd' or field=='adj_pd'):
pdata.append(float(x[field])*180.0/np.pi)
else:
pdata.append(float(x[field]))
except:
pass
if (filter_m):
pdata = self.pdoa_filter(pdata, m=filter_m)
if len(pdata) < 10: return
stats = "Hist({}):{:04d} average: {:.3f} stddev: {:.3f}".format(field, len(pdata), np.mean(pdata), np.std(pdata))
print(stats)
stats_label['text']=stats
fig_axis.cla()
fig_axis.set_xlabel(field)
fig_axis.hist(pdata, bins='auto', normed=0, rwidth=0.85)
fig_canvas.draw()
# self.pdoa_ax.title("Pdoa " + stats)
def drawHistogramMan(self, n, stats_label, fig_axis, fig_canvas, field='pd', max_hist=None):
filter_m = 0
# Add new data (last n)
for x in self.history[-n:]:
try:
self.hist_data[field]['y'][int(x[field])] += 1
except:
                self.hist_data[field]['y'][int(x[field])] = 1  # first sample seen for this bin
pdata = []
if max_hist == None:
h=self.history
else:
h=self.history[-max_hist:]
for x in h:
try:
if (field=='pd' or field=='adj_pd'):
pdata.append(float(x[field])*180.0/np.pi)
else:
pdata.append(float(x[field]))
except:
pass
if (filter_m):
pdata = self.pdoa_filter(pdata, m=filter_m)
if len(pdata) < 10: return
stats = "Hist({}):{:04d} average: {:.3f} stddev: {:.3f}".format(field, len(pdata), np.mean(pdata), np.std(pdata))
print(stats)
stats_label['text']=stats
fig_axis.cla()
fig_axis.set_xlabel(field)
fig_axis.hist(pdata, bins='auto', normed=0, rwidth=0.85)
fig_canvas.draw()
# self.pdoa_ax.title("Pdoa " + stats)
class ListenerData:
def __init__(self, socket_s=None, serial_s=None):
self.tcp_s = socket_s;
self.serial_s = serial_s;
def readlines_socket(self, sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.recv(recv_buffer)
            buffer += data.decode('utf-8', errors='ignore')  # recv() returns bytes on Python 3
while buffer.find(delim) != -1:
line, buffer = buffer.split('\n', 1)
yield line
return
def readlines_serial(self, sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.read(recv_buffer)
            buffer += data.decode('utf-8', errors='ignore')  # serial read() returns bytes on Python 3
while buffer.find(delim) != -1:
line, buffer = buffer.split('\n', 1)
yield line
return
def readlines(self):
if self.tcp_s != None:
return self.readlines_socket(self.tcp_s)
else:
return self.readlines_serial(self.serial_s)
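# Runs in a separate process: parses one JSON record per input line, pushes it onto the shared
# queue (records are dropped while the queue holds 100 or more) and posts 'Q' when the feed ends.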
def input_thread(q, data=None):
tic = time.time()
if not data: print("No input data")
for line in data.readlines():
try:
d=json.loads(line)
if (q.qsize()<100):
q.put(d)
except:
continue
# print(d)
q.put('Q')
if __name__ == '__main__':
TCP_IP = '127.0.0.1'
TCP_PORT = 19021
BUFFER_SIZE = 1024
parser = argparse.ArgumentParser()
parser.add_argument('connections', metavar='connection[s]', nargs='+', help='serial device or tcp-port (for rtt)')
parser.add_argument('-b',metavar='baudrate', type=int,dest='baudrate',default=460800,help='baudrate')
args = parser.parse_args()
if (len(args.connections)<1):
print("Defaulting to RTT using port 19021")
args.conns += "19021"
#exit(2)
indata = None
for conn in args.connections:
# Check for tcp port
try:
c = conn.split(':')
ip = c[0]
port = int(c[1])
print("TCP connection to {:s} port {:d}".format(ip,port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
indata = ListenerData(socket_s=s)
# run(socket_s=s, doblit=False)
break;
except ValueError:
# Fallback to trying as a serial port
try:
s = serial.Serial(conn,baudrate=args.baudrate, timeout=1)
print('Serial port opened:' + s.name)
s.flush()
indata = ListenerData(serial_s=s)
# run(socket_s=s, doblit=False)
break;
# run(serial_s=s, doblit=False)
except serial.serialutil.SerialException:
                print('Could not open ' + conn)
exit(2)
if (not indata): exit(2)
app = PdoaApp(data_feed=indata)
app.mainloop() | apps/listener/scripts/listener_pdoa.py |
import sys, argparse
import socket
import serial
import json
import struct
import numpy as np
import time
import multiprocessing
import matplotlib
matplotlib.use('GTKAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
style.use('ggplot')
import tkinter as tk
from tkinter import ttk
LARGE_FONT= ("Verdana", 12)
class PdoaApp(tk.Tk):
def __init__(self, data_feed=None):
tk.Tk.__init__(self) #, *args, **kwargs
#tk.Tk.iconbitmap(self, default="clienticon.ico")
tk.Tk.wm_title(self, "Pdoa visualisation")
#Create a queue to share data between process
q = multiprocessing.Queue()
#Create and start the datafeed process
self.input_data = multiprocessing.Process(None, input_thread, args=(q, data_feed,))
self.input_data.start()
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
frame = CirPlots(container, self, data_queue=q)
frame.grid(row=0, column=0, sticky="nsew")
frame.tkraise()
def close(self):
print("quitting")
self.input_data.terminate()
self.destroy()
exit(0)
class CirPlots(tk.Frame):
def __init__(self, parent, controller, data_queue=None):
self.idx = 0
self.cir_ymin = -1000
self.cir_ymax = 1000
self.tic = time.time()
self.pause = 0
self.pdoa_field = 'pd'
self.parent = parent;
tk.Frame.__init__(self, parent)
#
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.top_frame = tk.Frame(self, bg='cyan', pady=3)
self.top_frame.grid(row=0, sticky="ew")
self.top_frame.grid_rowconfigure(0, weight=1)
self.top_frame.grid_columnconfigure(1, weight=1)
quit_btn = tk.Button(self.top_frame, text="QUIT", fg="red",
command=controller.close)
quit_btn.grid(row=0, column=0)
reset_btn = tk.Button(self.top_frame, text="RESET", fg="black",
command=lambda: self.resetplot())
reset_btn.grid(row=0, column=1)
pd_toggle_btn = tk.Button(self.top_frame, text="PDSRC", fg="black",
command=lambda: self.toggle_pd_src())
pd_toggle_btn.grid(row=0, column=2)
pause_btn = tk.Button(self.top_frame, text="PAUSE", fg="black",
command=lambda: self.pauseplot())
pause_btn.grid(row=0, column=3)
self.queue_stats = tk.Label(self.top_frame, text="stats", font=("Verdana", 10))
self.queue_stats.grid(row=0, column=4, sticky="w")
self.center_frame = tk.Frame(self, bg='gray2', pady=1)
self.center_frame.grid(row=1, sticky="sw")
self.center_frame.grid_rowconfigure(0, weight=1)
self.center_frame.grid_columnconfigure(1, weight=1)
#self.center_frame.pack(fill=tk.BOTH, expand=True)
self.left_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.left_frame.grid(row=0, column=0, sticky="se")
self.fig = Figure(figsize=(6,6), dpi=100)
self.a0 = self.fig.add_subplot(211)
self.a0.set_xlabel("cir0")
self.a1 = self.fig.add_subplot(212)
self.a1.set_xlabel("cir1")
self.canvas = FigureCanvasTkAgg(self.fig, self.left_frame)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        self.line_0r, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'r')
        self.line_0i, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'b')
        self.line_0a, = self.a0.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'k', linewidth=2.0)
        self.line_0fp, = self.a0.plot([0,0], [-100,100], 'g--', linewidth=2.0)
        self.line_01fp, = self.a0.plot([5,5], [-100,100], 'b--', linewidth=2.0)
        self.line_1r, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'r')
        self.line_1i, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'b')
        self.line_1a, = self.a1.plot(range(16), 8*[self.cir_ymin, self.cir_ymax], 'k', linewidth=2.0)
self.line_1fp, = self.a1.plot([0,0], [-100,100], 'g--', linewidth=2.0)
toolbar = NavigationToolbar2TkAgg(self.canvas, self.left_frame)
toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# RSSI
self.middle_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.middle_frame.grid(row=0, column=1, sticky="sw")
self.rssi_fig = Figure(figsize=(2,6), dpi=100)
self.rssi_a0 = self.rssi_fig.add_subplot(211)
self.rssi_a1 = self.rssi_fig.add_subplot(212, sharex=self.rssi_a0)
self.rssi_canvas = FigureCanvasTkAgg(self.rssi_fig, self.middle_frame)
self.rssi_canvas.show()
self.rssi_canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.rssi_stats = tk.Label(self.middle_frame, text="RSSI", font=("Verdana", 10))
self.rssi_stats.pack(pady=10,padx=10)
self.rssi_rssi0, = self.rssi_a0.plot(range(-110,75), len(range(-110,75))*[0], 'k', linewidth=1.0)
# PDOA
self.right_frame = tk.Frame(self.center_frame, bg='white', pady=3)
self.right_frame.grid(row=0, column=2, sticky="sw")
self.pdoa_fig = Figure(figsize=(4,6), dpi=100)
self.pdoa_ax = self.pdoa_fig.add_subplot(111)
self.pdoa_canvas = FigureCanvasTkAgg(self.pdoa_fig, self.right_frame)
self.pdoa_canvas.show()
self.pdoa_canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.pdoa_stats = tk.Label(self.right_frame, text="stats", font=("Verdana", 10))
self.pdoa_stats.pack(pady=10,padx=10)
self.resetplot()
self.updateplot(data_queue)
def resetplot(self):
self.pause=0
self.history=[]
def toggle_pd_src(self):
if (self.pdoa_field == 'pd'):
self.pdoa_field = 'adj_pd'
else:
self.pdoa_field = 'pd'
self.drawHistogram(None, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
def pauseplot(self):
self.pause=1
self.tic = time.time()
self.idx = 0
def calc_adjusted_pd(self, d):
try:
cir0 = d['cir0']
cir1 = d['cir1']
# Check if they aleady match
fp_idx0 = float(cir0['fp_idx'])
fp_idx1 = float(cir1['fp_idx'])
if (abs(fp_idx0-fp_idx1) < 0.5):
return d['pd']
# Use the first detected LDE
acc_idx0 = int(np.floor(fp_idx0 + 0.5))
acc_idx1 = int(np.floor(fp_idx1 + 0.5))
if (fp_idx0 < fp_idx1):
# Remap cir0's fp_index into cir1
print("0 fp0:{:.1f}, fp1:{:.1f}, adj: {:d}".format(fp_idx0, fp_idx1, acc_idx0 - acc_idx1))
acc_real = float(cir1['real'][8+acc_idx0-acc_idx1])
acc_imag = float(cir1['imag'][8+acc_idx0-acc_idx1])
angle1 = np.arctan2(acc_imag,acc_real)
rcphase1 = float(cir1['rcphase'])
return np.fmod(float(cir0['angle']) - float(cir0['rcphase']) - (angle1 - rcphase1) + 3*np.pi, 2*np.pi) - np.pi;
else:
# Remap cir1's fp_index into cir0
print("1 fp0:{:.1f}, fp1:{:.1f}, adj: {:d}".format(fp_idx0, fp_idx1, acc_idx0 - acc_idx1))
acc_real = float(cir0['real'][8+acc_idx1-acc_idx0])
acc_imag = float(cir0['imag'][8+acc_idx1-acc_idx0])
angle0 = np.arctan2(acc_imag,acc_real)
rcphase0 = float(cir0['rcphase'])
return np.fmod((angle0 - rcphase0) - (float(cir1['angle']) - float(cir1['rcphase'])) + 3*np.pi, 2*np.pi) - np.pi;
except:
return None
pass
def updateplot(self, q):
if self.pause == 1:
self.parent.after(10,self.updateplot,q)
return
try: #Try to check if there is data in the queue
result=q.get_nowait()
except:
self.parent.after(10,self.updateplot,q)
return
if result !='Q':
self.idx += 1
self.queue_stats['text']="Queue: {:3d} Rate:{:6.2f}".format(q.qsize(), self.idx/(time.time()-self.tic))
result['adj_pd'] = self.calc_adjusted_pd(result)
try:
self.history.append(result)
except:
self.history = [result]
# Limit the size of the history to 10000 values
if (len(self.history) > 10000):
self.history = self.history[-10000:]
div = 1
if (q.qsize()>10):
div = 10
if (self.idx%div==0):
fp_idx0 = 0
try:
cir = result['cir0']
                    self.line_0r.set_xdata(range(len(cir['real'])))
                    self.line_0r.set_ydata([float(x) for x in cir['real']])
                    self.line_0i.set_xdata(range(len(cir['imag'])))
                    self.line_0i.set_ydata([float(x) for x in cir['imag']])
ymin = np.min([float(x) for x in cir['real']])
if (ymin < self.cir_ymin and ymin > -65000):
self.cir_ymin = ymin
self.a0.set_ylim([self.cir_ymin, self.cir_ymax])
self.a1.set_ylim([self.cir_ymin, self.cir_ymax])
mag = [np.sqrt(float(x*x)+float(y*y)) for x,y in zip(cir['real'], cir['imag'])]
ymax = np.max(mag)
if (ymax > self.cir_ymax and ymax < 65000):
self.cir_ymax = ymax
self.a0.set_ylim([self.cir_ymin, self.cir_ymax])
self.a1.set_ylim([self.cir_ymin, self.cir_ymax])
                    self.line_0a.set_xdata(range(len(cir['real'])))
self.line_0a.set_ydata(mag)
try:
fp_idx = float(cir['fp_idx'])
fp_idx0 = fp_idx
rcphase = float(cir['rcphase'])
acc_idx = np.floor(fp_idx + 0.5)
acc_adj = fp_idx - acc_idx
self.line_0fp.set_xdata([8+acc_adj, 8+acc_adj])
self.line_0fp.set_ydata([0, 0.9*self.cir_ymax])
self.a0.set_xlabel("cir0 fp_idx:{:.2f} rcph:{:.1f}".format(fp_idx, rcphase*180.0/np.pi))
except:
pass
self.a0.draw_artist(self.line_0r)
self.a0.draw_artist(self.line_0i)
self.a0.draw_artist(self.line_0a)
self.a0.draw_artist(self.line_0fp)
self.canvas.draw()
except:
pass
try:
cir = result['cir1']
                    self.line_1r.set_xdata(range(len(cir['real'])))
                    self.line_1r.set_ydata([float(x) for x in cir['real']])
                    self.line_1i.set_xdata(range(len(cir['imag'])))
                    self.line_1i.set_ydata([float(x) for x in cir['imag']])
                    self.line_1a.set_xdata(range(len(cir['real'])))
mag = [np.sqrt(float(x*x)+float(y*y)) for x,y in zip(cir['real'], cir['imag'])]
self.line_1a.set_ydata(mag)
try:
fp_idx1 = float(cir['fp_idx'])
acc_idx = np.floor(fp_idx1 + 0.5)
acc_adj = fp_idx1 - acc_idx
self.line_1fp.set_xdata([8+acc_adj, 8+acc_adj])
self.line_1fp.set_ydata([0, 0.9*self.cir_ymax])
acc_adj = fp_idx0 - acc_idx
self.line_01fp.set_xdata([8-acc_adj, 8-acc_adj])
self.line_01fp.set_ydata([0, 0.9*self.cir_ymax])
self.a1.set_xlabel("cir1 fp_idx:{:.2f} rcph:{:.1f}".format(fp_idx1, rcphase*180.0/np.pi))
except:
pass
self.a1.draw_artist(self.line_1r)
self.a1.draw_artist(self.line_1i)
self.a1.draw_artist(self.line_1a)
self.canvas.draw()
except:
pass
if (q.qsize()>10):
if (self.idx%100==0):
self.drawHistogram(result, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
if (self.idx%50==0):
#self.drawHistogramMan(result, self.rssi_stats, self.rssi_rssi0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a1, self.rssi_canvas, field='rssi1', max_hist=200)
self.parent.after(0, self.updateplot, q)
else:
if (self.idx%50==0):
self.drawHistogram(result, self.pdoa_stats, self.pdoa_ax, self.pdoa_canvas, field=self.pdoa_field, max_hist=500)
if (self.idx%10==0):
self.drawHistogram(result, self.rssi_stats, self.rssi_a0, self.rssi_canvas, field='rssi0', max_hist=200)
self.drawHistogram(result, self.rssi_stats, self.rssi_a1, self.rssi_canvas, field='rssi1', max_hist=200)
self.parent.after(10, self.updateplot, q)
else:
print('done')
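    # NOTE (illustrative commentary): pdoa_filter() below is a plain m-sigma
    # clip: samples farther than m standard deviations from the mean are
    # discarded.  For example, for nine samples of 0 and one of 100 the mean
    # is 10 and the population std is 30, so with m=2 the single 100 (90 away
    # from the mean) is dropped while the zeros are kept.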
def pdoa_filter(self, data, m=2):
a=np.array(data)
a=a[abs(a - np.mean(a)) < m * np.std(a)]
return a
def drawHistogram(self, d, stats_label, fig_axis, fig_canvas, field='pd', max_hist=None):
n_bins = 64
filter_m = 0
pdata = []
        if max_hist is None:
h=self.history
else:
h=self.history[-max_hist:]
for x in h:
            if x is None: continue
try:
if (field=='pd' or field=='adj_pd'):
pdata.append(float(x[field])*180.0/np.pi)
else:
pdata.append(float(x[field]))
except:
pass
if (filter_m):
pdata = self.pdoa_filter(pdata, m=filter_m)
if len(pdata) < 10: return
stats = "Hist({}):{:04d} average: {:.3f} stddev: {:.3f}".format(field, len(pdata), np.mean(pdata), np.std(pdata))
print(stats)
stats_label['text']=stats
fig_axis.cla()
fig_axis.set_xlabel(field)
        fig_axis.hist(pdata, bins='auto', density=False, rwidth=0.85)  # raw counts, not a normalized density
fig_canvas.draw()
# self.pdoa_ax.title("Pdoa " + stats)
def drawHistogramMan(self, n, stats_label, fig_axis, fig_canvas, field='pd', max_hist=None):
filter_m = 0
# Add new data (last n)
for x in self.history[-n:]:
try:
self.hist_data[field]['y'][int(x[field])] += 1
except:
                self.hist_data[field]['y'][int(x[field])] = 1  # first occurrence of this bin
pdata = []
        if max_hist is None:
h=self.history
else:
h=self.history[-max_hist:]
for x in h:
try:
if (field=='pd' or field=='adj_pd'):
pdata.append(float(x[field])*180.0/np.pi)
else:
pdata.append(float(x[field]))
except:
pass
if (filter_m):
pdata = self.pdoa_filter(pdata, m=filter_m)
if len(pdata) < 10: return
stats = "Hist({}):{:04d} average: {:.3f} stddev: {:.3f}".format(field, len(pdata), np.mean(pdata), np.std(pdata))
print(stats)
stats_label['text']=stats
fig_axis.cla()
fig_axis.set_xlabel(field)
        fig_axis.hist(pdata, bins='auto', density=False, rwidth=0.85)  # raw counts, not a normalized density
fig_canvas.draw()
# self.pdoa_ax.title("Pdoa " + stats)
class ListenerData:
def __init__(self, socket_s=None, serial_s=None):
        self.tcp_s = socket_s
        self.serial_s = serial_s
def readlines_socket(self, sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.recv(recv_buffer)
buffer += data
while buffer.find(delim) != -1:
                line, buffer = buffer.split(delim, 1)
yield line
return
def readlines_serial(self, sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.read(recv_buffer)
buffer += data
while buffer.find(delim) != -1:
                line, buffer = buffer.split(delim, 1)
yield line
return
def readlines(self):
        if self.tcp_s is not None:
return self.readlines_socket(self.tcp_s)
else:
return self.readlines_serial(self.serial_s)
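# NOTE (illustrative commentary): both readlines_* generators above use the
# same newline-delimited framing: raw chunks from recv()/read() are appended
# to a buffer, and complete lines are yielded as soon as the delimiter shows
# up, so a JSON record split across two reads still reaches the consumer as a
# single line.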
def input_thread(q, data=None):
tic = time.time()
    if not data:
        print("No input data")
        return
for line in data.readlines():
try:
d=json.loads(line)
if (q.qsize()<100):
q.put(d)
except:
continue
# print(d)
q.put('Q')
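# Illustrative sketch (assumption: not part of the original tool): the
# uncalled helper below exercises input_thread() with an in-memory feed to
# show the producer contract -- one JSON object per line is parsed onto the
# queue, malformed lines are skipped, and a final 'Q' sentinel marks the end
# of the feed.  FakeFeed is a hypothetical stand-in for ListenerData.
def _demo_input_thread(q):
    class FakeFeed:
        def readlines(self):
            return iter(['{"rssi0": -80}', 'not json', '{"rssi0": -79}'])
    # two dicts reach the queue, then the 'Q' sentinel
    input_thread(q, data=FakeFeed())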
if __name__ == '__main__':
TCP_IP = '127.0.0.1'
TCP_PORT = 19021
BUFFER_SIZE = 1024
parser = argparse.ArgumentParser()
parser.add_argument('connections', metavar='connection[s]', nargs='+', help='serial device or tcp-port (for rtt)')
parser.add_argument('-b',metavar='baudrate', type=int,dest='baudrate',default=460800,help='baudrate')
args = parser.parse_args()
if (len(args.connections)<1):
print("Defaulting to RTT using port 19021")
        args.connections.append("19021")
#exit(2)
indata = None
for conn in args.connections:
# Check for tcp port
try:
c = conn.split(':')
ip = c[0]
port = int(c[1])
print("TCP connection to {:s} port {:d}".format(ip,port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
indata = ListenerData(socket_s=s)
# run(socket_s=s, doblit=False)
            break
except ValueError:
# Fallback to trying as a serial port
try:
s = serial.Serial(conn,baudrate=args.baudrate, timeout=1)
print('Serial port opened:' + s.name)
s.flush()
indata = ListenerData(serial_s=s)
# run(socket_s=s, doblit=False)
                break
# run(serial_s=s, doblit=False)
except serial.serialutil.SerialException:
                print('Could not open ' + conn)
exit(2)
if (not indata): exit(2)
app = PdoaApp(data_feed=indata)
app.mainloop() | 0.373533 | 0.15662 |
import shutil
import ipywidgets
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
from ipywidgets import Button
from PIL import Image
from typing import List, Optional
from cocpit.auto_str import auto_str
import cocpit
plt_params = {
"axes.labelsize": "xx-large",
"axes.titlesize": "xx-large",
"xtick.labelsize": "xx-large",
"ytick.labelsize": "xx-large",
"legend.title_fontsize": 12,
}
plt.rcParams["font.family"] = "serif"
plt.rcParams.update(plt_params)
@auto_str
class GUI:
"""
- ipywidget buttons to label incorrect predictions from a dataloader.
- The dataloader, model, and all class variables are initialized in notebooks/move_wrong_predictions.ipynb
Args:
wrong_trunc (List[int]): indices where the model predictions are wrong
labels (np.ndarray[int]): image labels
paths (np.ndarray[str]): image paths
        topk_probs (np.ndarray[float]): top predicted probabilities
        topk_classes (np.ndarray[int]): classes related to the top predicted probabilities
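    Example (illustrative only; the arrays below are hypothetical):
        gui = GUI(wrong_trunc=[0, 3], labels=[2, 0, 1, 2],
                  paths=["a.png", "b.png", "c.png", "d.png"],
                  topk_probs=[[0.7, 0.2], [0.6, 0.3], [0.9, 0.1], [0.5, 0.4]],
                  topk_classes=[[1, 2], [0, 2], [1, 0], [0, 1]])
        gui.make_buttons()
        gui.visualizations()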
"""
def __init__(
self,
wrong_trunc: List[int],
labels: np.ndarray,
paths: np.ndarray,
topk_probs: np.ndarray,
topk_classes: np.ndarray,
):
self.index = 0
self.labels = np.array(labels)[wrong_trunc]
self.paths = np.array(paths)[wrong_trunc]
self.topk_probs = np.array(topk_probs)[wrong_trunc]
self.topk_classes = np.array(topk_classes)[wrong_trunc]
self.label = np.array(self.labels)[self.index]
self.next_btn = Button(
description="Next",
style=dict(
font_style="italic",
font_weight="bold",
font_variant="small-caps",
),
)
self.buttons = []
self.count = 0 # number of moved images
self.center = ipywidgets.Output() # center image with predictions
def open_image(self) -> Optional[Image.Image]:
"""
Open an image from a path at a given index
Returns:
Union[Image.Image, None]: opened PIL image or None if no image is opened
Raises:
FileNotFoundError: File already moved and cannot be opened
"""
try:
return Image.open(self.paths[self.index])
except FileNotFoundError:
print("This file cannot be found.")
def make_buttons(self) -> None:
"""Make buttons for each category"""
for idx, label in enumerate(cocpit.config.CLASS_NAMES):
self.buttons.append(
Button(
description=label,
)
)
self.buttons[idx].on_click(self.save_image)
self.next_btn.on_click(self.on_button_next)
def on_button_next(self, b) -> None:
"""
When the next button is clicked, make a new image and bar chart appear
by updating the index within the wrong predictions by 1
"""
self.index = self.index + 1
self.visualizations()
def align_buttons(self):
"""
Alter layout based on # of classes
"""
with self.center:
if len(cocpit.config.CLASS_NAMES) > 5:
# align buttons vertically
self.label_btns = ipywidgets.VBox(
[self.buttons[i] for i in range(len(cocpit.config.CLASS_NAMES))]
)
else:
# align buttons horizontally
self.label_btns = ipywidgets.HBox(
[self.buttons[i] for i in range(len(cocpit.config.CLASS_NAMES))],
)
def init_fig(self, image: Image.Image, ax1: plt.Axes) -> None:
"""
Display the raw image
Args:
image (Image.Image): opened image
ax1 (plt.Axes): subplot axis
"""
clear_output() # so that the next fig doesnt display below
ax1.imshow(image, aspect="auto")
ax1.set_title(
f"Human Labeled as: {cocpit.config.CLASS_NAMES[self.labels[self.index]]}\n"
f"Model Labeled as: {[cocpit.config.CLASS_NAMES[e] for e in self.topk_classes[self.index]][0]}\n"
)
ax1.axis("off")
def bar_chart(self, ax2) -> None:
"""
Create barchart that outputs top k predictions for a given image
Args:
ax2 (plt.Axes): subplot axis
"""
y_pos = np.arange(len(self.topk_probs[self.index]))
ax2.barh(y_pos, self.topk_probs[self.index])
ax2.set_yticks(y_pos)
ax2.set_yticklabels(
[cocpit.config.CLASS_NAMES[e] for e in self.topk_classes[self.index]]
)
ax2.tick_params(axis="y", rotation=45)
ax2.invert_yaxis() # labels read top-to-bottom
ax2.set_title("Class Probability")
def plot_saliency(self, image: Image.Image, ax2: plt.Axes, size: int = 224) -> None:
"""Create saliency map for image in test dataset
Args:
image (PIL.Image.Image): opened image
ax2 (plt.Axes): subplot axis
size (int): image size for transformation
"""
image = cocpit.plotting_scripts.saliency.preprocess(image.convert("RGB"), size)
saliency, _, _ = cocpit.plotting_scripts.saliency.get_saliency(image)
ax2.imshow(saliency[0], cmap=plt.cm.hot, aspect="auto")
ax2.axes.xaxis.set_ticks([])
ax2.axes.yaxis.set_ticks([])
def save_image(self, b) -> None:
"""
Move the image based on dropdown selection
Args:
b: button instance
"""
filename = self.paths[self.index].split("/")[-1]
try:
shutil.move(
f"{cocpit.config.DATA_DIR}{cocpit.config.CLASS_NAME_MAP[cocpit.config.CLASS_NAMES[self.all_labels[self.index]]]}/{filename}",
f"{cocpit.config.DATA_DIR}{cocpit.config.CLASS_NAME_MAP[b.description]}/{filename}",
)
self.count += 1
print(f"moved {self.count} images")
except FileNotFoundError:
print(self.paths[self.index])
print("File not found or directory does not exist. Not moving.")
def visualizations(self) -> None:
"""
Use the human and model labels and classes to
create a bar chart with the top k predictions
from the image at the current index
"""
# add chart to ipywidgets.Output()
with self.center:
if self.index == len(self.topk_probs):
print("You have completed looking at all incorrect predictions!")
return
else:
image = self.open_image()
_, (ax1, ax2, ax3) = plt.subplots(
constrained_layout=True, figsize=(19, 5), ncols=3, nrows=1
)
if image:
self.init_fig(image, ax1)
self.plot_saliency(image, ax2)
self.bar_chart(ax3)
plt.show()
# fig.savefig(f"/ai2es/plots/wrong_preds{self.index}.pdf") | cocpit/gui_wrong.py | import shutil
import ipywidgets
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
from ipywidgets import Button
from PIL import Image
from typing import List, Optional
from cocpit.auto_str import auto_str
import cocpit
plt_params = {
"axes.labelsize": "xx-large",
"axes.titlesize": "xx-large",
"xtick.labelsize": "xx-large",
"ytick.labelsize": "xx-large",
"legend.title_fontsize": 12,
}
plt.rcParams["font.family"] = "serif"
plt.rcParams.update(plt_params)
@auto_str
class GUI:
"""
- ipywidget buttons to label incorrect predictions from a dataloader.
- The dataloader, model, and all class variables are initialized in notebooks/move_wrong_predictions.ipynb
Args:
wrong_trunc (List[int]): indices where the model predictions are wrong
labels (np.ndarray[int]): image labels
paths (np.ndarray[str]): image paths
        topk_probs (np.ndarray[float]): top predicted probabilities
        topk_classes (np.ndarray[int]): classes related to the top predicted probabilities
"""
def __init__(
self,
wrong_trunc: List[int],
labels: np.ndarray,
paths: np.ndarray,
topk_probs: np.ndarray,
topk_classes: np.ndarray,
):
self.index = 0
self.labels = np.array(labels)[wrong_trunc]
self.paths = np.array(paths)[wrong_trunc]
self.topk_probs = np.array(topk_probs)[wrong_trunc]
self.topk_classes = np.array(topk_classes)[wrong_trunc]
self.label = np.array(self.labels)[self.index]
self.next_btn = Button(
description="Next",
style=dict(
font_style="italic",
font_weight="bold",
font_variant="small-caps",
),
)
self.buttons = []
self.count = 0 # number of moved images
self.center = ipywidgets.Output() # center image with predictions
def open_image(self) -> Optional[Image.Image]:
"""
Open an image from a path at a given index
Returns:
Union[Image.Image, None]: opened PIL image or None if no image is opened
Raises:
FileNotFoundError: File already moved and cannot be opened
"""
try:
return Image.open(self.paths[self.index])
except FileNotFoundError:
print("This file cannot be found.")
def make_buttons(self) -> None:
"""Make buttons for each category"""
for idx, label in enumerate(cocpit.config.CLASS_NAMES):
self.buttons.append(
Button(
description=label,
)
)
self.buttons[idx].on_click(self.save_image)
self.next_btn.on_click(self.on_button_next)
def on_button_next(self, b) -> None:
"""
When the next button is clicked, make a new image and bar chart appear
by updating the index within the wrong predictions by 1
"""
self.index = self.index + 1
self.visualizations()
def align_buttons(self):
"""
Alter layout based on # of classes
"""
with self.center:
if len(cocpit.config.CLASS_NAMES) > 5:
# align buttons vertically
self.label_btns = ipywidgets.VBox(
[self.buttons[i] for i in range(len(cocpit.config.CLASS_NAMES))]
)
else:
# align buttons horizontally
self.label_btns = ipywidgets.HBox(
[self.buttons[i] for i in range(len(cocpit.config.CLASS_NAMES))],
)
def init_fig(self, image: Image.Image, ax1: plt.Axes) -> None:
"""
Display the raw image
Args:
image (Image.Image): opened image
ax1 (plt.Axes): subplot axis
"""
clear_output() # so that the next fig doesnt display below
ax1.imshow(image, aspect="auto")
ax1.set_title(
f"Human Labeled as: {cocpit.config.CLASS_NAMES[self.labels[self.index]]}\n"
f"Model Labeled as: {[cocpit.config.CLASS_NAMES[e] for e in self.topk_classes[self.index]][0]}\n"
)
ax1.axis("off")
def bar_chart(self, ax2) -> None:
"""
Create barchart that outputs top k predictions for a given image
Args:
ax2 (plt.Axes): subplot axis
"""
y_pos = np.arange(len(self.topk_probs[self.index]))
ax2.barh(y_pos, self.topk_probs[self.index])
ax2.set_yticks(y_pos)
ax2.set_yticklabels(
[cocpit.config.CLASS_NAMES[e] for e in self.topk_classes[self.index]]
)
ax2.tick_params(axis="y", rotation=45)
ax2.invert_yaxis() # labels read top-to-bottom
ax2.set_title("Class Probability")
def plot_saliency(self, image: Image.Image, ax2: plt.Axes, size: int = 224) -> None:
"""Create saliency map for image in test dataset
Args:
image (PIL.Image.Image): opened image
ax2 (plt.Axes): subplot axis
size (int): image size for transformation
"""
image = cocpit.plotting_scripts.saliency.preprocess(image.convert("RGB"), size)
saliency, _, _ = cocpit.plotting_scripts.saliency.get_saliency(image)
ax2.imshow(saliency[0], cmap=plt.cm.hot, aspect="auto")
ax2.axes.xaxis.set_ticks([])
ax2.axes.yaxis.set_ticks([])
def save_image(self, b) -> None:
"""
Move the image based on dropdown selection
Args:
b: button instance
"""
filename = self.paths[self.index].split("/")[-1]
try:
shutil.move(
f"{cocpit.config.DATA_DIR}{cocpit.config.CLASS_NAME_MAP[cocpit.config.CLASS_NAMES[self.all_labels[self.index]]]}/{filename}",
f"{cocpit.config.DATA_DIR}{cocpit.config.CLASS_NAME_MAP[b.description]}/{filename}",
)
self.count += 1
print(f"moved {self.count} images")
except FileNotFoundError:
print(self.paths[self.index])
print("File not found or directory does not exist. Not moving.")
def visualizations(self) -> None:
"""
Use the human and model labels and classes to
create a bar chart with the top k predictions
from the image at the current index
"""
# add chart to ipywidgets.Output()
with self.center:
if self.index == len(self.topk_probs):
print("You have completed looking at all incorrect predictions!")
return
else:
image = self.open_image()
_, (ax1, ax2, ax3) = plt.subplots(
constrained_layout=True, figsize=(19, 5), ncols=3, nrows=1
)
if image:
self.init_fig(image, ax1)
self.plot_saliency(image, ax2)
self.bar_chart(ax3)
plt.show()
# fig.savefig(f"/ai2es/plots/wrong_preds{self.index}.pdf") | 0.810104 | 0.517937 |
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoderModel,
FairseqEncoderDecoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import S2TTransformerEncoder
from fairseq.models.speech_to_speech.modules import CTCDecoder, StackedEmbedding
from fairseq.models.text_to_speech import TTSTransformerDecoder
from fairseq.models.transformer import (
Linear,
TransformerDecoder,
TransformerModelBase,
)
logger = logging.getLogger(__name__)
class S2STransformerEncoder(S2TTransformerEncoder):
"""Based on S2T transformer encoder, with support
to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
self.spk_emb_proj = None
if args.target_speaker_embed:
self.spk_emb_proj = Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
def forward(
self, src_tokens, src_lengths, tgt_speaker=None, return_all_hiddens=False
):
out = super().forward(src_tokens, src_lengths, return_all_hiddens)
if self.spk_emb_proj:
x = out["encoder_out"][0]
seq_len, bsz, _ = x.size()
tgt_speaker_emb = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, tgt_speaker_emb], dim=2))
out["encoder_out"][0] = x
return out
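# Illustrative sketch (not part of fairseq): when a target-speaker embedding
# is configured, S2STransformerEncoder broadcasts the speaker vector over all
# encoder time steps, concatenates it onto the encoder output and projects the
# result back to the encoder width.  The uncalled helper below only walks
# through those tensor shapes with dummy values.
def _speaker_fusion_shape_sketch(t=4, b=2, c_enc=8, c_spk=3):
    x = torch.zeros(t, b, c_enc)                       # encoder_out: (T, B, C_enc)
    spk = torch.zeros(b, c_spk).view(1, b, -1).expand(t, b, -1)
    proj = Linear(c_enc + c_spk, c_enc)                # same shape as spk_emb_proj
    return proj(torch.cat([x, spk], dim=2))            # back to (T, B, C_enc)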
class TransformerUnitDecoder(TransformerDecoder):
"""Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn, output_projection
)
self.n_frames_per_step = args.n_frames_per_step
self.out_proj_n_frames = (
Linear(
self.output_embed_dim,
self.output_embed_dim * self.n_frames_per_step,
bias=False,
)
if self.n_frames_per_step > 1
else None
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
bsz, seq_len, d = x.size()
if self.out_proj_n_frames:
x = self.out_proj_n_frames(x)
x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d))
x = x.view(bsz, seq_len * self.n_frames_per_step, -1)
if (
incremental_state is None and self.n_frames_per_step > 1
): # teacher-forcing mode in training
x = x[
:, : -(self.n_frames_per_step - 1), :
] # remove extra frames after <eos>
return x, extra
def upgrade_state_dict_named(self, state_dict, name):
if self.n_frames_per_step > 1:
move_keys = [
(
f"{name}.project_in_dim.weight",
f"{name}.embed_tokens.project_in_dim.weight",
)
]
for from_k, to_k in move_keys:
if from_k in state_dict and to_k not in state_dict:
state_dict[to_k] = state_dict[from_k]
del state_dict[from_k]
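# Illustrative sketch (not part of fairseq): with n_frames_per_step = K,
# TransformerUnitDecoder.forward() turns each decoder position into K stacked
# unit frames -- features (B, T, D) are projected to (B, T, K*D), scored per
# frame and flattened to (B, T*K, V), and in teacher-forcing mode the K-1
# frames trailing <eos> are cut off.  The uncalled helper below only walks
# through that reshape arithmetic with dummy tensors (no real projection or
# vocabulary).
def _unit_decoder_shape_sketch(bsz=2, seq_len=5, d=8, k=3, vocab=11):
    x = torch.zeros(bsz, seq_len, k * d)               # after out_proj_n_frames
    x = x.view(bsz, seq_len, k, d)                     # split the K stacked frames
    logits = torch.zeros(bsz, seq_len, k, vocab)       # stand-in for output_layer(x)
    logits = logits.view(bsz, seq_len * k, -1)         # (B, T*K, V)
    return logits[:, : -(k - 1), :]                    # drop the frames after <eos>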
class S2STransformerMultitaskModelBase(FairseqEncoderDecoderModel):
@classmethod
def build_encoder(cls, args):
encoder = S2STransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_multitask_decoder(cls, args, tgt_dict, in_dim):
decoder_args = args.decoder_args
decoder_args.encoder_embed_dim = in_dim
if args.decoder_type == "transformer":
base_multitask_text_transformer_decoder_arch(decoder_args)
task_decoder = TransformerDecoder(
decoder_args,
tgt_dict,
embed_tokens=TransformerModelBase.build_embedding(
decoder_args,
tgt_dict,
decoder_args.decoder_embed_dim,
),
)
elif args.decoder_type == "ctc":
task_decoder = CTCDecoder(
dictionary=tgt_dict,
in_dim=in_dim,
)
else:
raise NotImplementedError(
"currently only support multitask decoder_type 'transformer', 'ctc'"
)
return task_decoder
@classmethod
def build_model(cls, args, task):
encoder = cls.build_encoder(args)
decoder = (
cls.build_decoder(args, task.target_dictionary)
if task.args.target_is_code
else cls.build_decoder(args)
)
base_model = cls(encoder, decoder)
# set up multitask decoders
base_model.multitask_decoders = {}
for task_name, task_obj in task.multitask_tasks.items():
in_dim = (
args.encoder_embed_dim
if task_obj.args.input_from == "encoder"
else args.decoder_embed_dim
)
task_decoder = cls.build_multitask_decoder(
task_obj.args, task_obj.target_dictionary, in_dim
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
return base_model
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, tgt_speaker=speaker, **kwargs
)
@register_model("s2ut_transformer")
class S2UTTransformerModel(S2STransformerMultitaskModelBase):
"""
Direct speech-to-speech translation model with S2T Transformer encoder + Transformer discrete unit decoder
https://arxiv.org/abs/2107.05604
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
@classmethod
def build_decoder(cls, args, tgt_dict):
num_embeddings = len(tgt_dict)
padding_idx = tgt_dict.pad()
embed_tokens = StackedEmbedding(
num_embeddings,
args.decoder_embed_dim,
padding_idx,
num_stacked=args.n_frames_per_step,
)
return TransformerUnitDecoder(
args,
tgt_dict,
embed_tokens,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
@register_model("s2spect_transformer")
class S2SpecTTransformerModel(S2STransformerMultitaskModelBase):
"""
Speech-to-spectrogram model with S2T Transformer encoder + TTS Transformer decoder
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
# decoder
parser.add_argument("--output-frame-dim", type=int)
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
@classmethod
def build_decoder(cls, args):
return TTSTransformerDecoder(args, None, padding_idx=1)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
incremental_state=None,
target_lengths=None,
speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
def base_multitask_text_transformer_decoder_arch(args):
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
# decoder layer
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
def base_s2st_transformer_encoder_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
@register_model_architecture(
model_name="s2ut_transformer", arch_name="s2ut_transformer"
)
def s2ut_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2ut_transformer", "s2ut_transformer_fisher")
def s2ut_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
s2ut_architecture_base(args)
@register_model_architecture(
model_name="s2spect_transformer", arch_name="s2spect_transformer"
)
def s2spect_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
# decoder prenet
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
# decoder postnet
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# decoder transformer layers
args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
@register_model_architecture("s2spect_transformer", "s2spect_transformer_fisher")
def s2spect_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
# decoder
args.prenet_dim = getattr(args, "prenet_dim", 32)
s2spect_architecture_base(args) | fairseq/models/speech_to_speech/s2s_transformer.py |
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoderModel,
FairseqEncoderDecoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import S2TTransformerEncoder
from fairseq.models.speech_to_speech.modules import CTCDecoder, StackedEmbedding
from fairseq.models.text_to_speech import TTSTransformerDecoder
from fairseq.models.transformer import (
Linear,
TransformerDecoder,
TransformerModelBase,
)
logger = logging.getLogger(__name__)
class S2STransformerEncoder(S2TTransformerEncoder):
"""Based on S2T transformer encoder, with support
to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
self.spk_emb_proj = None
if args.target_speaker_embed:
self.spk_emb_proj = Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
def forward(
self, src_tokens, src_lengths, tgt_speaker=None, return_all_hiddens=False
):
out = super().forward(src_tokens, src_lengths, return_all_hiddens)
if self.spk_emb_proj:
x = out["encoder_out"][0]
seq_len, bsz, _ = x.size()
tgt_speaker_emb = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, tgt_speaker_emb], dim=2))
out["encoder_out"][0] = x
return out
class TransformerUnitDecoder(TransformerDecoder):
"""Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn, output_projection
)
self.n_frames_per_step = args.n_frames_per_step
self.out_proj_n_frames = (
Linear(
self.output_embed_dim,
self.output_embed_dim * self.n_frames_per_step,
bias=False,
)
if self.n_frames_per_step > 1
else None
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
bsz, seq_len, d = x.size()
if self.out_proj_n_frames:
x = self.out_proj_n_frames(x)
x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d))
x = x.view(bsz, seq_len * self.n_frames_per_step, -1)
if (
incremental_state is None and self.n_frames_per_step > 1
): # teacher-forcing mode in training
x = x[
:, : -(self.n_frames_per_step - 1), :
] # remove extra frames after <eos>
return x, extra
def upgrade_state_dict_named(self, state_dict, name):
if self.n_frames_per_step > 1:
move_keys = [
(
f"{name}.project_in_dim.weight",
f"{name}.embed_tokens.project_in_dim.weight",
)
]
for from_k, to_k in move_keys:
if from_k in state_dict and to_k not in state_dict:
state_dict[to_k] = state_dict[from_k]
del state_dict[from_k]
class S2STransformerMultitaskModelBase(FairseqEncoderDecoderModel):
@classmethod
def build_encoder(cls, args):
encoder = S2STransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_multitask_decoder(cls, args, tgt_dict, in_dim):
decoder_args = args.decoder_args
decoder_args.encoder_embed_dim = in_dim
if args.decoder_type == "transformer":
base_multitask_text_transformer_decoder_arch(decoder_args)
task_decoder = TransformerDecoder(
decoder_args,
tgt_dict,
embed_tokens=TransformerModelBase.build_embedding(
decoder_args,
tgt_dict,
decoder_args.decoder_embed_dim,
),
)
elif args.decoder_type == "ctc":
task_decoder = CTCDecoder(
dictionary=tgt_dict,
in_dim=in_dim,
)
else:
raise NotImplementedError(
"currently only support multitask decoder_type 'transformer', 'ctc'"
)
return task_decoder
@classmethod
def build_model(cls, args, task):
encoder = cls.build_encoder(args)
decoder = (
cls.build_decoder(args, task.target_dictionary)
if task.args.target_is_code
else cls.build_decoder(args)
)
base_model = cls(encoder, decoder)
# set up multitask decoders
base_model.multitask_decoders = {}
for task_name, task_obj in task.multitask_tasks.items():
in_dim = (
args.encoder_embed_dim
if task_obj.args.input_from == "encoder"
else args.decoder_embed_dim
)
task_decoder = cls.build_multitask_decoder(
task_obj.args, task_obj.target_dictionary, in_dim
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
return base_model
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, tgt_speaker=speaker, **kwargs
)
@register_model("s2ut_transformer")
class S2UTTransformerModel(S2STransformerMultitaskModelBase):
"""
Direct speech-to-speech translation model with S2T Transformer encoder + Transformer discrete unit decoder
https://arxiv.org/abs/2107.05604
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
@classmethod
def build_decoder(cls, args, tgt_dict):
num_embeddings = len(tgt_dict)
padding_idx = tgt_dict.pad()
embed_tokens = StackedEmbedding(
num_embeddings,
args.decoder_embed_dim,
padding_idx,
num_stacked=args.n_frames_per_step,
)
return TransformerUnitDecoder(
args,
tgt_dict,
embed_tokens,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
@register_model("s2spect_transformer")
class S2SpecTTransformerModel(S2STransformerMultitaskModelBase):
"""
Speech-to-spectrogram model with S2T Transformer encoder + TTS Transformer decoder
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
# decoder
parser.add_argument("--output-frame-dim", type=int)
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
@classmethod
def build_decoder(cls, args):
return TTSTransformerDecoder(args, None, padding_idx=1)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
incremental_state=None,
target_lengths=None,
speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
def base_multitask_text_transformer_decoder_arch(args):
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
# decoder layer
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
def base_s2st_transformer_encoder_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
@register_model_architecture(
model_name="s2ut_transformer", arch_name="s2ut_transformer"
)
def s2ut_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2ut_transformer", "s2ut_transformer_fisher")
def s2ut_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
s2ut_architecture_base(args)
@register_model_architecture(
model_name="s2spect_transformer", arch_name="s2spect_transformer"
)
def s2spect_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
# decoder prenet
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
# decoder postnet
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# decoder transformer layers
args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
@register_model_architecture("s2spect_transformer", "s2spect_transformer_fisher")
def s2spect_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
# decoder
args.prenet_dim = getattr(args, "prenet_dim", 32)
s2spect_architecture_base(args) | 0.945889 | 0.218242 |
import logging
import socket
import time
import picamera
from platypush.backend import Backend
class CameraPiBackend(Backend):
def __init__(self, listen_port, x_resolution=640, y_resolution=480,
framerate=24, hflip=False, vflip=False,
sharpness=0, contrast=0, brightness=50,
video_stabilization=False, ISO=0, exposure_compensation=0,
exposure_mode='auto', meter_mode='average', awb_mode='auto',
image_effect='none', color_effects=None, rotation=0,
crop=(0.0, 0.0, 1.0, 1.0), **kwargs):
""" See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options """
super().__init__(**kwargs)
self.listen_port = listen_port
self.server_socket = socket.socket()
self.server_socket.bind(('0.0.0.0', self.listen_port))
self.server_socket.listen(0)
self.camera = picamera.PiCamera()
self.camera.resolution = (x_resolution, y_resolution)
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.camera.sharpness = sharpness
self.camera.contrast = contrast
self.camera.brightness = brightness
self.camera.video_stabilization = video_stabilization
self.camera.ISO = ISO
self.camera.exposure_compensation = exposure_compensation
self.camera.exposure_mode = exposure_mode
self.camera.meter_mode = meter_mode
self.camera.awb_mode = awb_mode
self.camera.image_effect = image_effect
self.camera.color_effects = color_effects
self.camera.rotation = rotation
self.camera.crop = crop
logging.info('Initialized Pi camera backend')
def send_message(self, msg):
pass
def run(self):
super().run()
while True:
connection = self.server_socket.accept()[0].makefile('wb')
try:
self.camera.start_recording(connection, format='h264')
while True:
self.camera.wait_recording(60)
except ConnectionError as e:
pass
finally:
try:
self.camera.stop_recording()
connection.close()
except:
pass
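# Illustrative sketch (not part of platypush): run() above accepts one TCP
# client at a time and pipes raw H.264 from the Pi camera into that socket
# until the client drops.  A minimal receiving end could look like the
# uncalled helper below; host, port and output path are placeholders, and the
# captured file is raw H.264 that players such as VLC or ffplay can open.
def _example_stream_client(host='raspberrypi.local', port=5000, out_path='stream.h264'):
    s = socket.socket()
    s.connect((host, port))
    with open(out_path, 'wb') as f:
        while True:
            chunk = s.recv(4096)
            if not chunk:
                break
            f.write(chunk)
    s.close()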
# vim:sw=4:ts=4:et: | platypush/backend/camera/pi.py | import logging
import random
from os.path import realpath
import aiohttp
from aiohttp import client_exceptions
class UnableToFetchCarbon(Exception):
pass
themes = [
"3024-night",
"a11y-dark",
"blackboard",
"base16-dark",
"base16-light",
"cobalt",
"duotone-dark",
"dracula-pro",
"hopscotch",
"lucario",
"material",
"monokai",
"nightowl",
"nord",
"oceanic-next",
"one-light",
"one-dark",
"panda-syntax",
"parasio-dark",
"seti",
"shades-of-purple",
"solarized+dark",
"solarized+light",
"synthwave-84",
"twilight",
"verminal",
"vscode",
"yeti",
"zenburn",
]
colour = [
"#FF0000",
"#FF5733",
"#FFFF00",
"#008000",
"#0000FF",
"#800080",
"#A52A2A",
"#FF00FF",
"#D2B48C",
"#00FFFF",
"#808000",
"#800000",
"#00FFFF",
"#30D5C8",
"#00FF00",
"#008080",
"#4B0082",
"#EE82EE",
"#FFC0CB",
"#000000",
"#FFFFFF",
"#808080",
]
class CarbonAPI:
def __init__(self):
self.language = "auto"
self.drop_shadow = True
self.drop_shadow_blur = "68px"
self.drop_shadow_offset = "20px"
self.font_family = "JetBrains Mono"
self.width_adjustment = True
self.watermark = False
async def generate(self, text: str, user_id):
async with aiohttp.ClientSession(
headers={"Content-Type": "application/json"},
) as ses:
params = {
"code": text,
}
params["backgroundColor"] = random.choice(colour)
params["theme"] = random.choice(themes)
params["dropShadow"] = self.drop_shadow
params["dropShadowOffsetY"] = self.drop_shadow_offset
params["dropShadowBlurRadius"] = self.drop_shadow_blur
params["fontFamily"] = self.font_family
params["language"] = self.language
params["watermark"] = self.watermark
params["widthAdjustment"] = self.width_adjustment
try:
request = await ses.post(
"https://carbonara.vercel.app/api/cook",
json=params,
)
except client_exceptions.ClientConnectorError:
                raise UnableToFetchCarbon("Cannot reach the host!")
resp = await request.read()
with open(f"cache/carbon{user_id}.jpg", "wb") as f:
f.write(resp)
            return realpath(f.name)
| YukkiMusic/platforms/Carbon.py | 0.36557 | 0.183283 |
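A usage sketch for CarbonAPI (assumptions: no event loop is already running, so asyncio.run is fine; a writable cache/ directory exists, as generate() expects; the user id is a placeholder):
import asyncio

carbon = CarbonAPI()
# generate() posts the snippet to the carbonara service and returns the image path on disk.
path = asyncio.run(carbon.generate("print('hello world')", user_id=12345))
print(path)  # e.g. .../cache/carbon12345.jpg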
import logging
from os import environ
import pandas as pd
import kaiko.utils as ut
try:
from cStringIO import StringIO # Python 2
except ImportError:
from io import StringIO
# Base URLs
_BASE_URL_KAIKO_US = 'https://us.market-api.kaiko.io/'
_BASE_URL_KAIKO_EU = 'https://eu.market-api.kaiko.io/'
_BASE_URL_RAPIDAPI = 'https://kaiko-cryptocurrency-market-data.p.rapidapi.com/' # Not supported yet
_BASE_URLS = dict(us=_BASE_URL_KAIKO_US, eu=_BASE_URL_KAIKO_EU, rapidapi=_BASE_URL_RAPIDAPI)
# API endpoints
_URL_REFERENCE_DATA_API = 'https://reference-data-api.kaiko.io/v1/'
_URL_HISTORICAL_TRADES = 'v1/data/{commodity}.{data_version}/exchanges/{exchange}/{instrument_class}/{instrument}' \
'/trades'
_URL_ORDER_BOOK_FULL = 'v1/data/{commodity}.{data_version}/exchanges/{exchange}/{instrument_class}/{instrument}' \
'/snapshots/full'
_URL_ORDER_BOOK_AGGREGATIONS_FULL = 'v1/data/{commodity}.{data_version}/exchanges/{exchange}/{instrument_class}' \
'/{instrument}/ob_aggregations/full'
_URL_CANDLES = 'v1/data/{commodity}.{data_version}/exchanges/{exchange}/{instrument_class}/{instrument}/aggregations' \
'/count_ohlcv_vwap'
_URL_DIRECT_EXCHANGE_RATE = 'v1/data/{commodity}.{data_version}/spot_direct_exchange_rate/{base_asset}/{quote_asset}'
_URL_EXCHANGE_RATE = 'v1/data/trades.v1/spot_exchange_rate/{base_asset}/{quote_asset}'
# Default settings?
def init_param_dict(keys: list, values: dict = None):
"""
Creates a dictionary filled with `value` and with keys corresponding to `keys`.
:param keys: List of keys for the dictionary.
:param values: Dictionary of values to fill (default is `None`). If the values dictionary contains keys that
did not exist in the list `keys`, then it is added to the return dictionary.
:type values: dict
:return: Dictionary with `keys` as keys and `value` as values.
:rtype: dict
"""
# Initialize with None values
output = dict(zip(keys, [None for i in keys]))
# Overwrite default values
if values is not None:
for k in values.keys():
output[k] = values[k]
return output
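# Illustrative example (not part of the original module):
# init_param_dict(['a', 'b'], {'b': 2, 'c': 3}) returns {'a': None, 'b': 2, 'c': 3};
# keys missing from `values` stay None, and extra keys in `values` are added as-is.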
class KaikoClient:
"""
Kaiko Client: extracts API key from environment, sets base URL and constructs headers for API requests.
In order to change your API key, you can use the setter method for `api_key_input`. `api_key` contains the key
used by the client and cannot be set. `api_key` and `headers` are automatically updated when changing
`api_key_input`.
Valid `base_url` include 'us', 'eu', and 'rapidapi' (Rapid API no longer supported).
"""
def __init__(self, api_key: str = '', base_url: str = 'us'):
self.base_url = _BASE_URLS[base_url]
self._api_key_input = api_key
self.headers = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'X-Api-Key': self.api_key,
}
@property
def api_key(self) -> str:
"""
Sets the API key from the environment variable $KAIKO_API_KEY if no key is provided.
:param api_key: (optional) your API key
:return: API key to be used in the requests
"""
env = environ.get('KAIKO_API_KEY')
kaiko_api_key = env or ''
api_key = self.api_key_input or kaiko_api_key
return api_key
@property
def api_key_input(self):
return self._api_key_input
@api_key_input.setter
def api_key_input(self, newval):
self._api_key_input = newval
self.update_headers()
    def update_headers(self) -> None:
self.headers = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'X-Api-Key': self.api_key,
}
def load_catalogs(self):
"""
Loads
1) List of instruments -> self.all_instruments
2) List of exchanges -> self.all_exchanges
3) List of assets -> self.all_assets
Those are public endpoints which do not require authentication.
"""
print("Downloading Kaiko's catalog (lists of instruments, exchanges, assets)...")
logging.info("Downloading catalogs...")
# List of all instruments
self.all_instruments = ut.request_df(_URL_REFERENCE_DATA_API + 'instruments')
# replace None values by 'ongoing'
self.all_instruments['trade_end_time'] = self.all_instruments['trade_end_time'].apply(lambda x: x or 'ongoing')
# List of exchanges and assets
self.all_exchanges = ut.request_df(_URL_REFERENCE_DATA_API + 'exchanges')
self.all_assets = ut.request_df(_URL_REFERENCE_DATA_API + 'assets')
print("\t...done! - available under client.all_{instruments, exchanges, assets}")
logging.info("... catalogs imported!")
def __repr__(self):
return "Kaiko Client set up with \n\tBase URL: {}\n\tAPI Key : {}[...]".format(self.base_url, self.api_key[:5])
class KaikoData:
"""
General data class
Get query details from the json file as attributes
For the definition of the endpoint, there are mandatory instrument descriptions (can we get it from API?)
Attributes (draft)
- endpoint = base + endpoint
- params
"""
def __repr__(self):
return f"KaikoData setup with\n- URL\n\t {self.url},\n- Required parameters:\n\t{self.req_params}," \
f"\n- Optional parameters:\n\t{self.params}"
    def __init__(self, endpoint, req_params: dict, params: dict = None, client=None, pagination=True, **kwargs):
        self.client = client or KaikoClient()
        self.endpoint = self.client.base_url + endpoint
        # Copy the optional params dict so instances never share a mutable default argument
        self.params = dict(params) if params is not None else {}
        self.req_params = req_params
self._form_url()
self.pagination = pagination
# catch parameters given to the class constructor
self._add_to_params(**kwargs)
self._add_to_req_params(**kwargs)
self._form_url()
logging.info(f"\n\nInitiated data object\n{self.__repr__()}\n")
def _form_url(self):
self.url = self.endpoint.format(**self.req_params)
@staticmethod
def _format_param_timestamps(params):
for key in ['start_time', 'end_time']:
if key in params:
params[key] = ut.convert_timestamp_to_apiformat(params[key])
return params
@property
def query(self):
return dict(**self.params, **self.req_params)
@property
def params(self):
return self._format_param_timestamps(self._params)
@params.setter
def params(self, params):
self._params = params
def _add_to_params(self, **kwargs):
for key in kwargs:
if key in self.parameter_space:
self._params[key] = kwargs[key]
def _add_to_req_params(self, **kwargs):
for key in kwargs:
if key in self.req_params.keys():
self.req_params[key] = kwargs[key]
@staticmethod
def df_formatter(res):
df = pd.DataFrame(res['data'], dtype='float')
df.set_index('timestamp', inplace=True)
df.index = ut.convert_timestamp_unix_to_datetime(df.index)
return df
def _request_api(self):
self.df, self.query_api = ut.request_df(self.url,
return_query=True,
headers=self.client.headers,
params=self.params,
df_formatter=self.df_formatter,
pagination=self.pagination,
)
def load_catalogs(self):
""" Loads catalogs in the client """
self.client.load_catalogs()
class Trades(KaikoData):
"""
Tick-by-tick trade data
"""
def __init__(self, exchange, instrument, instrument_class: str = 'spot', params: dict = dict(page_size=100000), client=None, **kwargs):
# Initialize endpoint required parameters
self.req_params = dict(commodity='trades',
data_version='latest',
exchange=exchange,
instrument_class=instrument_class,
instrument=instrument,
)
self.parameter_space = 'start_time,end_time,page_size,continuation_token'.split(',')
endpoint = _URL_HISTORICAL_TRADES
KaikoData.__init__(self, endpoint, self.req_params, params, client, **kwargs)
self._request_api()
@staticmethod
def df_formatter(res):
df = pd.DataFrame(res['data'], dtype='float')
df.set_index('timestamp', inplace=True)
df.index = ut.convert_timestamp_unix_to_datetime(df.index)
return df
class Candles(KaikoData):
"""
Candles (Count OHLCV VWAP)
"""
def __init__(self, exchange, instrument, instrument_class: str = 'spot', params: dict = dict(page_size=100000),
client=None,
**kwargs):
# Initialize endpoint required parameters
self.req_params = dict(commodity='trades',
data_version='latest',
exchange=exchange,
instrument_class=instrument_class,
instrument=instrument,
)
self.parameter_space = 'interval,start_time,end_time,page_size,continuation_token,sort'.split(',')
endpoint = _URL_CANDLES
KaikoData.__init__(self, endpoint, self.req_params, params, client, **kwargs)
self._request_api()
@staticmethod
def df_formatter(res):
df = pd.DataFrame(res['data'], dtype='float')
df.set_index('timestamp', inplace=True)
df.index = ut.convert_timestamp_unix_to_datetime(df.index)
return df
def add_price_levels(df):
"""
Add order-book price levels corresponding to amounts given by the API:
X_volume_Y where X is in {bid, ask} and Y is the price level relative to the midprice:
0_1 ... 0_9 : 0.1% to 0.9% away from the mid price
1 ... 10 : 1% to 10% away from the mid price
"""
for side in ['bid', 'ask']:
labs = [l for l in df.columns if l.startswith('%s_volume' % side)]
for lab in labs:
# calculate the level
lvl_lab = lab.split('volume')[-1]
lvl = float('.'.join(lvl_lab.split('_'))) / 100
# side of the order book
eps = -1 * (side == 'bid') + 1 * (side == 'ask')
newlab = '%s_price%s' % (side, lvl_lab)
df[newlab] = df["mid_price"] * (1 + eps * lvl)
return df
class OrderBookSnapshots(KaikoData):
"""
Order-book snapshot data
"""
def __init__(self, exchange, instrument, instrument_class: str = 'spot', params: dict = dict(page_size=100),
client=None,
**kwargs):
# Initialize endpoint required parameters
self.req_params = dict(commodity='order_book_snapshots',
data_version='latest',
exchange=exchange,
instrument_class=instrument_class,
instrument=instrument,
)
self.parameter_space = 'start_time,end_time,page_size,continuation_token,slippage,slippage_ref,orders,limit_orders'.split(',')
endpoint = _URL_ORDER_BOOK_FULL
KaikoData.__init__(self, endpoint, self.req_params, params, client, **kwargs)
self._request_api()
if len(self.df) == 0:
print(f'No data was found for the time range selected. \n{self.query_api}')
print('NB: only one month of historical order book snapshots is available from the API. Please setup a '
'Data Feed delivery if you are trying to access data older than a month.')
@staticmethod
def df_formatter(res):
df = pd.DataFrame(res['data'], dtype='float')
df.set_index('poll_timestamp', inplace=True)
df.index = ut.convert_timestamp_unix_to_datetime(df.index)
df = add_price_levels(df)
return df
class OrderBookAggregations(KaikoData):
"""
Order-book data statistics (averages)
"""
def __init__(self, exchange, instrument, instrument_class: str = 'spot', params: dict = dict(page_size=100),
client=None,
**kwargs):
# Initialize endpoint required parameters
self.req_params = dict(commodity='order_book_snapshots',
data_version='latest',
exchange=exchange,
instrument_class=instrument_class,
instrument=instrument,
)
self.parameter_space = 'start_time,end_time,page_size,continuation_token,slippage,slippage_ref,interval'.split(',')
endpoint = _URL_ORDER_BOOK_AGGREGATIONS_FULL
KaikoData.__init__(self, endpoint, self.req_params, params, client, **kwargs)
self._request_api()
if len(self.df) == 0:
print(f'No data was found for the time range selected. \n{self.query_api}')
print('NB: only one month of historical order book snapshots is available from the API. Please setup a '
'Data Feed delivery if you are trying to access data older than a month.')
@staticmethod
def df_formatter(res):
df = pd.DataFrame(res['data'], dtype='float')
df.set_index('poll_timestamp', inplace=True)
df.index = ut.convert_timestamp_unix_to_datetime(df.index)
df = add_price_levels(df)
return df
if __name__ == '__main__':
FORMAT = "%(asctime)-15s %(levelname)-8s | %(lineno)d %(filename)s: %(message)s"
logging.basicConfig(filename='/var/tmp/kaiko.log', level=logging.DEBUG, format=FORMAT, filemode='a')
# test = OrderBookAverages('cbse', 'btc-usd', start_time='2020-08-06', interval='10m')
test = Candles('cbse', 'eth-usd', start_time='2020-08-06', interval='1d')
    print(test.df)
| kaiko/kaiko.py | 0.716814 | 0.191592 |
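A short usage sketch for the classes above (assuming the KAIKO_API_KEY environment variable holds a valid key; exchange, instrument, and dates are illustrative):
client = KaikoClient(base_url='us')              # picks up $KAIKO_API_KEY automatically
candles = Candles('cbse', 'btc-usd', client=client,
                  start_time='2020-08-01', end_time='2020-08-07', interval='1h')
print(candles.df.head())                         # OHLCV/VWAP rows indexed by timestamp
trades = Trades('cbse', 'btc-usd', client=client,
                start_time='2020-08-06T00:00:00Z', end_time='2020-08-06T01:00:00Z')
print(len(trades.df), 'trades')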
from baselayer.app.access import auth_or_token
from ..base import BaseHandler
from ...models import DBSession, Group, Photometry, Spectrum
class SharingHandler(BaseHandler):
@auth_or_token
def post(self):
"""
---
description: Share data with additional groups/users
requestBody:
content:
application/json:
schema:
type: object
properties:
photometryIDs:
type: array
items:
type: integer
description: |
IDs of the photometry data to be shared. If `spectrumIDs` is not
provided, this is required.
spectrumIDs:
type: array
items:
type: integer
description: IDs of the spectra to be shared. If `photometryIDs` is
not provided, this is required.
groupIDs:
type: array
items:
type: integer
description: |
List of IDs of groups data will be shared with. To share data with
a single user, specify their single user group ID here.
required:
- groupIDs
responses:
200:
content:
application/json:
schema: Success
"""
data = self.get_json()
group_ids = data.get("groupIDs", None)
if group_ids is None or group_ids == []:
return self.error("Missing required `groupIDs` field.")
phot_ids = data.get("photometryIDs", [])
spec_ids = data.get("spectrumIDs", [])
if not phot_ids and not spec_ids:
return self.error(
"One of either `photometryIDs` or `spectrumIDs` " "must be provided."
)
groups = Group.query.filter(Group.id.in_(group_ids))
if not all([group in self.current_user.accessible_groups for group in groups]):
return self.error(
"Insufficient permissions: you must have access to each "
"target group you wish to share data with."
)
obj_id = None
if phot_ids:
query = Photometry.query.filter(Photometry.id.in_(phot_ids))
for phot in query:
# Ensure user has access to data being shared
_ = Photometry.get_if_owned_by(phot.id, self.current_user)
for group in groups:
phot.groups.append(group)
# Grab obj_id for use in websocket message below
if obj_id is None:
obj_id = phot.obj_id
if spec_ids:
query = Spectrum.query.filter(Spectrum.id.in_(spec_ids))
for spec in query:
# Ensure user has access to data being shared
_ = Spectrum.get_if_owned_by(spec.id, self.current_user)
for group in groups:
spec.groups.append(group)
# Grab obj_id for use in websocket message below
if obj_id is None:
obj_id = spec.obj_id
DBSession().commit()
if phot_ids:
self.push(
action="skyportal/FETCH_SOURCE_PHOTOMETRY", payload={"obj_id": obj_id}
)
if spec_ids:
self.push(
action="skyportal/FETCH_SOURCE_SPECTRA", payload={"obj_id": obj_id}
)
        return self.success()
| skyportal/handlers/api/sharing.py | 0.762778 | 0.292696 |
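An illustrative request against this handler (the /api/sharing route and the token header format are assumptions based on a typical SkyPortal deployment; host and IDs are placeholders):
import requests

payload = {
    "photometryIDs": [11, 12],   # hypothetical photometry rows to share
    "groupIDs": [3],             # target group(s), or a single-user group id
}
resp = requests.post(
    "https://skyportal.example.org/api/sharing",
    json=payload,
    headers={"Authorization": "token <api-token>"},
)
print(resp.json())               # {"status": "success", ...} on success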
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
import services
from venues.venue_service import VenueService
venue_game_schema = GsiGridSchema(label='Venue Game Service')
venue_game_schema.add_field('zone', label='Venue', width=1, unique_field=True)
venue_game_schema.add_field('voting_open', label='Voting Open', width=1)
venue_game_schema.add_field('active', label='Active', width=1)
with venue_game_schema.add_has_many('civic_policies', GsiGridSchema, label='Civic Policies') as sub_schema:
sub_schema.add_field('civic_policy', label='Civic Policy')
sub_schema.add_field('status', label='Status')
sub_schema.add_field('votes', label='Votes')
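# Note (editorial, not part of the original module): each dict returned by the handler below fills
# one grid row, so its 'zone', 'voting_open', 'active' and nested 'civic_policies' keys must match
# the field names registered on venue_game_schema and sub_schema above.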
@GsiHandler('venue_game_service', venue_game_schema)
def generate_venue_game_service_data(*args, zone_id:int=None, **kwargs):
service_info = []
venue_game_service = services.venue_game_service()
venue_service = services.venue_service()
street_service = services.street_service()
zone_manager = services.get_zone_manager()
if venue_game_service is None:
return service_info
active_zone = services.current_zone()
voting_open = street_service.voting_open
for (zone_id, instance) in venue_game_service._zone_provider.items():
zone = zone_manager.get(zone_id, allow_uninstantiated_zones=True)
if zone is None:
continue
lot_name = zone.lot.get_lot_name()
try:
household = zone.lot.get_household()
        except Exception:
household = None
household_name = '' if household is None else '(' + household.name + ')'
zone_str = lot_name + household_name + ' ' + str(zone)
civic_policy_entry = []
enacted_policies = instance.get_enacted_policies(tuning=True)
balloted_policies = instance.get_balloted_policies(tuning=True)
up_for_repeal = instance.get_up_for_repeal_policies(tuning=True)
source_venue = None
for policy in instance.get_civic_policies(tuning=True):
status_str = ''
if not enacted_policies:
source_venue = VenueService.get_variable_venue_source_venue(policy.sub_venue)
if source_venue is not None:
if policy.sub_venue is source_venue:
status_str += '[Enacted by default] '
if policy in enacted_policies:
status_str += 'Enacted '
if policy in balloted_policies:
status_str += 'Balloted '
if policy in up_for_repeal:
status_str += 'Up for Repeal'
if status_str == '':
status_str = 'Dormant'
if policy.vote_count_statistic is None:
votes = 'n/a'
else:
votes = instance.get_stat_value(policy.vote_count_statistic)
entry = {'civic_policy': str(policy), 'status': status_str, 'votes': votes}
civic_policy_entry.append(entry)
entry = {'zone': zone_str, 'voting_open': 'Yes' if voting_open else 'No', 'active': str(type(venue_service.active_venue)) if zone is active_zone else '', 'civic_policies': civic_policy_entry}
service_info.append(entry)
service_info = sorted(service_info, key=lambda entry: entry['zone'])
    return service_info
| S4/S4 Library/simulation/venues/civic_policies/venue_civic_policy_handlers.py | 0.256646 | 0.118998 |