| id | content |
|---|---|
11573400
|
import threading
from dataclasses import dataclass
from traceback import FrameSummary
from typing import List, Optional
import rx
from rx.disposable import Disposable
from rxbp.init.initobserverinfo import init_observer_info
from rxbp.observable import Observable
from rxbp.observablesubjects.observablesubjectbase import ObservableSubjectBase
from rxbp.observerinfo import ObserverInfo
from rxbp.scheduler import Scheduler
from rxbp.schedulers.trampolinescheduler import TrampolineScheduler
from rxbp.utils.tooperatorexception import to_operator_exception
@dataclass
class RefCountObservable(Observable):
source: Observable
subject: ObservableSubjectBase
subscribe_scheduler: TrampolineScheduler
stack: List[FrameSummary]
def __post_init__(self):
self.count = 0
self.volatile_disposables: List[rx.typing.Disposable] = []
self.first_disposable: Optional[rx.typing.Disposable] = None
self.lock = threading.RLock()
# self.scheduled_next = False
def observe(self, observer_info: ObserverInfo):
disposable = self.subject.observe(observer_info)
if observer_info.is_volatile:
self.volatile_disposables.append(disposable)
return disposable
with self.lock:
self.count += 1
            current_count = self.count
        if current_count == 1:
if self.subscribe_scheduler.idle:
raise Exception(to_operator_exception(
message='observe method call should be scheduled on subscribe scheduler',
stack=self.stack,
))
# def action(_, __):
# self.scheduled_next = True
#
# self.subscribe_scheduler.schedule(action)
subject_subscription = init_observer_info(self.subject, is_volatile=observer_info.is_volatile)
self.first_disposable = self.source.observe(subject_subscription)
# else:
# if self.scheduled_next:
# raise Exception(to_operator_exception(
# message='subsequent subscribe call has been delayed, make sure to not delay Flowable subscriptions',
# stack=self.stack,
# ))
def dispose():
disposable.dispose()
with self.lock:
self.count -= 1
if self.count == 0:
dispose_all = True
else:
dispose_all = False
if dispose_all:
self.first_disposable.dispose()
for d in self.volatile_disposables:
d.dispose()
return Disposable(dispose)
|
11573408
|
from cms.models.pluginmodel import CMSPlugin
from django.db import models
class FloatModel(CMSPlugin):
FLOAT_CHOICES = (
('left', 'float left'),
('right', 'float right'),
)
FLOAT_BREAKPOINT_CHOICES = (
('', 'very small'),
('sm-', 'small'),
('md-', 'medium'),
('lg-', 'large'),
('xl-', 'very large'),
)
float_direction = models.CharField(
max_length=256,
choices=FLOAT_CHOICES,
default=FLOAT_CHOICES[0][0],
)
float_breakpoint = models.CharField(
max_length=256,
choices=FLOAT_BREAKPOINT_CHOICES,
default=FLOAT_BREAKPOINT_CHOICES[0][0],
help_text="At which bootstrap4 breakpoint should the float behaviour start, starting from smallest.",
blank=True,
)
margin_top = models.PositiveIntegerField(default=0)
margin_right = models.PositiveIntegerField(default=0)
margin_bottom = models.PositiveIntegerField(default=0)
margin_left = models.PositiveIntegerField(default=0)
def __str__(self):
return "float-{}{}".format(
self.float_breakpoint,
self.float_direction,
)
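# Hedged usage note (not from the original source): with the defaults above,
# str(FloatModel(float_breakpoint="md-", float_direction="left")) == "float-md-left",
# matching Bootstrap 4's responsive float utility class names.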
|
11573442
|
from werkzeug.security import generate_password_hash
from app.models import db, User
def seed_users():
demo = User(username='Demo', email='<EMAIL>',
password='password')
db.session.add(demo)
db.session.commit()
def undo_users():
db.session.execute('TRUNCATE users CASCADE;')
db.session.commit()
|
11573470
|
import os
import datetime
import numpy as np
from keras.utils import to_categorical
from keras.preprocessing.image import Iterator
from .image_io_utils import load_image, save_to_image
from .image_augmentation_utils import image_randomcrop, image_centercrop
from ..vis_utils import plot_image_label
from ...configures import NAME_MAP
class SegDirectoryIterator(Iterator):
def __init__(self,
base_fnames,
data_generator,
image_dir,
image_suffix,
image_color_mode,
label_dir,
label_suffix,
n_class,
feed_onehot_label=True,
cval=255.,
label_cval=0,
crop_mode="random",
target_size=None,
batch_size=1,
shuffle=True,
seed=None,
debug=False,
dataset_name="voc"
):
"""
:param base_fnames: list, basic file names
:param data_generator: ImageDataGenerator instance
:param image_dir: string
:param image_suffix: string, one of ["npy", "jpg", "jpeg", "png", "tif"]
:param image_color_mode: string, one of ["gray", "rgb", "multi"]
:param label_dir: string
:param label_suffix: string, one of ["npy", "jpg", "jpeg", "png", "tif"]
:param n_class: int, number of classes, including background
:param feed_onehot_label: bool, whether to apply one-hot encoding to labels
:param cval: float, filling value for image
:param label_cval: float, filling value for label
:param crop_mode:string, one of ["none", "resize", "random", "center"]
:param target_size: tuple, (height, width)
:param batch_size: int
:param shuffle: bool
:param seed: float
:param save_to_dir: string
:param save_image_path: string
:param save_label_path: string
"""
self.base_fnames = base_fnames
self.nb_sample = len(base_fnames)
self.seg_data_generator = data_generator
self.image_dir = image_dir
self.image_suffix = image_suffix
if self.image_suffix not in [".npy", ".jpg", ".jpeg", ".png", ".tif"]:
raise ValueError(
"Invalid image suffix: {}. Expected '.npy', '.jpg', '.jpeg', '.png' or '.tif'.".format(self.image_suffix))
self.image_color_mode = image_color_mode
if self.image_color_mode not in ["gray", "rgb", "multi"]:
raise ValueError(
"Invalid image color mode: {}. Expected 'gray', 'rgb', 'multi'.".format(self.image_color_mode))
self.label_dir = label_dir
self.label_suffix = label_suffix
if self.label_suffix not in [".npy", ".jpg", ".jpeg", ".png", ".tif"]:
raise ValueError(
"Invalid label suffix: {}. Expected '.npy', '.jpg', '.jpeg', '.png' or '.tif'.".format(self.label_suffix))
self.n_class = n_class
self.feed_onehot_label = feed_onehot_label
self.cval = cval
self.label_cval = label_cval
self.crop_mode = crop_mode
if self.crop_mode not in ["random", "center", "resize", "none"]:
raise ValueError(
"Invalid crop mode: {}. Expected 'random', 'center', 'resize', 'none'.".format(self.crop_mode))
self.target_size = target_size
self.batch_size = batch_size
self.debug = debug
self.shuffle = shuffle
self.seed = seed
self.dataset_name = dataset_name
super(SegDirectoryIterator, self).__init__(
self.nb_sample, batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, indices):
""" get a batch of samples
:param indices: list
list of sample indices
:return: a batch of samples, including images and labels
"""
batch_image = []
batch_label = []
_image_is_gray = self.image_color_mode=="gray"
for ind in indices:
### 1. load image and label
if self.image_suffix == ".npy":
# using Numpy to load *.npy files
# NOTE: RESIZE, GRAY, VALUE_SCALE are not valid here!
_image = np.load(os.path.join(self.image_dir, self.base_fnames[ind] + self.image_suffix))
_label = np.load(os.path.join(self.label_dir, self.base_fnames[ind] + self.label_suffix))
else:
# if the crop_mode is 'resize', resize the input image to target size,
# but will not apply random cropping or center cropping
if self.crop_mode=="resize":
_target_size = (self.target_size[0], self.target_size[1])
else:
                    # if the _target_size is None, the input image will maintain its original size
_target_size = None
if self.image_suffix == ".tif" and self.image_color_mode == "multi":
# using GDAL to load multi-spectral images
### NOTE: RESIZE AND GRAY ARE NOT VALID here!!!
_target_size = None
_image = load_image(os.path.join(self.image_dir, self.base_fnames[ind] + self.image_suffix),
value_scale=1, use_gdal=True)
else:
# RGB/Gray using PIL
_image = load_image(os.path.join(self.image_dir, self.base_fnames[ind] + self.image_suffix),
is_gray=_image_is_gray, value_scale=1, target_size=_target_size)
_label = load_image(os.path.join(self.label_dir, self.base_fnames[ind] + self.label_suffix),
is_gray=True, value_scale=1, target_size=_target_size)
### 2. do padding if applying cropping
img_h, img_w, img_c = _image.shape
if self.crop_mode in ["random", "center"]:
pad_h = max(self.target_size[0] - img_h, 0)
pad_w = max(self.target_size[1] - img_w, 0)
                _image = np.pad(_image,
                                ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)),
                                'constant', constant_values=self.cval)
                _label = np.pad(_label,
                                ((pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2), (0, 0)),
                                'constant', constant_values=self.label_cval)
## 3. do cropping from the padded image/label
if self.crop_mode == 'center':
_image, _label = image_centercrop(_image, _label, self.target_size[0], self.target_size[1])
elif self.crop_mode == 'random':
_image, _label = image_randomcrop(_image, _label, self.target_size[0], self.target_size[1])
### 4. do data augmentation for rgb images
if self.image_color_mode=="rgb":
_image, _label = self.seg_data_generator.random_transform(_image,
_label,
cval=self.cval,
label_cval=self.label_cval,
seed=None)
# we do not apply a normalization here since a BN is firstly adopted in the FCN.
### 5.1. clip the label values to a valid range
_label = np.clip(_label, 0, self.n_class - 1).astype(np.uint8)
### 5.2. save the generated images to local dir
if self.debug:
#time_flag = "_{}".format(datetime.datetime.now().strftime("%y%m%d%H%M%S"))
plot_image_label(_image/255, _label, vmin=0, vmax=self.n_class - 1, names=NAME_MAP[self.dataset_name])
#save_to_image(_image, os.path.join(self.save_image_path, self.base_fnames[ind] + time_flag + self.image_suffix))
#save_to_image(_label, os.path.join(self.save_label_path, self.base_fnames[ind] + time_flag + self.label_suffix))
if self.feed_onehot_label:
_label = to_categorical(_label, self.n_class, dtype="uint8")
assert _label.shape==(self.target_size[0], self.target_size[1], self.n_class)
batch_image.append(_image)
batch_label.append(_label)
batch_image = np.stack(batch_image, axis=0)
batch_label = np.stack(batch_label, axis=0)
return (batch_image, batch_label)
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
|
11573474
|
import setuptools
from setuptools.extension import Extension
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="dbscan",
version="0.0.9",
author="<NAME>",
author_email="<EMAIL>",
description="Theoretically efficient and practical parallel DBSCAN",
long_description=long_description,
long_description_content_type="text/markdown",
keywords='cluster clustering density dbscan',
url="https://github.com/wangyiqiu/dbscan-python",
license='MIT',
    packages=[''],  # setuptools.find_packages()
package_dir={'': '.'},
package_data={'': ['dbscan/DBSCAN.cpython-38-x86_64-linux-gnu.so']},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
"License :: OSI Approved :: MIT License",
'Programming Language :: C++',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
"Operating System :: POSIX :: Linux",
],
python_requires='>=3.8',
)
|
11573478
|
import pickle
from functools import partial
from typing import Union, Callable, Optional, List, Dict
from .. import uwsgi
from ..typehints import Strint
from ..utils import decode, decode_deep, listify
__offloaded_functions: Dict[str, Callable] = {}
TypeMuleFarm = Union[Strint, 'Mule', 'Farm']
def _get_farms() -> List[str]:
return decode_deep(listify(uwsgi.opt.get(b'farm', [])))
def _mule_messages_hook(message: bytes):
# Processes mule messages, tries to decode it.
try:
print(Mule.get_current_id())
loaded = pickle.loads(message)
except pickle.UnpicklingError:
return
else:
if not isinstance(loaded, tuple):
return
return __offloaded_functions[loaded[1]](*loaded[2], **loaded[3])
uwsgi.mule_msg_hook = _mule_messages_hook
def __offload(func_name: str, mule_or_farm: TypeMuleFarm, *args, **kwargs) -> bool:
# Sends a message to a mule/farm, instructing it
    # to run a function using given arguments.
target = Mule if isinstance(mule_or_farm, int) else Farm
return target(mule_or_farm).send(pickle.dumps(
(
'ucfg_off',
func_name,
args,
kwargs,
)
))
def mule_offload(mule_or_farm: TypeMuleFarm = None) -> Callable:
"""Decorator. Use to offload function execution to a mule or a farm.
:param mule_or_farm: If not set, offloads to a first mule.
"""
if isinstance(mule_or_farm, Mule):
target = mule_or_farm.id
elif isinstance(mule_or_farm, Farm):
target = mule_or_farm.name
else:
target = mule_or_farm
target = target or 1
def mule_offload_(func):
func_name = func.__name__
__offloaded_functions[func_name] = func
return partial(__offload, func_name, target)
return mule_offload_
class Mule:
"""Represents uWSGI Mule.
.. note:: Register mules before using this. E.g.:
``section.workers.set_mules_params(mules=3)``
"""
__slots__ = ['id']
def __init__(self, id: int):
"""
:param id: Mule ID. Enumeration starts with 1.
"""
self.id = id
def __str__(self):
return str(self.id)
def offload(self) -> Callable:
"""Decorator. Allows to offload function execution on this mule.
.. code-block:: python
first_mule = Mule(1)
@first_mule.offload()
def for_mule(*args, **kwargs):
# This function will be offloaded to and handled by mule 1.
...
"""
return mule_offload(self)
@classmethod
def get_current_id(cls) -> int:
"""Returns current mule ID. Returns 0 if not a mule."""
return uwsgi.mule_id()
@classmethod
def get_current(cls) -> Optional['Mule']:
"""Returns current mule object or None if not a mule."""
mule_id = cls.get_current_id()
if not mule_id:
return None
return Mule(mule_id)
@classmethod
def get_message(
cls,
*,
signals: bool = True,
farms: bool = False,
buffer_size: int = 65536,
timeout: int = -1
) -> str:
"""Block until a mule message is received and return it.
This can be called from multiple threads in the same programmed mule.
:param signals: Whether to manage signals.
:param farms: Whether to manage farms.
:param buffer_size:
:param timeout: Seconds.
:raises ValueError: If not in a mule.
"""
return decode(uwsgi.mule_get_msg(signals, farms, buffer_size, timeout))
def send(self, message: Union[str, bytes]) -> bool:
"""Sends a message to a mule(s)/farm.
:param message:
:raises ValueError: If no mules, or mule ID or farm name is not recognized.
"""
return uwsgi.mule_msg(message, self.id)
class Farm:
"""Represents uWSGI Mule Farm.
.. note:: Register farms before using this. E.g.:
``section.workers.set_mules_params(farms=section.workers.mule_farm('myfarm', 2))``
"""
__slots__ = ['name', 'mules']
def __init__(self, name: str, *, mules: List[int] = None):
"""
:param name: Mule farm name.
:param mules: Attached mules.
"""
self.name = name
self.mules = tuple(Mule(mule_id) for mule_id in mules or [])
def __str__(self):
return f"{self.name}: {', '.join(map(str, self.mules))}"
@classmethod
def get_farms(cls) -> List['Farm']:
"""Returns a list of registered farm objects.
.. code-block:: python
farms = Farm.get_farms()
first_farm = farms[0]
first_farm_first_mule = first_farm.mules[0]
"""
return [Farm._from_spec(farm_spec) for farm_spec in _get_farms()]
@classmethod
def _from_spec(cls, spec: str) -> 'Farm':
name, _, mules = spec.partition(':')
return Farm(name=name, mules=[int(mule_id) for mule_id in mules.split(',')])
def offload(self) -> Callable:
"""Decorator. Allows to offload function execution on mules of this farm.
.. code-block:: python
first_mule = Farm('myfarm')
@first_mule.offload()
def for_mule(*args, **kwargs):
# This function will be offloaded to farm `myfarm` and handled by any mule from that farm.
...
"""
return mule_offload(self)
@property
def is_mine(self) -> bool:
"""Returns flag indicating whether the current mule belongs to this farm."""
return uwsgi.in_farm(self.name)
@classmethod
def get_message(cls) -> str:
"""Reads a mule farm message.
* http://uwsgi.readthedocs.io/en/latest/Embed.html
:raises ValueError: If not in a mule
"""
return decode(uwsgi.farm_get_msg())
def send(self, message: Union[str, bytes]):
"""Sends a message to the given farm.
:param message:
"""
return uwsgi.farm_msg(self.name, message)
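# --- Hedged usage sketch (not from the original source): requires running under
# uWSGI with at least one mule registered (e.g. mules=3 in the config).
# @mule_offload()
# def heavy_task(*args, **kwargs):
#     ...  # runs inside mule 1
# heavy_task(42)  # pickles the call, sends it to the mule, returns immediately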
|
11573494
|
from avalon import api
class FusionSelectContainers(api.InventoryAction):
label = "Select Containers"
icon = "mouse-pointer"
color = "#d8d8d8"
def process(self, containers):
import avalon.fusion
tools = [i["_tool"] for i in containers]
comp = avalon.fusion.get_current_comp()
flow = comp.CurrentFrame.FlowView
with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
# Clear selection
flow.Select()
# Select tool
for tool in tools:
flow.Select(tool)
|
11573512
|
from transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel
import torch.nn as nn
class BertForQuestionAnswering(BertPreTrainedModel):
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2) # start/end
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
pooled_output = outputs[1]
# predict start & end position
sequence_output = self.dropout(sequence_output)
qa_logits = self.qa_outputs(sequence_output)
start_logits, end_logits = qa_logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
# classification
pooled_output = self.dropout(pooled_output)
classifier_logits = self.classifier(pooled_output)
if labels is not None:
start_labels, end_labels, class_labels = labels
start_loss = nn.CrossEntropyLoss(ignore_index=-1)(start_logits, start_labels)
end_loss = nn.CrossEntropyLoss(ignore_index=-1)(end_logits, end_labels)
class_loss = nn.CrossEntropyLoss()(classifier_logits, class_labels)
outputs = start_loss + end_loss + 2*class_loss
else:
outputs = (start_logits, end_logits, classifier_logits)
return outputs
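# --- Hedged usage sketch (not from the original source). Assumes a transformers
# version where `from_pretrained(..., num_labels=N)` configures the classification
# head and the tokenizer is callable with `return_tensors="pt"`; the checkpoint
# name is illustrative.
if __name__ == "__main__":
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForQuestionAnswering.from_pretrained("bert-base-uncased", num_labels=3)
    enc = tokenizer("Who wrote it?", "It was written by someone.", return_tensors="pt")
    start_logits, end_logits, class_logits = model(
        enc["input_ids"],
        attention_mask=enc["attention_mask"],
        token_type_ids=enc["token_type_ids"],
    )
    # start/end logits: (batch, seq_len); class_logits: (batch, num_labels)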
|
11573520
|
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
import pandas as pd
import numpy as np
from typing import *
from yo_fluq_ds._fluq._common import *
def default_ax(ax):
if ax is None:
_, ax = plt.subplots(1,1)
return ax
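# Hedged usage note (not from the original source):
# ax = default_ax(None)  # allocates a fresh single-subplot figure and returns its axes
# default_ax(ax)         # an existing Axes instance is returned unchanged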
|
11573545
|
import numpy as np
import sys,os
import cv2
import caffe
dataset="voc"
net_file= dataset+'/MobileNetSSD_deploy.prototxt'
caffe_model=dataset+'/MobileNetSSD.caffemodel'
net = caffe.Net(net_file,caffe_model,caffe.TEST)
CLASSES = ('background','aeroplane', 'bicycle', 'bird', 'boat','bottle', 'bus', 'car', 'cat', 'chair','cow', 'diningtable', 'dog', 'horse','motorbike', 'person', 'pottedplant','sheep', 'sofa', 'train', 'tvmonitor')
input_size=(300,300)
def preprocess(img):
img = cv2.resize(img, input_size)
img = img - 127.5
img = img * 0.007843
return img
def postprocess(img, out):
h = img.shape[0]
w = img.shape[1]
box = out['detection_out'][0,0,:,3:7] * np.array([w, h, w, h])
cls = out['detection_out'][0,0,:,1]
conf = out['detection_out'][0,0,:,2]
return (box.astype(np.int32), conf, cls)
def detect(img):
data = preprocess(img)
net.blobs['data'].data[...] = data.transpose(2, 0, 1)
out = net.forward()
box, conf, cls = postprocess(img, out)
for i in range(len(box)):
p1 = (box[i][0], box[i][1])
p2 = (box[i][2], box[i][3])
cv2.rectangle(img, p1, p2, (0,255,0))
p3 = (max(p1[0], 15), max(p1[1], 15))
title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
cv2.putText(img, title, p3, 1, 1, (0, 255, 0), 1)
return img
def test_camera():
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
if not ret:
break
img = detect(img)
cv2.imshow("img", img)
cv2.waitKey(1)
def testdir(dir="images"):
files=os.listdir(dir)
for file in files:
imgfile=dir+"/"+file
img = cv2.imread(imgfile)
img = detect(img)
cv2.imshow("img", img)
cv2.waitKey()
if __name__=="__main__":
testdir()
#test_camera()
|
11573562
|
import pytest
from flex.constants import (
INTEGER,
NUMBER,
STRING,
)
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.schema import schema_validator
from tests.utils import (
assert_path_not_in_errors,
assert_message_in_errors,
)
def test_min_and_max_length_are_not_required():
"""
Ensure that neither the `minLength` nor the `maxLength` fields of a schema are
required.
"""
try:
schema_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('minLength', errors)
assert_path_not_in_errors('maxLength', errors)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, True),
)
def test_min_length_for_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({'minLength': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'minLength.type',
)
@pytest.mark.parametrize(
'type_',
(
INTEGER,
(INTEGER, NUMBER),
),
)
def test_type_validation_for_min_length_for_invalid_types(type_):
with pytest.raises(ValidationError) as err:
schema_validator({
'minLength': 5,
'type': type_,
})
assert_message_in_errors(
MESSAGES['type']['invalid_type_for_min_length'],
err.value.detail,
'type',
)
@pytest.mark.parametrize(
'type_',
(
STRING,
(INTEGER, STRING),
),
)
def test_type_validation_for_min_length_for_valid_types(type_):
try:
schema_validator({
'minLength': 5,
'type': type_,
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors(
'type',
errors,
)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, True),
)
def test_max_length_for_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({'maxLength': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'maxLength.type',
)
@pytest.mark.parametrize(
'type_',
(
INTEGER,
(INTEGER, NUMBER),
),
)
def test_type_validation_for_max_length_for_invalid_types(type_):
with pytest.raises(ValidationError) as err:
schema_validator({
'maxLength': 5,
'type': type_,
})
assert_message_in_errors(
MESSAGES['type']['invalid_type_for_max_length'],
err.value.detail,
'type',
)
@pytest.mark.parametrize(
'type_',
(
STRING,
(INTEGER, STRING),
),
)
def test_type_validation_for_max_length_for_valid_types(type_):
try:
schema_validator({
'maxLength': 5,
'type': type_,
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors(
'type',
errors,
)
def test_max_length_must_be_greater_than_or_equal_to_min_length():
with pytest.raises(ValidationError) as err:
schema_validator({
'maxLength': 8,
'minLength': 9,
})
assert_message_in_errors(
MESSAGES['max_length']['must_be_greater_than_min_length'],
err.value.detail,
'maxLength',
)
def test_min_length_must_be_positive():
with pytest.raises(ValidationError) as err:
schema_validator({
'minLength': -1,
})
assert_message_in_errors(
MESSAGES['minimum']['invalid'],
err.value.detail,
'minLength.minimum',
)
def test_max_length_must_be_greater_than_0():
with pytest.raises(ValidationError) as err:
schema_validator({
'maxLength': 0,
})
assert_message_in_errors(
MESSAGES['minimum']['invalid'],
err.value.detail,
'maxLength.minimum',
)
def test_min_and_max_length_with_valid_values():
try:
schema_validator({
'minLength': 8,
'maxLength': 10,
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('minLength', errors)
assert_path_not_in_errors('maxLength', errors)
|
11573604
|
from abc import ABC, abstractmethod
class SessionFragment(ABC):
def __init__(self, session):
self.session = session
@abstractmethod
def is_busy(self):
"""Whether the fragment is doing work"""
def close(self):
"""Close the fragment state"""
pass
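# --- Hedged example (not from the original source): `is_busy` is abstract, so a
# concrete fragment must implement it before it can be instantiated.
class IdleFragment(SessionFragment):
    def is_busy(self):
        return False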
|
11573607
|
import os
import re
from datetime import datetime
from functools import cached_property
from http import HTTPStatus
from flask import current_app, abort
from sqlalchemy.orm import joinedload
from template_support.file_storage import FileStorage
from thumbnail.cache import ThumbnailCache
from ..config import Config
from ..queue import TaskQueue
from ..queue.framework import TaskLogStorage
from ..socket.log_watcher import LogWatcher
def get_config() -> Config:
"""Get current application config."""
return current_app.config.get("CONFIG")
def get_task_queue() -> TaskQueue:
"""Get current TaskQueue instance associated with the current application."""
return current_app.config.get("TASK_QUEUE")
def get_log_storage() -> TaskLogStorage:
"""Get current TaskLogStorage instance associated with the current application."""
return current_app.config.get("LOG_STORAGE")
def get_file_storage() -> FileStorage:
"""Get application file storage."""
return current_app.config.get("APP_FILE_STORAGE")
def get_log_watcher() -> LogWatcher:
"""Get current LogWatcher instance."""
return current_app.config.get("LOG_WATCHER")
def get_thumbnails() -> ThumbnailCache:
"""Get current application thumbnail cache."""
return current_app.config.get("THUMBNAILS")
def resolve_video_file_path(file_path):
"""Get path to the video file."""
config = get_config()
return os.path.join(os.path.abspath(config.video_folder), file_path)
_TRUTHY = {"1", "true", ""}
_FALSY = {"0", "false"}
def parse_boolean(args, name):
"""Parse boolean parameter."""
value = args.get(name)
if value is None:
return value
elif value.lower() in _TRUTHY:
return True
elif value.lower() in _FALSY:
return False
else:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' has invalid format (expected {_TRUTHY} or {_FALSY})")
def parse_seq(args, name):
"""Parse sequence of comma-separated values."""
seq = args.get(name, "", type=str)
items = [item.strip() for item in seq.split(",")]
items = [item for item in items if len(item) > 0]
return items
def parse_positive_int(args, name, default=None):
"""Parse positive integer parameter."""
value = args.get(name, default=default, type=int)
if value is not default and value < 0:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' cannot be negative")
return value
def parse_int_list(args, name, default=None):
"""Parse integer list."""
value = args.get(name, default=None, type=str)
if value is None:
return default
result = []
for item in map(str.strip, value.split(",")):
try:
result.append(int(item))
except ValueError:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' must be a comma-separated list of ints")
return result
def parse_positive_float(args, name, default=None):
"""Parse positive float parameter."""
value = args.get(name, default=default, type=float)
if value is not default and value < 0:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' cannot be negative")
return value
DATE_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}$")
def parse_date(args, name, default=None):
"""Parse date parameter."""
    value = args.get(name, default=default)
if value is default:
return value
try:
return datetime.strptime(value, "%Y-%m-%d")
except ValueError as error:
abort(HTTPStatus.BAD_REQUEST.value, str(error))
def parse_enum(args, name, enum, default=None):
"""Parse enum parameter."""
values = set(e.value for e in enum)
value = args.get(name, default=default)
if value is default:
return enum(value) if value is not None else value
if value not in values:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' must be one of {values}")
return enum(value)
def parse_enum_seq(args, name, values, default=None):
"""Parse sequence of enum values."""
raw_value = args.get(name)
if raw_value is None:
return default
result = set()
for value in raw_value.split(","):
if value not in values:
abort(HTTPStatus.BAD_REQUEST.value, f"'{name}' must be a comma-separated sequence of values from {values}")
result.add(value)
return result
def parse_fields(args, name, fields):
"""Parse requested fields list."""
field_names = parse_enum_seq(args, name, values=fields.names, default=())
return {fields.get(name) for name in field_names}
class Fields:
"""Helper class to fetch entity fields."""
def __init__(self, *fields):
self._fields = tuple(fields)
self._index = {field.key: field for field in fields}
@property
def fields(self):
"""List fields."""
return self._fields
@cached_property
def names(self):
"""Set of field names."""
return {field.key for field in self.fields}
def get(self, name):
"""Get field by name."""
return self._index[name]
@staticmethod
def preload(query, fields, *path):
"""Enable eager loading for enumerated fields."""
for field in fields:
full_path = path + (field,)
query = query.options(joinedload(*full_path))
return query
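# --- Hedged usage sketch (not from the original source): the parse_* helpers
# accept any Werkzeug-style MultiDict such as `request.args` inside a view.
# from werkzeug.datastructures import MultiDict
# args = MultiDict({"limit": "10", "ids": "1, 2,3", "active": "true"})
# parse_positive_int(args, "limit")  # -> 10
# parse_int_list(args, "ids")        # -> [1, 2, 3]
# parse_boolean(args, "active")      # -> True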
|
11573612
|
from typing import (
    List,
    Tuple,
    Dict,
    Optional,
)
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
try:
    import constants
except ImportError:
    from .. import constants
from ner import utils
USE_SMART = True
def get_ents(
    example: List[str],
    seq_label: List[str],
) -> List[Tuple[str, List[str]]]:
    '''
    Convert a BIO label sequence into a list of (entity class, entity words) tuples:
    entities[i] = (class, words) for the i-th entity span found in the example.
    '''
entities: list = []
    range_start: Optional[int] = None
seq_label = [] if seq_label is None else seq_label
seq_class = None
    for i, label in enumerate(seq_label):
        if label == 'O' and range_start is not None:
            entities.append((seq_class, example[range_start : i]))
            range_start = None
        elif label.startswith('B'):
            if range_start is not None:
                entities.append((seq_class, example[range_start : i]))
            seq_class = label[2:]
            range_start = i
    # flush an entity that runs up to the end of the sequence
    if range_start is not None:
        entities.append((seq_class, example[range_start:]))
    return entities
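# --- Hedged usage example (not from the original source) ---
# get_ents(["Barack", "Obama", "visited", "Paris"],
#          ["B-PER", "I-PER", "O", "B-LOC"])
# -> [("PER", ["Barack", "Obama"]), ("LOC", ["Paris"])]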
class DictionaryModel(nn.Module):
'''
This model gets a training example and sets the word in the training example to belong
to the class passed in. In particular given a sentence x, and tagged sentence y all the
words in x increment the counter of y by 1
Inference involves selecting the maximum class from the input
Simplistic model
'''
def __init__(
self,
vocab,
tags,
smart=False
):
super(DictionaryModel, self).__init__()
self.smart = smart
self.vocab = vocab
self.tags = tags
self.classifier = Variable(torch.Tensor(
len(vocab),
len(tags)
),
requires_grad=False,
)
# list of class, ent
self.dictionary: List[Tuple[str, str]] = []
    def check_contains(self, ent: List[str], sent: List[str]) -> Optional[Tuple[int, int]]:
        i = 0
        while i < len(sent):
            if ent[0] == sent[i] and (i + len(ent)) <= len(sent):
# if we can go till the end of the sentence
# check is substring
found = True
for j in range(len(ent)):
if ent[j] != sent[i + j]:
found = False
if found:
                    return (i, i + len(ent))
i += 1
return None
def smart_forward_single(self, x: torch.Tensor) -> torch.Tensor:
sent = self.vocab.decode(x.cpu().long())
res = ['O'] * len(sent)
for classifier_class, ent in self.dictionary:
index_tuple = self.check_contains(ent, sent)
if index_tuple is not None:
start, end = index_tuple
res[start] = f'B-{classifier_class}'
for i in range(start + 1, end):
res[i] = f'I-{classifier_class}'
return torch.Tensor([self.tags(token) for token in res]).to(x.device)
def smart_forward(self, x: torch.Tensor, x_chars: torch.Tensor, s_ids: torch.Tensor = None) -> torch.Tensor:
batch = x.shape[0]
res = torch.Tensor(x.shape)
for bi in range(batch):
curr_batch = x[bi]
curr_batch_tags = self.smart_forward_single(curr_batch)
res[bi] = curr_batch_tags
return res
def add_example(self, sentence: torch.Tensor, tags: torch.Tensor) -> None:
if self.smart:
self.smart_add_example(sentence, tags)
else:
self.classifier[sentence, tags] += 1
def smart_add_single(self, sentence: torch.Tensor, tags: torch.Tensor) -> None:
sentence_decode = self.vocab.decode(sentence)
tags_decode = self.tags.decode(tags)
ents = get_ents(sentence_decode, tags_decode)
self.dictionary.extend(ents)
def smart_add_example(self, sentence: torch.Tensor, tags: torch.Tensor) -> None:
batch = sentence.shape[0]
for bi in range(batch):
curr_batch = sentence[bi]
curr_tags = tags[bi]
self.smart_add_single(curr_batch, curr_tags)
def forward(self, x: torch.Tensor, x_chars: torch.Tensor, s_ids: torch.Tensor = None) -> torch.Tensor:
        '''
        Given a sentence x, predict a tag index for every token.
        x has shape (batch, sequence_length)
        returns (batch, sequence_length) of predicted tag indices,
        the argmax over the per-token tag counts
        '''
if self.smart:
return self.smart_forward(x, x_chars, s_ids)
# x (batch, s)
# counts (vocab, tag)
# expected (batch, s, tag)
# s is the index in vocab to select
counts = self.classifier[x.long()]
# (batch size, sequence length, tag_vocab)
# return F.softmax(counts, dim=-1)
return torch.argmax(counts, dim=2)
def compute_uncertainty(self, x: torch.Tensor, x_chars: torch.Tensor, s_ids: torch.Tensor = None) -> torch.Tensor:
batch_size = x.shape[0]
return torch.zeros(batch_size)
class PhraseDictionaryModel(DictionaryModel):
def __init__(
self,
vocab,
tags,
):
super(PhraseDictionaryModel, self).__init__(vocab, tags, smart=True)
|
11573637
|
import pytest
from flex.constants import (
STRING,
)
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from tests.utils import (
assert_message_in_errors,
assert_path_not_in_errors,
)
from flex.loading.common.reference import (
reference_object_validator,
)
def test_ref_is_required():
ref_schema = {}
with pytest.raises(ValidationError) as err:
reference_object_validator(ref_schema)
assert_message_in_errors(
MESSAGES['required']['required'],
err.value.detail,
'required.$ref',
)
@pytest.mark.parametrize(
'value',
({'a': 'abc'}, 1, 1.1, True, ['a', 'b'], None),
)
def test_ref_with_invalid_types(value):
schema = {'$ref': value}
with pytest.raises(ValidationError) as err:
reference_object_validator(schema)
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'$ref.type',
)
def test_valid_reference():
schema = {'$ref': '#/definitions/SomeReference'}
context = {
'definitions': {
'SomeReference': {
'type': STRING,
}
}
}
try:
reference_object_validator(schema, context=context)
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors(
'$ref',
errors,
)
|
11573645
|
import torch
import numpy
import abc
from typing import *
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
class _BaseBuffer(object):
nsteps: int
def __init__(self, nsteps: int):
self.nsteps = nsteps
def append(self, transition):
pass
def rollout(self):
pass
def learn(self, data, nminibatches):
pass
def __len__(self):
pass
class LossArgs:
__slots__ = ["new_log_probs", "old_log_probs", "advantages", "new_vals", "old_vals", "returns", "entropies"]
new_log_probs: torch.Tensor
old_log_probs: torch.Tensor
advantages: torch.Tensor
new_vals: torch.Tensor
old_vals: torch.Tensor
returns: torch.Tensor
entropies: torch.Tensor
class _BaseAlgo(abc.ABC):
_nnet: torch.nn.Module
meta = "BASE"
_trainer: bool = True
_device: torch.device = torch.device("cpu")
@abc.abstractmethod
def __init__(self, *args):
pass
def __call__(self, state: numpy.ndarray, done: numpy.ndarray):
with torch.no_grad():
if self._device == torch.device('cpu'):
return self._nnet.get_action(
torch.from_numpy(numpy.asarray(state, dtype=numpy.float32)),
torch.from_numpy(numpy.asarray(done))
)
else:
return self._nnet.get_action(
torch.from_numpy(numpy.asarray(state, dtype=numpy.float32)).to(self._device),
torch.from_numpy(numpy.asarray(done)).to(self._device)
)
def reset(self):
self._nnet.reset()
def update(self, from_agent):
assert not self._trainer
self.load_state_dict(from_agent.get_state_dict())
return True
def is_trainer(self) -> bool:
return self._trainer
def get_state_dict(self) -> Dict[str, dict]:
assert self._trainer
return {
"nnet": self._nnet.state_dict(),
"optimizer": self._optimizer.state_dict()
}
    def load_state_dict(self, state_dict: Union[Dict[str, dict], dict]) -> tuple:
if state_dict.get("optimizer") is None:
return self._nnet.load_state_dict(state_dict)
info = []
if state_dict.get("optimizer") and hasattr(self, "_optimizer"):
info.append(self._optimizer.load_state_dict(state_dict["optimizer"]))
return (self._nnet.load_state_dict(state_dict["nnet"]),
*info)
def save(self, filename: str):
torch.save(self.get_state_dict(), filename)
def load(self, filename: str):
self.load_state_dict(torch.load(filename, map_location=self._device))
def get_nn_instance(self):
assert self.is_trainer(), "Is not a trainer."
return self._nnet
    def experience(self, transition: dict) -> Optional[list]:
pass
    def train(self, data: Optional[Dict[str, list]]) -> list:
pass
def to(self, device):
return self
def device_info(self) -> str:
if self._device.type == 'cuda':
return torch.cuda.get_device_name(device=self._device)
else:
return 'CPU'
|
11573648
|
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import KRKS
from pyscf.pbc.tdscf import KTDDFT
from pyscf.pbc.tdscf import kproxy_supercell
from pyscf.tdscf.common_slow import eig, format_frozen_k, format_frozen_mol
from test_common import retrieve_m, ov_order, assert_vectors_close
import unittest
from numpy import testing
import numpy
def density_fitting_ks(x):
"""
Constructs density-fitting (Gamma-point) Kohn-Sham objects.
Args:
x (Cell): the supercell;
Returns:
The DF-KS object.
"""
return KRKS(x).density_fit()
class DiamondTestGamma(unittest.TestCase):
"""Compare this (supercell proxy) @Gamma vs reference (pyscf)."""
@classmethod
def setUpClass(cls):
cls.cell = cell = Cell()
# Lift some degeneracies
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
cell.basis = {'C': [[0, (0.8, 1.0)],
[1, (1.0, 1.0)]]}
# cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
cls.model_krks = model_krks = KRKS(cell)
model_krks.kernel()
cls.td_model_krks = td_model_krks = KTDDFT(model_krks)
td_model_krks.nroots = 5
td_model_krks.kernel()
cls.ref_m_krhf = retrieve_m(td_model_krks)
@classmethod
def tearDownClass(cls):
# These are here to remove temporary files
del cls.td_model_krks
del cls.model_krks
del cls.cell
def test_eri(self):
"""Tests all ERI implementations: with and without symmetries."""
e = kproxy_supercell.PhysERI(self.model_krks, "dft", [1, 1, 1], KRKS)
m = e.tdhf_full_form()
testing.assert_allclose(self.ref_m_krhf, m, atol=1e-14)
vals, vecs = eig(m, nroots=self.td_model_krks.nroots)
testing.assert_allclose(vals, self.td_model_krks.e, atol=1e-5)
def test_class(self):
"""Tests container behavior."""
model = kproxy_supercell.TDProxy(self.model_krks, "dft", [1, 1, 1], KRKS)
model.nroots = self.td_model_krks.nroots
assert not model.fast
model.kernel()
testing.assert_allclose(model.e, self.td_model_krks.e, atol=1e-5)
assert_vectors_close(model.xy, numpy.array(self.td_model_krks.xy), atol=1e-12)
class DiamondTestShiftedGamma(unittest.TestCase):
"""Test this (supercell proxy) @non-Gamma: exception."""
@classmethod
def setUpClass(cls):
cls.cell = cell = Cell()
# Lift some degeneracies
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
cell.basis = {'C': [[0, (0.8, 1.0)],
[1, (1.0, 1.0)]]}
# cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
k = cell.get_abs_kpts((.1, .2, .3))
cls.model_krks = model_krks = KRKS(cell, kpts=k).density_fit()
model_krks.kernel()
@classmethod
def tearDownClass(cls):
# These are here to remove temporary files
del cls.model_krks
del cls.cell
def test_class(self):
"""Tests container behavior."""
model = kproxy_supercell.TDProxy(self.model_krks, "dft", [1, 1, 1], density_fitting_ks)
# Shifted k-point grid is not TRS: an exception should be raised
with self.assertRaises(RuntimeError):
model.kernel()
class DiamondTestSupercell2(unittest.TestCase):
"""Compare this (supercell proxy) @2kp vs supercell reference (pyscf)."""
k = 2
@classmethod
def setUpClass(cls):
cls.cell = cell = Cell()
# Lift some degeneracies
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
cell.basis = {'C': [[0, (0.8, 1.0)],
[1, (1.0, 1.0)]]}
# cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
k = cell.make_kpts([cls.k, 1, 1])
# K-points
cls.model_krks = model_krks = KRKS(cell, k)
model_krks.conv_tol = 1e-14
model_krks.kernel()
# Supercell reference
cls.model_rks = model_rks = kproxy_supercell.k2s(model_krks, [cls.k, 1, 1], KRKS)
# Ensure orbitals are real
testing.assert_allclose(model_rks.mo_coeff[0].imag, 0, atol=1e-8)
cls.ov_order = ov_order(model_krks)
# The Gamma-point TD
cls.td_model_rks = td_model_rks = KTDDFT(model_rks)
td_model_rks.kernel()
@classmethod
def tearDownClass(cls):
# These are here to remove temporary files
del cls.td_model_rks
del cls.model_krks
del cls.model_rks
del cls.cell
def test_class(self):
"""Tests container behavior."""
model = kproxy_supercell.TDProxy(self.model_krks, "dft", [self.k, 1, 1], KRKS)
model.nroots = self.td_model_rks.nroots
assert not model.fast
model.kernel()
testing.assert_allclose(model.e, self.td_model_rks.e, atol=1e-5)
if self.k == 2:
vecs = model.xy.reshape(len(model.xy), -1)[:, self.ov_order]
# A loose tolerance here because of a low plane-wave cutoff
assert_vectors_close(vecs, numpy.array(self.td_model_rks.xy).squeeze(), atol=1e-3)
# Test real
testing.assert_allclose(model.e.imag, 0, atol=1e-8)
def test_raw_response(self):
"""Tests the `supercell_response` and whether it slices output properly."""
eri = kproxy_supercell.PhysERI(self.model_krks, "dft", [self.k, 1, 1], KRKS)
ref_m_full = eri.proxy_response()
# Test single
for frozen in (1, [0, -1]):
space = format_frozen_k(frozen, eri.nmo_full[0], len(eri.nmo_full))
space_o = numpy.concatenate(tuple(i[:j] for i, j in zip(space, eri.nocc_full)))
space_v = numpy.concatenate(tuple(i[j:] for i, j in zip(space, eri.nocc_full)))
space_ov = numpy.logical_and(space_o[:, numpy.newaxis], space_v[numpy.newaxis, :]).reshape(-1)
m = kproxy_supercell.supercell_response(
eri.proxy_vind,
numpy.concatenate(space),
eri.nocc_full,
eri.nmo_full,
True,
eri.model_super.supercell_inv_rotation,
eri.proxy_model,
)
ref_m = tuple(i[space_ov][:, space_ov] for i in ref_m_full)
testing.assert_allclose(ref_m, m, atol=1e-12)
# Test pair
for frozen in ((1, 3), ([1, -2], [0, -1])):
space = tuple(format_frozen_k(i, eri.nmo_full[0], len(eri.nmo_full)) for i in frozen)
space_o = tuple(numpy.concatenate(tuple(i[:j] for i, j in zip(s, eri.nocc_full))) for s in space)
space_v = tuple(numpy.concatenate(tuple(i[j:] for i, j in zip(s, eri.nocc_full))) for s in space)
space_ov = tuple(
numpy.logical_and(i[:, numpy.newaxis], j[numpy.newaxis, :]).reshape(-1)
for i, j in zip(space_o, space_v)
)
m = kproxy_supercell.supercell_response(
eri.proxy_vind,
(numpy.concatenate(space[0]), numpy.concatenate(space[1])),
eri.nocc_full,
eri.nmo_full,
True,
eri.model_super.supercell_inv_rotation,
eri.proxy_model,
)
ref_m = tuple(i[space_ov[0]][:, space_ov[1]] for i in ref_m_full)
testing.assert_allclose(ref_m, m, atol=1e-12)
def test_raw_response_ov(self):
"""Tests the `molecular_response` and whether it slices output properly."""
eri = kproxy_supercell.PhysERI(self.model_krks, "dft", [self.k, 1, 1], KRKS)
ref_m_full = eri.proxy_response()
s = sum(eri.nocc_full) * (sum(eri.nmo_full) - sum(eri.nocc_full))
ref_m_full = tuple(i.reshape((s, s)) for i in ref_m_full)
# Test single
for frozen in (1, [0, -1]):
space_ov = format_frozen_mol(frozen, s)
m = kproxy_supercell.supercell_response_ov(
eri.proxy_vind,
space_ov,
eri.nocc_full,
eri.nmo_full,
True,
eri.model_super.supercell_inv_rotation,
eri.proxy_model,
)
ref_m = tuple(i[space_ov, :][:, space_ov] for i in ref_m_full)
testing.assert_allclose(ref_m, m, atol=1e-12)
# Test pair
for frozen in ((1, 3), ([1, -2], [0, -1])):
space_ov = tuple(format_frozen_mol(i, s) for i in frozen)
m = kproxy_supercell.supercell_response_ov(
eri.proxy_vind,
space_ov,
eri.nocc_full,
eri.nmo_full,
True,
eri.model_super.supercell_inv_rotation,
eri.proxy_model,
)
ref_m = tuple(i[space_ov[0], :][:, space_ov[1]] for i in ref_m_full)
testing.assert_allclose(ref_m, m, atol=1e-12)
class DiamondTestSupercell3(DiamondTestSupercell2):
"""Compare this (supercell proxy) @3kp vs supercell reference (pyscf)."""
k = 3
|
11573651
|
from ignite.contrib.handlers.param_scheduler import ParamScheduler
class ManualParamScheduler(ParamScheduler):
"""A class for updating an optimizer's parameter value manually.
Args:
optimizer (`torch.optim.Optimizer`): the optimizer to use
param_name (str): name of optimizer's parameter to update
param_callback (callable): A callback that should return the value.
save_history (bool, optional): whether to log the parameter values
(default=False)
"""
def __init__(self, optimizer, param_name, param_callback, save_history=False):
super(ManualParamScheduler, self).__init__(optimizer, param_name, save_history=save_history)
self.param_callback = param_callback
def get_param(self):
"""Method to get current optimizer's parameter value
"""
return self.param_callback()
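# --- Hedged usage sketch (not from the original source): drive the learning rate
# from an external, mutable source; `model` and `trainer` are assumed to exist.
# import torch
# from ignite.engine import Events
# lr_source = {"lr": 0.1}
# optimizer = torch.optim.SGD(model.parameters(), lr=lr_source["lr"])
# scheduler = ManualParamScheduler(optimizer, "lr", lambda: lr_source["lr"])
# trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)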
|
11573683
|
from keras.layers import Conv1D,Multiply
class GatedConv1D():
    '''Gated Linear Unit (GLU), https://arxiv.org/abs/1612.08083'''
def __init__(self, filters, kernel_size, strides=1, padding='same',kernel_initializer = "he_normal"):
self.filters = filters
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
self.kernel_initializer = kernel_initializer
def call(self,x):
A = Conv1D(self.filters,
kernel_size=self.kernel_size,
padding=self.padding,
strides=self.strides,
kernel_initializer=self.kernel_initializer)(x)
B = Conv1D(self.filters,
kernel_size=self.kernel_size,
padding=self.padding,
strides=self.strides,
kernel_initializer=self.kernel_initializer,
activation="sigmoid",)(x)
H = Multiply()([A,B])
return H
def __call__(self, x,*args, **kwargs):
return self.call(x)
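# --- Hedged usage sketch (not from the original source); shapes are illustrative.
if __name__ == "__main__":
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(100, 64))                        # (timesteps, channels)
    out = GatedConv1D(filters=32, kernel_size=3)(inp)   # -> (None, 100, 32)
    Model(inputs=inp, outputs=out).summary()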
|
11573703
|
import math
import torch
def bitreversal_permutation(n, device=None, dtype=None):
"""Return the bit reversal permutation used in FFT.
By default, the permutation is stored in numpy array.
Parameter:
n: integer, must be a power of 2.
Return:
perm: bit reversal permutation, pytorch tensor of size n
"""
log_n = int(math.log2(n))
assert n == 1 << log_n, 'n must be a power of 2'
perm = torch.arange(n, device=device, dtype=dtype).reshape(1, n)
for i in range(log_n):
perm = torch.vstack(perm.chunk(2, dim=-1))
perm = perm.squeeze(-1)
return perm
def invert_permutation(perm: torch.Tensor) -> torch.Tensor:
"""
Params:
perm: (..., n)
Return:
inverse_perm: (..., n)
"""
# This is simpler but has complexity O(n log n)
# return torch.argsort(perm, dim=-1)
# This is more complicated but has complexity O(n)
arange = torch.arange(perm.shape[-1], device=perm.device).expand_as(perm)
return torch.empty_like(perm).scatter_(-1, perm, arange)
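# --- Hedged usage example (not from the original source) ---
if __name__ == "__main__":
    perm = bitreversal_permutation(8)    # tensor([0, 4, 2, 6, 1, 5, 3, 7])
    inv = invert_permutation(perm)
    x = torch.arange(8)
    assert torch.equal(x[perm][inv], x)  # applying perm then its inverse is identity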
|
11573726
|
import numpy as np
import paddle
# Replay buffer: contents are {obs, act, obs_, reward, done} 5-tuples
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e4)):
self.max_size = max_size
self.cur = 0
self.size = 0
self.states = np.zeros((max_size, state_dim))
self.actions = np.zeros((max_size, action_dim))
self.next_states = np.zeros((max_size, state_dim))
self.rewards = np.zeros((max_size, 1))
self.dones = np.zeros((max_size, 1))
self.device = paddle.get_device()
    # store a transition
def add(self, state, action, next_state, reward, done):
self.states[self.cur] = state
self.actions[self.cur] = action
self.next_states[self.cur] = next_state
self.rewards[self.cur] = reward
self.dones[self.cur] = done
        # advance the write pointer
self.cur = (self.cur + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
    # sample a batch
def sample(self, batch):
ids = np.random.randint(0, self.size, size=batch)
        # return paddle tensors
return (
paddle.to_tensor(self.states[ids], dtype='float32', place=self.device),
paddle.to_tensor(self.actions[ids], dtype='float32', place=self.device),
paddle.to_tensor(self.next_states[ids], dtype='float32', place=self.device),
paddle.to_tensor(self.rewards[ids], dtype='float32', place=self.device),
paddle.to_tensor(self.dones[ids], dtype='float32', place=self.device)
)
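# --- Hedged usage sketch (not from the original source); dimensions are illustrative.
if __name__ == "__main__":
    buffer = ReplayBuffer(state_dim=4, action_dim=2, max_size=100)
    for _ in range(10):
        buffer.add(state=np.random.randn(4), action=np.random.randn(2),
                   next_state=np.random.randn(4), reward=1.0, done=False)
    states, actions, next_states, rewards, dones = buffer.sample(batch=8)
    # each element is a float32 paddle tensor with leading batch dimension 8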
|
11573746
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageFilter
import random
import io
import base64
def getRandomColor():
c1 = random.randint(0,255)
c2 = random.randint(0,255)
c3 = random.randint(0,255)
return (c1,c2,c3)
def getImageBytes(num):
image = Image.new('RGB',(90,30),getRandomColor())
draw = ImageDraw.Draw(image)
font=ImageFont.truetype("static/NotoSansHans-Regular.ttf",size=26)
draw.text((20,0),str(num),getRandomColor(),font=font)
imgByteArr = io.BytesIO()
image.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
return imgByteArr
def RotateImage(angle):
img = Image.open('static/runway.png')
size_s=img.size
img=img.rotate(360-angle)
img=img.resize((32,32))
bIO=io.BytesIO()
img.save(bIO,format='PNG')
img_bytes=bIO.getvalue()
b64_data=base64.b64encode(img_bytes).decode(encoding='utf-8')
return 'data:image/png;base64,'+b64_data
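# --- Hedged usage note (not from the original source) ---
# jpeg_bytes = getImageBytes(4271)  # raw JPEG bytes of a 90x30 captcha image
# img_src = RotateImage(90)         # "data:image/png;base64,..." usable as an <img> src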
|
11573799
|
import warnings
from typing import Collection, Optional, Tuple
from typing import Union
import cftime
import pandas as pd
import xarray as xr
from xcube.core.gridmapping import GridMapping
from xcube.util.assertions import assert_given
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
Bbox = Tuple[float, float, float, float]
TimeRange = Union[Tuple[Optional[str], Optional[str]],
Tuple[Optional[pd.Timestamp], Optional[pd.Timestamp]]]
def select_subset(dataset: xr.Dataset,
*,
var_names: Collection[str] = None,
bbox: Bbox = None,
time_range: TimeRange = None):
"""
Create a subset from *dataset* given *var_names*,
*bbox*, *time_range*.
This is a high-level convenience function that may invoke
    * :func:`select_variables_subset`
    * :func:`select_spatial_subset`
    * :func:`select_temporal_subset`
:param dataset: The dataset.
:param var_names: Optional variable names.
:param bbox: Optional bounding box in the dataset's
CRS coordinate units.
:param time_range: Optional time range
:return: a subset of *dataset*, or unchanged *dataset*
if no keyword-arguments are used.
"""
if var_names is not None:
dataset = select_variables_subset(dataset, var_names=var_names)
if bbox is not None:
dataset = select_spatial_subset(dataset, xy_bbox=bbox)
if time_range is not None:
dataset = select_temporal_subset(dataset, time_range=time_range)
return dataset
def select_variables_subset(dataset: xr.Dataset,
var_names: Collection[str] = None) -> xr.Dataset:
"""
Select data variable from given *dataset* and create new dataset.
:param dataset: The dataset from which to select variables.
:param var_names: The names of data variables to select.
:return: A new dataset. It is empty, if *var_names* is empty.
It is *dataset*, if *var_names* is None.
"""
if var_names is None:
return dataset
dropped_variables = set(dataset.data_vars.keys()).difference(var_names)
if not dropped_variables:
return dataset
return dataset.drop_vars(dropped_variables)
def select_spatial_subset(dataset: xr.Dataset,
ij_bbox: Tuple[int, int, int, int] = None,
ij_border: int = 0,
xy_bbox: Tuple[float, float, float, float] = None,
xy_border: float = 0.,
grid_mapping: GridMapping = None,
geo_coding: GridMapping = None,
xy_names: Tuple[str, str] = None) \
-> Optional[xr.Dataset]:
"""
Select a spatial subset of *dataset* for the
bounding box *ij_bbox* or *xy_bbox*.
    *ij_bbox* and *xy_bbox* must not both be given.
    :param dataset: Source dataset.
    :param ij_bbox: Bounding box (i_min, j_min, i_max, j_max)
        in pixel coordinates.
    :param ij_border: Extra border added to *ij_bbox*
        in number of pixels.
    :param xy_bbox: Bounding box in coordinates of the dataset's CRS.
    :param xy_border: Extra border added to *xy_bbox*,
        in units of the x,y coordinates.
:param grid_mapping: Optional dataset grid mapping.
:param geo_coding: Deprecated. Use *grid_mapping* instead.
:param xy_names: Optional tuple of the x- and y-coordinate
variables in *dataset*. Ignored if *geo_coding* is given.
:return: Spatial dataset subset
"""
if ij_bbox is None and xy_bbox is None:
raise ValueError('One of ij_bbox and xy_bbox must be given')
if ij_bbox and xy_bbox:
raise ValueError('Only one of ij_bbox and xy_bbox can be given')
if geo_coding:
warnings.warn('keyword "geo_coding" has been deprecated,'
' use "grid_mapping" instead',
DeprecationWarning)
grid_mapping = grid_mapping or geo_coding
if grid_mapping is None:
grid_mapping = GridMapping.from_dataset(dataset,
xy_var_names=xy_names)
x_name, y_name = grid_mapping.xy_var_names
x = dataset[x_name]
y = dataset[y_name]
if x.ndim == 1 and y.ndim == 1:
        # Hotfix for #981 and #985
if xy_bbox:
if y.values[0] < y.values[-1]:
ds = dataset.sel(**{
x_name: slice(xy_bbox[0] - xy_border,
xy_bbox[2] + xy_border),
y_name: slice(xy_bbox[1] - xy_border,
xy_bbox[3] + xy_border)
})
else:
ds = dataset.sel(**{
x_name: slice(xy_bbox[0] - xy_border,
xy_bbox[2] + xy_border),
y_name: slice(xy_bbox[3] + xy_border,
xy_bbox[1] - xy_border)
})
return ds
else:
return dataset.isel(**{
x_name: slice(ij_bbox[0] - ij_border,
ij_bbox[2] + ij_border),
y_name: slice(ij_bbox[1] - ij_border,
ij_bbox[3] + ij_border)
})
else:
if xy_bbox:
ij_bbox = grid_mapping.ij_bbox_from_xy_bbox(xy_bbox,
ij_border=ij_border,
xy_border=xy_border)
if ij_bbox[0] == -1:
return None
width, height = grid_mapping.size
i_min, j_min, i_max, j_max = ij_bbox
if i_min > 0 or j_min > 0 or i_max < width - 1 or j_max < height - 1:
x_dim, y_dim = grid_mapping.xy_dim_names
i_slice = slice(i_min, i_max + 1)
j_slice = slice(j_min, j_max + 1)
return dataset.isel({x_dim: i_slice, y_dim: j_slice})
return dataset
def select_temporal_subset(dataset: xr.Dataset,
time_range: TimeRange,
time_name: str = 'time') -> xr.Dataset:
"""
Select a temporal subset from *dataset* given *time_range*.
:param dataset: The dataset. Must include time
:param time_range: Time range given as two time stamps
(start, end) that may be (ISO) strings or datetime objects.
:param time_name: optional name of the time coordinate variable.
Defaults to "time".
:return:
"""
assert_given(time_range, 'time_range')
time_name = time_name or 'time'
if time_name not in dataset:
raise ValueError(f'cannot compute temporal subset: variable'
f' "{time_name}" not found in dataset')
time_1, time_2 = time_range
time_1 = pd.to_datetime(time_1) if time_1 is not None else None
time_2 = pd.to_datetime(time_2) if time_2 is not None else None
if time_1 is None and time_2 is None:
return dataset
if time_2 is not None:
delta = time_2 - time_2.floor('1D')
if delta == pd.Timedelta('0 days 00:00:00'):
time_2 += pd.Timedelta('1D')
try:
return dataset.sel({time_name or 'time': slice(time_1, time_2)})
except TypeError:
calendar = dataset.time.encoding.get('calendar')
time_1 = cftime.datetime(time_1.year, time_1.month, time_1.day,
calendar=calendar)
time_2 = cftime.datetime(time_2.year, time_2.month, time_2.day,
calendar=calendar)
return dataset.sel({time_name or 'time': slice(time_1, time_2)})
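# --- Hedged usage sketch (not from the original source): variable names, bbox
# and dates are illustrative.
# subset = select_subset(
#     dataset,
#     var_names=["chl", "tsm"],
#     bbox=(0.0, 50.0, 5.0, 52.5),  # (x_min, y_min, x_max, y_max) in the dataset CRS
#     time_range=("2021-01-01", "2021-02-01"),
# )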
|
11573830
|
from loguru import logger
from chronos.metadata import Setting, Session
def get_setting(key):
"""Get setting value from database. Return None or value."""
session = Session()
value = session.query(Setting).get(key)
session.close()
return value
def set_setting(key, value):
"""Update setting or create new."""
session = Session()
if get_setting(key) is None:
session.add(Setting(key=key, value=value))
logger.debug("Created new 'setting': {} with value: '{}'", key, value)
else:
session.query(Setting).get(key).value = value
logger.debug("Updated 'setting': {} with value: '{}'", key, value)
session.commit()
session.close()
def get_all_settings():
"""Get all settings from database."""
session = Session()
all_settings = session.query(Setting).all()
session.close()
    return all_settings
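# Minimal usage sketch (illustrative key and value):
#
#   set_setting('poll_interval', '60')
#   row = get_setting('poll_interval')   # Setting row or None
#   interval = row.value if row is not None else None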
|
11573831
|
from __future__ import unicode_literals
from moya import expose
@expose.macro("macro.expose.double")
def double(n):
return n * 2
@expose.macro("macro.expose.tripple")
def tripple(n):
return n * 3
@expose.filter("cube")
def cube(n):
return n ** 3
|
11573841
|
NLP_CONFIG = {'system_entity_recognizer': {}}
ENTITY_RESOLVER_CONFIG = {'model_type': 'exact_match'}
|
11573845
|
import pytest
from computation.automata.pda import (
DPDA,
NPDA,
DPDADesign,
DPDARulebook,
NPDADesign,
NPDARulebook,
PDAConfiguration,
PDARule,
Stack,
)
# check parentheses
rulebook = DPDARulebook(
[
PDARule(1, "(", 2, "$", ["b", "$"]),
PDARule(2, "(", 2, "b", ["b", "b"]),
PDARule(2, ")", 2, "b", []),
PDARule(2, None, 1, "$", ["$"]),
]
)
def test_pda_rule():
rule = PDARule(1, "(", 2, "$", ["b", "$"])
configuration = PDAConfiguration(1, Stack(["$"]))
assert rule.applies_to(configuration, "(")
def test_pda_config():
config1 = PDAConfiguration(3, Stack(["$"]))
config2 = PDAConfiguration(3, Stack(["$"]))
assert config1 == config2
assert set([config1, config2]) == set([config1])
def test_pda_rulebook():
configuration = PDAConfiguration(1, Stack(["$"]))
configuration = rulebook.next_configuration(configuration, "(")
assert configuration.stack == Stack(["$", "b"])
def test_dpda():
dpda = DPDA(PDAConfiguration(1, Stack(["$"])), [1], rulebook)
assert dpda.accepting
assert not (dpda.read_string("(()").accepting)
assert dpda.current_configuration.state == 2
with pytest.raises(RuntimeError):
DPDARulebook([PDARule(1, None, 1, "$", ["$"])]).follow_free_moves(
PDAConfiguration(1, Stack(["$"]))
)
dpda = DPDA(PDAConfiguration(1, Stack(["$"])), [1], rulebook)
assert not (dpda.read_string("(()(").accepting)
assert dpda.read_string("))()").accepting
dpda = DPDA(PDAConfiguration(1, Stack(["$"])), [1], rulebook)
dpda.read_string("())")
assert dpda.current_configuration.state == PDAConfiguration.STUCK_STATE
assert not dpda.accepting
assert dpda.is_stuck
def test_dpda_design():
dpda_design = DPDADesign(1, "$", [1], rulebook)
assert dpda_design.accepts("(((((((((())))))))))")
assert dpda_design.accepts("()(())((()))(()(()))")
assert not (dpda_design.accepts("(()(()(()()(()()))()"))
assert not (dpda_design.accepts("())"))
def test_npda_design():
rulebook = NPDARulebook(
[
PDARule(1, "a", 1, "$", ["a", "$"]),
PDARule(1, "a", 1, "a", ["a", "a"]),
PDARule(1, "a", 1, "b", ["a", "b"]),
PDARule(1, "b", 1, "$", ["b", "$"]),
PDARule(1, "b", 1, "a", ["b", "a"]),
PDARule(1, "b", 1, "b", ["b", "b"]),
PDARule(1, None, 2, "$", ["$"]),
PDARule(1, None, 2, "a", ["a"]),
PDARule(1, None, 2, "b", ["b"]),
PDARule(2, "a", 2, "a", []),
PDARule(2, "b", 2, "b", []),
PDARule(2, None, 3, "$", ["$"]),
]
)
configuration = PDAConfiguration(1, Stack(["$"]))
npda = NPDA([configuration], [3], rulebook)
assert npda.accepting
assert not (npda.read_string("abb").accepting)
    assert (
        PDAConfiguration(1, Stack(["$", "a", "b", "b"]))
        in npda.current_configurations
    )
assert npda.read_character("a").accepting
    assert (
        PDAConfiguration(1, Stack(["$", "a", "b", "b", "a"]))
        in npda.current_configurations
    )
npda_design = NPDADesign(1, "$", [3], rulebook)
assert npda_design.accepts("abba")
assert npda_design.accepts("babbaabbab")
assert not (npda_design.accepts("abb"))
|
11573876
|
import os
import shutil
import tempfile
import numpy as np
import numpy.random as npr
import torch
from torch.utils.data.dataset import Dataset
class TTensorDictDataset(Dataset):
def __init__(self, tensors, in_place_shuffle = True):
super(TTensorDictDataset, self).__init__()
self.in_place_shuffle = in_place_shuffle
self._tensors = tensors
self._size = next(iter(tensors.values())).shape[0]
def __getitem__(self, index):
return {k: self._tensors[k][index] for k in self._tensors}
def __len__(self):
return self._size
def shuffle_(self):
perm = torch.randperm(len(self))
for k in self._tensors:
self._tensors[k] = self._tensors[k][perm]
class NDArrayDictDataset(TTensorDictDataset):
def __init__(self, ndarrays, in_place_shuffle = True):
super(NDArrayDictDataset, self).__init__(
{k: torch.from_numpy(v) for k, v in ndarrays.items()}, in_place_shuffle)
self._ndarrays = ndarrays
def shuffle_(self):
perm = npr.permutation(len(self))
for v in self._ndarrays.values():
np.take(v, perm, axis = 0, out = v)
class MemmapDictDataset(Dataset):
def __init__(self, npzfile):
self.dirname = tempfile.mkdtemp()
self._name2memmap = {
k: self._load_mmap(k, v)
for k, v in npzfile.items()
}
self._size = next(iter(self._name2memmap.values())).shape[0]
def __getitem__(self, index):
rv = {}
for k, v in self._name2memmap.items():
rv[k] = v[index]
return rv
    def _load_mmap(self, name, np_array):
        fname = os.path.join(self.dirname, '%s.npy' % name)
        mmap_ndarray = np.memmap(fname, dtype = np_array.dtype, shape = np_array.shape, mode = 'w+')
        # copy the data into the memory-mapped buffer; rebinding the name
        # directly would silently discard the memmap
        mmap_ndarray[:] = np_array
        return torch.from_numpy(mmap_ndarray)
def __len__(self):
return self._size
def __del__(self):
del self._name2memmap
shutil.rmtree(self.dirname)
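# Minimal usage sketch (illustrative arrays; shapes are assumptions):
#
#   arrays = {'x': np.zeros((8, 3), dtype=np.float32),
#             'y': np.zeros(8, dtype=np.int64)}
#   ds = NDArrayDictDataset(arrays)
#   ds.shuffle_()        # permutes the underlying arrays in place
#   sample = ds[0]       # {'x': float tensor of shape (3,), 'y': int64 scalar}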
|
11573938
|
import os
from setuptools import Extension
from setuptools import setup
setup(
name="greenlet",
version='0.3.1',
description='Lightweight in-process concurrent programming',
long_description=open(
os.path.join(os.path.dirname(__file__), 'README'), 'r').read(),
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="http://bitbucket.org/ambroff/greenlet",
repository='http://bitbucket.org/ambroff/greenlet/',
license="MIT License",
platforms=['any'],
test_suite='tests.test_collector',
headers=['greenlet.h'],
ext_modules=[Extension(name='greenlet', sources=['greenlet.c'])],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules'])
|
11573966
|
from __future__ import print_function
import ee, getpass, time, math, sys
from flask import Flask, render_template, request
from eeMad import imad
from eeWishart import omnibus
ee.Initialize()
app = Flask(__name__, static_url_path='/static')
def simon(path):
images = ee.List(
[ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160305T171543_20160305T171608_010237_00F1FA_49DC')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160329T171543_20160329T171608_010587_00FBF9_B4DE')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160410T171538_20160410T171603_010762_010122_CEF6')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160422T171539_20160422T171604_010937_010677_03F6')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160504T171539_20160504T171604_011112_010BED_80AF')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160516T171540_20160516T171605_011287_011198_FC21')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160528T171603_20160528T171628_011462_011752_F570')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160609T171604_20160609T171629_011637_011CD1_C2F5')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160715T171605_20160715T171630_012162_012DA2_95A1')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160727T171606_20160727T171631_012337_013359_29A6')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160808T171607_20160808T171632_012512_01392E_44C4')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160901T171608_20160901T171633_012862_0144E3_30E5')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20160925T171609_20160925T171634_013212_015050_8FDB')),
ee.call("S1.dB",ee.Image(path+'S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20161007T171609_20161007T171634_013387_0155CD_F513')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20161019T171609_20161019T171634_013562_015B60_27FF')),
ee.call("S1.dB",ee.Image(path+'S1A_IW_GRDH_1SDV_20161031T171609_20161031T171634_013737_0160BD_4FAE')) ] )
return ee.ImageCollection(images)
def simonf(path):
def sel(image):
return ee.Image(image).select(['VV','VH'])
images = ee.List(
[ee.Image(path+'S1A_IW_GRDH_1SDV_20160305T171543_20160305T171608_010237_00F1FA_49DC'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160329T171543_20160329T171608_010587_00FBF9_B4DE'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160410T171538_20160410T171603_010762_010122_CEF6'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160422T171539_20160422T171604_010937_010677_03F6'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160504T171539_20160504T171604_011112_010BED_80AF'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160516T171540_20160516T171605_011287_011198_FC21'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160528T171603_20160528T171628_011462_011752_F570'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160609T171604_20160609T171629_011637_011CD1_C2F5'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160715T171605_20160715T171630_012162_012DA2_95A1'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160727T171606_20160727T171631_012337_013359_29A6'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160808T171607_20160808T171632_012512_01392E_44C4'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160901T171608_20160901T171633_012862_0144E3_30E5'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20160925T171609_20160925T171634_013212_015050_8FDB'),
ee.Image(path+'S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20161007T171609_20161007T171634_013387_0155CD_F513'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20161019T171609_20161019T171634_013562_015B60_27FF'),
ee.Image(path+'S1A_IW_GRDH_1SDV_20161031T171609_20161031T171634_013737_0160BD_4FAE') ] )
return ee.ImageCollection(images.map(sel))
@app.route('/')
def index():
return app.send_static_file('index.html')
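# The three helpers below undo the Sentinel-1 dB scaling: a value x in dB maps
# back to linear power as 10**(x/10), computed here as exp(x * ln(10)/10).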
def get_vv(image):
''' get 'VV' band from sentinel-1 imageCollection and restore linear signal from db-values '''
return image.select('VV').multiply(ee.Image.constant(math.log(10.0)/10.0)).exp()
def get_vh(image):
''' get 'VH' band from sentinel-1 imageCollection and restore linear signal from db-values '''
return image.select('VH').multiply(ee.Image.constant(math.log(10.0)/10.0)).exp()
def get_vvvh(image):
''' get 'VV' and 'VH' bands from sentinel-1 imageCollection and restore linear signal from db-values '''
return image.select('VV','VH').multiply(ee.Image.constant(math.log(10.0)/10.0)).exp()
def get_vvvh_raw(image):
return image.select('VV','VH')
def get_image(current,image):
''' accumulate a single image from a collection of images '''
return ee.Image.cat(ee.Image(image),current)
def clipList(current,prev):
    ''' clip an image to the roi rectangle and append it to the accumulated list '''
    imlist = ee.List(ee.Dictionary(prev).get('imlist'))
    rect = ee.Dictionary(prev).get('rect')
    imlist = imlist.add(ee.Image(current).clip(rect))
    return ee.Dictionary({'imlist':imlist,'rect':rect})
@app.route('/sentinel1.html', methods = ['GET', 'POST'])
def Sentinel1():
if request.method == 'GET':
username = getpass.getuser()
return render_template('sentinel1.html', navbar = 'Hi there %s!'%username,
centerlon = 8.5,
centerlat = 50.05)
else:
try:
startdate = request.form['startdate']
enddate = request.form['enddate']
latitude = float(request.form['latitude'])
longitude = float(request.form['longitude'])
orbit = request.form['orbit']
polarization1 = request.form['polarization']
relativeorbitnumber = request.form['relativeorbitnumber']
if polarization1 == 'VV,VH':
polarization = ['VV','VH']
else:
polarization = polarization1
mode = request.form['mode']
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
how = request.form['how']
            if 'export' in request.form:
export = request.form['export']
else:
export = 'none'
exportname = request.form['exportname']
start = ee.Date(startdate)
finish = ee.Date(enddate)
if how == 'longlat':
point = ee.Geometry.Point([longitude,latitude])
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(point) \
.filterDate(start, finish) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('instrumentMode', mode)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
image = ee.Image(collection.first())
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(image,exportname,
{'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
projection = image.select('VV').projection().getInfo()['crs']
else:
projection = image.select('VH').projection().getInfo()['crs']
downloadpath = image.getDownloadUrl({'scale':1000})
im = get_vv(image)
mapid = im.getMapId({'min':0, 'max':1, 'opacity': 0.5})
return render_template('sentinel1out.html',
mapidclip = mapid['mapid'],
tokenclip = mapid['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = longitude,
centerlat = latitude,
downloadtext = '',
downloadpath = downloadpath,
downloadpathclip = downloadpath,
polarization = polarization1,
projection = projection,
gdexportid = gdexportid,
systemid = systemid,
count = count,
timestamp = timestamp)
elif how=='box':
# overlaps box
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start, finish) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', mode)) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
# test_collection = simonf('TEST/simonf/S1/raw/')
# collection = test_collection \
# .filterBounds(ulPoint) \
# .filterBounds(lrPoint) \
# .filterDate(start, finish) \
# .filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
# .filter(ee.Filter.eq('resolution_meters', 10)) \
# .filter(ee.Filter.eq('instrumentMode', mode)) \
# .filter(ee.Filter.eq('orbitProperties_pass', orbit))
if relativeorbitnumber != 'ANY':
collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
collection = collection.sort('system:time_start')
system_ids = ee.List(collection.aggregate_array('system:id'))
systemidlist = []
for systemid in system_ids.getInfo():
systemidlist.append(systemid)
systemids = str(systemidlist)
acquisition_times = ee.List(collection.aggregate_array('system:time_start'))
count = acquisition_times.length().getInfo()
if count==0:
raise ValueError('No images found')
timestamplist = []
for timestamp in acquisition_times.getInfo():
tmp = time.gmtime(int(timestamp)/1000)
timestamplist.append(time.strftime('%c', tmp))
timestamp = timestamplist[0]
timestamps = str(timestamplist)
relative_orbit_numbers = ee.List(collection.aggregate_array('relativeOrbitNumber_start'))
relativeorbitnumberlist = []
for ron in relative_orbit_numbers.getInfo():
relativeorbitnumberlist.append(ron)
relativeorbitnumbers = str(relativeorbitnumberlist)
image = ee.Image(collection.first())
systemid = image.get('system:id').getInfo()
if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
projection = image.select('VV').projection().getInfo()['crs']
else:
projection = image.select('VH').projection().getInfo()['crs']
# make into collection of VV, VH or VVVH images and restore linear scale
if polarization == 'VV':
pcollection = collection.map(get_vv)
elif polarization == 'VH':
pcollection = collection.map(get_vh)
else:
pcollection = collection.map(get_vvvh)
# pcollection = collection.map(get_vvvh_raw)
# clipped image for display on map
image1 = ee.Image(pcollection.first())
image1clip = image1.clip(rect)
downloadpath = image1.getDownloadUrl({'scale':30})
# clip the image collection and create a single multiband image
compositeimage = ee.Image(pcollection.iterate(get_image,image1clip))
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(compositeimage,exportname,
{'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpathclip = compositeimage.getDownloadUrl({'scale':10})
if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
mapid = image1.select('VV').getMapId({'min': 0, 'max':1, 'opacity': 0.6})
mapidclip = image1clip.select('VV').getMapId({'min': 0, 'max':1, 'opacity': 0.7})
else:
mapid = image1.select('VH').getMapId({'min': 0, 'max':1, 'opacity': 0.6})
mapidclip = image1clip.select('VH').getMapId({'min': 0, 'max':1, 'opacity': 0.7})
return render_template('sentinel1out.html',
mapidclip = mapidclip['mapid'],
tokenclip = mapidclip['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadtext = 'Download image collection intersection',
downloadpath = downloadpath,
downloadpathclip = downloadpathclip,
projection = projection,
systemid = systemid,
count = count,
timestamp = timestamp,
gdexportid = gdexportid,
timestamps = timestamps,
systemids = systemids,
polarization = polarization1,
relativeorbitnumbers = relativeorbitnumbers)
except Exception as e:
return '<br />An error occurred in Sentinel1: %s'%e
@app.route('/sentinel2.html', methods = ['GET', 'POST'])
def Sentinel2():
if request.method == 'GET':
username = getpass.getuser()
return render_template('sentinel2.html', navbar = 'Hi there %s!'%username)
else:
try:
startdate = request.form['startdate']
enddate = request.form['enddate']
desired_projection = request.form['projection']
latitude = float(request.form['latitude'])
longitude = float(request.form['longitude'])
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
            if 'export' in request.form:
export = request.form['export']
else:
export = ' '
exportname = request.form['exportname']
how = request.form['how']
start = ee.Date(startdate)
finish = ee.Date(enddate)
if how == 'longlat':
point = ee.Geometry.Point([longitude,latitude])
elements = ee.ImageCollection('COPERNICUS/S2') \
.filterBounds(point) \
.filterDate(start, finish) \
.sort('CLOUD_COVERAGE_ASSESSMENT', True)
count = elements.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
element = elements.first()
image = ee.Image(element)
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
cloudcover = image.get('CLOUD_COVERAGE_ASSESSMENT').getInfo()
projection = image.select('B2').projection().getInfo()['crs']
if desired_projection != 'default':
projection = desired_projection
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(image,exportname,
{'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
mapid = image.select('B2','B3','B4') \
.getMapId({'min': 0, 'max': 2000, 'opacity': 0.8})
return render_template('sentinel2out.html',
mapidclip = mapid['mapid'],
tokenclip = mapid['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = longitude,
centerlat = latitude,
downloadtext = '',
downloadpath = downloadpath,
downloadpathclip = downloadpath,
projection = projection,
systemid = systemid,
cloudcover = cloudcover,
count = count,
timestamp = timestamp)
elif how=='box':
# overlaps box
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start, finish) \
.sort('CLOUD_COVERAGE_ASSESSMENT', True)
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
image = ee.Image(collection.first())
imageclip = image.clip(rect)
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
cloudcover = image.get('CLOUD_COVERAGE_ASSESSMENT').getInfo()
projection = image.select('B2').projection().getInfo()['crs']
if desired_projection != 'default':
projection = desired_projection
downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(imageclip.select('B2','B3','B4','B8'),exportname,
{'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpathclip = imageclip.select('B2','B3','B4','B8').getDownloadUrl({'scale':10, 'crs':projection})
rgb = image.select('B2','B3','B4')
                rgbclip = imageclip.select('B2','B3','B4')
mapid = rgb.getMapId({'min':0, 'max':2000, 'opacity': 0.6})
mapidclip = rgbclip.getMapId({'min':0, 'max':2000, 'opacity': 1.0})
return render_template('sentinel2out.html',
mapidclip = mapidclip['mapid'],
tokenclip = mapidclip['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadtext = 'Download image intersection',
downloadpath = downloadpath,
downloadpathclip = downloadpathclip,
projection = projection,
systemid = systemid,
cloudcover = cloudcover,
count = count,
timestamp = timestamp)
except Exception as e:
return '<br />An error occurred in Sentinel2: %s'%e
@app.route('/landsat5.html', methods = ['GET', 'POST'])
def Landsat5():
if request.method == 'GET':
username = getpass.getuser()
return render_template('landsat5.html', navbar = 'Hi there %s!'%username)
else:
try:
startdate = request.form['startdate']
enddate = request.form['enddate']
path = int(request.form['path'])
row = int(request.form['row'])
latitude = float(request.form['latitude'])
longitude = float(request.form['longitude'])
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
how = request.form['how']
            if 'export' in request.form:
export = request.form['export']
else:
export = ' '
exportname = request.form['exportname']
start = ee.Date(startdate)
finish = ee.Date(enddate)
if how == 'pathrow':
elements = ee.ImageCollection('LT5_L1T') \
.filterMetadata('WRS_PATH', 'equals', path) \
.filterMetadata('WRS_ROW', 'equals', row) \
.filterDate(start, finish) \
.sort('CLOUD_COVER', True)
count = elements.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
element = elements.first()
image = ee.Image(element)
longitude = (image.get('CORNER_LL_LON_PRODUCT').getInfo()+image.get('CORNER_UR_LON_PRODUCT').getInfo())/2
latitude = (image.get('CORNER_UR_LAT_PRODUCT').getInfo()+image.get('CORNER_LL_LAT_PRODUCT').getInfo())/2
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
projection = image.select('B2').projection().getInfo()['crs']
cloudcover = image.get('CLOUD_COVER').getInfo()
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(image,exportname,
{'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpath = image.getDownloadUrl({'scale':30, 'crs':'EPSG:4326'})
rgb = image.select('B4','B5','B7')
mapid = rgb.getMapId({'min':0, 'max':250, 'opacity': 0.6})
return render_template('landsat5out.html',
mapidclip = mapid['mapid'],
tokenclip = mapid['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = longitude,
centerlat = latitude,
downloadtext = '',
downloadpath = downloadpath,
downloadpathclip = downloadpath,
projection = projection,
systemid = systemid,
cloudcover = cloudcover,
count = count,
timestamp = timestamp)
elif how == 'longlat':
point = ee.Geometry.Point([longitude,latitude])
elements = ee.ImageCollection('LT5_L1T') \
.filterBounds(point) \
.filterDate(start, finish) \
.sort('CLOUD_COVER', True)
count = elements.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
element = elements.first()
image = ee.Image(element)
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
cloudcover = image.get('CLOUD_COVER').getInfo()
projection = image.select('B2').projection().getInfo()['crs']
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(image,exportname,
{'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
mapid = image.select('B4','B5','B7') \
.getMapId({'min': 0, 'max': 250, 'opacity': 0.6})
return render_template('landsat5out.html',
mapidclip = mapid['mapid'],
tokenclip = mapid['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = longitude,
centerlat = latitude,
downloadtext = '',
downloadpath = downloadpath,
downloadpathclip = downloadpath,
projection = projection,
systemid = systemid,
                                       cloudcover = cloudcover,
count = count,
timestamp = timestamp)
elif how=='box':
# overlaps box
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('LT5_L1T') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start, finish) \
.sort('CLOUD_COVER', True)
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found')
image = ee.Image(collection.first())
imageclip = image.clip(rect)
timestamp = ee.Date(image.get('system:time_start')).getInfo()
timestamp = time.gmtime(int(timestamp['value'])/1000)
timestamp = time.strftime('%c', timestamp)
systemid = image.get('system:id').getInfo()
cloudcover = image.get('CLOUD_COVER').getInfo()
projection = image.select('B1').projection().getInfo()['crs']
downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(imageclip,exportname,
{'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
gdexportid = str(gdexport.id)
                    print('****Exporting to Google Drive, task id: %s ' % gdexportid, file=sys.stderr)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
downloadpathclip = imageclip.select('B1','B2','B3','B4','B5','B7').getDownloadUrl({'scale':30, 'crs':projection})
rgb = image.select('B4','B5','B7')
rgbclip = imageclip.select('B4','B5','B7')
mapid = rgb.getMapId({'min':0, 'max':250, 'opacity': 0.6})
mapidclip = rgbclip.getMapId({'min':0, 'max':250, 'opacity': 1.0})
return render_template('landsat5out.html',
mapidclip = mapidclip['mapid'],
tokenclip = mapidclip['token'],
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadtext = 'Download image intersection',
downloadpath = downloadpath,
downloadpathclip = downloadpathclip,
projection = projection,
systemid = systemid,
cloudcover = cloudcover,
count = count,
timestamp = timestamp)
except Exception as e:
return '<br />An error occurred in Landsat5: %s'%e
@app.route('/mad.html', methods = ['GET', 'POST'])
def Mad():
if request.method == 'GET':
username = getpass.getuser()
return render_template('mad.html', navbar = 'Hi there %s!'%username)
else:
try:
path = int(request.form['path'])
row = int(request.form['row'])
niter = int(request.form['iterations'])
start1 = ee.Date(request.form['startdate1'])
finish1 = ee.Date(request.form['enddate1'])
start2 = ee.Date(request.form['startdate2'])
finish2 = ee.Date(request.form['enddate2'])
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
exportname = request.form['exportname']
how = request.form['how']
            if 'export' in request.form:
export = request.form['export']
else:
export = ' '
if how == 'pathrow':
element = ee.ImageCollection('LT5_L1T') \
.filterMetadata('WRS_PATH', 'equals', path) \
.filterMetadata('WRS_ROW', 'equals', row) \
.filterDate(start1, finish1) \
.sort('CLOUD_COVER') \
.first()
image1 = ee.Image(element).select('B1','B2','B3','B4','B5','B7')
timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
timestamp1 = time.strftime('%c', timestamp1)
systemid1 = image1.get('system:id').getInfo()
cloudcover1 = image1.get('CLOUD_COVER').getInfo()
centerlon = (image1.get('CORNER_LL_LON_PRODUCT').getInfo()+image1.get('CORNER_UR_LON_PRODUCT').getInfo())/2
centerlat = (image1.get('CORNER_UR_LAT_PRODUCT').getInfo()+image1.get('CORNER_LL_LAT_PRODUCT').getInfo())/2
element = ee.ImageCollection('LT5_L1T') \
.filterMetadata('WRS_PATH', 'equals', path) \
.filterMetadata('WRS_ROW', 'equals', row) \
.filterDate(start2, finish2) \
.sort('CLOUD_COVER') \
.first()
image2 = ee.Image(element).select('B1','B2','B3','B4','B5','B7')
timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
timestamp2 = time.strftime('%c', timestamp2)
systemid2 = image2.get('system:id').getInfo()
cloudcover2 = image2.get('CLOUD_COVER').getInfo()
elif how=='box':
# overlaps box
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('LT5_L1T') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start1, finish1) \
.sort('CLOUD_COVER', True)
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found for first time interval')
image1 = ee.Image(collection.first()).clip(rect).select('B1','B2','B3','B4','B5','B7')
timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
timestamp1 = time.strftime('%c', timestamp1)
systemid1 = image1.get('system:id').getInfo()
cloudcover1 = image1.get('CLOUD_COVER').getInfo()
collection = ee.ImageCollection('LT5_L1T') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start2, finish2) \
.sort('CLOUD_COVER', True)
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found for second time interval')
image2 = ee.Image(collection.first()).clip(rect).select('B1','B2','B3','B4','B5','B7')
timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
timestamp2 = time.strftime('%c', timestamp2)
systemid2 = image2.get('system:id').getInfo()
cloudcover2 = image2.get('CLOUD_COVER').getInfo()
# iMAD:
B1 = image1.bandNames().get(0)
input_dict = ee.Dictionary({'image1':image1,'image2':image2})
first = ee.Dictionary({'weights':image1.select(ee.String(B1)).multiply(0).add(ee.Image.constant(1)),
'MAD':ee.Image.constant(0)})
# iteration not yet possible, but this is how it goes:
# result = ee.List.repeat(input_dict, nMax).iterate(imad,first)
# fake iteration:
itr = 0
while itr < niter:
result = imad(input_dict,first)
weights = result.get('weights')
first = ee.Dictionary({'weights':weights,'MAD':ee.Image.constant(0)})
itr += 1
# ---------------
MAD = ee.Image(result.get('MAD'))
bNames = MAD.bandNames()
nBands = len(bNames.getInfo())
lastMAD = ee.String(MAD.bandNames().get(nBands-1))
scale = image1.select(ee.String(B1)).projection().nominalScale().getInfo()
downloadpath = MAD.getDownloadUrl({'scale':scale, 'crs':'EPSG:4326'})
mapid = MAD.select(lastMAD).getMapId({'min': -20, 'max': 20, 'opacity': 0.7})
if export == 'export':
# export to Google Drive --------------------------
gdexport = ee.batch.Export.image(MAD,exportname,
{'scale':scale,'driveFolder':'EarthEngineImages'})
gdexportid = str(gdexport.id)
                print('****Exporting to Google Drive, task id: %s ' % gdexportid)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
return render_template('madout.html',
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadpath = downloadpath,
systemid1 = systemid1,
systemid2 = systemid2,
cloudcover1 = cloudcover1,
cloudcover2 = cloudcover2,
timestamp1 = timestamp1,
timestamp2 = timestamp2)
except Exception as e:
return '<br />An error occurred in MAD: %s'%e
@app.route('/wishart.html', methods = ['GET', 'POST'])
def Wishart():
if request.method == 'GET':
username = getpass.getuser()
return render_template('wishart.html', navbar = 'Hi there %s!'%username)
else:
try:
start1 = ee.Date(request.form['startdate1'])
finish1 = ee.Date(request.form['enddate1'])
start2 = ee.Date(request.form['startdate2'])
finish2 = ee.Date(request.form['enddate2'])
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
orbit = request.form['orbit']
polarization1 = request.form['polarization']
relativeorbitnumber = request.form['relativeorbitnumber']
significance = float(request.form['significance'])
if polarization1 == 'VV,VH':
polarization = ['VV','VH']
else:
polarization = polarization1
exportname = request.form['exportname']
            if 'export' in request.form:
export = request.form['export']
else:
export = ' '
            if 'median' in request.form:
median = True
else:
median = False
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
# get the first time point image
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start1, finish1) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', 'IW')) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
if relativeorbitnumber != 'ANY':
collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found for first time interval')
collection = collection.sort('system:time_start')
image1 = ee.Image(collection.first()).clip(rect)
timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
timestamp1= time.gmtime(int(timestamp1['value'])/1000)
timestamp1 = time.strftime('%c', timestamp1)
systemid1 = image1.get('system:id').getInfo()
relativeOrbitNumber1 = int(image1.get('relativeOrbitNumber_start').getInfo())
# get the second time point image
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start2, finish2) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', 'IW')) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
if relativeorbitnumber != 'ANY':
collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
count = collection.toList(100).length().getInfo()
if count==0:
raise ValueError('No images found for second time interval')
collection = collection.sort('system:time_start')
image2 = ee.Image(collection.first()).clip(rect)
timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
timestamp2= time.gmtime(int(timestamp2['value'])/1000)
timestamp2 = time.strftime('%c', timestamp2)
systemid2 = image2.get('system:id').getInfo()
relativeOrbitNumber2 = int(image2.get('relativeOrbitNumber_start').getInfo())
# Wishart change detection
if polarization1=='VV,VH':
image1 = get_vvvh(image1)
image2 = get_vvvh(image2)
elif polarization1=='VV':
image1 = get_vv(image1)
image2 = get_vv(image2)
else:
image1 = get_vh(image1)
image2 = get_vh(image2)
result = ee.Dictionary(omnibus(ee.List([image1,image2]),significance,median))
cmap = ee.Image(result.get('cmap'))
mapid = cmap.getMapId({'min':0, 'max':1 ,'palette':'black,red', 'opacity':0.4})
downloadpath = cmap.getDownloadUrl({'scale':10})
if export == 'export':
# export to Assets ---------------------------------
assexport = ee.batch.Export.image.toAsset(cmap,description="wishartTask", assetId=exportname,scale=10,maxPixels=1e9)
assexportid = str(assexport.id)
                print('****Exporting to Assets, task id: %s ' % assexportid)
assexport.start()
else:
assexportid = 'none'
# --------------------------------------------------
return render_template('wishartout.html',
mapid = mapid['mapid'],
token = mapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadpath = downloadpath,
systemid1 = systemid1,
systemid2 = systemid2,
timestamp1 = timestamp1,
timestamp2 = timestamp2,
relativeOrbitNumber1 = relativeOrbitNumber1,
relativeOrbitNumber2 = relativeOrbitNumber2,
significance = significance,
polarization = polarization1,
assexportid = assexportid)
except Exception as e:
return '<br />An error occurred in wishart: %s'%e
@app.route('/omnibus.html', methods = ['GET', 'POST'])
def Omnibus():
if request.method == 'GET':
username = getpass.getuser()
return render_template('omnibus.html', navbar = 'Hi there %s!'%username,
centerlon = 8.5,
centerlat = 50.05)
else:
try:
startdate = request.form['startdate']
enddate = request.form['enddate']
orbit = request.form['orbit']
polarization1 = request.form['polarization']
relativeorbitnumber = request.form['relativeorbitnumber']
if polarization1 == 'VV,VH':
polarization = ['VV','VH']
else:
polarization = polarization1
significance = float(request.form['significance'])
mode = request.form['mode']
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
            if 'assexport' in request.form:
assexport = request.form['assexport']
else:
assexport = 'none'
            if 'gdexport' in request.form:
gdexport = request.form['gdexport']
else:
gdexport = 'none'
            if 'median' in request.form:
median = True
else:
median = False
assexportname = request.form['assexportname']
gdexportname = request.form['gdexportname']
start = ee.Date(startdate)
finish = ee.Date(enddate)
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start, finish) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', mode)) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
if relativeorbitnumber != 'ANY':
collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
collection = collection.sort('system:time_start')
system_ids = ee.List(collection.aggregate_array('system:id'))
systemidlist = []
for systemid in system_ids.getInfo():
systemidlist.append(systemid)
systemids = str(systemidlist)
acquisition_times = ee.List(collection.aggregate_array('system:time_start'))
count = acquisition_times.length().getInfo()
if count==0:
raise ValueError('No images found')
timestamplist = []
for timestamp in acquisition_times.getInfo():
tmp = time.gmtime(int(timestamp)/1000)
timestamplist.append(time.strftime('%c', tmp))
timestamp = timestamplist[0]
timestamps = str(timestamplist)
relative_orbit_numbers = ee.List(collection.aggregate_array('relativeOrbitNumber_start'))
relativeorbitnumberlist = []
for ron in relative_orbit_numbers.getInfo():
relativeorbitnumberlist.append(ron)
relativeorbitnumbers = str(relativeorbitnumberlist)
image = ee.Image(collection.first())
systemid = image.get('system:id').getInfo()
if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
projection = image.select('VV').projection().getInfo()['crs']
else:
projection = image.select('VH').projection().getInfo()['crs']
# make into collection of VV, VH or VVVH images and restore linear scale
if polarization == 'VV':
pcollection = collection.map(get_vv)
elif polarization == 'VH':
pcollection = collection.map(get_vh)
else:
pcollection = collection.map(get_vvvh)
# get the list of images and clip to roi
pList = pcollection.toList(count)
first = ee.Dictionary({'imlist':ee.List([]),'rect':rect})
imList = ee.Dictionary(pList.iterate(clipList,first)).get('imlist')
# run the algorithm
result = ee.Dictionary(omnibus(imList,significance,median))
cmap = ee.Image(result.get('cmap'))
smap = ee.Image(result.get('smap'))
fmap = ee.Image(result.get('fmap'))
cmaps = ee.Image.cat(cmap,smap,fmap).rename(['cmap','smap','fmap'])
if assexport == 'assexport':
# export to Assets ---------------------------------
assexport = ee.batch.Export.image.toAsset(cmaps,
description='assetExportTask',
assetId=assexportname,scale=10,maxPixels=1e9)
assexportid = str(assexport.id)
                print('****Exporting to Assets, task id: %s ' % assexportid)
assexport.start()
else:
assexportid = 'none'
if gdexport == 'gdexport':
# export to Drive ----------------------------------
gdexport = ee.batch.Export.image.toDrive(cmaps,
description='driveExportTask',
folder = 'EarthEngineImages',
fileNamePrefix=gdexportname,scale=10,maxPixels=1e9)
gdexportid = str(gdexport.id)
                print('****Exporting to Google Drive, task id: %s ' % gdexportid)
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
cmapid = cmap.getMapId({'min': 0, 'max':count-1,'palette':'black,blue,yellow,red', 'opacity': 0.5})
            fmapid = fmap.getMapId({'min': 0, 'max':count//2,'palette':'black,blue,yellow,red', 'opacity': 0.5})
smapid = smap.getMapId({'min': 0, 'max':count-1,'palette':'black,blue,yellow,red', 'opacity': 0.5})
return render_template('omnibusout.html',
mapid = fmapid['mapid'],
token = fmapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadtext = 'Download change maps',
projection = projection,
systemid = systemid,
count = count,
timestamp = timestamp,
                                   assexportid = assexportid,
                                   gdexportid = gdexportid,
timestamps = timestamps,
systemids = systemids,
polarization = polarization1,
relativeorbitnumbers = relativeorbitnumbers)
except Exception as e:
return '<br />An error occurred in omnibus: %s'%e
if __name__ == '__main__':
# import ee
# image = ee.apifunction.ApiFunction.call_("S1.db",ee.Image('TEST/simonf/S1/99/S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195'))
app.run(debug=True, host='0.0.0.0')
|
11574013
|
from dynamic_preferences.types import BooleanPreference, StringPreference, IntegerPreference, ChoicePreference
from dynamic_preferences.preferences import Section
from dynamic_preferences.registries import global_preferences_registry
from dynamic_preferences.users.registries import user_preferences_registry
from YtManagerApp.models import VIDEO_ORDER_CHOICES
from django.conf import settings
import os
# we create some section objects to link related preferences together
hidden = Section('hidden')
general = Section('general')
scheduler = Section('scheduler')
# Hidden settings
@global_preferences_registry.register
class Initialized(BooleanPreference):
section = hidden
name = 'initialized'
default = False
# General settings
@global_preferences_registry.register
class YouTubeAPIKey(StringPreference):
section = general
name = 'youtube_api_key'
default = '<KEY>'
required = True
@global_preferences_registry.register
class AllowRegistrations(BooleanPreference):
section = general
name = 'allow_registrations'
default = True
required = True
@global_preferences_registry.register
class SyncSchedule(StringPreference):
section = scheduler
name = 'synchronization_schedule'
default = '5 * * * *' # hourly
required = True
@global_preferences_registry.register
class SchedulerConcurrency(IntegerPreference):
section = scheduler
name = 'concurrency'
default = 2
required = True
# User settings
@user_preferences_registry.register
class MarkDeletedAsWatched(BooleanPreference):
name = 'mark_deleted_as_watched'
default = True
required = True
@user_preferences_registry.register
class AutoDeleteWatched(BooleanPreference):
name = 'automatically_delete_watched'
default = True
required = True
@user_preferences_registry.register
class AutoDownloadEnabled(BooleanPreference):
name = 'auto_download'
default = True
required = True
@user_preferences_registry.register
class DownloadGlobalLimit(IntegerPreference):
name = 'download_global_limit'
default = -1
required = False
@user_preferences_registry.register
class DownloadGlobalSizeLimit(IntegerPreference):
name = 'download_global_size_limit'
default = -1
required = False
@user_preferences_registry.register
class DownloadSubscriptionLimit(IntegerPreference):
name = 'download_subscription_limit'
default = 5
required = False
@user_preferences_registry.register
class DownloadMaxAttempts(IntegerPreference):
name = 'max_download_attempts'
default = 3
required = True
@user_preferences_registry.register
class DownloadOrder(ChoicePreference):
name = 'download_order'
choices = VIDEO_ORDER_CHOICES
default = 'playlist'
required = True
@user_preferences_registry.register
class DownloadPath(StringPreference):
name = 'download_path'
default = os.path.join(settings.DATA_DIR, 'downloads')
required = False
@user_preferences_registry.register
class DownloadFilePattern(StringPreference):
name = 'download_file_pattern'
default = '${channel}/${playlist}/S01E${playlist_index} - ${title} [${id}]'
required = True
@user_preferences_registry.register
class DownloadFormat(StringPreference):
name = 'download_format'
default = 'bestvideo+bestaudio'
required = True
@user_preferences_registry.register
class DownloadSubtitles(BooleanPreference):
name = 'download_subtitles'
default = True
required = True
@user_preferences_registry.register
class DownloadAutogeneratedSubtitles(BooleanPreference):
name = 'download_autogenerated_subtitles'
default = False
required = True
@user_preferences_registry.register
class DownloadAllSubtitles(BooleanPreference):
name = 'download_subtitles_all'
default = False
required = False
@user_preferences_registry.register
class DownloadSubtitlesLangs(StringPreference):
name = 'download_subtitles_langs'
default = 'en,ro'
required = False
@user_preferences_registry.register
class DownloadSubtitlesFormat(StringPreference):
name = 'download_subtitles_format'
default = ''
required = False
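# Minimal usage sketch (assumes django-dynamic-preferences is configured; the
# key is built from the Section and name pairs defined above):
#
#   from dynamic_preferences.registries import global_preferences_registry
#   prefs = global_preferences_registry.manager()
#   api_key = prefs['general__youtube_api_key']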
|
11574046
|
import os
import copy
import string
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
import torch.nn.functional as F
from librosa import output
class ConfigObject:
def __init__(self, **entries):
self.__dict__.update(entries)
def reserve_pop(x):
    # Reverse a sequence and drop its last element (the original first item)
    return x[::-1][:-1]
def id_generator(size=15, chars=string.ascii_uppercase + string.digits):
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choice(chars) for _ in range(size))
def writer(x, y, out, sr, path):
n = x.shape[0]
for i in range(n):
# Generate a random hash
name = id_generator()
# Create paths
mix_pf = os.path.join(path, name + '_mixture.wav')
clean_pf = os.path.join(path, name + '_clean.wav')
sep_pf = os.path.join(path, name + '_separated.wav')
# Use librosa to write files
output.write_wav(mix_pf, x[i, 0, :].cpu().numpy(), sr)
output.write_wav(clean_pf, y[i, 0, :].cpu().numpy(), sr)
output.write_wav(sep_pf, out[i, 0, :].cpu().numpy(), sr)
class LibriSpeechGenerator(data.Dataset):
"""Pytorch generator for LibriSpeech
dataset and UrbanSound 8k noise"""
def __init__(self, config, X, y = None, mode="noise"):
self.n_samples = config.n_samples
self.mode = mode
# Working condition
if self.mode == "noise" and y is None:
raise ValueError("y should not be None for Noise mode")
# Datasets
self.X, self.y = X, y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, index):
if self.mode == "noise":
return self.X[index, :, :self.n_samples], self.y[index, :, :self.n_samples]
else:
return self.X[index, :, :self.n_samples], self.X[index, :, :self.n_samples]
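# Minimal usage sketch (shapes and config values are assumptions):
#
#   cfg = ConfigObject(n_samples=16000)
#   X = torch.zeros(4, 1, 16000)   # noisy mixtures
#   y = torch.zeros(4, 1, 16000)   # clean references
#   loader = data.DataLoader(LibriSpeechGenerator(cfg, X, y, mode="noise"),
#                            batch_size=2, shuffle=True)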
|
11574075
|
import copy
import tensorflow as tf
import networks.vgg16 as vgg16
import networks.vgg11 as vgg11
import networks.mobilenet_for_cifar as mobilenet_for_cifar
import networks.mobilenet_for_imagenet as mobilenet_for_imagenet
import networks.resnet18 as resnet18
import networks.resnet32 as resnet32
import datasets.cifar100 as cifar100
import datasets.cifar10 as cifar10
import datasets.imagenet as imagenet
import prune_algorithm.prune_vgg11 as prune_vgg11
import prune_algorithm.prune_vgg16 as prune_vgg16
import prune_algorithm.prune_mobilenet_for_cifar as prune_mobilenet_for_cifar
import prune_algorithm.prune_mobilenet_for_imagenet as prune_mobilenet_for_imagenet
import prune_algorithm.prune_resnet32 as prune_resnet32
import prune_algorithm.prune_resnet18 as prune_resnet18
FLAGS = tf.app.flags.FLAGS
## network and dataset
dataset_name = FLAGS.dataset # "cifar10" "cifar100", "imagenet"
network_name = FLAGS.network # "vgg11" "vgg16" "mobilenet_for_cifar" "mobilenet_for_imagenet" "resnet32"
class TrainArgs(object):
def get(self, attr_name):
try:
res = getattr(self, attr_name)
        except AttributeError:
res = None
return res
args = TrainArgs()
def parse_net_and_dataset():
if network_name == "vgg16":
network = vgg16.VGG16()
scope = "vgg_16"
# prune_alg = channel_wise_corr_vgg16
prune_alg = prune_vgg16.PruneVgg16
elif network_name == "vgg11":
network = vgg11.VGG11()
scope = "vgg_11"
prune_alg = prune_vgg11.PruneVgg11
elif network_name == "mobilenet_for_cifar":
network = mobilenet_for_cifar.MobileNetForCifar()
scope = "mobilenet_for_cifar"
prune_alg = prune_mobilenet_for_cifar.PruneMobileNetForCifar
elif network_name == "mobilenet_for_imagenet":
network = mobilenet_for_imagenet.MobileNetForImagenet()
scope = "mobilenet_for_imagenet"
prune_alg = prune_mobilenet_for_imagenet.PruneMobileNetForImagenet
elif network_name == "resnet32":
network = resnet32.ResNet32()
scope = "resnet32"
prune_alg = prune_resnet32.PruneResNet32
elif network_name == "resnet18":
network = resnet18.ResNet18()
scope = "resnet18"
prune_alg = prune_resnet18.PruneResNet18
else:
raise ValueError("unknown network name")
if dataset_name == "cifar100":
dataset = cifar100
elif dataset_name == "cifar10":
dataset = cifar10
elif dataset_name == "imagenet":
dataset = imagenet
else:
raise ValueError("unknown dataset name")
return network, dataset, scope, prune_alg
## parameters for network
if network_name == "vgg11":
if dataset_name == "imagenet":
args.train_batch_size = 128
args.test_batch_size = 100
args.image_size = [224, 224]
args.num_gpus = 2
args.use_bn = False
args.use_bias = True
args.weight_decay = 5e-4
args.staircase = False
args.regularizer = tf.contrib.slim.l2_regularizer
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
args.num_epochs_per_decay = [30, 60, 90]
args.learning_rate_decay_factor = [1, 0.1, 0.01, 0.001]
args.initial_learning_rate = 0.01
args.max_epochs = 120
args.ori_channels_num = [64, 128, 256, 256, 512, 512, 512, 512, 4096, 4096]
## you may wish to use data augmentation as follows when training but only use "mirroring" when pruning
## to improve the performance
args.data_augmentation_args = {"resize": True, "crop_bbox": False, "padding": False,
"bright": False, "mirroring": True,
"mean": [123.68, 116.779, 103.939], "std": [1.0, 1.0, 1.0]}
elif network_name == "vgg16":
if "cifar" in dataset_name:
args.train_batch_size = 128
args.test_batch_size = 100
args.image_size = [32, 32]
args.num_gpus = 1
args.use_bn = True
args.use_bias = True
args.weight_decay = 1.5e-3
args.staircase = False
args.regularizer = tf.contrib.slim.l2_regularizer
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
args.num_epochs_per_decay = 20
args.learning_rate_decay_factor = 0.5
args.initial_learning_rate = 0.1
args.max_epochs = 250
args.ori_channels_num = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512]
## you may wish to set "bright" to True when training but set it to False when pruning to improve
## the performance for VGG16 on cifar
args.data_augmentation_args = {"padding": True, "bright": True, "mirroring": True,
"mean": 120.707, "std": 64.15}
if dataset_name == "cifar10":
args.init_dropout = [0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5]
elif dataset_name == "cifar100":
args.init_dropout = [0.2, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4]
else:
raise ValueError("unknown dataset name")
elif network_name == "mobilenet_for_cifar":
if "cifar" in dataset_name:
args.train_batch_size = 64
args.test_batch_size = 100
args.image_size = [32, 32]
args.num_gpus = 1
args.use_bn = True
args.use_bias = False
args.ori_channels_num = [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]
args.weight_decay = 6e-4
args.staircase = False
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
args.initializer = tf.contrib.layers.variance_scaling_initializer
        args.regularizer = tf.contrib.slim.l2_regularizer  # depth-wise convolutions do not use a regularizer
args.num_epochs_per_decay = 20
args.learning_rate_decay_factor = 0.5
args.initial_learning_rate = 0.1
args.max_epochs = 125
args.data_augmentation_args = {"padding": True, "bright": True, "mirroring": True,
"mean": 120.707, "std": 64.15}
elif network_name == "mobilenet_for_imagenet":
if dataset_name == "imagenet":
batch_size_base = 256
args.train_batch_size = 128
args.test_batch_size = 100
args.image_size = [224, 224]
args.num_gpus = 2
args.use_bn = True
args.use_bias = True
args.ori_channels_num = [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]
args.weight_decay = 4e-5
args.use_nesterov = True
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=False)
args.initializer = lambda: tf.truncated_normal_initializer(stddev=0.09)
        args.regularizer = tf.contrib.slim.l2_regularizer  # depth-wise convolutions do not use a regularizer
args.num_epochs_per_decay = [30, 60, 90]
args.learning_rate_decay_factor = [1, 0.1, 0.01, 0.001]
args.initial_learning_rate = 0.1 * (args.train_batch_size * args.num_gpus / batch_size_base)
args.max_epochs = 120
args.data_augmentation_args = {"crop_bbox": True, "padding": False, "resize": False,
"bright": True, "mirroring": True,
"mean": 127.5, "std": 127.5}
elif network_name == "resnet32":
if "cifar" in dataset_name:
args.train_batch_size = 128
args.test_batch_size = 100
args.image_size = [32, 32]
args.num_gpus = 1
args.use_bias = False
args.weight_decay = 2e-4
args.staircase = True
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=False)
args.initializer = tf.contrib.layers.variance_scaling_initializer
args.regularizer = tf.contrib.slim.l2_regularizer
args.num_epochs_per_decay = [100, 150, 200]
args.learning_rate_decay_factor = [1, 0.1, 0.01, 0.001]
args.initial_learning_rate = 0.1
args.max_epochs = 250
args.data_augmentation_args = {"padding": True, "bright": False, "mirroring": True,
"mean": 120.707, "std": 64.15}
args.block_sizes = [5, 5, 5]
args.ori_channels_num = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64]
args.strides = [1, 2, 2]
args.resnet_version = 2 # 1 or 2
elif network_name == "resnet18":
if dataset_name == "imagenet":
batch_size_base = 256
args.train_batch_size = 256
args.test_batch_size = 100
args.num_gpus = 2
args.use_bias = False
args.weight_decay = 1e-4
args.initializer = lambda: tf.contrib.layers.variance_scaling_initializer()
args.regularizer = tf.contrib.slim.l2_regularizer
args.num_epochs_per_decay = 30
args.initial_learning_rate = 0.1 * args.num_gpus * args.train_batch_size / batch_size_base
args.learning_rate_decay_factor = 0.1
args.max_epochs = 120
args.optimizer = lambda lr: tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
args.staircase = True
args.data_augmentation_args = {"crop_bbox": True, "padding": False, "resize": False,
"bright": False, "mirroring": True,
"mean": [123.675, 116.28, 103.53], "std": [58.395, 57.12, 57.375]}
args.block_sizes = [2, 2, 2, 2]
args.ori_channels_num = [64, 64, 64, 64, 64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512]
args.resnet_version = 1
args.strides = [1, 2, 2, 2]
|
11574081
|
from Utility import logger
class Executor:
def __init__(self):
self.NLUQueue = None
def initialize(self,instructID):
director,env,start,dest,txt,set = instructID.split('_')
if self.NLUQueue:
self.NLUQueue.put(('Director',director))
self.NLUQueue.put(('Start Place',start))
self.NLUQueue.put(('Destination',dest))
self.NLUQueue.put(('Set',set))
self.robot.setRoute(start,dest)
def setRobot(self,Robot):
self.robot = Robot
def _execute(self,Model): pass
def execute(self,Model,instructID):
CaughtError = None
self.initialize(instructID)
try:
self._execute(Model)
        except (KeyError, LookupError, ValueError, AttributeError, RuntimeError, TypeError) as e:
logger.error('%s on %s', e.__class__.__name__, e)
CaughtError = e
if not self.robot.completed:
reward,obs = self.robot.declareGoal()
logger.stageComplete('Declared Goal to complete instruction execution %s',(reward,obs))
        ResultTxt = 'Success' if self.robot.completed else 'Failure'
logger.runComplete("%s in direction following.", ResultTxt)
return self.robot.completed, CaughtError, ResultTxt
class InstructionQueueExecutor(Executor):
def _execute(self,Actions):
for i,action in enumerate(Actions):
logger.info('<%d> %s',i,action)
if self.NLUQueue: self.NLUQueue.put(('Executing',i))
try:
results = action.execute(self.robot)
logger.info('<%d> %s => %r', i,action,results)
            except Warning as e:
logger.warning('<%d> %s => %s', i,action,e)
            except StopIteration as e:
results = e
logger.info('<%d> %s => %r', i,action,results)
logger.info('End of Instruction Execution after <%d>', i)
def test():
import doctest
doctest.testmod()
|
11574116
|
import os, shutil
from django.core.files.base import File
from django.conf import settings
from django.test import TestCase
from django.utils.encoding import force_bytes
from test_app.models import FileTesting, ImageTesting, DependencyTesting, RenameFileTesting
def add_base(path):
return os.path.join(settings.BASE_PATH, path)
class FileBaseTestCase(TestCase):
static_path = add_base("static")
media_path = add_base("media")
def setUp(self):
self.tearDown()
os.makedirs(self.media_path)
shutil.copytree(self.static_path, os.path.join(self.media_path, "static"))
def tearDown(self):
if os.path.exists(self.media_path): shutil.rmtree(self.media_path)
class FileTestCase(FileBaseTestCase):
def test_file_field(self):
instance = FileTesting.objects.create()
# test default static
self.assertEqual(instance.field_1_foo.url, "/static/defaults/foo.txt")
self.assertEqual(instance.bar.url, "/static/defaults/bar.txt")
# test default FieldFile set and processed
self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO\n"))
self.assertEqual(instance.bar.read(), force_bytes("BAR\n"))
self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
field_2_path = instance.field_2.path
self.assertTrue(os.path.isfile(field_2_path))
# test assignment of file
foo_bar = File(open(add_base("media/static/defaults/foo-bar.txt"), 'r'))
instance.field_1 = foo_bar
instance.save()
foo_bar.close()
# make sure default file was not removed
self.assertTrue(os.path.isfile(field_2_path))
# check new content
self.assertEqual(instance.field_1.read(), force_bytes("FOO BAR\n"))
self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO BAR\n"))
instance.field_2.seek(0)
self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
# testing setting default value again
instance.field_2 = None
instance.save()
# make sure previous file was removed
self.assertFalse(os.path.isfile(field_2_path))
self.assertEqual(instance.field_2.read(), force_bytes("foo bar\n"))
# test deletion of file together with instance
field_1_path = instance.field_1.path
field_1_foo_path = instance.field_1_foo.path
field_2_path = instance.field_2.path
self.assertTrue(os.path.isfile(field_1_path))
self.assertTrue(os.path.isfile(field_1_foo_path))
self.assertTrue(os.path.isfile(field_2_path))
instance.delete()
self.assertFalse(os.path.isfile(field_1_path))
self.assertFalse(os.path.isfile(field_1_foo_path))
self.assertFalse(os.path.isfile(field_2_path))
def test_file_cleanup_after_delete(self):
instance = FileTesting.objects.create()
foo_bar = File(open(add_base("media/static/defaults/foo-bar.txt"), 'r'))
instance.field_3 = foo_bar
instance.field_4 = foo_bar
instance.save()
foo_bar.close()
field_3_path = instance.field_3.path
field_4_path = instance.field_4.path
self.assertTrue(os.path.isfile(field_3_path))
self.assertTrue(os.path.isfile(field_4_path))
instance.delete()
# testing cleanup without dependencies
self.assertFalse(os.path.isfile(field_3_path))
# testing keep_orphans=True
self.assertTrue(os.path.isfile(field_4_path))
def test_file_cleanup_after_replace(self):
instance = FileTesting.objects.create()
foo_bar = File(open(add_base("media/static/defaults/foo-bar.txt"), 'r'))
instance.field_3 = foo_bar
instance.field_4 = foo_bar
instance.save()
foo_bar.close()
field_3_path = instance.field_3.path
field_4_path = instance.field_4.path
self.assertTrue(os.path.isfile(field_3_path))
self.assertTrue(os.path.isfile(field_4_path))
foo = File(open(add_base("media/static/defaults/foo.txt"), 'r'))
instance.field_3 = foo
instance.field_4 = foo
instance.save()
foo.close()
# testing cleanup without dependencies
self.assertFalse(os.path.isfile(field_3_path))
# testing keep_orphans=True
self.assertTrue(os.path.isfile(field_4_path))
class ImageTestCase(FileBaseTestCase):
def test_image_field_mimic_django(self):
instance = ImageTesting.objects.create()
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'),
name="lenna_rect.jpg")
instance.image_1 = lenna_rect
instance.image_2 = lenna_rect
instance.save()
lenna_rect.close()
        # make sure width and height values are correct and the same as django's
self.assertEqual(instance.image_1_width, instance.image_2_width)
self.assertEqual(instance.image_1_height, instance.image_2_height)
self.assertEqual(instance.image_2_width, 400)
self.assertEqual(instance.image_2_height, 225)
# make sure values are saved properly
instance = ImageTesting.objects.get(pk=instance.pk)
self.assertEqual(instance.image_2_width, 400)
self.assertEqual(instance.image_2_height, 225)
# make sure image is still there and can properly retrieve dims
self.assertEqual(instance.image_2.width, 400)
self.assertEqual(instance.image_2.height, 225)
self.assertEqual(instance.image_1.url, "/media/image_1/lenna_rect.jpg")
self.assertEqual(instance.image_2.url, "/media/image_2/lenna_rect.jpg")
# test image replacing
lenna_square = File(open(add_base("media/static/images/lenna_square.png"), 'rb'))
instance.image_2 = lenna_square
self.assertTrue(os.path.isfile(add_base("media/image_2/lenna_rect.jpg")))
instance.save()
lenna_square.close()
self.assertFalse(os.path.isfile(add_base("media/image_2/lenna_rect.jpg")))
self.assertEqual(instance.image_2.width, 512)
self.assertEqual(instance.image_2.height, 512)
instance.image_2 = None
instance.save()
self.assertIsNone(instance.image_2_width)
self.assertIsNone(instance.image_2_height)
# remove django's ImageFieldFile manually
instance.image_1.delete()
instance.delete()
self.assertFalse(os.path.isfile(add_base("media/image_2/lenna_square.png")))
def test_wand_image_processor(self):
instance = ImageTesting.objects.create()
lenna_square = File(open(add_base("media/static/images/lenna_square.png"), 'rb'))
instance.image_5 = lenna_square
instance.save()
# make sure conversion went through properly
        self.assertEqual(instance.image_5_jpeg.width, 150)
        self.assertEqual(instance.image_5_jpeg.height, 150)
        # save instance, so files get committed to storage
path = instance.image_5.path
path_jpeg = instance.image_5_jpeg.path
        # check to see that files got committed
self.assertTrue(os.path.isfile(path))
self.assertTrue(os.path.isfile(path_jpeg))
def test_image_processor(self):
instance = ImageTesting.objects.create()
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
instance.image_3 = lenna_rect
instance.save()
# make sure conversion went through properly
        self.assertEqual(instance.image_3_png.width, 200)
        self.assertEqual(instance.image_3_png.height, 112)
        # save instance, so files get committed to storage
path = instance.image_3.path
path_png = instance.image_3_png.path
        # check to see that files got committed
self.assertTrue(os.path.isfile(path))
self.assertTrue(os.path.isfile(path_png))
# make sure dependency gets reattached as expected
instance = ImageTesting.objects.get(pk=instance.pk)
        self.assertEqual(instance.image_3_png.width, 200)
        self.assertEqual(instance.image_3_png.height, 112)
self.assertTrue(os.path.isfile(path))
self.assertTrue(os.path.isfile(path_png))
# test problematic processor (JPEG2000 is missing a required library)
instance.image_4 = lenna_rect
instance.save()
        # check to see that files got committed
        # It is possible that `libjpeg` isn't installed, which will cause the test to fail
        self.assertEqual(instance.image_4_jpeg2000.width, 400)
        self.assertEqual(instance.image_4_jpeg2000.height, 225)
lenna_rect.close()
# delete instance and check if everything is cleaned up
instance.delete()
self.assertFalse(os.path.isfile(path))
self.assertFalse(os.path.isfile(path_png))
def test_self_dependency(self):
instance = DependencyTesting.objects.create()
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
instance.image_1 = lenna_rect
instance.save()
lenna_rect.close()
self.assertEqual(instance.image_1.width, 50)
self.assertEqual(
instance.image_1.url,
"/media/test_app/dependencytesting/%s/image_1.bmp" % instance.pk)
self.assertEqual(instance.image_1_gif.width, 50)
self.assertEqual(
instance.image_1_gif.url,
"/media/test_app/dependencytesting/%s/image_1_gif.gif" % instance.pk)
instance.delete()
def test_value_restoration_1(self):
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
text_file = File(open(add_base("media/static/defaults/foo.txt"), 'rb'))
instance = DependencyTesting.objects.create()
instance.image_1 = lenna_rect
instance.save()
lenna_rect.close()
image_1 = instance.image_1
image_1_gif = instance.image_1_gif
instance.image_1 = text_file
instance.save()
text_file.close()
self.assertIs(instance.image_1, image_1)
self.assertIs(instance.image_1_gif, image_1_gif)
instance.delete()
def test_value_restoration_2(self):
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
text_file = File(open(add_base("media/static/defaults/foo.txt"), 'rb'))
instance = DependencyTesting.objects.create()
instance.image_2 = lenna_rect
instance.save()
lenna_rect.close()
image_3 = instance.image_3
image_4 = instance.image_4
# restores values since new file is a text file that cannot be processed
instance.image_2 = text_file
instance.save()
text_file.close()
self.assertEqual(instance.image_3, image_3)
self.assertEqual(instance.image_4, image_4)
self.assertEqual(instance.image_3.path, image_3.path)
self.assertEqual(instance.image_4.path, image_4.path)
instance.delete()
def test_forward_dependency(self):
instance = DependencyTesting.objects.create()
lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
instance.image_3 = lenna_rect
instance.image_4 = lenna_rect
instance.save()
image_3_path = instance.image_3.path
image_4_path = instance.image_4.path
self.assertEqual(instance.image_3.width, 400)
self.assertEqual(instance.image_4.width, 400)
self.assertEqual(
instance.image_3.url,
"/media/test_app/dependencytesting/%s/image_3.jpg" % instance.pk)
self.assertEqual(
instance.image_4.url,
"/media/test_app/dependencytesting/%s/image_4.jpg" % instance.pk)
instance.image_2 = lenna_rect
self.assertTrue(os.path.isfile(image_3_path))
self.assertTrue(os.path.isfile(image_4_path))
instance.save()
lenna_rect.close()
self.assertEqual(instance.image_3.width, 100)
self.assertEqual(instance.image_4.width, 150)
# forward dependencies on django's FileFields will also do the cleanup
self.assertFalse(os.path.isfile(image_3_path))
self.assertFalse(os.path.isfile(image_4_path))
instance.delete()
def test_dependency_error(self):
instance = ImageTesting()
image_1 = instance._meta.get_field('image_1')
image_2 = instance._meta.get_field('image_2')
self.assertRaises(AssertionError, image_2.manager.dependencies[0].set_field, image_1)
def test_rename_file_testing(self):
instance = RenameFileTesting()
lenna = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
instance.label = 'foo'
instance.dynamic_name_file = lenna
instance.save()
self.assertEqual(instance.dynamic_name_file.url,
"/media/test_app/renamefiletesting/foo.jpg")
foo_path = instance.dynamic_name_file.path
self.assertTrue(os.path.isfile(foo_path))
instance.label = "bar"
instance.save()
self.assertEqual(instance.dynamic_name_file.url,
"/media/test_app/renamefiletesting/bar.jpg")
bar_path = instance.dynamic_name_file.path
self.assertNotEqual(foo_path, bar_path)
self.assertFalse(os.path.isfile(foo_path))
self.assertTrue(os.path.isfile(bar_path))
instance.delete()
self.assertFalse(os.path.isfile(bar_path))
|
11574186
|
import Program.GlobalVar as gv
from CommonDef.DefStr import *
from Program.Common import *
from Program.ProcessData import *
from builtins import int
from numpy import NaN
from Statistics_TechIndicators.CalcStatistics import *
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import *
from sklearn.svm import *
from sklearn.naive_bayes import *
from sklearn.neural_network import *
from sklearn.tree import *
from sklearn.gaussian_process import *
from sklearn import metrics
from imblearn.over_sampling import RandomOverSampler
from attr._make import validate
def PredSoarCrashA(param):
# CrashA param
neighbor_size = 5
pred_days = 3
change_rate = 5
validateDataSize = 3
IsSoar = False
print("Process Data...")
df = TransToAdjOHLCbyAdjC(GetStockPriceVolumeData("DIS", gv.StockDataPoolPath, "2000-1-3", "2018-6-01", True))
df = AddSMAIndictor(df, window=5, DropNan = False)
df = AddBollingerBandsIndictor(df, window=5, DropNan = False)
df = AddKDJIndictor(df, window=5, DropNan = False)
#print(df)
df = AddNeighborFeatures(df, neighbor_size, DropNan = True)
#print(df)
if IsSoar:
SoarOrCrash = strIsSoar
RollingVal = pandas.Series.rolling(df[strClose], window = pred_days, center = False).min()
else:
SoarOrCrash = strIsCrash
RollingVal = pandas.Series.rolling(df[strClose], window = pred_days, center = False).max()
IsChangeN = pandas.Series(numpy.zeros(shape=(df[strClose].count()),dtype=bool), index=df.index)
IsChangeN.rename(SoarOrCrash+'_'+str(change_rate)+'%', inplace=True)
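    # Labeling sketch: day i is marked True when the rolling extreme of the close
    # over the window ending pred_days ahead moves by more than change_rate percent
    # (downward for crash, upward for soar) relative to day i's close.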
tmp_index = 0
for i,v in IsChangeN.items():
pred_index = tmp_index+pred_days
if pred_index < IsChangeN.count():
change = (RollingVal.iloc[pred_index] - df[strClose][i]) / df[strClose][i] *100
if (change < -1*change_rate and not IsSoar) or (change > change_rate and IsSoar):
IsChangeN[i] = True
else:
IsChangeN[i] = IsChangeN.iloc[tmp_index-1]
tmp_index+=1
#print(df)
#print(IsChangeN)
    print('DataSize: {0}, PositiveSize: {1}'.format(len(IsChangeN), sum(IsChangeN)))
print("Process Data finished.")
#OutputData = pandas.concat([ df, IsChangeN], axis=1)
#OutputData.to_csv("D:\\PredSoarCrashA_data.csv", sep=',');
print("split Tr, Ts Data...")
ValidateDataList = []
TempData = df.copy()
    TempTarget = IsChangeN.copy()
SplitSize = int(len(df.index) / validateDataSize)
print('SplitSize: {0}, Total Size: {1}'.format(SplitSize, len(df.index)))
for i in range(validateDataSize-1):
data_t = TempData[:SplitSize]
target_t = TempTarget[:SplitSize]
ValidateDataList.append({strData: data_t, strTarget: target_t})
TempData = TempData[SplitSize:]
TempTarget = TempTarget[SplitSize:]
ValidateDataList.append({strData: TempData, strTarget: TempTarget})
"""
x_train, x_test, y_train, y_test = train_test_split(df, IsChangeN, test_size=0.5)
ValidateDataList.append({strData: x_train, strTarget: y_train})
ValidateDataList.append({strData: x_test, strTarget: y_test})
"""
#print(ValidateDataList)
print("split Tr, Ts over.")
print("Training & Testing Model...")
ValidateResult = []
for index in range(len(ValidateDataList)-1):
print("Training & Testing Model{0}...".format(index))
x_train = ValidateDataList[index][strData]
y_train = ValidateDataList[index][strTarget]
x_test = ValidateDataList[index+1][strData]
y_test = ValidateDataList[index+1][strTarget]
# OverSampling
#ros = RandomOverSampler(random_state=0)
#x_train, y_train = ros.fit_sample(x_train, y_train)
#print(len(y_train))
#print(sum(y_train))
scaler = preprocessing.StandardScaler().fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
#clf= KNeighborsClassifier(n_neighbors=3)
#clf = LinearSVC(random_state=0)
#clf = GaussianNB()
#clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf = DecisionTreeClassifier(random_state=0)
#clf = GaussianProcessClassifier(kernel=1.0 * kernels.RBF(length_scale=1.0))
clf.fit(x_train, y_train)
predict_result = clf.predict(x_test).tolist()
ValidateResult.append({strPredictVal: predict_result, strAnsVal: y_test.tolist()})
print("Training & Testing Model{0} finished".format(index))
print("Training & Testing finished.")
#print(ValidateResult)
total_result = {strPredictVal:[], strAnsVal:[]}
for index in range(len(ValidateResult)):
total_result[strPredictVal] = total_result[strPredictVal] + ValidateResult[index][strPredictVal]
total_result[strAnsVal] = total_result[strAnsVal] + ValidateResult[index][strAnsVal]
#print(total_result)
ShowSensitivitySpecificity(total_result[strAnsVal], total_result[strPredictVal])
PredSoarCrashFuncDict = {
strPredSoarCrashA: {strFuncName: PredSoarCrashA, strParam:{}},
}
|
11574214
|
from collections import deque
def splitby(pred, seq):
trues = deque()
falses = deque()
iseq = iter(seq)
def pull(source, pred, thisval, thisbuf, otherbuf):
while 1:
while thisbuf:
yield thisbuf.popleft()
            try:
                newitem = next(source)
            except StopIteration:
                return  # required under PEP 479 (Python 3.7+): end this generator cleanly
            # uncomment next line to show that source is processed only once
            # print("pulled", newitem)
if pred(newitem) == thisval:
yield newitem
else:
otherbuf.append(newitem)
true_iter = pull(iseq, pred, True, trues, falses)
false_iter = pull(iseq, pred, False, falses, trues)
return true_iter, false_iter
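# Minimal usage sketch (illustrative addition, not part of the original module):
# splitby partitions a single pass over the source into two lazy iterators.
if __name__ == '__main__':
    evens, odds = splitby(lambda n: n % 2 == 0, range(10))
    print(list(evens))  # -> [0, 2, 4, 6, 8]
    print(list(odds))   # -> [1, 3, 5, 7, 9]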
|
11574239
|
import os
import tempfile
from pathlib import Path
def local_file_path(name):
directory = tempfile.mkdtemp()
return str(Path(directory, name))
def remove_file(file_path):
os.remove(file_path)
|
11574249
|
from unittest import TestCase
from svtools.bedpe import Bedpe
from svtools.cluster import Cluster
class ClusterTests(TestCase):
def test_can_add(self):
bedpe = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
b = Bedpe(bedpe)
c = Cluster()
c.chrom_a = b.c1
c.chrom_b = b.c2
c.min_a = b.s1
c.max_a = b.e1
c.min_b = b.s2
c.max_b = b.e2
c.strand_a = b.o1
c.strand_b = b.o2
self.assertTrue(c.can_add(b, 1))
c.size = 1
c.sv_event = 'DEL'
self.assertFalse(c.can_add(b, 1))
c.sv_event = 'BND'
self.assertTrue(c.can_add(b, 1))
c.chrom_a = 'X'
self.assertFalse(c.can_add(b, 1))
c.chrom_a = b.c1
c.chrom_b = 'X'
self.assertFalse(c.can_add(b, 1))
c.chrom_b = b.c2
c.min_a = 305
self.assertFalse(c.can_add(b, 1))
c.min_a = b.s1
c.max_a = 150
self.assertFalse(c.can_add(b, 1))
c.max_a = b.e1
c.min_b = 405
self.assertFalse(c.can_add(b, 1))
        c.min_b = b.s2
c.max_b = 150
self.assertFalse(c.can_add(b, 1))
def test_add(self):
bedpe1 = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
b1 = Bedpe(bedpe1)
        bedpe2 = [ '1', '195', '305', '2', '295', '405', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.3' ]
b2 = Bedpe(bedpe2)
c = Cluster()
c.add(b1, None)
self.assertEqual(c.size, 1)
self.assertEqual(c.sv_event, 'BND')
self.assertEqual(c.filter, '0.2')
self.assertEqual(c.chrom_a, '1')
self.assertEqual(c.min_a, 200)
self.assertEqual(c.max_a, 300)
self.assertEqual(c.chrom_b, '2')
self.assertEqual(c.min_b, 300)
self.assertEqual(c.max_b, 400)
self.assertEqual(c.strand_a, '+')
self.assertEqual(c.strand_b, '-')
c.add(b2, None)
self.assertEqual(c.size, 2)
self.assertEqual(c.sv_event, 'BND')
self.assertEqual(c.filter, '0.3')
self.assertEqual(c.chrom_a, '1')
self.assertEqual(c.min_a, 195)
self.assertEqual(c.max_a, 305)
self.assertEqual(c.chrom_b, '2')
self.assertEqual(c.min_b, 295)
self.assertEqual(c.max_b, 405)
self.assertEqual(c.strand_a, '+')
self.assertEqual(c.strand_b, '-')
def test_get_cluster_string(self):
bedpe = [ '1', '200', '300', '2', '300', '400', '777_1', '57', '+', '-', 'BND', 'PASS', '.', '.', '.', '.', '.', '.', 'MISSING', 'SVTYPE=BND;AF=0.2' ]
b = Bedpe(bedpe)
c = Cluster()
with self.assertRaises(ValueError):
c.get_cluster_string()
c.add(b, None)
self.assertEqual(c.get_cluster_string(), str(b))
|
11574265
|
from flask import Flask
from flask import request
from flask import jsonify
app = Flask(__name__)
import json
from jittor.utils.pytorch_converter import convert
@app.route('/', methods=["GET", "POST"])
def hello():
    data = request.data.decode("utf-8")
try:
data = json.loads(data)
src = data["src"]
pjmap = json.loads(data["pjmap"])
jt_src = convert(src, pjmap)
except Exception as e:
jt_src = str(e)
response = jsonify(jt_src=jt_src)
# Enable Access-Control-Allow-Origin
response.headers.add("Access-Control-Allow-Origin", "*")
return response
if __name__ == '__main__':
app.run(host="0.0.0.0")
|
11574275
|
import math
from glob import glob
import tensorflow as tf
from tensorflow.keras import initializers
import numpy as np
import random
from utils import *
class RelationModule(tf.keras.Model):
def __init__(self, channels=128, output_dim=128, key_dim=128, **kwargs):
super(RelationModule, self).__init__(**kwargs)
self.key_dim = channels
self.output_dim = channels
self.channels = channels
self.key = tf.keras.layers.Conv2D(output_dim, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.query = tf.keras.layers.Conv2D(key_dim, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.value = tf.keras.layers.Conv2D(key_dim, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.projection = tf.keras.layers.Conv2D(channels, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
def call(self, inputs):
f_k = tf.reshape(self.key(inputs), [
inputs.shape[0], inputs.shape[1]*inputs.shape[2], self.key_dim])
f_q = tf.reshape(self.query(inputs), [
inputs.shape[0], inputs.shape[1]*inputs.shape[2], self.key_dim])
f_q = tf.transpose(f_q, perm=[0, 2, 1])
f_v = tf.reshape(self.value(inputs), [
inputs.shape[0], inputs.shape[1]*inputs.shape[2], self.output_dim])
attention_weight = tf.matmul(
f_k, f_q)/(inputs.shape[1]*inputs.shape[2])
out = tf.matmul(tf.transpose(attention_weight, perm=[0, 2, 1]), f_v)
out = tf.reshape(
out, [inputs.shape[0], inputs.shape[1], inputs.shape[2], self.output_dim])
out = self.projection(out)
return out
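# Shape sketch for RelationModule.call (explanatory note; B = batch, N = H*W):
#   f_k: [B, N, key_dim], f_q (after transpose): [B, key_dim, N], f_v: [B, N, output_dim]
#   attention_weight = matmul(f_k, f_q) / N   -> [B, N, N]
#   out = matmul(attention_weight^T, f_v)     -> [B, N, output_dim]
#   then reshaped to [B, H, W, output_dim] and projected back to `channels` via 1x1 conv.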
class Discriminator(tf.keras.Model):
def __init__(self, n_filters=32, n_hidden=128, layout_dim=(28, 28), render=layout_bbox, **kwargs):
super(Discriminator, self).__init__(**kwargs)
self.layout_dim = layout_dim
self.render = render
self.act = tf.keras.layers.LeakyReLU(alpha=0.2)
self.conv1 = tf.keras.layers.Conv2D(32, (5, 5), input_shape=layout_dim, strides=(
2, 2), padding='valid', kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn1 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv2 = tf.keras.layers.Conv2D(32*2, (5, 5), strides=(2, 2), padding='valid', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn2 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.flatten = tf.keras.layers.Flatten()
self.fc1 = tf.keras.layers.Dense(512, kernel_initializer=initializers.RandomNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn3 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.fc2 = tf.keras.layers.Dense(1, kernel_initializer=initializers.RandomNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
def call(self, inputs):
x = self.render(inputs, self.layout_dim[0], self.layout_dim[1])
x = self.act(self.bn1(self.conv1(x)))
x = self.act(self.bn2(self.conv2(x)))
x = self.flatten(x)
x = self.act(self.bn3(self.fc1(x)))
out = self.fc2(x)
return out
class Generator(tf.keras.Model):
def __init__(self, n_filters=128, output_dim=2, n_component=128, n_class=1, include_probability=False, **kwargs):
super(Generator, self).__init__(**kwargs)
self.n_filters = n_filters
self.output_dim = output_dim
self.n_component = n_component
self.n_class = n_class
self.include_probability = include_probability
self.act = tf.keras.layers.ReLU()
self.conv1_1 = tf.keras.layers.Conv2D(n_filters//4, (1, 1), input_shape=(self.n_component, 1, self.n_class+self.output_dim), strides=(
1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn1_1 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv1_2 = tf.keras.layers.Conv2D(n_filters//16, (1, 1), strides=(1, 1), padding='same',
kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn1_2 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv1_3 = tf.keras.layers.Conv2D(n_filters//16, (1, 1), strides=(1, 1), padding='same',
kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn1_3 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv1_4 = tf.keras.layers.Conv2D(n_filters//4, (1, 1), strides=(1, 1), padding='same',
kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn1_4 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.relation1 = RelationModule(
channels=n_filters//4, output_dim=n_filters//4, key_dim=n_filters//4)
self.relation2 = RelationModule(
channels=n_filters//4, output_dim=n_filters//4, key_dim=n_filters//4)
self.relation3 = RelationModule(
channels=n_filters, output_dim=n_filters, key_dim=n_filters)
self.relation4 = RelationModule(
channels=n_filters, output_dim=n_filters, key_dim=n_filters)
self.bn_x1 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x2 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x3 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x4 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x5 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x6 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x7 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.bn_x8 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv2_1 = tf.keras.layers.Conv2D(n_filters, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn2_1 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv2_2 = tf.keras.layers.Conv2D(n_filters//4, (1, 1), strides=(1, 1), padding='same',
kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn2_2 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv2_3 = tf.keras.layers.Conv2D(n_filters//4, (1, 1), strides=(1, 1), padding='same',
kernel_initializer=initializers.TruncatedNormal(stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn2_3 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.conv2_4 = tf.keras.layers.Conv2D(n_filters, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
self.bn2_4 = tf.keras.layers.BatchNormalization(
epsilon=1e-5, momentum=0.9)
self.geometric_param = tf.keras.layers.Conv2D(output_dim, (1, 1), strides=(
1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(stddev=0.001, mean=0.0), bias_initializer=initializers.constant(0.0))
self.class_score = tf.keras.layers.Conv2D(n_class, (1, 1), strides=(1, 1), padding='same', kernel_initializer=initializers.TruncatedNormal(
stddev=0.02, mean=0.0), bias_initializer=initializers.constant(0.0))
def call(self, x):
x = tf.reshape(x, [x.shape[0], self.n_component,
1, self.n_class+self.output_dim])
h1_0 = self.bn1_1(self.conv1_1(x))
h1_1 = self.act(self.bn1_2(self.conv1_2(x)))
h1_2 = self.act(self.bn1_3(self.conv1_3(h1_1)))
h1_3 = self.bn1_4(self.conv1_4(h1_2))
embedding = self.act(tf.add(h1_0, h1_3))
embedding = tf.reshape(
embedding, [x.shape[0], self.n_component, 1, 256])
context = self.act(self.bn_x2(
tf.add(embedding, self.bn_x1(self.relation1(embedding)))))
context = self.act(self.bn_x4(
tf.add(context, self.bn_x3(self.relation2(context)))))
h2_0 = self.bn2_1(self.conv2_1(context))
h2_1 = self.act(self.bn2_2(self.conv2_2(h2_0)))
h2_2 = self.act(self.bn2_3(self.conv2_3(h2_1)))
h2_3 = self.bn2_4(self.conv2_4(h2_2))
decoded = self.act(tf.add(h2_0, h2_3))
decoded = self.act(self.bn_x6(
tf.add(decoded, self.bn_x5(self.relation3(decoded)))))
decoded = self.act(self.bn_x8(
tf.add(decoded, self.bn_x7(self.relation4(decoded)))))
out = self.geometric_param(decoded)
out = tf.sigmoid(tf.reshape(
out, [-1, self.n_component, self.output_dim]))
cls_score = self.class_score(decoded)
cls_prob = tf.sigmoid(tf.reshape(
cls_score, [-1, self.n_component, self.n_class]))
final_pred = tf.concat([out, cls_prob], axis=-1)
return final_pred
|
11574290
|
import unittest
from datetime import datetime
from datetime import timedelta
from keydra.providers.base import BaseProvider
from keydra.providers.base import exponential_backoff_retry
@exponential_backoff_retry(1, delay=1)
def _failing_function():
raise Exception('Boom')
@exponential_backoff_retry(1, delay=1)
def _passing_function():
return True
class Dummy(object):
@exponential_backoff_retry(1, delay=1)
def fail(self):
raise Exception('Boom')
@exponential_backoff_retry(1, delay=1)
def success(self):
return True
class TestBaseProvider(unittest.TestCase):
def test_exponential_backoff_retry_successful_call(self):
self.assertEqual(_passing_function(), True)
dummy = Dummy()
self.assertEqual(dummy.success(), True)
def test_exponential_backoff_retry_failing_call(self):
starttime = datetime.now()
with self.assertRaises(Exception):
_failing_function()
self.assertGreaterEqual(
datetime.now() - starttime, timedelta(seconds=1)
)
starttime = datetime.now()
with self.assertRaises(Exception):
dummy = Dummy()
dummy.fail()
self.assertGreaterEqual(
datetime.now() - starttime, timedelta(seconds=1)
)
def test_redact_result_no_override(self):
class Dummy(BaseProvider):
def rotate(self, spec):
pass
def distribute(self, secret, dest):
pass
result = {'result': 'stuff'}
self.assertEqual(result, Dummy().redact_result(result))
def test_redact_result_override(self):
class Dummy(BaseProvider):
def rotate(self, spec):
pass
def distribute(self, secret, dest):
pass
@classmethod
def redact_result(cls, result):
result['result'] = '***'
return result
result = {'result': 'stuff'}
r_result = Dummy().redact_result(result)
self.assertNotEqual(r_result['result'], 'stuff')
def test_validate_spec_base(self):
class DummyA(BaseProvider):
def rotate(self, spec):
pass
def distribute(self, spec):
pass
dummyA = DummyA()
valid, _ = dummyA.validate_spec(
{
'provider': 'provider',
'key': 'key'
}
)
self.assertTrue(valid)
        valid, msg = dummyA.validate_spec({})
        self.assertFalse(valid)
def test_validate_spec_override(self):
class DummyA(BaseProvider):
def rotate(self, spec):
pass
def distribute(self, spec):
pass
@classmethod
def validate_spec(cls, spec):
return True, 'It is alive'
dummyA = DummyA()
_, msg = dummyA.validate_spec(None)
self.assertEqual(msg, 'It is alive')
def test_pre_process_bypass(self):
class DummyA(BaseProvider):
pass
secret = {'a': 'b'}
resp = DummyA.pre_process_spec(secret)
self.assertEqual(secret, resp)
|
11574293
|
from abc import ABC, abstractmethod
from typing import Optional, Union
import numpy as np
class Arm(ABC):
"""Arm
:param Optional[str] name: alias name
"""
def __init__(self, name: Optional[str]):
self.__name = self._name() if name is None else name
@property
def name(self) -> str:
"""Arm name"""
return self.__name
@abstractmethod
def _name(self) -> str:
"""
Returns:
default arm name
"""
class StochasticArm(Arm):
"""Stochastic arm
:param Optional[str] name: alias name
"""
def __init__(self, name: Optional[str]):
super().__init__(name)
@property
@abstractmethod
def mean(self) -> float:
"""Mean of rewards"""
@abstractmethod
def pull(self, pulls: Optional[int] = None) -> Union[float, np.ndarray]:
"""Pull the arm
When `pulls` is `None`, a float number will be returned. Otherwise, a numpy
array will be returned.
Args:
pulls: number of times to pull
Returns:
stochastic rewards
"""
|
11574304
|
import time
from qibullet import SimulationManager
from qibullet import NaoVirtual
from qibullet import NaoFsr
if __name__ == "__main__":
simulation_manager = SimulationManager()
client = simulation_manager.launchSimulation(gui=True)
nao = simulation_manager.spawnNao(client, spawn_ground_plane=True)
# Alternative solution, get the FsrHandler of the robot:
# fsr_handler = nao.getFsrHandler
try:
while True:
# Get the FSR value of the front left FSR of NAO's left foot
value = nao.getFsrValue(NaoFsr.LFOOT_FL)
# Get the FSR value of the rear right FSR of NAO's right foot
value = nao.getFsrValue(NaoFsr.RFOOT_RR)
# Get all of the values of the FSRs of NAO's left foot
values = nao.getFsrValues(NaoFsr.LFOOT)
# Get the total weight value on the FSRs of NAO's right foot
total_weight = nao.getTotalFsrValues(NaoFsr.RFOOT)
print("Total weight on the right foot: " + str(total_weight))
# Alternative solution:
# fsr_handler.getValue(NaoFsr.LFOOT_FL)
# fsr_handler.getValue(NaoFsr.RFOOT_RR)
# fsr_handler.getValues(NaoFsr.LFOOT)
# fsr_handler.getTotalValue(NaoFsr.RFOOT)
time.sleep(0.5)
except KeyboardInterrupt:
pass
finally:
simulation_manager.stopSimulation(client)
|
11574338
|
from collections import namedtuple
Root = namedtuple('Root', 'root_node root_parser')
Creds = namedtuple('Creds', 'username password')
root_node = Root('rtr1', 'CiscoBaseParser')
credentials = Creds('user1', '<PASSWORD>1')
ignore_regex = r'(^NA\-|^SEP|^ACVD|^ACWD|^ACPDC|^AP|WAP|WLC|CMP)'
django_app_name = 'net_system'
|
11574363
|
import os
from datetime import datetime
import numpy as np
from mapcache import NumpyCache, compute_parallel
from LSSTsims import LSSTsims
from gatspy.periodic import (LombScargleMultiband,
SuperSmootherMultiband,
LombScargleMultibandFast)
class SuperSmoother1Band(SuperSmootherMultiband):
"""
Convenience class to fit a single band of data with supersmoother
This class ignores all data not associated with the given band.
The main reason for this is that it can then be used as a stand-in for
any multiband class.
"""
def __init__(self, optimizer=None, band='g'):
self.band = band
SuperSmootherMultiband.__init__(self, optimizer)
def _fit(self, t, y, dy, filts):
import numpy as np
mask = (filts == self.band)
self.t, self.y, self.dy, self.filts = (t[mask], y[mask],
dy[mask], filts[mask])
self.unique_filts_ = np.unique(self.filts)
return SuperSmootherMultiband._fit(self, self.t, self.y,
self.dy, self.filts)
def compute_and_save_periods(Model, outfile,
pointing_indices, ndays,
gmags, template_indices,
model_args=None, model_kwds=None,
Nperiods=5, save_every=5,
parallel=True, client=None,
num_results=None):
"""Function to compute periods and save the results"""
cache = NumpyCache(outfile)
keys = list(np.broadcast(pointing_indices, ndays, gmags, template_indices))
if num_results is not None:
keys = keys[:num_results]
# Define a function which, given a key, computes the desired periods.
def find_periods(key, Nperiods=Nperiods, Model=Model, LSSTsims=LSSTsims,
model_args=model_args, model_kwds=model_kwds):
import numpy as np
lsstsim = LSSTsims()
t, y, dy, filts = lsstsim.generate_lc(*key, random_state=0)
model = Model(*(model_args or ()), **(model_kwds or {}))
model.optimizer.period_range = (0.2, 1.2)
model.optimizer.verbose = 0
model.fit(t, y, dy, filts)
        try:
            periods = model.find_best_periods(Nperiods)
        except (np.linalg.LinAlgError, ValueError):
            periods = np.nan + np.zeros(Nperiods)
return key, periods
results = compute_parallel(cache, find_periods, keys,
save_every=save_every,
parallel=parallel, client=client)
if num_results is not None:
return results
else:
return gather_results(outfile, pointing_indices, ndays,
gmags, template_indices)
def gather_results(outfile, pointing_indices, ndays, gmags, template_indices):
if not os.path.exists(outfile):
raise ValueError("Cannot gather results from {0}".format(outfile))
results = NumpyCache(outfile)
brd = np.broadcast(pointing_indices, ndays, gmags, template_indices)
results = np.array([results.get_row(key) for key in brd])
return results.reshape(brd.shape + results.shape[-1:])
if __name__ == '__main__':
parallel = True
if parallel:
# Need some imports on the engine
from IPython.parallel import Client
client = Client()
dview = client.direct_view()
with dview.sync_imports():
from gatspy.periodic import (LombScargleMultiband,
LombScargleMultibandFast,
SuperSmootherMultiband)
else:
client = None
template_indices = np.arange(2 * 23).reshape(2, 23).T
pointing_indices = np.arange(1, 24)[:, None]
ndays = np.array([90, 180, 365, 2*365, 5*365])[:, None, None]
gmags = np.array([20, 21, 22, 23, 24.5])[:, None, None, None]
kwargs = dict(pointing_indices=pointing_indices,
ndays=ndays,
gmags=gmags,
template_indices=template_indices,
parallel=parallel, client=client,
save_every=4)
compute_and_save_periods(LombScargleMultiband, 'resultsLSST.npy',
model_kwds=dict(Nterms_base=1, Nterms_band=0),
**kwargs)
compute_and_save_periods(LombScargleMultiband, 'resultsLSST01.npy',
model_kwds=dict(Nterms_base=0, Nterms_band=1),
**kwargs)
for i, band in enumerate('ugrizy'):
filename = 'resultsLSST_ssm_{0}.npy'.format(band)
compute_and_save_periods(SuperSmoother1Band, filename,
model_kwds=dict(band=i),
**kwargs)
|
11574432
|
from distutils.core import setup
setup(name='BEE',
version='0.1',
py_modules=['BEE'],
)
|
11574519
|
import csv
import inspect
import logging
import os
from contextlib import redirect_stdout
from os.path import join
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import keras
from qac.evaluation import evaluation
from qac.experiments import preprocessing
logger = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name
class CNNStaticGenerator(keras.utils.Sequence):
def __init__(self, x_set, y_set, batch_size, embedding_weights, return_y=True):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
self.return_y = return_y
self.embedding_weights = embedding_weights
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
embedded_x = np.stack([np.stack([self.embedding_weights[word]
for word in sentence]) for sentence in batch_x])
if self.return_y:
return embedded_x, np.array(batch_y)
return embedded_x
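# Illustrative usage (hypothetical names and shapes, not from the original code):
#   weights = np.random.rand(vocab_size, 300)   # one 300-d vector per token id
#   gen = CNNStaticGenerator(x_token_ids, y_labels, batch_size=32, embedding_weights=weights)
#   model.fit_generator(gen, epochs=5)          # or model.fit(gen, ...) on newer Keras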
def filter_func_args(params, func):
signature = inspect.signature(func)
keys = list(signature.parameters.keys())
filtered = {key: params[key] for key in params.keys() if key in keys}
return filtered
def save_config(run_dir, config, filename='params'):
keys = sorted(config.keys())
with open(join(run_dir, '{}.csv'.format(filename)), 'w') as file:
writer = csv.writer(file)
writer.writerow(keys)
writer.writerow([config[key] for key in keys])
def save_summary(run_dir, model):
with open(join(run_dir, 'modelsummary.txt'), 'w') as file:
with redirect_stdout(file):
model.summary()
def save_history(run_dir, history):
keys = ['epoch', 'loss', 'acc', 'val_loss', 'val_acc']
history['epoch'] = list(range(1, len(history['loss']) + 1))
with open(join(run_dir, 'history.csv'), 'w') as file:
writer = csv.writer(file)
writer.writerow(keys)
writer.writerows(zip(*[history[key] for key in keys]))
def save_best_epoch(run_dir, early_stopping):
stopped = early_stopping.stopped_epoch
best = stopped - early_stopping.patience + 1
logger.info('Best epoch %d', best)
with open(join(run_dir, 'best_epoch.txt'), 'w') as file:
file.write('{}\n'.format(best))
def plot_runs(out_dir, run_id, histories, title, config_names):
f, axs = plt.subplots(nrows=2, ncols=2, figsize=(13, 10), sharex=True)
for history, name in zip(histories, config_names):
axs[0][0].plot(history['epoch'], history['acc'], label=name)
axs[0][0].set_title('Accuracy')
axs[0][1].plot(history['epoch'], history['loss'], label="_nolegend_")
axs[0][1].set_title('Loss')
axs[1][0].plot(history['epoch'], history['val_acc'], label="_nolegend_")
axs[1][0].set_title('Val. Accuracy')
axs[1][1].plot(history['epoch'], history['val_loss'], label="_nolegend_")
axs[1][1].set_title('Val. Loss')
f.legend(loc='upper right')
f.suptitle(title, fontsize=14)
f.tight_layout()
f.subplots_adjust(top=0.93)
f.savefig('{}/{}.png'.format(out_dir, run_id))
def consolidate_runs(args, metadata_dir, run_names, execution_id):
run_dfs = []
accuracies = []
for run_name in run_names:
run_dir = join(metadata_dir, run_name)
val_results = pd.read_csv(join(run_dir, 'val_results.csv'))
accuracies.append(val_results['accuracy'].values[0])
params = pd.read_csv(join(run_dir, 'params.csv'))
val_results['best_epoch'] = open(join(run_dir, 'best_epoch.txt')).readlines()[0].strip()
joined = pd.concat([val_results, params], axis=1)
joined['run_name'] = '{}-{}'.format(execution_id, run_name)
joined.set_index('run_name', inplace=True)
run_dfs.append(joined)
histories = []
n_best = np.argsort(accuracies)[-5:]
best_runs = [run_names[i] for i in n_best]
for run_name in best_runs:
run_dir = join(metadata_dir, run_name)
histories.append(pd.read_csv(join(run_dir, 'history.csv')))
plot_runs(metadata_dir, 'training', histories, args.community, best_runs)
pd.concat(run_dfs).to_excel(join(metadata_dir, 'summary.xlsx'))
def save_predictions(pred, out_dir, run_name):
os.makedirs(out_dir, exist_ok=True)
y_pred = pred[:, 1].astype(int)
y_pred = preprocessing.LABEL_ENCODER.inverse_transform(y_pred)
pd.DataFrame.from_dict({'id': pred[:, 0], 'y_pred': y_pred, 'y_pred_proba': pred[:, 2]}) \
.to_csv(join(out_dir, '{}_test.csv'.format(run_name)), index=False)
|
11574557
|
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
setup(
name='test_pkg',
packages=find_packages(where='src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
package_data={"": ["*.txt", "*.rst", "*.sql", "*.ipynb"]},
)
|
11574579
|
import subprocess
import optparse
import re
def mac_changer(iface, mac):
print("[*]changing mac address for " + iface + " to " + mac)
subprocess.call(["ifconfig", iface, "down"])
subprocess.call(["ifconfig", iface, "hw", "ether", mac])
subprocess.call(["ifconfig", iface, "up"])
print("[*]mac address of " + iface + " changed to " + mac)
def get_arguments():
parser = optparse.OptionParser()
parser.add_option("-i", "--interface", dest="iface", help="interface")
parser.add_option("-m", "--mac", dest="mac", help="new mac address")
    arguments, _ = parser.parse_args()
    if not arguments.iface:
        parser.error("please specify an interface, use --help to know more")
    elif not arguments.mac:
        parser.error("please specify a mac address, use --help to know more")
return arguments
def current_mac(interface):
ifconfig_result = subprocess.check_output(["ifconfig", interface]).decode("utf-8")
curr_mac = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", ifconfig_result)
if not curr_mac:
print("[*]sorry unable to get your mac adress")
else:
print("[*]your current mac is " + curr_mac[0])
return curr_mac[0]
arguments = get_arguments()
curr_mac = current_mac(arguments.iface)
mac_changer(arguments.iface, arguments.mac)
curr_mac = current_mac(arguments.iface)
if curr_mac == arguments.mac:
print("[*]mac changed successfully to the desired mac")
else:
print("sorry we are unable to change the mac address")
|
11574585
|
import torch
from collections import OrderedDict
from utils import misc
from models.base_models import BaseModel
from models.modules.base_modules import ModuleFactory
class ModelCombine(BaseModel):
def __init__(self, opt):
super(ModelCombine, self).__init__(opt)
self._opt = opt
self._init_create_networks()
if self._is_train:
self._init_train_vars()
if not self._is_train or self._opt.load_epoch > 0:
self.load()
self._bs = self._opt.batch_size
# init input
self._input_mask = self._Tensor([0])
self._input_img_LR_masked = self._Tensor([0])
self._input_img_LR = self._Tensor([0])
self._input_img_SR = self._Tensor([0])
self._input_img_landmark = self._Tensor([0])
self._input_img_face_parsing = self._Tensor([0])
self._input_point = self._Tensor([0])
# init visual
self._vis_batch_mask = self._Tensor([0])
self._vis_batch_img_LR_masked = self._Tensor([0])
self._vis_batch_img_LR_fc = self._Tensor([0])
self._vis_batch_img_LR_syn = self._Tensor([0])
self._vis_batch_img_LR = self._Tensor([0])
self._vis_batch_img_coarse = self._Tensor([0])
self._vis_batch_img_fine = self._Tensor([0])
self._vis_batch_img_patch = self._Tensor([0])
self._vis_batch_img_SR = self._Tensor([0])
self._vis_batch_img_landmark = self._Tensor([0])
self._vis_batch_img_landmark_GT = self._Tensor([0])
self._vis_batch_img_face_parsing = self._Tensor([0])
self._vis_batch_img_face_parsing_GT = self._Tensor([0])
# init loss
self._loss_1_hole = self._Tensor([0])
self._loss_1_vaild = self._Tensor([0])
self._loss_1_sty = self._Tensor([0])
self._loss_1_per = self._Tensor([0])
self._loss_1_synth_smooth = self._Tensor([0])
self._loss_2_coarse = self._Tensor([0])
self._loss_2_landmark = self._Tensor([0])
self._loss_2_face_parsing = self._Tensor([0])
self._loss_2_fine = self._Tensor([0])
self._loss_2_per = self._Tensor([0])
self._loss_g_global_adv = self._Tensor([0])
self._loss_d_global_adv = self._Tensor([0])
self._loss_d_global_adv_gp = self._Tensor([0])
def _init_create_networks(self):
if self._opt.fc_module == 'pconv':
self._FC = ModuleFactory.get_by_name(self._opt.fc_module, self._opt.freeze_enc_bn).to(self._device)
else:
            raise ValueError('unsupported fc_module: {}'.format(self._opt.fc_module))
if self._opt.fc_pretrain:
self._FC.load_state_dict(torch.load(self._opt.fc_pretrain_model_path))
self._FC.eval()
self._SR = ModuleFactory.get_by_name(self._opt.sr_module).to(self._device)
self._vgg = ModuleFactory.get_by_name('vgg').to(self._device)
self._global_d = ModuleFactory.get_by_name('d').to(self._device)
def _init_train_vars(self):
self._current_lr_g = self._opt.lr_g
if self._opt.fix_fc:
self._opti_g = torch.optim.Adam(self._SR.parameters(), lr=self._current_lr_g,
betas=[self._opt.g_adam_b1, self._opt.g_adam_b2])
else:
self._opti_g = torch.optim.Adam(list(self._FC.parameters()) + list(self._SR.parameters()),
lr=self._current_lr_g, betas=[self._opt.g_adam_b1, self._opt.g_adam_b2])
self._current_lr_d = self._opt.lr_d
self._opti_global_d = torch.optim.Adam(self._global_d.parameters(), lr=self._current_lr_d,
betas=[self._opt.d_adam_b1, self._opt.d_adam_b2])
def set_input(self, input_dict):
self._input_mask = input_dict['mask'].to(self._device)
self._input_img_LR_masked = input_dict['img_LR_masked'].to(self._device)
self._input_img_LR = input_dict['img_LR'].to(self._device)
self._input_img_SR = input_dict['img_SR'].to(self._device)
self._input_img_landmark = input_dict['img_landmark'].to(self._device)
self._input_img_face_parsing = input_dict['img_face_parsing'].to(self._device)
self._input_point = input_dict['point'].to(self._device)
def set_train(self):
if self._opt.fix_fc:
self._SR.train()
else:
self._FC.train()
self._SR.train()
self._global_d.train()
self._is_train = True
def set_eval(self):
if self._opt.fix_fc:
self._SR.eval()
else:
self._FC.eval()
self._SR.eval()
self._is_train = False
def forward(self, keep_data_for_visuals=False):
# if not self._is_train:
# print('........')
if self._opt.fc_module == 'pconv':
img_fc, _ = self._FC.forward(self._input_img_LR_masked, self._input_mask)
elif self._opt.fc_module == 'gfc_128' or self._opt.fc_module == 'gfc_32':
img_fc = self._FC.forward(self._input_img_LR_masked)
else:
            raise ValueError('unsupported fc_module: {}'.format(self._opt.fc_module))
img_synth = img_fc * (1 - self._input_mask) + self._input_mask * self._input_img_LR
img_coarse_sr, img_coarse_sr_landmark, img_coarse_sr_fp, img_fine_sr = self._SR.forward(img_synth)
if keep_data_for_visuals:
self._vis_batch_mask = misc.tensor2im(self._input_mask[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_masked = misc.tensor2im(
self._input_img_LR_masked[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_fc = misc.tensor2im(img_fc[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_syn = misc.tensor2im(img_synth[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR = misc.tensor2im(self._input_img_LR[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_coarse = misc.tensor2im(img_coarse_sr.data[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_fine = misc.tensor2im(img_fine_sr.data[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_SR = misc.tensor2im(self._input_img_SR[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_landmark = misc.landmark2im(img_coarse_sr_landmark.data[:self._opt.show_max])
self._vis_batch_img_landmark_GT = misc.landmark2im(self._input_img_landmark.data[:self._opt.show_max])
face_parsing_v = torch.argmax(img_coarse_sr_fp, dim=1, keepdim=False)
self._vis_batch_img_face_parsing = misc.faceparsing2im(face_parsing_v.data[:self._opt.show_max])
# self._vis_batch_img_face_parsing_GT = misc.faceparsing2im(
# self._input_img_face_parsing.squeeze_().unsqueeze_(0).data[:self._opt.show_max])
self._vis_batch_img_face_parsing_GT = misc.faceparsing2im(
self._input_img_face_parsing.squeeze_().data[:self._opt.show_max])
def optimize_parameters(self, train_generator, keep_data_for_visuals=False):
if self._is_train:
loss_d_global_adv, synth_img_global = self._forward_global_d()
loss_d_global_adv = loss_d_global_adv * self._opt.lambda_global_d_prob
self._opti_global_d.zero_grad()
loss_d_global_adv.backward()
self._opti_global_d.step()
self._loss_d_global_adv = loss_d_global_adv
            loss_d_global_adv_gp = self._gradient_penalty_d(synth_img_global, self._input_img_SR,
                                                            self._global_d) * self._opt.lambda_global_d_gp
self._opti_global_d.zero_grad()
loss_d_global_adv_gp.backward()
self._opti_global_d.step()
self._loss_d_global_adv_gp = loss_d_global_adv_gp
if train_generator:
loss_g = self._forward_g(keep_data_for_visuals)
self._opti_g.zero_grad()
loss_g.backward()
self._opti_g.step()
def _forward_g(self, keep_data_for_visuals):
if self._opt.fc_module == 'pconv':
img_fc, _ = self._FC.forward(self._input_img_LR_masked, self._input_mask)
elif self._opt.fc_module == 'gfc_128' or self._opt.fc_module == 'gfc_32':
img_fc = self._FC.forward(self._input_img_LR_masked)
else:
            raise ValueError('unsupported fc_module: {}'.format(self._opt.fc_module))
img_synth = img_fc * (1 - self._input_mask) + self._input_mask * self._input_img_LR
img_coarse_sr, img_coarse_sr_landmark, img_coarse_sr_fp, img_fine_sr = self._SR.forward(img_synth)
if self._opt.fix_fc is False:
loss_1_hole, loss_1_vaild, loss_1_sty, loss_1_per, loss_1_synth_smooth = \
self._inpainting_loss(img_synth, img_fc, self._input_img_LR)
self._loss_1_hole = loss_1_hole * self._opt.lambda_loss_1_hole
self._loss_1_vaild = loss_1_vaild * self._opt.lambda_loss_1_vaild
self._loss_1_sty = loss_1_sty * self._opt.lambda_loss_1_sty
self._loss_1_per = loss_1_per * self._opt.lambda_loss_1_per
self._loss_1_synth_smooth = loss_1_synth_smooth * self._opt.lambda_loss_1_synth_smooth
self._loss_2_coarse = misc.compute_loss_l2(img_coarse_sr, self._input_img_SR) * self._opt.lambda_loss_2_coarse
self._loss_2_landmark = misc.compute_loss_l2(img_coarse_sr_landmark,
self._input_img_landmark) * self._opt.lambda_loss_2_landmark
self._loss_2_face_parsing = misc.compute_loss_cross_entropy(
img_coarse_sr_fp, self._input_img_face_parsing) * self._opt.lambda_loss_2_parsing
img_fine_sr_feature = self._vgg(img_fine_sr)
img_gt_feature = self._vgg(self._input_img_SR)
loss_per = 0
for i in range(len(img_fine_sr_feature)):
loss_per += misc.compute_loss_l1(img_fine_sr_feature[i], img_gt_feature[i])
self._loss_2_per = loss_per * self._opt.lambda_loss_2_per
self._loss_2_fine = misc.compute_loss_l2(img_fine_sr, self._input_img_SR) * self._opt.lambda_loss_2_fine
d_fake_global_prob = self._global_d.forward(img_fine_sr)
self._loss_g_global_adv = misc.compute_loss_d(d_fake_global_prob, True) * self._opt.lambda_global_d_prob
if keep_data_for_visuals:
self._vis_batch_mask = misc.tensor2mask(self._input_mask[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_masked = misc.tensor2im(
self._input_img_LR_masked[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_fc = misc.tensor2im(img_fc[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR_syn = misc.tensor2im(img_synth[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_LR = misc.tensor2im(self._input_img_LR[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_coarse = misc.tensor2im(img_coarse_sr.data[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_fine = misc.tensor2im(img_fine_sr.data[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_SR = misc.tensor2im(self._input_img_SR[:self._opt.show_max], idx=-1, nrows=1)
self._vis_batch_img_landmark = misc.landmark2im(img_coarse_sr_landmark.data[:self._opt.show_max])
self._vis_batch_img_landmark_GT = misc.landmark2im(self._input_img_landmark.data[:self._opt.show_max])
face_parsing_v = torch.argmax(img_coarse_sr_fp, dim=1, keepdim=False)
self._vis_batch_img_face_parsing = misc.faceparsing2im(face_parsing_v.data[:self._opt.show_max])
self._vis_batch_img_face_parsing_GT = misc.faceparsing2im(
self._input_img_face_parsing.squeeze_().data[:self._opt.show_max])
loss_1 = self._loss_1_hole + self._loss_1_vaild + self._loss_1_sty + \
self._loss_1_per + self._loss_1_synth_smooth
loss_2 = self._loss_2_per + self._loss_2_fine + self._loss_2_coarse + \
self._loss_2_landmark + self._loss_2_face_parsing
loss_g_d = self._loss_g_global_adv
return loss_1 + loss_2 + loss_g_d
def _forward_global_d(self):
if self._opt.fc_module == 'pconv':
img_fc, _ = self._FC.forward(self._input_img_LR_masked, self._input_mask)
elif self._opt.fc_module == 'gfc_128' or self._opt.fc_module == 'gfc_32':
img_fc = self._FC.forward(self._input_img_LR_masked)
else:
            raise ValueError('unsupported fc_module: {}'.format(self._opt.fc_module))
img_synth = img_fc * (1 - self._input_mask) + self._input_mask * self._input_img_LR
_, _, _, img_fine_sr = self._SR.forward(img_synth)
d_fake_img_prob = self._global_d.forward(img_fine_sr.detach())
self._loss_d_fake = misc.compute_loss_d(d_fake_img_prob, False) * self._opt.lambda_global_d_prob
d_real_img_prob = self._global_d.forward(self._input_img_SR)
self._loss_d_real = misc.compute_loss_d(d_real_img_prob, True) * self._opt.lambda_global_d_prob
return self._loss_d_real + self._loss_d_fake, img_fine_sr
    def _gradinet_penalty_d(self, synth_img, gt_img, discriminator):
        # WGAN-GP gradient penalty: penalize deviation of the discriminator's
        # gradient norm from 1 on samples interpolated between real and fake
        alpha = torch.rand(self._bs, 1, 1, 1).expand_as(gt_img).to(self._device)
        interpolated = alpha * gt_img.data + (1 - alpha) * synth_img.data
        interpolated.requires_grad = True
        interpolated_prob = discriminator.forward(interpolated)
grad = torch.autograd.grad(outputs=interpolated_prob,
inputs=interpolated,
grad_outputs=torch.ones(interpolated_prob.size()).to(self._device),
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
grad = grad.view(grad.size(0), -1)
grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
self._loss_d_gp = torch.mean((grad_l2norm - 1) ** 2)
return self._loss_d_gp
def _inpainting_loss(self, img_synth, img_fc, img_gt):
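        # L1 reconstruction on the hole region ((1 - mask) * img) and the
        # valid region (mask * img), plus VGG-based style (Gram matrix),
        # perceptual, and smoothness terms on the synthesized image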
target = (1 - self._input_mask) * img_gt
target = target.detach()
loss_hole = misc.compute_loss_l1((1 - self._input_mask) * img_fc, target)
target = self._input_mask * img_gt
target = target.detach()
loss_vaild = misc.compute_loss_l1(self._input_mask * img_fc, target)
rec_img_feature = self._vgg(img_fc)
synth_img_feature = self._vgg(img_synth)
gt_img_feature = self._vgg(img_gt)
loss_sty = 0
loss_per = 0
for i in range(len(rec_img_feature)):
loss_sty += misc.compute_loss_l1(
misc.compute_loss_gram_matrix(rec_img_feature[i]),
misc.compute_loss_gram_matrix(gt_img_feature[i]))
loss_sty += misc.compute_loss_l1(
misc.compute_loss_gram_matrix(synth_img_feature[i]),
misc.compute_loss_gram_matrix(gt_img_feature[i]))
loss_per += misc.compute_loss_l1(rec_img_feature[i], gt_img_feature[i])
loss_per += misc.compute_loss_l1(synth_img_feature[i], gt_img_feature[i])
loss_synth_smooth = misc.compute_loss_smooth(img_synth)
return loss_hole, loss_vaild, loss_sty, loss_per, loss_synth_smooth
def get_current_errors(self):
loss_dict = OrderedDict([('loss_1_hole', self._loss_1_hole.item()),
('loss_1_vaild', self._loss_1_vaild.item()),
('loss_1_sty', self._loss_1_sty.item()),
('loss_1_per', self._loss_1_per.item()),
('loss_1_synth_smooth', self._loss_1_synth_smooth.item()),
('loss_2_landmark', self._loss_2_landmark.item()),
('loss_2_face_parsing', self._loss_2_face_parsing.item()),
('loss_2_coarse', self._loss_2_coarse.item()),
('loss_2_per', self._loss_2_per.item()),
('loss_2_fine', self._loss_2_fine.item()),
('loss_g_global_adv', self._loss_g_global_adv.item()),
('loss_d_global_adv', self._loss_d_global_adv.item()),
('loss_d_global_adv_gp', self._loss_d_global_adv_gp.item()),
])
return loss_dict
def get_current_scalars(self):
return OrderedDict([('lr_g', self._current_lr_g), ('lr_d', self._current_lr_d)])
def get_current_visuals(self):
visuals = OrderedDict()
visuals['batch_img_mask'] = self._vis_batch_mask
visuals['batch_img_LR_masked'] = self._vis_batch_img_LR_masked
visuals['batch_img_LR_fc'] = self._vis_batch_img_LR_fc
visuals['batch_img_LR_syn'] = self._vis_batch_img_LR_syn
visuals['batch_img_LR'] = self._vis_batch_img_LR
visuals['batch_img_coarse'] = self._vis_batch_img_coarse
visuals['batch_img_landmark'] = self._vis_batch_img_landmark
visuals['batch_img_landmark_GT'] = self._vis_batch_img_landmark_GT
visuals['batch_img_face_parsing'] = self._vis_batch_img_face_parsing
visuals['batch_img_face_parsing_GT'] = self._vis_batch_img_face_parsing_GT
visuals['batch_img_fine'] = self._vis_batch_img_fine
visuals['batch_img_SR'] = self._vis_batch_img_SR
return visuals
def save(self, label):
if self._opt.fix_fc:
self._save_network(self._SR, 'SR', label)
self._save_optimizer(self._opti_g, 'opti_SR', label)
else:
self._save_network(self._FC, 'FC', label)
self._save_network(self._SR, 'SR', label)
self._save_optimizer(self._opti_g, 'opti_FCSR', label)
self._save_network(self._global_d, 'global_d', label)
self._save_optimizer(self._opti_global_d, 'opti_global_d', label)
def load(self):
load_epoch = self._opt.load_epoch
if self._opt.fix_fc:
self._load_network(self._SR, 'SR', load_epoch)
else:
self._load_network(self._FC, 'FC', load_epoch)
self._load_network(self._SR, 'SR', load_epoch)
if self._is_train:
self._load_network(self._global_d, 'global_d', load_epoch)
self._load_optimizer(self._opti_global_d, 'opti_global_d', load_epoch)
if self._opt.fix_fc:
self._load_optimizer(self._opti_g, 'opti_SR', load_epoch)
else:
self._load_optimizer(self._opti_g, 'opti_FCSR', load_epoch)
def update_learning_rate(self):
        lr_decay_g = self._opt.lr_g / self._opt.nepochs_decay
        self._current_lr_g -= lr_decay_g
for param_group in self._opti_g.param_groups:
param_group['lr'] = self._current_lr_g
print('update G learning rate: %f -> %f' % (self._current_lr_g + lr_decay_g, self._current_lr_g))
lr_decay_d = self._opt.lr_d / self._opt.nepochs_decay
self._current_lr_d -= lr_decay_d
for param_group in self._opti_global_d.param_groups:
param_group['lr'] = self._current_lr_d
print('update global D learning rate: %f -> %f' % (self._current_lr_d + lr_decay_d, self._current_lr_d))
|
11574589
|
import struct
def read(filename):
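    """Read an RMAP resource file.

    Returns a list of (name.type, data) tuples, or None if the file is
    malformed (bad magic, misaligned catalog, or a data header that does
    not match its catalog entry).
    """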
with open(filename, 'rb') as file:
entries = []
if file.read(4) != b'RMAP':
return
name = file.read(8)
if name != b'resource':
return
catalog_size = struct.unpack('<i', file.read(4))[0]
if catalog_size % 16 != 0:
return
num_entries = catalog_size // 16
catalog = []
for i in range(num_entries):
resource_type = file.read(4).decode('ascii')
raw_name = file.read(8)
if 0 in raw_name:
name = raw_name[0 : raw_name.index(0)].decode('ascii')
else:
name = raw_name.decode('ascii')
length = struct.unpack('<i', file.read(4))[0]
catalog.append((resource_type, name, length))
entries = []
for i in range(num_entries):
expected_resource_type = catalog[i][0]
expected_name = catalog[i][1]
expected_length = catalog[i][2]
resource_type = file.read(4).decode('ascii')
raw_name = file.read(8)
if 0 in raw_name:
name = raw_name[0 : raw_name.index(0)].decode('ascii')
else:
name = raw_name.decode('ascii')
length = struct.unpack('<i', file.read(4))[0]
if resource_type != expected_resource_type:
return
elif expected_name != name:
return
elif expected_length != length:
return
data = file.read(length)
entries.append((f'{name}.{resource_type}', data))
return entries
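# Illustrative usage (hypothetical file name):
#   entries = read('resources.rmap')
#   if entries is not None:
#       for name, data in entries:
#           print(name, len(data))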
|
11574590
|
from typing import Any, Callable, List, Union
from tartiflette.coercers.arguments import coerce_arguments
from tartiflette.coercers.outputs.common import complete_value_catching_error
from tartiflette.execution.types import build_resolve_info
from tartiflette.types.helpers.get_directive_instances import (
compute_directive_nodes,
)
from tartiflette.utils.directives import (
introspection_directives_executor,
wraps_with_directives,
)
__all__ = ("resolve_field",)
async def resolve_field_value_or_error(
execution_context: "ExecutionContext",
field_definition: "GraphQLField",
field_nodes: List["FieldNode"],
resolver: Callable,
source: Any,
info: "ResolveInfo",
) -> Union[Exception, Any]:
"""
Coerce the field's arguments and then try to resolve the field.
:param execution_context: instance of the query execution context
:param field_definition: GraphQLField instance of the resolved field
:param field_nodes: AST nodes related to the resolved field
:param resolver: callable to use to resolve the field
:param source: default root value or field parent value
:param info: information related to the execution and the resolved field
:type execution_context: ExecutionContext
:type field_definition: GraphQLField
:type field_nodes: List[FieldNode]
:type resolver: Callable
:type source: Any
:type info: ResolveInfo
:return: the resolved field value
:rtype: Union[Exception, Any]
"""
# pylint: disable=too-many-locals
try:
computed_directives = []
for field_node in field_nodes:
computed_directives.extend(
compute_directive_nodes(
execution_context.schema,
field_node.directives,
execution_context.variable_values,
)
)
if computed_directives:
resolver = wraps_with_directives(
directives_definition=computed_directives,
directive_hook="on_field_execution",
func=resolver,
is_resolver=True,
with_default=True,
)
result = await resolver(
source,
await coerce_arguments(
field_definition.arguments,
field_nodes[0],
execution_context.variable_values,
execution_context.context,
coercer=field_definition.arguments_coercer,
),
execution_context.context,
info,
context_coercer=execution_context.context,
)
if info.is_introspection:
return await introspection_directives_executor(
result,
execution_context.context,
info,
context_coercer=execution_context.context,
)
return result
except Exception as e: # pylint: disable=broad-except
return e
async def resolve_field(
execution_context: "ExecutionContext",
parent_type: "GraphQLObjectType",
source: Any,
field_nodes: List["FieldNode"],
path: "Path",
is_introspection_context: bool,
field_definition: "GraphQLField",
resolver: Callable,
output_coercer: Callable,
) -> Any:
"""
Resolves the field value and coerce it before returning it.
:param execution_context: instance of the query execution context
:param parent_type: GraphQLObjectType of the field's parent
:param source: default root value or field parent value
:param field_nodes: AST nodes related to the resolved field
:param path: the path traveled until this resolver
:param field_definition: GraphQLField instance of the resolved field
:param resolver: callable to use to resolve the field
:param output_coercer: callable to use to coerce the resolved field value
:param is_introspection_context: determines whether or not the resolved
field is in a context of an introspection query
:type execution_context: ExecutionContext
:type parent_type: GraphQLObjectType
:type source: Any
:type field_nodes: List[FieldNode]
:type path: Path
:type field_definition: GraphQLField
:type resolver: Callable
:type output_coercer: Callable
:type is_introspection_context: bool
:return: the coerced resolved field value
:rtype: Any
"""
# pylint: disable=too-many-arguments
info = build_resolve_info(
execution_context,
field_definition,
field_nodes,
parent_type,
path,
is_introspection_context,
)
return await complete_value_catching_error(
await resolve_field_value_or_error(
execution_context,
field_definition,
field_nodes,
resolver,
source,
info,
),
info,
execution_context,
field_nodes,
path,
field_definition.graphql_type,
output_coercer,
)
|
11574650
|
import abc
import re
from decimal import Decimal, InvalidOperation
from django.core import exceptions
from .validator import (
Validator,
ValidatorOutput,
UnsupportedException,
UnsupportedContentTypeException,
)
from ..ingest_settings import UPLOAD_SETTINGS
from .. import utils
class RowwiseValidator(Validator):
    """Subclass this for any validator applied to one row at a time.
    Rule files should be JSON or YAML with the required fields.
    Each subclass then only needs an `evaluate(self, rule, row)`
    method returning a Boolean.
    """
SUPPORTS_HEADER_OVERRIDE = True
if "headers" not in UPLOAD_SETTINGS["STREAM_ARGS"]:
raise exceptions.ImproperlyConfigured(
"setting DATA_INGEST['STREAM_ARGS']['headers'] is required"
)
if UPLOAD_SETTINGS.get("OLD_HEADER_ROW") and not isinstance(
UPLOAD_SETTINGS["STREAM_ARGS"]["headers"], list
):
        raise exceptions.ImproperlyConfigured(
            """DATA_INGEST['OLD_HEADER_ROW'] should be used with a
            list of headers in DATA_INGEST['STREAM_ARGS']['headers']"""
        )
@staticmethod
def cast_value(value):
"""
This will help clean the value and cast the value to its type
i.e. "123" is an integer, so it will be casted to become 123
Parameters:
value - a string that needs to be casted
Returns:
a value that has been processed and casted to its type (string, integer, or float)
"""
newval = value
if type(newval) == str:
newval = newval.strip()
try:
dnewval = Decimal(newval.replace(",", ""))
try:
inewval = int(dnewval)
fnewval = float(dnewval)
if inewval == fnewval:
newval = inewval
else:
newval = fnewval
except ValueError:
# will take the newval.strip() as the value
pass
except InvalidOperation:
# will take the newval.strip() as the value
pass
return newval
@staticmethod
def cast_values(row_values):
"""
This will help clean up a list of data and cast them to numbers when appropriate
Parameters:
row_values - a list of values
Returns:
a list of casted values
"""
return [RowwiseValidator.cast_value(value) for value in row_values]
@staticmethod
def replace_message(message, row_dict):
"""
        String interpolation for the message. Anything included inside the curly brackets {} will
be evaluated and replaced by its value.
- {column}: By putting the column name inside the curly brackets, this will be replaced with the
actual value of this row's column.
- {A op B}: A is a column name or a number (integer or decimal number), op is an arithmetic operator
+, -, * or /, and B is a column name or a number (integer or decimal number).
- {A op B:C}: A op B is the same as above, C is the number of decimal places to display after the decimal.
Parameters:
message - a string
row_dict - a dictionary of key(field name) / value(field data) pair
Returns:
string - a new message with content in {} replaced
"""
# create message
new_message = message
# Pattern that will match everything that looks like this: {...}
pattern = re.compile(r"\{.*?\}")
fields = pattern.findall(new_message)
for field in fields:
# Remove { }
key = field[1:-1].strip()
# Direct Substitution
if key in row_dict.keys():
new_message = new_message.replace(field, row_dict[key])
# Expression Calculation and Substitution
else:
# This will put out the two field names (strip out any spaces), and the operator
# and the rest of field to check for precision specification
# (operand1 operator operand2 rest)
# current supported operator is seen in the 2nd parenthesis
expression = re.match(
r"^\s*(\S+)\s*([\+\-\*/])\s*([^:\s]+)(\S*)\s*$", key
)
try:
# only supporting int/float operations
supported_type = (float, int)
operand1, operator, operand2, rest = expression.groups()
# If operands are numbers
value1 = RowwiseValidator.cast_value(operand1)
value2 = RowwiseValidator.cast_value(operand2)
# If operands are not numbers, they may be key to row_dict, get the real values
if not any(isinstance(value1, t) for t in supported_type):
value1 = RowwiseValidator.cast_value(row_dict[operand1])
if not any(isinstance(value2, t) for t in supported_type):
value2 = RowwiseValidator.cast_value(row_dict[operand2])
# If they are all supported type, then this expression can be evaluated
if any(isinstance(value1, t) for t in supported_type) and any(
isinstance(value2, t) for t in supported_type
):
# Right now being super explicit about which operator we support
if operator == "+":
result = value1 + value2
elif operator == "-":
result = value1 - value2
elif operator == "*":
result = value1 * value2
elif operator == "/":
result = value1 / value2
else:
# it really shouldn't have gotten here because we are only matching the allowed
# operation above
raise UnsupportedException()
# Will only use this when we are very sure there is no issue
# result = eval(f'{value1} {operator} {value2}')
# If precision is supplied, the "rest" should include this information in the following form:
# ':number_of_digits_after_decimal_place'
if rest:
if len(rest) > 1 and rest[0] == ":":
precision = int(rest[1:])
result = f"{result:.{precision}f}"
else:
# This means this is malformed
raise ValueError
new_message = new_message.replace(field, str(result))
except (KeyError, AttributeError, ValueError):
# This means the expression is malformed or key are misspelled
new_message = f"Unable to evaluate {field}"
break
except UnsupportedException:
new_message = f"Unsupported operation in {field}"
break
return new_message
def validate(self, source, content_type):
"""
Implemented validate method
"""
if content_type == "application/json":
data = utils.to_tabular(source)
elif content_type == "text/csv":
data = utils.reorder_csv(source)
else:
raise UnsupportedContentTypeException(content_type, type(self).__name__)
(headers, numbered_rows) = Validator.rows_from_source(data)
output = ValidatorOutput(numbered_rows, headers=headers)
for (rn, row) in numbered_rows.items():
# This is to remove the header row
if rn == UPLOAD_SETTINGS["OLD_HEADER_ROW"]:
continue
# Check for columns required by validator
received_columns = set(headers)
for rule in self.validator:
expected_columns = set(rule["columns"])
missing_columns = expected_columns.difference(received_columns)
if missing_columns:
output.add_row_error(
rn,
"Error",
rule.get("error_code"),
f"Unable to evaluate, missing columns: {missing_columns}",
[],
)
continue
try:
if rule["code"] and not self.invert_if_needed(
self.evaluate(rule["code"], row)
):
output.add_row_error(
rn,
rule.get("severity", "Error"),
rule.get("error_code"),
RowwiseValidator.replace_message(
rule.get("message", ""), row
),
                            [k for k in row.keys() if k in rule["columns"]],
)
except Exception as e:
output.add_row_error(
rn,
"Error",
rule.get("error_code"),
f"{type(e).__name__}: {e.args[0]}",
[],
)
return output.get_output()
@abc.abstractmethod
def evaluate(self, rule, row):
"""
Evaluate the row based on the rule
Parameters:
rule - the rule that needs to apply to the row
row - the dictionary of key(field name)/value(field data) pair
Returns:
Boolean - True/False
"""
|
11574737
|
import iam_floyd as statement
import importlib
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
helperDir = '%s/../../helper/python' % currentdir
sys.path.insert(0, helperDir)
test = importlib.import_module('python_test')
out = getattr(test, 'out')
deploy = getattr(test, 'deploy')
def get_statement():
# doc-start
my_statement = statement.Ec2()
my_statement.allow()
my_statement.to_start_instances()
my_statement.to_stop_instances()
# doc-end
return my_statement
statements = [get_statement()]
out(statements)
deploy(statements)
|
11574778
|
import unittest
from pandas import DataFrame
import pandas as pd
from .data_explorer import SignNames
from .data_explorer import DataExplorer
from .traffic_data import TrafficDataSets
from .traffic_data import TrafficDataProviderAutoSplitValidationData
from .traffic_data import TrafficDataRealFileProviderAutoSplitValidationData
from .traffic_data import DataSetWithGenerator
from .traffic_data import DataSetType
from .traffic_data_enhance import *
from .traffic_data_enhance import _enhance_one_image_randomly
from .traffic_data_enhance import _zoomin_image_randomly
from .traffic_data_enhance import _enhance_one_image_with_random_funcs
from .traffic_data_enhance import _image_grayscale
from .traffic_data_enhance import _normalise_image_zero_mean
from .traffic_data_enhance import _normalise_image, _normalise_image_whitening, \
_enhance_one_image_with_tensorflow_random_operations
from .data_explorer import TrainingPlotter
from tensorflow.python.framework import dtypes
import pickle
import numpy.testing
import os
import numpy as np
from .traffic_test_data_provider import real_data_provider
from .traffic_test_data_provider import real_data_provider_no_shuffer
from .traffic_test_data_provider import clear_subset_data_provider
class TestTrafficDataGenerator(unittest.TestCase):
@staticmethod
def generate(ratio):
images, labels = enhance_with_brightness_contrast(real_data_provider.X_train, real_data_provider.y_train, ratio)
provider = real_data_provider.to_other_provider(images, labels)
provider.save_to_file("traffic_data_training_{}".format(len(images)))
def test_generate_brightness_contrast_data(self):
self.generate(1)
def test_generate_brightness_contrast_data_2(self):
self.generate(2)
|
11574788
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor, ble_client
from esphome.const import (
STATE_CLASS_MEASUREMENT,
UNIT_BECQUEREL_PER_CUBIC_METER,
CONF_ID,
CONF_RADON,
CONF_RADON_LONG_TERM,
ICON_RADIOACTIVE,
)
DEPENDENCIES = ["ble_client"]
radon_eye_rd200_ns = cg.esphome_ns.namespace("radon_eye_rd200")
RadonEyeRD200 = radon_eye_rd200_ns.class_(
"RadonEyeRD200", cg.PollingComponent, ble_client.BLEClientNode
)
CONFIG_SCHEMA = cv.All(
cv.Schema(
{
cv.GenerateID(): cv.declare_id(RadonEyeRD200),
cv.Optional(CONF_RADON): sensor.sensor_schema(
unit_of_measurement=UNIT_BECQUEREL_PER_CUBIC_METER,
icon=ICON_RADIOACTIVE,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_RADON_LONG_TERM): sensor.sensor_schema(
unit_of_measurement=UNIT_BECQUEREL_PER_CUBIC_METER,
icon=ICON_RADIOACTIVE,
accuracy_decimals=0,
state_class=STATE_CLASS_MEASUREMENT,
),
}
)
.extend(cv.polling_component_schema("5min"))
.extend(ble_client.BLE_CLIENT_SCHEMA),
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await ble_client.register_ble_node(var, config)
if CONF_RADON in config:
sens = await sensor.new_sensor(config[CONF_RADON])
cg.add(var.set_radon(sens))
if CONF_RADON_LONG_TERM in config:
sens = await sensor.new_sensor(config[CONF_RADON_LONG_TERM])
cg.add(var.set_radon_long_term(sens))
|
11574865
|
import numpy as np
from matplotlib import pyplot as plt
import sys
import os
def run(seq='00',folder="/media/l/yp2/KITTI/odometry/dataset/poses/"):
pose_file=os.path.join(folder,seq+".txt")
poses=np.genfromtxt(pose_file)
poses=poses[:,[3,11]]
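    # squared pairwise distances: ||xi - xj||^2 = ||xi||^2 - 2*xi.xj + ||xj||^2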
inner=2*np.matmul(poses,poses.T)
xx=np.sum(poses**2,1,keepdims=True)
dis=xx-inner+xx.T
dis=np.sqrt(np.abs(dis))
    id_pos=np.argwhere(dis<3)   # positive pairs: poses less than 3 m apart
    id_neg=np.argwhere(dis>20)  # negative pairs: poses more than 20 m apart
    id_pos=id_pos[id_pos[:,0]-id_pos[:,1]>50]  # keep positives more than 50 frames apart
    id_neg=id_neg[id_neg[:,0]>id_neg[:,1]]     # keep each negative pair once
print(len(id_pos))
np.savez(seq+'.npz',pos=id_pos,neg=id_neg)
if __name__=='__main__':
seq="05"
if len(sys.argv)>1:
seq=sys.argv[1]
run(seq,"/media/l/yp2/KITTI/odometry/dataset/poses/")
|
11574885
|
import os
import tensorflow as tf
from tensorflow.python.framework import ops
_current_path = os.path.dirname(os.path.realpath(__file__))
_primitive_gen_module = tf.load_op_library(os.path.join(_current_path, 'libprimitive_gen.so'))
# octree ops
octree_database = _primitive_gen_module.octree_database
octree_conv = _primitive_gen_module.octree_conv
octree_pooling = _primitive_gen_module.octree_pooling
octree_conv_grad = _primitive_gen_module.octree_conv_grad
octree_pooling_grad = _primitive_gen_module.octree_pooling_grad
# primitive ops
primitive_mutex_loss = _primitive_gen_module.primitive_mutex_loss
primitive_group_points = _primitive_gen_module.primitive_group_points
primitive_cube_coverage_loss = _primitive_gen_module.primitive_cube_coverage_loss
primitive_coverage_loss = _primitive_gen_module.primitive_coverage_loss
primitive_consistency_loss = _primitive_gen_module.primitive_consistency_loss
primitive_symmetry_loss = _primitive_gen_module.primitive_symmetry_loss
primitive_aligning_loss = _primitive_gen_module.primitive_aligning_loss
primitive_cube_volume = _primitive_gen_module.primitive_cube_volume
primitive_cube_area_average_loss = _primitive_gen_module.primitive_cube_area_average_loss
primitive_points_suffix_index = _primitive_gen_module.primitive_points_suffix_index
primitive_mutex_loss_grad = _primitive_gen_module.primitive_mutex_loss_grad
primitive_cube_coverage_loss_grad = _primitive_gen_module.primitive_cube_coverage_loss_grad
primitive_coverage_loss_grad = _primitive_gen_module.primitive_coverage_loss_grad
primitive_consistency_loss_grad = _primitive_gen_module.primitive_consistency_loss_grad
primitive_symmetry_loss_grad = _primitive_gen_module.primitive_symmetry_loss_grad
primitive_aligning_loss_grad = _primitive_gen_module.primitive_aligning_loss_grad
primitive_cube_area_average_loss_grad = _primitive_gen_module.primitive_cube_area_average_loss_grad
# mask prediction
primitive_coverage_split_loss = _primitive_gen_module.primitive_coverage_split_loss
primitive_consistency_split_loss = _primitive_gen_module.primitive_consistency_split_loss
primitive_tree_generation = _primitive_gen_module.primitive_tree_generation
primitive_coverage_split_loss_grad = _primitive_gen_module.primitive_coverage_split_loss_grad
primitive_consistency_split_loss_grad = _primitive_gen_module.primitive_consistency_split_loss_grad
# cube update
primitive_coverage_select_loss = _primitive_gen_module.primitive_coverage_select_loss
primitive_consistency_select_loss = _primitive_gen_module.primitive_consistency_select_loss
primitive_mutex_select_loss = _primitive_gen_module.primitive_mutex_select_loss
primitive_coverage_select_loss_grad = _primitive_gen_module.primitive_coverage_select_loss_grad
primitive_consistency_select_loss_grad = _primitive_gen_module.primitive_consistency_select_loss_grad
primitive_mutex_select_loss_grad = _primitive_gen_module.primitive_mutex_select_loss_grad
ops.NotDifferentiable('OctreeDatabase')
ops.NotDifferentiable('PrimitiveGroupPoints')
ops.NotDifferentiable('PrimitiveCubeVolume')
ops.NotDifferentiable('PrimitivePointsSuffixIndex')
ops.NotDifferentiable('PrimitiveTreeGeneration')
@ops.RegisterGradient('OctreeConv')
def _OctreeConvGrad(op, grad):
return octree_conv_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.get_attr('curr_depth'),
op.get_attr('num_output'),
op.get_attr('kernel_size'),
op.get_attr('stride')) + \
(None,)
@ops.RegisterGradient('OctreePooling')
def _OctreePoolingGrad(op, *grad):
return [octree_pooling_grad(grad[0],
op.inputs[0],
op.outputs[1],
op.inputs[1],
op.get_attr('curr_depth')),
None]
@ops.RegisterGradient('PrimitiveMutexLoss')
def _PrimitiveMutexLossGrad(op, grad):
return primitive_mutex_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.get_attr('scale'))
@ops.RegisterGradient('PrimitiveCoverageLoss')
def _PrimitiveCoverageLossGrad(op, grad):
return primitive_coverage_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3]) + \
(None,)
@ops.RegisterGradient('PrimitiveConsistencyLoss')
def _PrimitiveConsistencyLossGrad(op, grad):
return primitive_consistency_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.get_attr('scale'),
op.get_attr('num_sample')) + \
(None,)
@ops.RegisterGradient('PrimitiveSymmetryLoss')
def _PrimitiveSymmetryLossGrad(op, grad):
return primitive_symmetry_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.get_attr('scale'),
op.get_attr('depth'))
@ops.RegisterGradient('PrimitiveAligningLoss')
def _PrimitiveAligningLossGrad(op, grad):
return [primitive_aligning_loss_grad(grad,
op.inputs[0],
op.inputs[1]),
None]
@ops.RegisterGradient('PrimitiveCubeAreaAverageLoss')
def _PrimitiveCubeAreaAverageLossGrad(op, grad):
return primitive_cube_area_average_loss_grad(grad,
op.inputs[0])
@ops.RegisterGradient('PrimitiveCoverageSplitLoss')
def _PrimitiveCoverageSplitLossGrad(op, *grad):
return primitive_coverage_split_loss_grad(grad[0],
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3]) + \
(None,)
@ops.RegisterGradient('PrimitiveConsistencySplitLoss')
def _PrimitiveConsistencySplitLossGrad(op, grad):
return primitive_consistency_split_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.get_attr('scale'),
op.get_attr('num_sample')) + \
(None,)
@ops.RegisterGradient('PrimitiveCubeCoverageLoss')
def _PrimitiveCubeCoverageLossGrad(op, *grad):
return primitive_cube_coverage_loss_grad(grad[0],
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.inputs[4],
op.get_attr('n_src_cube')) + \
(None, None)
@ops.RegisterGradient("PrimitiveMutexSelectLoss")
def _PrimitiveMutexSelectLossGrad(op, grad):
return primitive_mutex_select_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.get_attr("scale")) + \
(None,)
@ops.RegisterGradient("PrimitiveCoverageSelectLoss")
def _PrimitiveCoverageSelectLossGrad(op, grad):
return primitive_coverage_select_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.inputs[4]) + \
(None, None)
@ops.RegisterGradient("PrimitiveConsistencySelectLoss")
def _PrimitiveConsistencySelectLossGrad(op, grad):
return primitive_consistency_select_loss_grad(grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
op.inputs[3],
op.inputs[4],
op.get_attr("scale"),
op.get_attr("num_sample")) + \
(None, None)
|
11574896
|
import mxnet as mx
import numpy as np
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def init_from_vgg16(ctx, rpn_symbol, vgg16fc_args, vgg16fc_auxs):
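    # copy the pretrained VGG16 arg/aux params onto the requested context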
fc_args = vgg16fc_args.copy()
fc_auxs = vgg16fc_auxs.copy()
for k, v in fc_args.items():
if v.context != ctx:
fc_args[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fc_args[k])
for k, v in fc_auxs.items():
if v.context != ctx:
fc_auxs[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fc_auxs[k])
return fc_args, fc_auxs
|
11574897
|
import pytest
# basic test
def test_basic():
assert True
# test with configuration parameters (see conftest.py)
def test_withFixture(unium_endpoint):
    assert isinstance(unium_endpoint, str)
|
11574908
|
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from activity_grid.models import ActivityCard, ActivityPairCard
from twitterbot.constants import RESPONSE_TYPE_SINGLE_OFFICER, RESPONSE_TYPE_COACCUSED_PAIR
class ActivityGridUpdater():
def process(self, response):
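        # stamp the matching activity card (single officer) or pair card
        # (coaccused officers) with the current time, creating it if needed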
if response['type'] == RESPONSE_TYPE_SINGLE_OFFICER:
officer = response['entity']
activity_card, _ = ActivityCard.objects.get_or_create(officer_id=officer['id'])
activity_card.last_activity = timezone.now()
activity_card.save()
elif response['type'] == RESPONSE_TYPE_COACCUSED_PAIR:
officer1 = response['officer1']
officer2 = response['officer2']
# Make sure we don't create duplicated pair card with the same members
try:
activity_pair_card = ActivityPairCard.objects.get(
officer1_id=officer2.id, officer2_id=officer1.id
)
except ObjectDoesNotExist:
activity_pair_card, _ = ActivityPairCard.objects.get_or_create(
officer1_id=officer1.id, officer2_id=officer2.id
)
activity_pair_card.last_activity = timezone.now()
activity_pair_card.save()
|
11574930
|
from . import function_type
from . import string_utilities
from . import utilities
from . import trampoline
from . import error
from . import parser
def evaluate(ast, functions={}):
return _evaluate_entity_list(ast, functions)
def evaluate_code(code, functions={}, target='evaluation'):
result, errors = parser.parse_code(code, functions.copy(), target)
if target != 'evaluation' or len(errors) != 0:
return result, errors
try:
return _evaluate_entity_list(result, functions), []
except error.Error as exception:
return None, [exception]
def _evaluate_entity_list(ast, functions):
result = None
for entity in ast.children:
result = _try_evaluate_entity(entity, functions)
return result
def _try_evaluate_entity(entity, functions):
try:
return _evaluate_entity(entity, functions)
except error.Error as exception:
raise exception
except Exception as exception:
if hasattr(entity, 'offset'):
raise error.Error(str(exception), entity.offset)
else:
raise exception
def _evaluate_entity(entity, functions):
if entity.name == 'INTEGRAL_NUMBER' or entity.name == 'REAL_NUMBER':
return float(entity.value)
elif entity.name == 'HEXADECIMAL_NUMBER':
return float(int(entity.value, base=16))
elif entity.name == 'CHARACTER':
return float(ord(string_utilities.unquote(entity.value)))
elif entity.name == 'STRING':
return string_utilities.make_list_from_string(
string_utilities.unquote(entity.value),
)
elif entity.name == 'IDENTIFIER':
return trampoline.closure_trampoline(functions[entity.value])
elif entity.name == 'function':
return _evaluate_function(entity, functions)
elif entity.name == 'assignment':
return _evaluate_assignment(entity, functions)
elif entity.name == 'cast':
return _evaluate_cast(entity, functions)
elif entity.name == 'call':
return _evaluate_call(entity, functions)
else:
        raise Exception('unexpected entity {}'.format(entity))
def _evaluate_function(entity, functions):
entity_type = utilities.extract_and_add_function(entity, functions)
entity_type.handler = _make_function_handler(entity, functions.copy())
return entity_type
def _make_function_handler(function_node, functions):
def handler(*args):
for i, argument in enumerate(
function_node.children[0].children[1].children,
):
entity_type = function_type.make_type(argument.children[1])
entity_type.handler = _make_value_wrapper(args[i], entity_type)
functions[argument.children[0].value] = entity_type
return _evaluate_entity_list(
function_node.children[1],
functions.copy(),
)
return handler
def _make_value_wrapper(value, value_type):
return value if value_type.arity > 0 else lambda: value
def _evaluate_assignment(entity, functions):
new_functions = functions.copy()
entity_type = utilities.extract_and_add_assignment(entity, functions)
entity_type.handler = _make_value_wrapper(
_evaluate_entity_list(entity.children[1], new_functions),
entity_type,
)
return entity_type
def _evaluate_cast(entity, functions):
return _evaluate_entity_list(entity.children[0], functions.copy())
def _evaluate_call(call, functions):
inner_function = _try_evaluate_entity(
call.children[0].children[0].children[0],
functions,
)
parameters = [
_try_evaluate_entity(parameter, functions)
for parameter in call.children[1].children
]
return trampoline.closure_trampoline(inner_function(*parameters))
|
11574940
|
from PyQt5.QtCore import QObject, pyqtSignal, qInstallMessageHandler
import argparse
class QtWarningHandler(QObject):
sigGeometryWarning = pyqtSignal(object)
def _resizeWarningHandler(self, msg_type, msg_log_context, msg_string):
if msg_string.find('Unable to set geometry') != -1:
self.sigGeometryWarning.emit(msg_type)
elif msg_string:
print(msg_string)
warningHandler = QtWarningHandler()
qInstallMessageHandler(warningHandler._resizeWarningHandler)
ap = argparse.ArgumentParser(description='spotMAX inputs')
ap.add_argument(
'-d', '--debug', action='store_true',
    help=(
        'Used for debugging. Test code with '
        '"from cellacdc.config import parser_args; debug = parser_args["debug"]", '
        'if debug: <debug code here>'
    )
)
parser_args = vars(ap.parse_args())
|
11574959
|
from .options import Options
class Header:
def __init__(self, _type: str, title: str, options: Options = None):
self.type = _type
self.title = title
self.options = options.__dict__ if options is not None else {}
|
11574983
|
from staffjoy.resource import Resource
class MobiusTask(Resource):
PATH = "internal/tasking/mobius/{schedule_id}"
ID_NAME = "schedule_id"
ENVELOPE = None
|
11574994
|
from functools import partial
from ox.ast import Tree
from ox.ast.utils import intersperse
from ox.target.python.operators import Inplace as InplaceOpEnum
from sidekick import curry, alias
from .expr_ast import (
Expr,
Name,
Void,
ArgDef,
Starred,
Tuple,
to_expr,
Atom,
As,
to_expr_or_name,
)
from .utils import Cmd as CmdEnum
from ... import ast
from ...ast import Stmt as StmtBase
__all__ = [
"Stmt",
"StmtLeaf",
"StmtNode",
"to_stmt",
"register_stmt",
"Return",
"Function",
"Block",
"Del",
]
class Stmt(StmtBase):
"""
Base class for Python AST nodes that represent statements.
"""
class Meta:
root = True
abstract = True
class StmtLeaf(ast.StmtLeaf, Stmt):
"""
Base class for Python Expression leaf nodes.
"""
class Meta:
abstract = True
class StmtNode(ast.StmtNode, Stmt):
"""
Base class for Python Expression leaf nodes.
"""
class Meta:
abstract = True
to_stmt = Stmt._meta.coerce
register_stmt = curry(2, to_stmt.register)
register_stmt(Expr, lambda x: ExprStmt(x))
register_stmt(tuple, lambda x: Block(x))
register_stmt(list, lambda x: Block(x))
# ==============================================================================
# NODES
# ==============================================================================
class Return(ast.StmtExprMixin, StmtNode):
"""
A return statement (return <expr>)
"""
expr: Expr
class Meta:
command = "return {expr}"
sexpr_symbol = "return"
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
return {"return": cls}
class Cmd(StmtLeaf):
    """
    A command statement (break, continue or pass).
    """
value: CmdEnum
@classmethod
def Break(cls, **kwargs):
"""
Create Cmd instance representing a "break" statement.
"""
return cls(CmdEnum.BREAK, **kwargs)
@classmethod
def Continue(cls, **kwargs):
"""
Create Cmd instance representing a "continue" statement.
"""
return cls(CmdEnum.CONTINUE, **kwargs)
@classmethod
def Pass(cls, **kwargs):
"""
Create Cmd instance representing a "pass" statement.
"""
return cls(CmdEnum.PASS, **kwargs)
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
return {
"break": cls.Break,
"continue": cls.Continue,
CmdEnum.BREAK: cls.Break,
CmdEnum.CONTINUE: cls.Continue,
}
def tokens(self, ctx):
yield self.value.value
class Block(ast.BlockMixin, Stmt):
"""
Python block of statements.
"""
class Meta:
separators = (":", "")
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
return {"do": lambda *args: cls(map(to_stmt, args))}
def __init__(self, children=(), **kwargs):
super().__init__(map(to_stmt, children), **kwargs)
class Function(StmtNode):
"""
A function definition.
"""
name: Name
args: Tree
body: Block
annotation: Expr
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
def to_arg(expr):
if isinstance(expr, ArgDef):
return expr
elif isinstance(expr, (Name, Starred)):
return ArgDef(expr)
else:
cls_name = type(expr).__name__
raise TypeError(f"invalid argument type: {cls_name}")
def function(name, args, body):
fn = Function(Name(name), Tree("args", map(to_arg, args)), Block(body))
return fn
return {"def": function}
def __init__(self, name: Name, args: Tree, body: Block, annotation=None, **kwargs):
annotation = Void() if annotation is None else annotation
super().__init__(name, args, body, annotation, **kwargs)
def tokens(self, ctx):
yield "def "
yield from self.name.tokens(ctx)
yield "("
if self.args.children:
yield from intersperse(
", ", map(lambda x: x.tokens(ctx), self.args.children)
)
yield ")"
if self.annotation:
yield " -> "
yield from self.annotation.tokens(ctx)
yield from self.body.tokens_as_block(ctx)
class Del(StmtNode):
"""
Del statement (e.g., del <value>).
"""
expr: Expr
class Meta:
command = "del {expr}"
sexpr_symbol = "del"
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
def del_statement(expr, *args, **kwargs):
if args:
expr = Tuple([expr, *args])
return cls(expr, **kwargs)
return {"del": del_statement}
def tokens(self, ctx):
if isinstance(self.expr, Tuple) and self.expr.children:
yield ctx.start_line()
yield "del "
yield from self.expr.tokens(ctx, mode="expr-list")
else:
yield from super().tokens(ctx)
class Assign(StmtNode):
"""
Assignment statement. (e.g., <lhs> = <rhs>)
"""
lhs: Expr
rhs: Expr
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
e = to_expr
return {"=": lambda x, y: cls(e(x), e(y))}
def tokens(self, ctx):
yield ctx.start_line()
yield from self.lhs.tokens(ctx)
yield " = "
yield from self.rhs.tokens(ctx)
class Inplace(StmtNode):
"""
Complex assignment statement with inplace operator (e.g., x += 1)
"""
tag: InplaceOpEnum
lhs: Expr
rhs: Expr
op = alias("tag")
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
e = to_expr
fn = lambda op, x, y: cls(op, e(x), e(y))
sexprs = {op: partial(fn, op) for op in InplaceOpEnum}
sexprs.update({op.value: partial(fn, op) for op in InplaceOpEnum})
return sexprs
def tokens(self, ctx):
yield ctx.start_line()
yield from self.lhs.tokens(ctx)
yield f" {self.tag.value} "
yield from self.rhs.tokens(ctx)
class ExprStmt(StmtNode):
"""
Statement formed by a single expression.
"""
expr: Expr
def tokens(self, ctx):
yield ctx.start_line()
yield from self.expr.tokens(ctx)
class If(ast.BodyElseMixin, Stmt):
"""
A if block.
"""
cond: Expr
body: Block
other: Stmt
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
e = to_expr
s = to_stmt
return {"if": lambda cond, *args: cls(e(cond), *map(s, args))}
def __init__(self, cond, block, other=None, **kwargs):
other = Block([]) if other is None else other
super().__init__(cond, block, other, **kwargs)
def tokens(self, ctx, _command="if "):
yield ctx.start_line() + _command
yield from self.cond.tokens(ctx)
if isinstance(self.other, If):
yield from self.body.tokens_as_block(ctx)
yield from self.other.tokens(ctx, "elif ")
else:
yield from super().tokens(ctx)
class While(ast.BodyElseMixin, Stmt):
"""
While block.
"""
cond: Expr
body: Block
other: Block
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
e = to_expr
s = to_stmt
return {"while": lambda cond, *args: cls(e(cond), *map(s, args))}
def __init__(self, expr, block=None, other=None, **kwargs):
block = Block([]) if block is None else block
other = Block([]) if other is None else other
super().__init__(expr, block, other, **kwargs)
def tokens(self, ctx):
yield ctx.start_line() + "while "
yield from self.cond.tokens(ctx)
yield from super().tokens(ctx)
class ImportFrom(StmtNode):
"""
Import statement (from <mod> import <expr>)
"""
mod: Expr
expr: Expr
level: int
@classmethod
def _meta_sexpr_symbol_map(cls) -> dict:
def to_import(x):
if isinstance(x, dict) and len(x) == 1:
k, v = next(iter(x.items()))
return As(to_expr_or_name(k), to_expr_or_name(v))
elif isinstance(x, dict):
return Tuple([to_import({k: v}) for k, v in x.items()])
else:
return to_expr(x)
def import_from(mod, *args):
args = [to_import(x) for x in args]
args = args[0] if len(args) == 1 else args
return cls(to_expr(mod), args)
return {"import from": import_from}
def __init__(self, mod, expr=None, level=0, **kwargs):
expr = Atom(...) if expr is None else expr
super().__init__(mod, expr, level, **kwargs)
def tokens(self, ctx):
yield ctx.start_line()
yield "from "
yield from self.mod.tokens(ctx)
yield " import "
yield from self.expr.tokens(ctx)
|
11575001
|
from . import IBaseDev
from torch.cuda import empty_cache
class IBaseDevTorch(IBaseDev):
"""PyTorch specific base interface for development."""
def train(self, *args, **kwargs):
empty_cache()
return super(IBaseDevTorch, self).train(*args, **kwargs)
def eval(self, *args, **kwargs):
empty_cache()
return super(IBaseDevTorch, self).eval(*args, **kwargs)
|
11575004
|
from collections import namedtuple
CommandOption = namedtuple('CommandOption', ['option', 'value'])
CommandOptionWithNoValue = namedtuple('CommandOptionWithNoValue', ['option'])
ExecuteCommand = namedtuple('ExecuteCommand', ['bin', 'bin_value'])
def command_option_string_from(command):
if isinstance(command, CommandOption):
return "--{} \"{}\"".format(command.option, command.value) if command.value is not None else None
elif isinstance(command, CommandOptionWithNoValue):
return "--{}".format(command.option)
elif isinstance(command, ExecuteCommand):
return " ".join([command.bin, command.bin_value])
else:
return command
def command_string_from(command_options):
return " ".join(x for x in map(command_option_string_from, command_options) if x is not None)
def command_option_from(args_value, option_name, option_value=None):
if args_value is None:
return None
if args_value is True and option_value is None:
return CommandOptionWithNoValue(option_name)
return CommandOption(option_name, option_value)
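# Illustrative examples:
#   command_option_string_from(CommandOption('output', 'a.txt')) -> '--output "a.txt"'
#   command_option_string_from(CommandOptionWithNoValue('verbose')) -> '--verbose'
#   command_string_from([CommandOption('output', 'a.txt'),
#                        CommandOptionWithNoValue('verbose')]) -> '--output "a.txt" --verbose'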
|
11575018
|
import os
import Myloss
import dataloader
from modeling import model
import torch.optim
from modeling.fpn import *
from option import *
from utils import *
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # GPU only
device = get_device()
class Trainer():
def __init__(self):
self.scale_factor = args.scale_factor
self.net = model.enhance_net_nopool(self.scale_factor, conv_type=args.conv_type).to(device)
self.seg = fpn(args.num_of_SegClass).to(device)
self.seg_criterion = FocalLoss(gamma=2).to(device)
self.train_dataset = dataloader.lowlight_loader(args.lowlight_images_path)
self.train_loader = torch.utils.data.DataLoader(self.train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True)
self.L_color = Myloss.L_color()
self.L_spa = Myloss.L_spa8(patch_size=args.patch_size)
self.L_exp = Myloss.L_exp(16)
self.L_TV = Myloss.L_TV()
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
self.num_epochs = args.num_epochs
self.E = args.exp_level
self.grad_clip_norm = args.grad_clip_norm
self.display_iter = args.display_iter
self.snapshot_iter = args.snapshot_iter
self.snapshots_folder = args.snapshots_folder
        if args.load_pretrain:
            self.net.load_state_dict(torch.load(args.pretrain_dir, map_location=device))
            print("pretrained weights loaded")
def get_seg_loss(self, enhanced_image):
# segment the enhanced image
seg_input = enhanced_image.to(device)
seg_output = self.seg(seg_input).to(device)
# build seg output
target = (get_NoGT_target(seg_output)).data.to(device)
# calculate seg. loss
seg_loss = self.seg_criterion(seg_output, target)
return seg_loss
def get_loss(self, A, enhanced_image, img_lowlight, E):
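        # weighted sum of losses: total variation on the predicted map A,
        # spatial consistency, color constancy, exposure control, and a
        # 0.1-weighted segmentation loss on the enhanced image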
Loss_TV = 1600 * self.L_TV(A)
loss_spa = torch.mean(self.L_spa(enhanced_image, img_lowlight))
loss_col = 5 * torch.mean(self.L_color(enhanced_image))
loss_exp = 10 * torch.mean(self.L_exp(enhanced_image, E))
loss_seg = self.get_seg_loss(enhanced_image)
loss = Loss_TV + loss_spa + loss_col + loss_exp + 0.1 * loss_seg
return loss
def train(self):
self.net.train()
for epoch in range(self.num_epochs):
for iteration, img_lowlight in enumerate(self.train_loader):
img_lowlight = img_lowlight.to(device)
enhanced_image, A = self.net(img_lowlight)
loss = self.get_loss(A, enhanced_image, img_lowlight, self.E)
self.optimizer.zero_grad()
loss.backward()
                torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.grad_clip_norm)
self.optimizer.step()
if ((iteration + 1) % self.display_iter) == 0:
print("Loss at iteration", iteration + 1, ":", loss.item())
if ((iteration + 1) % self.snapshot_iter) == 0:
torch.save(self.net.state_dict(), self.snapshots_folder + "Epoch" + str(epoch) + '.pth')
if __name__ == "__main__":
t = Trainer()
t.train()
|
11575027
|
from typing import Dict, NamedTuple, Iterable
from evaluation.metric import Metric
class EvaluationAverages(NamedTuple):
inputs: float
outputs: float
conversions: float
moves: float
overall: float
class Evaluation:
def __init__(self, scores: Dict[int, "QuestionScores"]) -> None: # type: ignore
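        # macro-average precision and recall across all question scores,
        # then pair them into a Metric per category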
precision = Evaluation._precision(scores.values())
recall = Evaluation._recall(scores.values())
self.inputs = Metric(precision=precision.inputs, recall=recall.inputs)
self.outputs = Metric(precision=precision.outputs, recall=recall.outputs)
self.conversions = Metric(precision=precision.conversions, recall=recall.conversions)
self.moves = Metric(precision=precision.moves, recall=recall.moves)
self.overall = Metric(precision=precision.overall, recall=recall.overall)
@staticmethod
def _precision(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.precision
outputs += score.outputs.precision
conversions += score.conversions.precision
moves += score.moves.precision
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
@staticmethod
def _recall(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.recall
outputs += score.outputs.recall
conversions += score.conversions.recall
moves += score.moves.recall
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
|
11575036
|
from __future__ import print_function
import os
import tempfile
from piecash import open_book, create_book, GnucashException
FILE_1 = os.path.join(tempfile.gettempdir(), "not_there.gnucash")
FILE_2 = os.path.join(tempfile.gettempdir(), "example_file.gnucash")
if os.path.exists(FILE_2):
os.remove(FILE_2)
# open a file that isn't there, detect the error
try:
book = open_book(FILE_1)
except GnucashException as backend_exception:
print("OK", backend_exception)
# create a new file, this requires a file type specification
with create_book(FILE_2) as book:
pass
# open the new file, then try to open it a second time and detect the lock
# using the session as a context manager automatically releases the lock and closes the session
with open_book(FILE_2) as book:
try:
with open_book(FILE_2) as book_2:
pass
except GnucashException as backend_exception:
print("OK", backend_exception)
os.remove(FILE_2)
|
11575071
|
from secml.ml.scalers.tests import CScalerTestCases
from sklearn.preprocessing import StandardScaler
from secml.ml.scalers import CScalerStd
class TestCScalerStd(CScalerTestCases):
"""Unittests for CScalerStd."""
def test_forward(self):
"""Test for `.forward()` method."""
# mean should not be used for sparse arrays
for with_std in (True, False):
self.logger.info("Testing using std? {:}".format(with_std))
self._compare_scalers(CScalerStd(with_std=with_std),
StandardScaler(with_std=with_std),
self.array_dense)
self._compare_scalers(CScalerStd(with_std=with_std,
with_mean=False),
StandardScaler(with_std=with_std,
with_mean=False),
self.array_sparse)
self._compare_scalers(CScalerStd(with_std=with_std),
StandardScaler(with_std=with_std),
self.row_dense.atleast_2d())
self._compare_scalers(CScalerStd(with_std=with_std,
with_mean=False),
StandardScaler(with_std=with_std,
with_mean=False),
self.row_sparse)
self._compare_scalers(CScalerStd(with_std=with_std),
StandardScaler(with_std=with_std),
self.column_dense)
self._compare_scalers(CScalerStd(with_std=with_std,
with_mean=False),
StandardScaler(with_std=with_std,
with_mean=False),
self.column_sparse)
def test_mean_std(self):
"""Test using specific mean/std."""
for (mean, std) in [(1.5, 0.1),
((1.0, 1.1, 1.2, 1.3), (0.0, 0.1, 0.2, 0.3))]:
for array in [self.array_dense, self.array_sparse]:
self.logger.info("Original array is:\n{:}".format(array))
self.logger.info(
"Normalizing using mean: {:} std: {:}".format(mean, std))
                n = CScalerStd(with_mean=not array.issparse)
                n._fit(array)
                # sklearn's StandardScaler keeps its statistics in the
                # trailing-underscore attributes
                n.sklearn_scaler.mean_ = mean
                n.sklearn_scaler.scale_ = std
out = n._forward(array)
self.logger.info("Result is:\n{:}".format(out))
out_mean = out.mean(axis=0, keepdims=False)
out_std = out.std(axis=0, keepdims=False)
self.logger.info("Result mean is:\n{:}".format(out_mean))
self.logger.info("Result std is:\n{:}".format(out_std))
# def _array_test(array, ):
def test_chain(self):
"""Test a chain of preprocessors."""
self._test_chain(self.array_dense,
['minmax', 'pca', 'std'],
[{'feature_range': (-5, 5)}, {}, {}])
def test_chain_gradient(self):
"""Check gradient of a chain of preprocessors."""
self._test_chain_gradient(self.array_dense,
['minmax', 'std'],
[{'feature_range': (-5, 5)}, {}])
if __name__ == '__main__':
CScalerTestCases.main()
|
11575095
|
from . import models
def authenticate(chat):
"""
Authenticating an user means:
1. Getting the chat id.
2. Creating the user if it doesn't exist.
3. Updating the user info. Useful to get the latest username, first name & last name values.
4. Reporting the date and time of the latest user action.
This method should be called whenever you want to authenticate an user of your bot.
"""
    # anonymous group admins appear as GroupAnonymousBot
    if chat.username == 'GroupAnonymousBot':
        return None, True
    # not anonymous
user, _ = models.BotUser.objects.get_or_create(chat_id=chat['id'])
user.chat_id = chat['id']
# get username
try:
user.username = chat.username
except Exception as e:
user.username = None
# get first name
try:
user.first_name = chat.first_name
except Exception as e:
user.first_name = None
# get last name
try:
user.last_name = chat.last_name
except Exception as e:
user.last_name = None
# get language preferences
try:
user.language = chat.language_code.upper()
except Exception as e:
user.language = None
user.report_last_action()
user.save()
return user, False
|
11575146
|
import asyncio
from .constants import *
from .wireformat import *
from .hosts import HostsResolver
from .system import SystemResolver
from .mdns import MulticastResolver
class SmartResolver(object):
def __init__(self):
self.sys = SystemResolver()
self.hosts = HostsResolver()
self.mdns = MulticastResolver()
def lookup(self, query, prefer_ipv6=None, should_cache=True, recursive=False):
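        # resolution strategy: /etc/hosts first, then (on NXDOMAIN) mDNS for
        # .local names or the system resolver for everything else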
outer = asyncio.Future()
# First, try looking in /etc/hosts
f = self.hosts.lookup(query)
def callback(f):
if f.cancelled():
outer.cancel()
return
exc = f.exception()
if exc is not None:
outer.set_exception(exc)
return
reply = f.result()
if reply.rcode == NXDOMAIN:
if query.name.endswith(b'.local'):
f2 = self.mdns.lookup(query, use_ipv6=prefer_ipv6)
else:
f2 = self.sys.lookup(query, prefer_ipv6,
should_cache, recursive)
def cb2(f):
if f.cancelled():
outer.cancel()
return
exc = f.exception()
if exc is not None:
outer.set_exception(exc)
return
outer.set_result(f.result())
f2.add_done_callback(cb2)
else:
outer.set_result(reply)
f.add_done_callback(callback)
return outer
|
11575192
|
from collections import OrderedDict
from datetime import timedelta
import django.utils.timezone
import sal.plugin
class NewMachines(sal.plugin.Widget):
description = 'New machines'
def _get_range(self):
today = django.utils.timezone.now() - timedelta(hours=24)
ranges = OrderedDict(Today=today)
ranges['This Week'] = today - timedelta(days=7)
ranges['This Month'] = today - timedelta(days=30)
return ranges
def get_context(self, queryset, **kwargs):
context = self.super_get_context(queryset, **kwargs)
data = OrderedDict()
for key, date_range in self._get_range().items():
data[key] = queryset.filter(first_checkin__gte=date_range).count()
context['data'] = data
return context
def filter(self, machines, data):
try:
machines = machines.filter(first_checkin__gte=self._get_range()[data])
except KeyError:
return None, None
title = 'Machines first seen {}'.format(data.lower())
return machines, title
|
11575217
|
from itertools import permutations
# generate all permutations of the digits 0-6 and write them to numbers.txt
with open("numbers.txt", "w") as f:
for i in permutations(range(7), 7):
line = str(i).strip("()")
line = line.split(",")
line = "".join(line)
f.write(line)
f.write("\n")
|
11575323
|
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time
from sklearn.metrics import pairwise
from PIL import ImageGrab
from imutils.video import FPS
import copy
font = cv2.FONT_HERSHEY_SIMPLEX
# def all_lines(img, lines, store):
# height, width = img.shape
# try:
# for line in lines:
# coords = line[0]
# cv2.line(store, (coords[0], coords[1]), (coords[2], coords[3]), [0, 255, 255], 3) # yellow color vertical
# except:
# pass
# print("exception")
# cv2.imshow("store", store)
def click_and_crop(event, x, y, flags, param):
global refPt
# if the left mouse button was clicked, record the starting (x, y) coordinates
if event == cv2.EVENT_LBUTTONDOWN:
refPt.append([x, y])
def roi(img, vertices):
mask = np.zeros_like(img)
cv2.fillPoly(mask, vertices, [255, 255, 255])
# cv2.imshow("mask",mask)
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(lanePointer , dashPointer , lane_image , image_np , flagLanes):
height , width , channels= image_np.shape
gray_image = cv2.cvtColor(lane_image , cv2.COLOR_BGR2GRAY)
canny_image = cv2.Canny(gray_image, threshold1 = 100 , threshold2 = 100)
cv2.imshow("entire canny",canny_image)
canny_image = cv2.GaussianBlur(canny_image,(3,3),0)
mask = np.zeros_like(canny_image)
vertices = np.array(lanePointer, np.int32)
cv2.fillPoly(mask, [vertices], [255,255,255])
cv2.imshow("mask",mask)
vertices = np.array(dashPointer, np.int32)
cv2.fillPoly(mask, [vertices], [0,0,0])
canny_image = cv2.bitwise_and(canny_image, mask)
cv2.imshow("canny with mask",canny_image)
# cv2.putText(lane_image, str(flagLanes), (30,130), font, 1.2, (0,0,255), 2,cv2.LINE_AA) # array of 20 integers in flagLanes
lines = cv2.HoughLinesP(canny_image, 1, np.pi/180, 180, np.array([]), minLineLength = 15, maxLineGap = 15)
try:
flagCounter = 0
if len(lines):
flagLanes.pop(0)
for line in lines:
coords = line[0]
x1 , y1 , x2 , y2 = coords[0] , coords[1] , coords[2] , coords[3]
                if x2 == x1:
                    cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 3) # yellow color vertical
                else:
                    slope=(y1 - y2)/(x2 - x1)
                    if -0.3 < slope < 0.3:
                        # near-horizontal segments are ignored
                        # cv2.line(lane_image, (x1 , y1), (x2 , y2), [255,0,0], 2) # blue color horizontal
                        pass
elif slope < 0:
if width//2 > max(x1 , x2):
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,0,0], 2) # black color vertical
flagCounter = 1
else:
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 2) # yellow color vertical
elif slope > 0:
if width//2 < min(x1 , x2):
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,0,0], 2) # black color vertical
flagCounter = 1
else:
slope=str(slope)[:5]
# cv2.putText(lane_image, str(slope), (x1 , y1), font, 3, [122,32,12], 2)
cv2.line(lane_image, (x1 , y1), (x2 , y2), [0,255,255], 2) # yellow color vertical
if flagCounter == 1:
flagLanes.append(1)
else:
flagLanes.append(0)
if sum(flagLanes) > 12:
cv2.putText(lane_image, "Get back to your lane" , (370,80), font , 1.2, (0,255,0), 2,cv2.LINE_AA)
except:
pass
cv2.imshow("lane_image",lane_image)
# out1.write(lane_image)
cap=cv2.VideoCapture('../videos/r.mp4')
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out1 = cv2.VideoWriter('lanes.avi', fourcc, 25, (1280,720))
start_frame = 0*24
flagLanes = [0] * 20
def selectRegions(image , text , flag):
global refPt
clone = copy.deepcopy(image)
while True:
key = cv2.waitKey(1) & 0xFF
# display the image and wait for a keypress
if flag==1:
cv2.putText(image, text , (240,30), font , 1.2, [0,255,255], 2,cv2.LINE_AA)
cv2.putText(image, "Press 'r' key to reset everything.", (290,70), font , 1.2, [0,255,255], 2,cv2.LINE_AA)
cv2.putText(image, "Press 'd' key if the region selection is done.", (180,110), font , 1.2, [0,255,255], 2,cv2.LINE_AA)
else:
cv2.putText(image, text , (240,30), font , 1.2, [0,255,0], 2,cv2.LINE_AA)
cv2.putText(image, "Press 'r' key to reset everything.", (290,70), font , 1.2, [0,255,0], 2,cv2.LINE_AA)
cv2.putText(image, "Press 'd' key if the region selection is done.", (180,110), font , 1.2, [0,255,0], 2,cv2.LINE_AA)
for pt in range(len(refPt)-1):
pt1 , pt2 = refPt[pt] , refPt[pt+1]
cv2.line(image, (pt1[0],pt1[1]), (pt2[0],pt2[1]), [0,255,255], 3)
cv2.imshow("ROI", image)
if key == ord("r"):
image = copy.deepcopy(clone)
refPt = []
        elif key == ord("d"):
            # both region-selection passes finish the same way
            return 0
elif key == ord('q'):
return 1
def day():
global refPt
_ , image = cap.read()
image=imutils.resize(image, width=1280)
ctt = 0
Quit = selectRegions(copy.deepcopy(image) , "Click points to select your vehicle dash." , 1)
dashPointer = refPt
if len(dashPointer) <= 2:
dashPointer = [[0,0], [0,0], [0,0]]
refPt = []
print("For dash: ",dashPointer)
if Quit == 1:
return
Quit = selectRegions(copy.deepcopy(image) , "Click points to select bird's eye view." , 2)
lanePointer = refPt
if len(lanePointer) <= 2:
lanePointer = [[114, 690], [502, 384], [819, 391], [1201, 695]]
print("For lanes: ",lanePointer)
if Quit == 1:
return
cv2.destroyWindow("ROI")
fps = FPS().start()
while True:
        ret, frame = cap.read()
        # check the read result before resizing, otherwise imutils.resize(None)
        # raises at the end of the video instead of breaking cleanly
        if not ret:
            break
        frame = imutils.resize(frame, width=1280)
# print(ctt ,fps._numFrames)
# ctt = ctt + 1
lane_image = copy.deepcopy(frame)
draw_lines(lanePointer , dashPointer , lane_image , frame , flagLanes)
cv2.imshow("frame", frame)
key = cv2.waitKey(1) & 0xFF
fps.update()
if key == ord('q'):
break
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
refPt = []  # to store reference pointers
flag_night_counter = 0 # counter to count night frames
cap.set(1,start_frame)
_ , image = cap.read()
image=imutils.resize(image, width=1280)
cv2.namedWindow("ROI")
cv2.setMouseCallback("ROI", click_and_crop)
cap.set(1 , start_frame)
day()
cv2.destroyAllWindows()
cap.release()
# out1.release()
# lanes r
# a 451(lanes showing good)
# b 115(warning shows good )
# d 0
# d 81
|
11575344
|
import envi
import envi.bits as e_bits
import struct
# from disasm import H8ImmOper, H8RegDirOper, H8RegIndirOper, H8AbsAddrOper, H8PcOffsetOper, H8RegMultiOper, H8MemIndirOper
import envi.archs.h8.regs as e_regs
import envi.archs.h8.const as e_const
import envi.archs.h8.operands as h8_operands
bcc = [
('bra', envi.IF_NOFALL | envi.IF_BRANCH),
('brn', envi.IF_BRANCH | envi.IF_COND),
('bhi', envi.IF_BRANCH | envi.IF_COND),
('bls', envi.IF_BRANCH | envi.IF_COND),
('bcc', envi.IF_BRANCH | envi.IF_COND),
('bcs', envi.IF_BRANCH | envi.IF_COND),
('bne', envi.IF_BRANCH | envi.IF_COND),
('beq', envi.IF_BRANCH | envi.IF_COND),
('bvc', envi.IF_BRANCH | envi.IF_COND),
('bvs', envi.IF_BRANCH | envi.IF_COND),
('bpl', envi.IF_BRANCH | envi.IF_COND),
('bmi', envi.IF_BRANCH | envi.IF_COND),
('bge', envi.IF_BRANCH | envi.IF_COND),
('blt', envi.IF_BRANCH | envi.IF_COND),
('bgt', envi.IF_BRANCH | envi.IF_COND),
('ble', envi.IF_BRANCH | envi.IF_COND),
]
def p_CCR_Rd(va, val, buf, off, tsize):
# stc
iflags = 0
op = val >> 4
rd = val & 0xf
exr = op & 1
opers = (
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, va),
h8_operands.H8RegDirOper(rd, tsize, va),
)
return (op, None, opers, iflags, 2)
def p_Rs_CCR(va, val, buf, off, tsize):
# ldc
iflags = 0
op = val >> 4
rs = val & 0xf
exr = op & 1
opers = (
h8_operands.H8RegDirOper(rs, tsize, va),
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, va),
)
return (op, None, opers, iflags, 2)
def p_aAA8_Rd(va, val, buf, off, tsize):
# mov 0x2###
iflags = 0
op = val >> 12
Rd = (val >> 8) & 0xf
aAA8 = val & 0xff
opers = (
h8_operands.H8AbsAddrOper(aAA8, tsize, aasize=1),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_Rs_aAA8(va, val, buf, off, tsize):
# mov 0x3###
iflags = 0
op = val >> 12
Rs = (val >> 8) & 0xf
aAA8 = val & 0xff
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va, 0),
h8_operands.H8AbsAddrOper(aAA8, tsize, aasize=1),
)
return (op, None, opers, iflags, 2)
def p_i2(va, val, buf, off, tsize):
# trapa
iflags = 0
op = 0x57
i2 = (val >> 4) & 0x3
opers = (
h8_operands.H8ImmOper(i2, tsize),
)
return (op, None, opers, iflags, 2)
def p_i3_Rd(va, val, buf, off, tsize):
# band, bclr, biand, bild, bior, bist, bixor, bld, bnot, bor, bset, bst, btst, bxor
iflags = 0
op = val >> 7
i3 = (val >> 4) & 0x7
Rd = val & 0xf
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_i3_aERd(va, val, buf, off, tsize):
# band, bclr, biand, bild, bior, bist, bixor, bld, bnot, bor, bset, bst, btst, bxor
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = ((((val >> 3) & 0xfff0) | (val & 0xf)) << 13) | ((val2 >> 3) & 0xfff0) | (val & 0xf)
i3 = (val2 >> 4) & 0x7
ERd = (val >> 4) & 0x7
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8RegIndirOper(ERd, tsize, va),
)
return (op, None, opers, iflags, 4)
def p_i3_aAA8(va, val, buf, off, tsize):
# band, bclr, biand, bild, bior, bist, bixor, bld, bnot, bor, bset, bst, btst, bxor
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = (val >> 16) | (val & 0xf) | (val2 >> 15) | (val & 0xf)
i3 = (val2 >> 4) & 0x7
aa = val & 0xff
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8AbsAddrOper(aa, tsize, aasize=1),
)
return (op, None, opers, iflags, 4)
def p_i8_CCR(va, val, buf, off, tsize, exr=0):
# andc
iflags = 0
op = val >> 8
i8 = val & 0xff
opers = (
h8_operands.H8ImmOper(i8, 1),
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, 0),
)
return (op, None, opers, iflags, 2)
def p_i8_Rd(va, val, buf, off, tsize):
# add.b, addx, and.b, cmp.b
iflags = 0
op = val >> 4
i8 = val & 0xff
Rd = (val >> 8) & 0xf
opers = (
h8_operands.H8ImmOper(i8, 1),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_i16_Rd(va, val, buf, off, tsize):
# add.w, and.w, cmp.w
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = val >> 4
i16 = val2
Rd = val & 0xf
opers = (
h8_operands.H8ImmOper(i16, 2),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 4)
def p_i32_ERd(va, val, buf, off, tsize):
# add.l, and.l, cmp.l
val2, = struct.unpack('>I', buf[off+2: off+6])
iflags = 0
op = val >> 3
i32 = val2
ERd = val & 0x7
opers = (
h8_operands.H8ImmOper(i32, 4),
h8_operands.H8RegDirOper(ERd, tsize, va, 0),
)
return (op, None, opers, iflags, 6)
def p_Rd(va, val, buf, off, tsize):
# daa, das, dec.b, exts.w, extu.w, inc.b
iflags = 0
op = val >> 4
Rd = val & 0xf
opers = (
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_Rs_Rd(va, val, buf, off, tsize):
# add.b, add.w, addx, and.b, and.w, cmp.b, cmp.w, divxu.b
iflags = 0
op = val >> 16
Rs = (val >> 4) & 0xf
Rd = val & 0xf
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va, 0),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_Rs_Rd_mul(va, val, buf, off, tsize):
iflags = 0
op = val >> 16
Rs = (val >> 4) & 0xf
Rd = val & 0xf
opers = (
h8_operands.H8RegDirOper(Rs, 1, va, 0),
h8_operands.H8RegDirOper(Rd, 2, va, 0),
)
return (op, None, opers, iflags, 2)
def p_Rs_Rd_4b(va, val, buf, off, tsize):
# divxs.b, mulxs.b
val2, = struct.unpack('>H', buf[off+2: off+4])
oszbit = (val2 >> 9) & 1
iflags = (e_const.IF_B, e_const.IF_W)[oszbit]
stsize, dtsize = ((1, 2), (2, 4))[oszbit]
op = (val << 8) | (val2 >> 8)
Rs = (val2 >> 4) & 0xf
Rd = val2 & 0xf
opers = (
h8_operands.H8RegDirOper(Rs, stsize, va, 0),
h8_operands.H8RegDirOper(Rd, dtsize, va, 0),
)
return (op, None, opers, iflags, 4)
def p_Rs_ERd(va, val, buf, off, tsize):
# mulxu.w, divxu.w
iflags = 0
op = ((val >> 8) << 1) | ((val >> 3) & 1)
Rs = (val >> 4) & 0xf
ERd = val & 0x7
# FIXME: make sure ER# and R# have correct metaregister values
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va, 0),
h8_operands.H8RegDirOper(ERd, 4, va, 0),
)
return (op, None, opers, iflags, 2)
def p_ERs_ERd(va, val, buf, off, tsize):
# add.l, cmp.l
iflags = e_const.IF_L
op = ((val >> 6) & 0xfffe) | ((val >> 3) & 1) # first byte, and bits 3 and 7 of second byte
ERs = (val >> 4) & 0x7
ERd = val & 0x7
opers = (
h8_operands.H8RegDirOper(ERs, tsize, va, 0),
h8_operands.H8RegDirOper(ERd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_Rs_ERd_4b(va, val, buf, off, tsize):
# divxs.w
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = (val << 8) | (val2 >> 8)
Rs = (val2 >> 4) & 0xf
ERd = val2 & 0x7
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va, 0),
h8_operands.H8RegDirOper(ERd, tsize, va, 0),
)
return (op, None, opers, iflags, 4)
def p_ERd(va, val, buf, off, tsize):
# exts.l, extu.l
iflags = 0
op = val >> 4
ERd = val & 0x7
opers = (h8_operands.H8RegDirOper(ERd, tsize, va, 0),)
return (op, None, opers, iflags, 2)
def p_Rn_Rd(va, val, buf, off, tsize):
# bclr, bset, btst
iflags = 0
op = val >> 8
Rn = (val >> 4) & 0xf
Rd = val & 0xf
opers = (
h8_operands.H8RegDirOper(Rn, tsize, va, 0),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_68_69_6e_6f(va, val, buf, off, tsize):
# mov 0x68, 0x69, 0x6e, 0x6f
iflags = 0
op = (val >> 7)
aERs = (val >> 4) & 0x7
Rd = (val) & 0xf
if (val & 0x600):
disp, = struct.unpack('>H', buf[off+2: off+4])
dispsz = 2
isz = 4
else:
disp, dispsz = 0, 0
isz = 2
if val & 0x80: # reverse operand order
opers = (
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
h8_operands.H8RegIndirOper(aERs, tsize, va, disp=disp, dispsz=dispsz, oflags=0),
)
else:
opers = (
h8_operands.H8RegIndirOper(aERs, tsize, va, disp=disp, dispsz=dispsz, oflags=0),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, None, opers, iflags, isz)
def p_Rn_aERd(va, val, buf, off, tsize):
# bclr, bset, btst
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = ((val >> 12) & 0xfff0) | (val & 0xf) | ((val2 >> 4) & 0xfff0) | (val2 & 0xf)
aERd = (val >> 4) & 0x7
Rn = (val2 >> 4) & 0xf
opers = (
h8_operands.H8RegDirOper(Rn, tsize, va, 0),
h8_operands.H8RegIndirOper(aERd, tsize, va, disp=0, oflags=0),
)
return (op, None, opers, iflags, 4)
def p_Rn_aAA8(va, val, buf, off, tsize):
# bclr, bset, btst
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = (val & 0xff00) | ((val2 >> 4) & 0xff0) | (val2 & 0xf)
Rn = (val2 >> 4) & 0xf
aAA8 = val & 0xff
opers = (
h8_operands.H8RegDirOper(Rn, tsize, va, 0),
h8_operands.H8AbsAddrOper(aAA8, tsize, aasize=1),
)
return (op, None, opers, iflags, 4)
def p_aERn(va, val, buf, off, tsize):
# jmp, jsr
iflags = 0
op = ((val >> 3) & 0xfff0) | (val & 0xf)
aERn = (val >> 4) & 0x7
opers = (
h8_operands.H8RegIndirOper(aERn, tsize, va, 0),
)
return (op, None, opers, iflags, 2)
def p_aAA24(va, val, buf, off, tsize):
# jmp, jsr
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = val >> 8
aAA24 = ((val & 0xf) << 16) | val2
opers = (
h8_operands.H8AbsAddrOper(aAA24, tsize, aasize=3),
)
return (op, None, opers, iflags, 4)
def p_aaAA8(va, val, buf, off, tsize):
# jmp, jsr
iflags = 0
op = val >> 8
aaAA8 = val & 0xff
opers = (
h8_operands.H8MemIndirOper(aaAA8),
)
return (op, None, opers, iflags, 2)
def p_disp8(va, val, buf, off, tsize):
# bcc, bsr
iflags = 0
op = val >> 8
disp8 = e_bits.signed(val & 0xfe, 1)
opers = (
h8_operands.H8PcOffsetOper(disp8, va, 1),
)
return (op, None, opers, iflags, 2)
def p_disp16(va, val, buf, off, tsize):
# bcc, bsr
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = val
disp16 = e_bits.signed(val2 & 0xfffffe, 2)
mnem = None
if (op & 0xf00 == 0x800):
opnibble = (val >> 4) & 0xf
mnem, iflags = bcc[opnibble]
opers = (
h8_operands.H8PcOffsetOper(disp16, va, 2),
)
return (op, mnem, opers, iflags, 4)
def p_Rs_aAA16(va, val, buf, off, tsize):
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = val >> 4
Rs = val & 0xf
aAA16 = val2
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va),
h8_operands.H8AbsAddrOper(aAA16, tsize, aasize=2),
)
return (op, None, opers, iflags, 4)
def p_Rs_aAA24(va, val, buf, off, tsize):
val2, = struct.unpack('>I', buf[off+2: off+6])
iflags = 0
op = val >> 4
Rs = val & 0xf
aAA24 = val2 & 0xffffff
opers = (
h8_operands.H8RegDirOper(Rs, tsize, va),
h8_operands.H8AbsAddrOper(aAA24, tsize, aasize=4),
)
return (op, None, opers, iflags, 6)
def p_aAA16_Rd(va, val, buf, off, tsize):
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op = val >> 4
Rd = val & 0xf
aAA16 = val2
opers = (
h8_operands.H8AbsAddrOper(aAA16, tsize, aasize=2),
h8_operands.H8RegDirOper(Rd, tsize, va),
)
return (op, None, opers, iflags, 4)
def p_aAA24_Rd(va, val, buf, off, tsize):
val2, = struct.unpack('>I', buf[off+2: off+6])
iflags = 0
op = val >> 4
Rd = val & 0xf
aAA24 = val2 & 0xffffff
opers = (
h8_operands.H8AbsAddrOper(aAA24, tsize, aasize=4),
h8_operands.H8RegDirOper(Rd, tsize, va),
)
return (op, None, opers, iflags, 6)
def p_nooperands(va, val, buf, off, tsize):
# eepmov.b, eepmov.w,
iflags = 0
op = val
opers = tuple()
return (op, None, opers, iflags, 2)
# 60-67, 70-77, 7d, 7f, 7c, 7e (converge?)
bit_dbles = [
('bset', 0),
('bset', 0),
('bnot', 0),
('bnot', 0),
('bclr', 0),
('bclr', 0),
('btst', 0),
('bist', 0),
('bor', 0),
('bior', 0),
('bxor', 0),
('bixor', 0),
('band', 0),
('biand', 0),
('bst', 0),
('bist', 0),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
bit_dbles.extend(bit_dbles)
bit_dbles[0x2e] = ('bld', 0)
bit_dbles[0x2f] = ('bild', 0)
def getBitDbl_OpMnem(val, bitlist=bit_dbles):
op = val >> 7
mnem, flags = bitlist[(op & 0x3f)]
return op, mnem, flags
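# Decode sketch (hand-checked against the table above, illustrative only): the
# lookup index is bits 12..7 of the 16-bit instruction word, so e.g.
#   0x7060 -> (0x7060 >> 7) & 0x3f == 0x20 -> ('bset', 0)
#   0x7760 -> (0x7760 >> 7) & 0x3f == 0x2e -> ('bld', 0)
# The extend() above doubles the table so that bit 12 (the 0x6x vs 0x7x opcode
# rows) is ignored, with 'bld'/'bild' patched in at 0x2e/0x2f afterwards.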
def p_Bit_Doubles(va, val, buf, off, tsize):
op, mnem, iflags = getBitDbl_OpMnem(val)
i3 = (val >> 4) & 0x7
Rd = val & 0xf
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8RegDirOper(Rd, tsize, va, 0),
)
return (op, mnem, opers, iflags, 2)
def p_01(va, val, buf, off, tsize):
mnem = None
iflags = 0
opers = None
diff = (val >> 4) & 0xf
if diff == 8:
# sleep
op = 0x0180
mnem = 'sleep'
opers = tuple()
return op, mnem, opers, iflags, 2
val2, = struct.unpack('>H', buf[off+2: off+4])
isz = 4
op = (val << 9) | (val2 >> 7)
if diff == 0:
mnem = 'mov'
# all 0100#### opcodes share these:
tsize = 4
iflags |= e_const.IF_L
d2 = val2 >> 8
# mov 0100##... where ## is basically another mov encoding with different register sizes
if d2 == 0x69:
erd = (val2 >> 4) & 7
ers = val2 & 7
if val2 & 0x80:
opers = (
h8_operands.H8RegDirOper(ers, tsize, va),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
else:
opers = (
h8_operands.H8RegIndirOper(erd, tsize, va),
h8_operands.H8RegDirOper(ers, tsize, va),
)
elif d2 == 0x6b:
if val2 & 0x20:
isz = 8
val3, = struct.unpack('>I', buf[off+4:off+8])
if val2 & 0x80:
# a
erd = val2 & 7
aa = val3 & 0xffffffff
opers = (
h8_operands.H8RegDirOper(erd, tsize, va),
h8_operands.H8AbsAddrOper(aa, tsize, aasize=4),
)
else:
# 2
ers = val2 & 7
aa = val3 & 0xffffffff
opers = (
h8_operands.H8AbsAddrOper(aa, tsize, aasize=4),
h8_operands.H8RegDirOper(ers, tsize, va),
)
else:
val3, = struct.unpack('>H', buf[off+4:off+6])
isz = 6
if val2 & 0x80:
# 8
erd = val2 & 7
aa = val3 & 0xffff
opers = (
h8_operands.H8RegDirOper(erd, tsize, va),
h8_operands.H8AbsAddrOper(aa, tsize, aasize=2),
)
else:
# 0
ers = val2 & 7
aa = val3 & 0xffff
opers = (
h8_operands.H8AbsAddrOper(aa, tsize, aasize=2),
h8_operands.H8RegDirOper(ers, tsize, va),
)
elif d2 == 0x6d: # TODO: test me!!
newop, mnem, opers, iflags, nisz = p_6c_6d_0100(va, val2, buf, off+2, 4)
isz = nisz + 2
op = newop | (0x01000000)
elif d2 == 0x6f:
disp, = struct.unpack('>H', buf[off+4:off+6])
isz = 6
er0 = val2 & 7
er1 = (val2 >> 4) & 7
if val2 & 0x80:
# mov.l ERs, @(d:16,ERd)
opers = (
h8_operands.H8RegDirOper(er0, tsize, va),
h8_operands.H8RegIndirOper(er1, tsize, va, disp, dispsz=2),
)
else:
# mov.l @(d:16,ERs), ERd
opers = (
h8_operands.H8RegIndirOper(er1, tsize, va, disp, dispsz=2),
h8_operands.H8RegDirOper(er0, tsize, va),
)
elif d2 == 0x78:
isz = 10
val3, disp = struct.unpack('>HI', buf[off + 4:off + 10])
if val3 & 0xff20 != 0x6b20:
raise envi.InvalidInstruction(bytez=buf[off:off + 16], va=va)
er0 = val3 & 7
er1 = (val2 >> 4) & 7
if (val3 & 0x80):
# mov.l ERs, @(d:24,ERd)
opers = (
h8_operands.H8RegDirOper(er0, tsize, va),
h8_operands.H8RegIndirOper(er1, tsize, va, disp, dispsz=4),
)
else:
# mov.l @(d:24,ERs), ERd
opers = (
h8_operands.H8RegIndirOper(er1, tsize, va, disp, dispsz=4),
h8_operands.H8RegDirOper(er0, tsize, va),
)
elif diff in (1, 2, 3):
# ldm/stm (ERn-ERn+diff), @-SP
iflags = e_const.IF_L
tsize = 4
optest = val2 & 0xfff8
rn = val2 & 0x7
rcount = diff + 1
if optest == 0x6df0:
mnem = 'stm'
opers = (
h8_operands.H8RegMultiOper(rn, rcount),
h8_operands.H8RegIndirOper(e_const.REG_SP, tsize, va, 0, oflags=e_const.OF_PREDEC),
)
elif optest == 0x6d70:
mnem = 'ldm'
opers = (
h8_operands.H8RegIndirOper(e_const.REG_SP, tsize, va, 0, oflags=e_const.OF_POSTINC),
h8_operands.H8RegMultiOper(rn-diff, rcount),
)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
elif diff == 4:
# ldc/stc - anything that touches ccr or exr
# we'll build it for ldc, and reverse it if it's stc
d2 = val2 >> 8
isStc = (val2 >> 7) & 1
oflags = 0
tsize = 2
exr = val & 0x1
if d2 == 6:
op, nmnem, opers, iflags, nisz = p_i8_CCR(va, val2, buf, off, tsize, exr)
return op, 'andc', opers, iflags, isz
elif d2 == 5:
op, nmnem, opers, iflags, nisz = p_i8_CCR(va, val2, buf, off, tsize, exr)
return op, 'xorc', opers, iflags, isz
else:
iflags = e_const.IF_W
tsize = 2
if d2 == 0x04: # xx:8, EXR
op, nmnem, opers, iflags, nisz = p_i8_CCR(va, val2, buf, off, tsize, exr)
return op, 'orc', opers, iflags, isz
elif d2 == 0x07: # xx:8, EXR
op, nmnem, opers, niflags, nisz = p_i8_CCR(va, val2, buf, off, tsize, exr)
iflags = e_const.IF_B
return op, 'ldc', opers, iflags, isz
elif d2 in (0x69, 0x6d): # @ERs,CCR / @ERs+,CCR
if d2 == 0x6d:
oflags = e_const.OF_POSTINC
ers = (val2 >> 4) & 0x7
opers = (
h8_operands.H8RegIndirOper(ers, tsize, va, oflags=oflags),
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, va)
)
            elif d2 in (0x6f, 0x78):  # @(d:16,ERs),CCR / @(d:24,ERs),CCR
if d2 == 0x78:
val3, disp = struct.unpack('>HI', buf[off+4:off+10])
isStc = (val3 >> 7) & 1
isz = 10
dispsz = 4
else:
disp, = struct.unpack('>H', buf[off+4:off+6])
isz = 6
dispsz = 2
ers = (val2 >> 4) & 0x7
opers = (
h8_operands.H8RegIndirOper(ers, tsize, va, disp, dispsz),
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, va)
)
elif d2 == 0x6b: # @aa:16,CCR / @aa:24,CCR
if val2 & 0x20:
aa, = struct.unpack('>I', buf[off+4:off+8])
isz = 8
aasize = 4
else:
aa, = struct.unpack('>H', buf[off+4:off+6])
isz = 6
aasize = 2
isStc = (val2 >> 7) & 1
opers = (
h8_operands.H8AbsAddrOper(aa, tsize, aasize),
h8_operands.H8RegDirOper(e_regs.REG_CCR + exr, 4, va)
)
# after all the decisions...
mnem = ('ldc', 'stc')[isStc]
if isStc:
opers = opers[::-1]
elif diff == 0xc:
if val2 & 0xfd00 == 0x5000:
# mulxs
mnem = 'mulxs'
op, nmnem, opers, iflags, nisz = p_Rs_Rd_4b(va, val, buf, off, tsize=1)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
elif diff == 0xd:
if val2 & 0xfd00 == 0x5100:
mnem = 'divxs'
# divxs
op, nmnem, opers, iflags, nisz = p_Rs_Rd_4b(va, val, buf, off, tsize)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
elif diff == 0xe:
if val2 & 0xff00 == 0x7b00:
mnem = 'tas' # FIXME: check out what this decodes to
tsize = 1
erd = (val2 >> 4) & 7
opers = (h8_operands.H8RegIndirOper(erd, tsize, va, oflags=0),)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
elif diff == 0xf:
if val2 & 0xfc00 == 0x6400:
# or/xor/and
nop, nmnem, opers, iflags, nisz = p_ERs_ERd(va, val2, buf, off, tsize=4)
op = (val << 8) | (val2 >> 8)
mnembits = (val2 >> 8) & 3
mnem = ('or', 'xor', 'and')[mnembits]
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
return (op, mnem, opers, iflags, isz)
def p_0a_1a(va, val, buf, off, tsize):
diff = (val >> 12)
if val & 0xf0 == 0:
mnem = ('inc', 'dec')[diff]
op, nmnem, opers, iflags, isz = p_Rd(va, val, buf, off, tsize=1)
iflags = e_const.IF_B
elif val & 0xf0 >= 0x80:
mnem = ('add', 'sub')[diff]
op, nmnem, opers, iflags, isz = p_ERs_ERd(va, val, buf, off, tsize=4)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
return (op, mnem, opers, iflags, isz)
data_0b = (
(4, 0, 1, 'adds'),
None,
None,
None,
None,
(2, e_const.IF_W, 1, 'inc'),
None,
(4, e_const.IF_L, 1, 'inc'),
(4, 0, 2, 'adds'),
(4, 0, 4, 'adds'),
None,
None,
None,
(2, e_const.IF_W, 2, 'inc'),
None,
(4, e_const.IF_L, 2, 'inc'),
)
data_1b = (
(4, 0, 1, 'subs'),
None,
None,
None,
None,
(2, e_const.IF_W, 1, 'dec'),
None,
(4, e_const.IF_L, 1, 'dec'),
(4, 0, 2, 'subs'),
(4, 0, 4, 'subs'),
None,
None,
None,
(2, e_const.IF_W, 2, 'dec'),
None,
(4, e_const.IF_L, 2, 'dec'),
)
def p_0b_1b(va, val, buf, off, tsize):
table = (data_0b, data_1b)[val >> 12]
diff = (val >> 4) & 0xf
tsize, iflags, imm, mnem = table[diff]
op = val >> 4
ERd = val & 0xf
opers = (
h8_operands.H8ImmOper(imm, tsize),
h8_operands.H8RegDirOper(ERd, tsize, va, 0),
)
return (op, mnem, opers, iflags, 2)
def p_0f_1f(va, val, buf, off, tsize):
aors = val >> 12
diff = val & 0xf0
if diff == 0:
op = val >> 4
mnem = ('daa', 'das')[aors]
iflags = 0
rd = val & 0xf
        opers = (h8_operands.H8RegDirOper(rd, 1, va=va, oflags=0),)
elif diff >= 0x80:
mnem = ('mov', 'cmp')[aors]
op, nmnem, opers, iflags, isz = p_ERs_ERd(va, val, buf, off, tsize=4)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
return (op, mnem, opers, iflags, 2)
shift_info = []
for name in ('shll', 'shal', 'shlr', 'shar', 'rotxl', 'rotl', 'rotxr', 'rotr'):
shift_info.append((name, 1, 0))
shift_info.append((name, 2, 0))
shift_info.append(None)
shift_info.append((name, 4, 0))
shift_info.append((name, 1, 2))
shift_info.append((name, 2, 2))
shift_info.append(None)
shift_info.append((name, 4, 2))
# opcode positions for bytes 0x14-0x16 carry no shift/rotate instructions; pad with None
shift_info.extend([None] * (3 * 16))
for name1, name2 in (('not', 'extu'), ('neg', 'exts')):
shift_info.append((name1, 1, 0))
shift_info.append((name1, 2, 0))
shift_info.append(None)
shift_info.append((name1, 4, 0))
shift_info.append(None)
shift_info.append((name2, 2, 0))
shift_info.append(None)
shift_info.append((name2, 4, 0))
def p_shift_10_11_12_13_17(va, val, buf, off, tsize):
op = val >> 4
mnem, osz, xtra = shift_info[(val >> 4) & 0xff]
iflags = e_const.OSZ_FLAGS[osz]
# if 32bit (ERd), top bit should always be 0 anyway
rd = val & 0xf
if xtra:
opers = (
h8_operands.H8ImmOper(xtra, osz),
h8_operands.H8RegDirOper(rd, osz, va, 0),
)
else:
opers = (h8_operands.H8RegDirOper(rd, osz, va, 0),)
return (op, mnem, opers, iflags, 2)
def p_6A_6B(va, val, buf, off, tsize):
op = val >> 4
diff = op & 0xf
osz = 1 + ((val >> 8) & 1)
if op & 0x8:
# Rs, @aa:16/24
if diff == 0xa:
op, mnem, opers, iflags, isz = p_Rs_aAA24(va, val, buf, off, tsize)
iflags |= e_const.OSZ_FLAGS[osz]
return op, mnem, opers, iflags, isz
elif diff == 0x8:
op, mnem, opers, iflags, isz = p_Rs_aAA16(va, val, buf, off, tsize)
iflags |= e_const.OSZ_FLAGS[osz]
return op, mnem, opers, iflags, isz
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
else:
# @aa:16/24, Rd
if diff == 0x2:
op, mnem, opers, iflags, isz = p_aAA24_Rd(va, val, buf, off, tsize)
iflags |= e_const.OSZ_FLAGS[osz]
return op, mnem, opers, iflags, isz
elif diff == 0x0:
op, mnem, opers, iflags, isz = p_aAA16_Rd(va, val, buf, off, tsize)
iflags |= e_const.OSZ_FLAGS[osz]
return op, mnem, opers, iflags, isz
elif val in (0x6a10, 0x6a18, 0x6a30, 0x6a38):
# non-MOV instructions
isz, aasize, fmt = (None, (6, 2, '>HH'), None, (8, 4, '>IH'))[(val >> 4) & 3]
aa, val2 = struct.unpack(fmt, buf[off+2:off+isz])
op, mnem, niflags = getBitDbl_OpMnem(val2)
if val2 & 0x1c00:
i3 = (val2 >> 4) & 7
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8AbsAddrOper(aa, tsize, aasize),
)
else:
rn = (val2 >> 4) & 0xf
opers = (
h8_operands.H8RegDirOper(rn, tsize, va, ),
h8_operands.H8AbsAddrOper(aa, tsize, aasize),
)
return op, mnem, opers, 0, isz
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
def p_6c_6d_0100(va, val, buf, off, tsize):
op = val >> 7
iflags = e_const.OSZ_FLAGS[tsize]
isz = 2
mnem = None
er0 = val & 0xf
er1 = (val >> 4) & 7
if val & 0x80:
# mov ERs, @-ERd
if val & 0xf0 == 0xf0:
# push
mnem = 'push'
opers = (h8_operands.H8RegDirOper(er0, tsize, va),)
else:
# mov
mnem = 'mov'
opers = (
h8_operands.H8RegDirOper(er0, tsize, va),
h8_operands.H8RegIndirOper(er1, tsize, va, 0, oflags=e_const.OF_PREDEC),
)
else:
# mov @ERs+,ERd
if val & 0xf0 == 0x70:
# pop
mnem = 'pop'
opers = (h8_operands.H8RegDirOper(er0, tsize, va),)
else:
# mov
mnem = 'mov'
opers = (
h8_operands.H8RegIndirOper(er1, tsize, va, 0, oflags=e_const.OF_POSTINC),
h8_operands.H8RegDirOper(er0, tsize, va),
)
return (op, mnem, opers, iflags, isz)
def p_Mov_78(va, val, buf, off, tsize):
val2, val3_4 = struct.unpack('>HI', buf[off+2:off+8])
op = (val3_4 >> 24) | ((val2 & 0xfff0) << 4) | ((val & 0xff80) << (20 + 1)) | ((val & 0xf) << 20)
# FIXME: complex and ugly. do we even need these in this impl?
mnem = None
disp = val3_4 & 0xffffffff
# tsize is all over the map. must determine here.
tsz_opt = (val2 >> 8) & 1
tsize = (1, 2)[tsz_opt]
iflags = e_const.OSZ_FLAGS[tsize]
if (val2 & 0x80):
erd = (val >> 4) & 0x7
rs = val2 & 0xf
opers = (
h8_operands.H8RegDirOper(rs, tsize),
h8_operands.H8RegIndirOper(erd, tsize, va, disp=disp, dispsz=4, oflags=0),
)
else:
ers = (val >> 4) & 0x7
rd = val2 & 0xf
opers = (
h8_operands.H8RegIndirOper(ers, tsize, va, disp=disp, dispsz=4, oflags=0),
h8_operands.H8RegDirOper(rd, tsize),
)
return (op, mnem, opers, iflags, 8)
mnem_79a = (
'mov',
'add',
'cmp',
'sub',
'or',
'xor',
'and',
)
def p_79(va, val, buf, off, tsize):
op, m, opers, iflags, isz = p_i16_Rd(va, val, buf, off, tsize)
mnem = mnem_79a[(val >> 4) & 0xf]
return op, mnem, opers, iflags, isz
def p_7a(va, val, buf, off, tsize):
op, m, opers, iflags, isz = p_i32_ERd(va, val, buf, off, tsize)
mnem = mnem_79a[(val >> 4) & 0xf]
return op, mnem, opers, iflags, isz
def p_eepmov(va, val, buf, off, tsize):
val2, = struct.unpack('>H', buf[off+2: off+4])
op = (val << 8) | val2
# tsize = (1, 2)[(val >> 7) & 1]
diff = val & 0xff
if diff == 0x5c:
iflags = e_const.IF_B
elif diff == 0xd4:
iflags = e_const.IF_W
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
return op, None, (), iflags, 4
def p_7c(va, val, buf, off, tsize):
    # btst, bor, bior, bxor, bixor, band, biand, bld, bild (erd)
val2, = struct.unpack('>H', buf[off+2: off+4])
iflags = 0
op, mnem, flags = getBitDbl_OpMnem(val2)
op |= ((val & 0xff80) << 9)
telltale = (val2 >> 8)
# FIXME: is any of this redundant with previous encodings?
if telltale == 0x63:
# btst (0x####63##
mnem = 'btst'
erd = (val >> 4) & 0x7
rn = (val2 >> 4) & 0xf
opers = (
h8_operands.H8RegDirOper(rn, tsize=tsize),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
elif telltale == 0x73:
# btst (0x####73##
mnem = 'btst'
erd = (val >> 4) & 0x7
imm = (val2 >> 4) & 0x7
opers = (
h8_operands.H8ImmOper(imm, tsize),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
elif 0x78 > telltale > 0x73:
# other bit-halves:
i3 = (val2 >> 4) & 0x7
erd = (val >> 4) & 0x7
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
return op, mnem, opers, iflags, 4
def p_7d(va, val, buf, off, tsize):
    # bset, bnot, bclr, bst/bist
val2, = struct.unpack('>H', buf[off+2: off+4])
op, mnem, iflags = getBitDbl_OpMnem(val2)
op |= ((val & 0xff80) << 9)
erd = (val >> 4) & 0x7
immreg = (val2 >> 4) & 0x7
if val2 & 0x1c00:
opers = (
h8_operands.H8ImmOper(immreg, tsize),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
else:
opers = (
h8_operands.H8RegDirOper(immreg, tsize, va),
h8_operands.H8RegIndirOper(erd, tsize, va),
)
return op, mnem, opers, iflags, 4
def p_7e(va, val, buf, off, tsize):
    # btst, bor, bior, bxor, bixor, band, biand, bld, bild (erd)
val2, = struct.unpack('>H', buf[off+2: off+4])
op, mnem, iflags = getBitDbl_OpMnem(val2)
op |= ((val & 0xff80) << 9)
aa = val & 0xff
telltale = (val2 >> 8)
# FIXME: is any of this redundant with previous encodings?
if telltale == 0x63:
# btst (0x####63##
mnem = 'btst'
rn = (val2 >> 4) & 0xf
opers = (
h8_operands.H8RegDirOper(rn, tsize, va, 0),
h8_operands.H8AbsAddrOper(aa, tsize=tsize, aasize=1),
)
elif telltale == 0x73:
# btst (0x####73##
mnem = 'btst'
i3 = (val2 >> 4) & 0x7
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8AbsAddrOper(aa, tsize=tsize, aasize=1),
)
elif 0x78 > telltale > 0x73:
# other bit-halves:
tsize = 1
i3 = (val2 >> 4) & 0x7
opers = (
h8_operands.H8ImmOper(i3, tsize),
h8_operands.H8AbsAddrOper(aa, tsize=tsize, aasize=1),
)
else:
raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)
return op, mnem, opers, iflags, 4
def p_7f(va, val, buf, off, tsize):
    # bset, bnot, bclr, bist, bst
val2, = struct.unpack('>H', buf[off+2: off+4])
op, mnem, iflags = getBitDbl_OpMnem(val2)
op |= ((val & 0xff00) << 8)
aa = val & 0xff
immreg = (val2 >> 4) & 0x7
if val2 & 0x1c00:
opers = (
h8_operands.H8ImmOper(immreg, tsize),
h8_operands.H8AbsAddrOper(aa, tsize, 1),
)
else:
opers = (
h8_operands.H8RegDirOper(immreg, tsize, va),
h8_operands.H8AbsAddrOper(aa, tsize, 1),
)
return op, mnem, opers, iflags, 4
'''
8DII add.b immediate 2 states
08SD add.b regdir 2 states
791DIIII add.w imm 4 states
09SD add.w regdir 2 states
7a1EIIII add.l imm 6 states
0aSD add.l regdir 2 states
0b0D adds #1, ERd 2 states
0b8D adds #2, ERd 2 states
0b9D adds #4, ERd 2 states
9DII addx #xx:8, Rd 2 states
0eSD addx Rs, Rd 2 states
eDII and.b #xx:8, Rd 2 states
16SD and.b Rs, Rd 2 states
796DIIII and.w #xx:16, Rd 4 states
66SD and.w Rs, Rd 2 states
7a6DIIIIIIII and.l #xx:32, ERd 6 states
01f066SD and.l Rs, ERd 4 states
06II andc #xx:8, CCR 2 states
76ID band #xx:3, Rd 2 states
7cD076I0 band #xx:3, @ERd 6 states
7eAb76I0 band #xx:3, @aa:8 6 states
4CDS bcc d:8 4 states
58C0DISP bcc d:16 6 states
'''
# table: ( subtable, mnem, decoder, tsize, iflags)
main_table = [(None, 'DECODE_ERROR', 0, 0, 0) for x in range(256)]
main_table[0x0] = (False, 'nop', None, 0, 0)
main_table[0x1] = (False, None, p_01, 0, 0)
main_table[0xa] = (False, None, p_0a_1a, 0, 0)
main_table[0xb] = (False, None, p_0b_1b, 0, 0)
main_table[0xf] = (False, None, p_0f_1f, 0, 0)
main_table[0x10] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x11] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x12] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x13] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x17] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x1a] = (False, None, p_0a_1a, 0, 0)
main_table[0x1b] = (False, None, p_0b_1b, 0, 0)
main_table[0x1f] = (False, None, p_0f_1f, 0, 0)
main_table[0x02] = (False, 'stc', p_CCR_Rd, 1, e_const.IF_B)
main_table[0x03] = (False, 'ldc', p_Rs_CCR, 1, e_const.IF_B)
main_table[0x04] = (False, 'orc', p_i8_CCR, 1, 0)
main_table[0x05] = (False, 'xorc', p_i8_CCR, 1, 0)
main_table[0x06] = (False, 'andc', p_i8_CCR, 1, 0)
main_table[0x07] = (False, 'ldc', p_i8_CCR, 1, e_const.IF_B)
main_table[0x08] = (False, 'add', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x09] = (False, 'add', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x0c] = (False, 'mov', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x0d] = (False, 'mov', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x0e] = (False, 'addx', p_Rs_Rd, 1, 0)
main_table[0x14] = (False, 'or', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x15] = (False, 'xor', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x16] = (False, 'and', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x18] = (False, 'sub', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x19] = (False, 'sub', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x1c] = (False, 'cmp', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x1d] = (False, 'cmp', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x1e] = (False, 'subx', p_Rs_Rd, 1, 0)
# mov.b set
for opbyte in range(0x20, 0x30):
main_table[opbyte] = (False, 'mov', p_aAA8_Rd, 1, e_const.IF_B)
for opbyte in range(0x30, 0x40):
main_table[opbyte] = (False, 'mov', p_Rs_aAA8, 1, e_const.IF_B)
# generate Bcc opcodes
for opbyte in range(16):
mnem, iflags = bcc[opbyte]
main_table[0x40 + opbyte] = (False, mnem, p_disp8, 1, iflags)
main_table[0x50] = (False, 'mulxu', p_Rs_Rd_mul, 1, e_const.IF_B)
main_table[0x51] = (False, 'divxu', p_Rs_Rd_mul, 1, e_const.IF_B)
main_table[0x52] = (False, 'mulxu', p_Rs_ERd, 2, e_const.IF_W)
main_table[0x53] = (False, 'divxu', p_Rs_ERd, 2, e_const.IF_W)
main_table[0x54] = (False, 'rts', None, 0, envi.IF_RET | envi.IF_NOFALL) # 5470
main_table[0x55] = (False, 'bsr', p_disp8, 0, envi.IF_CALL)
main_table[0x56] = (False, 'rte', None, 0, envi.IF_RET | envi.IF_NOFALL) # 5670
main_table[0x57] = (False, 'trapa', p_i2, 0, envi.IF_NOFALL)
main_table[0x58] = (False, 'error', p_disp16, 2, 0)
main_table[0x59] = (False, 'jmp', p_aERn, 3, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5a] = (False, 'jmp', p_aAA24, 0, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5b] = (False, 'jmp', p_aaAA8, 0, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5c] = (False, 'bsr', p_disp16, 0, envi.IF_CALL)
main_table[0x5d] = (False, 'jsr', p_aERn, 3, envi.IF_CALL)
main_table[0x5e] = (False, 'jsr', p_aAA24, 0, envi.IF_CALL)
main_table[0x5f] = (False, 'jsr', p_aaAA8, 0, envi.IF_CALL)
# all bit instructions are B. may set 0->1
main_table[0x60] = (False, 'bset', p_Rn_Rd, 1, 0)
main_table[0x70] = (False, 'bset', p_i3_Rd, 1, 0)
main_table[0x61] = (False, 'bnot', p_Rn_Rd, 1, 0)
main_table[0x71] = (False, 'bnot', p_i3_Rd, 1, 0)
main_table[0x62] = (False, 'bclr', p_Rn_Rd, 1, 0)
main_table[0x72] = (False, 'bclr', p_i3_Rd, 1, 0)
main_table[0x63] = (False, 'btst', p_Rn_Rd, 1, 0)
main_table[0x73] = (False, 'btst', p_i3_Rd, 1, 0)
main_table[0x64] = (False, 'or', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x65] = (False, 'xor', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x66] = (False, 'and', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x67] = (False, 'bitdoubles', p_Bit_Doubles, 1, 0)
main_table[0x68] = (False, 'mov', p_68_69_6e_6f, 1, e_const.IF_B)
main_table[0x69] = (False, 'mov', p_68_69_6e_6f, 2, e_const.IF_W)
main_table[0x6a] = (False, 'mov', p_6A_6B, 1, 0)
main_table[0x6b] = (False, 'mov', p_6A_6B, 2, e_const.IF_W)
# main_table[0x6c] = (False, 'mov', p_Mov_6C, 1, IF_B)
# main_table[0x6d] = (False, 'mov', p_Mov_6C, 2, IF_W)
main_table[0x6c] = (False, 'mov', p_6c_6d_0100, 1, e_const.IF_B)
main_table[0x6d] = (False, 'mov', p_6c_6d_0100, 2, e_const.IF_W)
main_table[0x6e] = (False, 'mov', p_68_69_6e_6f, 1, e_const.IF_B)
main_table[0x6f] = (False, 'mov', p_68_69_6e_6f, 2, e_const.IF_W)
for opbyte in range(0x74, 0x78):
main_table[opbyte] = (False, 'bitdoubles', p_Bit_Doubles, 1, 0)
main_table[0x78] = (False, 'mov', p_Mov_78, 1, 0)
main_table[0x79] = (False, 'p79', p_79, 2, e_const.IF_W)
main_table[0x7a] = (False, 'p7a', p_7a, 4, e_const.IF_L)
main_table[0x7b] = (False, 'eepmov', p_eepmov, 0, 0)
main_table[0x7c] = (False, '7Cmnem', p_7c, 1, 0)
main_table[0x7d] = (False, '7Dmnem', p_7d, 1, 0)
main_table[0x7e] = (False, '7Emnem', p_7e, 1, 0)
main_table[0x7f] = (False, '7Fmnem', p_7f, 1, 0)
for opbyte in range(0x80, 0x90):
main_table[opbyte] = (False, 'add', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0x90, 0xa0):
main_table[opbyte] = (False, 'addx', p_i8_Rd, 1, 0)
for opbyte in range(0xa0, 0xb0):
main_table[opbyte] = (False, 'cmp', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xb0, 0xc0):
main_table[opbyte] = (False, 'subx', p_i8_Rd, 1, 0)
for opbyte in range(0xc0, 0xd0):
main_table[opbyte] = (False, 'or', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xd0, 0xe0):
main_table[opbyte] = (False, 'xor', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xe0, 0xf0):
main_table[opbyte] = (False, 'and', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xf0, 0x100):
main_table[opbyte] = (False, 'mov', p_i8_Rd, 1, e_const.IF_B)
|
11575444
|
import pytest
@pytest.fixture(scope="session", autouse=True)
def setupsuite():
print("STARTING TESTS")
yield
print("FINISHED TESTS")
@pytest.fixture
def random_number_generator():
import random
def _number_provider():
return random.choice(range(10))
yield _number_provider
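# Illustrative use of the factory fixture above (hypothetical test, not part of
# this file): the fixture yields a callable, so a test invokes it on demand.
#
# def test_rolls_in_range(random_number_generator):
#     assert random_number_generator() in range(10)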
|
11575475
|
import numpy
def main():
print('''numpy:
- {}'''.format(numpy.__version__))
if __name__ == "__main__":
main()
|
11575531
|
import os
from argparse import Namespace
from copy import copy
from pathlib import Path
from luscious_dl.logger import logger_file_handler, logger
from luscious_dl.parser import is_a_valid_integer
from luscious_dl.start import start
from luscious_dl.utils import cls, create_default_files, open_config_menu, get_config_data, read_list, info, \
ListFilesManager, inputs_string_to_list
def list_txt_organizer(items: list[str], prefix: str) -> None:
"""
Remove from list.txt and then add to list_completed.txt
:param items: List of urls or ids
:param prefix: album/user
"""
for item in items:
ListFilesManager.remove(item)
ListFilesManager.add(f'{prefix}-{int(item)}' if is_a_valid_integer(item) else item)
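# Example (illustrative): list_txt_organizer(['123'], 'album') removes '123'
# from list.txt and appends it to the completed list as 'album-123'; non-numeric
# entries such as URLs are appended unchanged.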
def menu() -> None:
"""Menu"""
info()
create_default_files()
logger_file_handler()
configs = get_config_data()
base_namespace = Namespace(
output_dir=Path(os.path.normcase(configs.get('directory', './albums/'))).resolve(),
threads=configs.get('pool', os.cpu_count() or 1),
retries=configs.get('retries', 5),
timeout=configs.get('timeout', 30),
delay=configs.get('delay', 0),
foldername_format=configs.get('foldername_format', '%t'),
gen_pdf=configs.get('gen_pdf', False),
rm_origin_dir=configs.get('rm_origin_dir', False),
album_inputs=None, user_inputs=None, only_favorites=False,
keyword=None, search_download=False, sorting='date_trending', page=1, max_pages=1)
while True:
option = input('Options:\n'
'1 - Download albums by URL or ID.\n'
'2 - Download all user albums.\n'
'3 - Download all user favorites.\n'
'4 - Search albums by keyword.\n'
'5 - Download albums from list.txt.\n'
'6 - Settings.\n'
'0 - Exit.\n'
'> ')
if option in ('1', '2', '3'):
inputs = input('\n0 - Back.\n'
f'Enter {"album" if option == "1" else "user"} URL or ID.\n> ')
cls()
if inputs != '0':
args = copy(base_namespace)
args.album_inputs = inputs if option == '1' else None
args.user_inputs = inputs if option in ('2', '3') else None
args.only_favorites = option == '3'
start(args)
list_txt_organizer(inputs_string_to_list(inputs), 'album' if option == '1' else 'user')
logger.log(5, 'URLs/IDs added to completed list.')
elif option == '4':
keyword = input('Enter keyword\n> ')
if not keyword:
print('Please enter a keyword.\n')
                continue
page = input('Enter starting page number or leave blank\n> ')
page = int(page) if is_a_valid_integer(page) else 1
max_pages = input('Enter max page or leave blank\n> ')
max_pages = int(max_pages) if is_a_valid_integer(max_pages) else 1
            search_download = input('Download search results? ("Y/N") ').strip() in ('y', 'Y')
args = copy(base_namespace)
args.keyword = keyword
args.search_download = search_download
args.page = page
args.max_pages = max_pages
start(args)
elif option == '5':
list_txt = list(set(read_list()))
args = copy(base_namespace)
args.album_inputs = ','.join(list_txt)
start(args)
list_txt_organizer(list_txt, 'album')
logger.log(5, 'URLs/IDs added to completed list.')
elif option == '6':
open_config_menu()
elif option == '0':
exit()
else:
print('Invalid Option.\n')
if __name__ == '__main__':
menu()
|
11575542
|
import logging
import json
import datetime
from datetime import timedelta
import decimal
import boto3
from queue import Queue
from queue import Empty
from transfixed import gainfixtrader as gain
import base64
import hmac
import hashlib
import os
import smtplib
import atexit
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from botocore.exceptions import ClientError
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
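# Usage sketch (illustrative): DynamoDB returns numbers as decimal.Decimal, which
# the stock json module cannot serialize; this encoder maps them to int/float:
#   json.dumps({'qty': decimal.Decimal('2')}, cls=DecimalEncoder)  # -> '{"qty": 2}'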
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
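# Usage sketch (illustrative, Python 2 metaclass syntax to match this module):
#   class OnlyOne(object):
#       __metaclass__ = Singleton
#   assert OnlyOne() is OnlyOne()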
class LambdaTrader(object):
__metaclass__ = Singleton
def __init__(self, logger):
self.Logger = logger
self.CurrentPositions = Queue()
self.CurrentBalance = Queue()
self.SubmittedOrders = Queue()
self.Messages = []
self.PendingOrders = Queue()
db = boto3.resource('dynamodb', region_name='us-east-1')
self.__Securities = db.Table('Securities')
self.__Orders = db.Table('Orders')
self.FixClient = gain.FixClient.Create(self.Logger, 'config.ini', False)
self.FixClient.addOrderListener(self.OrderNotificationReceived)
self.FixClient.addAccountInquiryListener(self.AccountInquiryReceived)
def AccountInquiryReceived(self, event):
if event.AccountInquiry == gain.AccountInquiry.CollateralInquiry:
self.Logger.info('CollInquiryID: %s Account: %s' % (event.CollInquiryID, event.Account))
self.Logger.info('Balance: %s Currency: %s' % (event.Balance, event.Currency))
self.CurrentBalance.put((event.CollInquiryID, event.Balance, event.Currency))
self.CurrentBalance.task_done()
if event.AccountInquiry == gain.AccountInquiry.RequestForPositions:
self.Logger.info('PosReqID: %s Account: %s' % (event.PosReqID, event.Account))
self.Logger.info('Quantity: %s Amount: %s' % (event.LongQty - event.ShortQty, event.PosAmt))
self.CurrentPositions.put((event.PosReqID, event.Symbol, event.Maturity, event.LongQty - event.ShortQty))
self.CurrentPositions.task_done()
def OrderNotificationReceived(self, event):
self.Logger.info('OrderId: %s Status: %s Side: %s' % (event.ClientOrderId, event.Status, event.Side))
self.Logger.info('Symbol: %s AvgPx: %s Quantity: %s' % (event.Symbol, event.AvgPx, event.Quantity))
self.Logger.info('order notification received')
if event.Status == gain.OrderStatus.Filled or event.Status == gain.OrderStatus.Rejected:
self.SubmittedOrders.put((event.ClientOrderId, event.Status, event.AvgPx))
self.SubmittedOrders.task_done()
def SendOrder(self, side, quantity, symbol, maturity, newOrderId, transactionTime):
self.Logger.info('Submitting Validated order %s %s %s %s' % (side, quantity, symbol, maturity))
if side.upper() == gain.OrderSide.Buy.upper():
order = gain.BuyFutureMarketOrder(symbol, maturity, quantity)
elif side.upper() == gain.OrderSide.Sell.upper():
order = gain.SellFutureMarketOrder(symbol, maturity, quantity)
trade = self.FixClient.send(order)
orderId, status, price = self.SubmittedOrders.get(True, 5)
while trade.OrderId != orderId:
self.Logger.error('requests do not match orderId: %s, trade.OrderId: %s' % (orderId, trade.OrderId))
orderId, status, price = self.SubmittedOrders.get(True, 5)
self.Logger.info('Confirmed orderId %s. Status: %s. Price: %s. Symbol: %s' % (orderId, status, price, symbol))
self.UpdateStatus('Confirmed newOrderId: %s. ClientOrderId: %s. Status: %s. Side: %s. Qty: %s. Symbol: %s. '
'Maturity: %s. Price: %s'
% (newOrderId, orderId, status, side, quantity, symbol, maturity, price),
newOrderId, transactionTime, orderId, status)
def UpdateStatus(self, text, newOrderId, transactionTime, clientOrderId, status):
try:
response = self.__Orders.update_item(
Key={
'NewOrderId': newOrderId['S'],
'TransactionTime': transactionTime['S'],
},
UpdateExpression="set #s = :s, ClientOrderId = :c",
ConditionExpression="#s = :p and NewOrderId = :n",
ExpressionAttributeNames={
'#s': 'Status'
},
ExpressionAttributeValues={
':s': status,
':c': clientOrderId,
':n': newOrderId['S'],
':p': 'PENDING'
},
ReturnValues="UPDATED_NEW")
text += '. %s' % response['Attributes']
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
text += '%s. %s' % ('', e.response['Error']['Message'])
except Exception as e:
self.Logger.error(e)
text += '%s. %s' % ('', e)
else:
text += ". UpdateItem succeeded."
self.Logger.info(json.dumps(response, indent=4, cls=DecimalEncoder))
self.Logger.info('To Send Email: %s', text)
self.Messages.append(text)
def SendReport(self, text):
try:
self.Logger.info('Send Email: %s', text)
def hash_smtp_pass_from_secret_key(key):
message = "SendRawEmail"
version = '\x02'
h = hmac.new(key, message, digestmod=hashlib.sha256)
return base64.b64encode("{0}{1}".format(version, h.digest()))
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Lambda FIX Trader report'
msg['From'] = os.environ['email_address']
msg['To'] = os.environ['email_address']
mime_text = MIMEText(text, 'html')
msg.attach(mime_text)
server = smtplib.SMTP('email-smtp.us-east-1.amazonaws.com', 587, timeout=10)
server.set_debuglevel(10)
server.starttls()
server.ehlo()
server.login(os.environ['aws_access_key_id'],
hash_smtp_pass_from_secret_key(os.environ['aws_secret_access_key']))
server.sendmail(os.environ['email_address'], os.environ['email_address'], msg.as_string())
res = server.quit()
self.Logger.info(res)
except Exception as e:
self.Logger.error(e)
def Run(self):
if not self.FixClient.SocketInitiator.application.connected:
self.FixClient.start()
if not self.PendingOrders.empty():
self.validate()
report = reduce(lambda x, y: x + y, map(lambda x, y: '<br><b>%s</b>. %s\n' % (x + 1, y),
range(len(self.Messages)), self.Messages))
self.SendReport(report)
self.Messages = []
def validate_order(self, order, security):
try:
side = str(order['Details']['M']['Side']['S'])
ordType = str(order['Details']['M']['OrdType']['S'])
riskFactor = float(security['Risk']['RiskFactor'])
margin = int(security['Risk']['Margin']['Amount'])
marginCcy = str(security['Risk']['Margin']['Currency'])
colReqId = self.FixClient.collateralInquiry()
receiveColReqId, balance, ccy = self.CurrentBalance.get(True, 5)
while colReqId != receiveColReqId:
self.Logger.error('requests do not match colReqId: %s, receiveColReqId: %s' % (colReqId, receiveColReqId))
receiveColReqId, balance, ccy = self.CurrentBalance.get(True, 5)
if marginCcy != ccy:
raise Exception('Margin Currency does not match Balance Currency for %s' % security['Symbol'])
if balance * riskFactor < margin:
raise Exception('Margin exceeded for %s. Balance: %s, RF: %s, Margin: %s'
% (security['Symbol'], balance, riskFactor, margin))
except Exception as e:
self.Logger.error(e)
self.UpdateStatus('Error validate_order NewOrderId: %s. %s' % (order['NewOrderId'], e),
order['NewOrderId'], order['TransactionTime'], 0, 'INVALID')
return False, None
else:
if ordType.upper() != gain.OrderType.Market.upper():
supported = 'Only MARKET Orders are supported'
self.Logger.error(supported)
self.UpdateStatus('Error validate_order NewOrderId: %s. %s' % (order['NewOrderId'], supported),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return False, None
if side.upper() == gain.OrderSide.Buy.upper() or side.upper() == gain.OrderSide.Sell.upper():
return True, side
else:
error = 'Unknown side received. Side: %s' % side
self.Logger.error(error)
self.UpdateStatus('Error validate_order NewOrderId: %s. %s' % (order['NewOrderId'], error),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return False, None
def validate_quantity(self, order, security):
otherPositionsInSecurity = False
try:
quantity = int(order['Details']['M']['Quantity']['N'])
side = order['Details']['M']['Side']['S']
symbol = order['Details']['M']['Symbol']['S']
maturity = order['Details']['M']['Maturity']['S']
maxPosition = security['Risk']['MaxPosition']
reqId = self.FixClient.requestForPositions()
receiveReqId, receivedSymbol, receivedMaturity, position = self.CurrentPositions.get(True, 5)
while reqId != receiveReqId or symbol != receivedSymbol or maturity != receivedMaturity:
otherPositionsInSecurity = True
self.Logger.error('requests do not match reqId: %s, receivedId: %s, maturity: %s, receivedMaturity: %s'
% (reqId, receiveReqId, maturity, receivedMaturity))
receiveReqId, receivedSymbol, receivedMaturity, position = self.CurrentPositions.get(True, 5)
if side.upper() == gain.OrderSide.Buy.upper() and maxPosition < position + quantity:
raise Exception('MaxPosition exceeded for %s' % security['Symbol'])
if side.upper()== gain.OrderSide.Sell.upper() and maxPosition < abs(position - quantity):
raise Exception('MaxPosition exceeded for %s' % security['Symbol'])
except Empty:
error = 'No reply to requestForPositions'
self.Logger.error(error)
if otherPositionsInSecurity:
                self.Logger.error('Gain Futures does not send a reply to requestForPositions if position is 0 '
                                  'and there is a position in another maturity of this contract')
if maxPosition < quantity:
self.UpdateStatus('Error validate_quantity NewOrderId: %s. %s' % (order['NewOrderId'], error+
'. MaxPosition exceeded for %s' %
security['Symbol']),
order['NewOrderId'], order['TransactionTime'], 0, 'INVALID')
return 0
else:
return quantity
else:
self.UpdateStatus('Error validate_quantity NewOrderId: %s. %s' % (order['NewOrderId'], error),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return 0
except Exception as e:
self.Logger.error(e)
self.UpdateStatus('Error validate_quantity NewOrderId: %s. %s' % (order['NewOrderId'], e),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return 0
else:
return quantity
def validate_maturity(self, order):
try:
maturity = order['Details']['M']['Maturity']['S']
year = int(maturity[:4])
month = int(maturity[-2:])
date = datetime.date(year, month, 1)
expiry = self.get_expiry_date(date)
if expiry <= datetime.date.today() + timedelta(days=1):
raise Exception('%s maturity date has expired' % expiry)
except Exception as e:
self.Logger.error(e)
self.UpdateStatus('Error validate_maturity NewOrderId: %s. %s' % (order['NewOrderId'], e),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return None
else:
return maturity
def validate_symbol(self, order):
try:
symbol = order['Details']['M']['Symbol']['S']
self.Logger.info('Validating %s' % symbol)
response = self.__Securities.get_item(
Key={
'Symbol': symbol
}
)
except ClientError as e:
self.Logger.error(e.response['Error']['Message'])
self.UpdateStatus('ClientError validate_symbol NewOrderId: %s. %s' % (order['NewOrderId'], e),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return False, None
except Exception as e:
self.Logger.error(e)
self.UpdateStatus('Error validate_symbol NewOrderId: %s. %s' % (order['NewOrderId'], e),
order['NewOrderId'],order['TransactionTime'], 0, 'INVALID')
return False, None
else:
# self.Logger.info(json.dumps(security, indent=4, cls=DecimalEncoder))
            if 'Item' in response and response['Item']['Symbol'] == symbol and response['Item']['TradingEnabled']:
return True, response['Item']
self.UpdateStatus('Symbol is unknown or not enabled for trading %s' % symbol,
order['NewOrderId'], order['TransactionTime'], 0, 'INVALID')
return False, None
def validate(self):
while not self.PendingOrders.empty():
order = self.PendingOrders.get()
found, security = self.validate_symbol(order)
if not found: continue
maturity = self.validate_maturity(order)
if not maturity: continue
quantity = self.validate_quantity(order, security)
if quantity < 1: continue
good, side = self.validate_order(order, security)
if not good: continue
self.SendOrder(str(side), int(quantity), str(security['Symbol']), str(maturity),
order['NewOrderId'], order['TransactionTime'])
# lifted from https://github.com/conor10/examples/blob/master/python/expiries/vix.py
@staticmethod
def get_expiry_date(date):
"""
http://cfe.cboe.com/products/spec_vix.aspx
TERMINATION OF TRADING:
Trading hours for expiring VIX futures contracts end at 7:00 a.m. Chicago
time on the final settlement date.
FINAL SETTLEMENT DATE:
The Wednesday that is thirty days prior to the third Friday of the
calendar month immediately following the month in which the contract
expires ("Final Settlement Date"). If the third Friday of the month
subsequent to expiration of the applicable VIX futures contract is a
CBOE holiday, the Final Settlement Date for the contract shall be thirty
days prior to the CBOE business day immediately preceding that Friday.
"""
# Date of third friday of the following month
if date.month == 12:
third_friday_next_month = datetime.date(date.year + 1, 1, 15)
else:
third_friday_next_month = datetime.date(date.year,
date.month + 1, 15)
one_day = datetime.timedelta(days=1)
thirty_days = datetime.timedelta(days=30)
while third_friday_next_month.weekday() != 4:
# Using += results in a timedelta object
third_friday_next_month = third_friday_next_month + one_day
# TODO: Incorporate check that it's a trading day, if so move the 3rd
# Friday back by one day before subtracting
return third_friday_next_month - thirty_days
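# Worked example for get_expiry_date (hand-checked, illustrative only): for a
# January 2017 contract, the third Friday of the following month is Friday,
# 2017-02-17, so the final settlement date is 30 days earlier:
#   LambdaTrader.get_expiry_date(datetime.date(2017, 1, 1))
#   # -> datetime.date(2017, 1, 18), a Wednesday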
trader = None
@atexit.register
def lambda_exit():
if trader is not None:
trader.Logger.info('lambda_exit is called')
trader.FixClient.stop()
def main(event, context):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(threadName)s - %(message)s')
logger.info('event %s' % event)
logger.info('context %s' % context)
response = {'State':'OK'}
try:
logger.info('Start fix trader')
global trader
trader = LambdaTrader(logger)
for record in event['Records']:
if record['eventName'] == 'INSERT':
logger.info('New Order received NewOrderId: %s', record['dynamodb']['Keys']['NewOrderId'])
trader.PendingOrders.put_nowait(record['dynamodb']['NewImage'])
else:
logger.info('Not INSERT event is ignored')
if not trader.PendingOrders.empty():
trader.Run()
logger.info('Stop fix trader')
except Exception as e:
logger.error(e)
response['State']='ERROR'
return response
def lambda_handler(event, context):
res = main(event, context)
return json.dumps(res)
if __name__ == '__main__':
with open("event.json") as json_file:
test_event = json.load(json_file, parse_float=decimal.Decimal)
re = main(test_event, None)
re = main(test_event, None)
print(json.dumps(re))
|
11575555
|
from __future__ import absolute_import, division, print_function
class DBError(Exception):
pass
|
11575572
|
description = 'setup for the status HTML monitor'
group = 'special'
_expcolumn = Column(
Block('Experiment', [
BlockRow(
Field(name='Current status', key='exp/action', width=40,
istext=True, maxlen=40),
Field(name='Data file', key='exp/lastpoint'))]),
)
_sampletable = Column(
Block('Sample table', [
BlockRow(Field(dev='omgs')),
BlockRow(Field(dev='tths')),
],
),
)
_instrument = Column(
Block('Instrument', [
BlockRow(Field(dev='wav')),
BlockRow(Field(dev='slits')),
BlockRow(Field(dev='mon'),
Field(name='Resosteps', key='adet/resosteps'),
Field(name='Step', key='adet/value[0]'),),
],
),
)
_frm = Column(
Block('FRM II', [
BlockRow(Field(dev='ReactorPower',)),
],
),
)
_htf = Column(
Block('HTF', [
BlockRow(Field(dev='T'),
Field(name='Power', key='T/heaterpower'),)
],
setups='htf*',
),
)
_magnet = Column(
Block('Magnet', [
BlockRow(Field(dev='B')),
],
setups='ccm*',
),
)
_sc = Column(
Block('Sample Changer', [
BlockRow(Field(dev='sams'),),
],
setups='samplechanger',
),
)
_e = Column(
Block('E field', [
BlockRow(Field(dev='E')),
],
setups='efield',
),
)
_tension = Column(
Block('Tension rack', [
BlockRow(Field(dev='teload'),
Field(dev='tepos'),
Field(dev='teext'),
Field(dev='topos'),
Field(dev='tomom'),),
],
setups='tensile',
),
)
devices = dict(
Monitor = device('nicos.services.monitor.html.Monitor',
title = 'SPODI status monitor',
loglevel = 'info',
interval = 10,
filename = 'webroot/index.html',
cache = 'localhost',
font = 'Luxi Sans',
valuefont = 'Consolas',
prefix = 'nicos/',
padding = 0,
fontsize = 24,
layout = [
Row(_expcolumn),
Row(_frm, _instrument, _sampletable),
Row(_htf,),
Row(_tension),
Row(_magnet, _e,),
Row(_sc),
],
),
)
|
11575596
|
from typing import List


class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
dp = [0]*(len(s)+1)
dp[0] = 1
dic = set(wordDict)
for j in range(1,len(s)+1):
for i in range(j):
if dp[i] == 1 and s[i:j] in dic:
dp[j] = 1
return dp[-1]
# Recursive
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
dic= set(wordDict)
visited = {}
return self.helper(s, dic, visited )
def helper(self, s, dic, visited):
if s in visited:
return visited[s]
if not s:
return True
        for i in range(1, len(s) + 1):  # start at 1 to skip the empty prefix
if s[:i] in dic:
if self.helper(s[i:], dic, visited):
visited[s] = True
return visited[s]
visited[s] = False
return False
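# A minimal usage sketch (illustrative values, LeetCode-style invocation):
if __name__ == "__main__":
    assert Solution().wordBreak("leetcode", ["leet", "code"])
    assert not Solution().wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"])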
|
11575599
|
def counting_sort(l):
    largest = 0
    for num in l:
        if num < 0:
            # this implementation assumes non-negative integers
            return []
        elif num > largest:
            largest = num
    # tracker[i] holds the number of occurrences of value i
    tracker = [0 for i in range(largest + 1)]
    final = [0 for i in range(len(l))]
    for num in l:
        tracker[num] += 1
    # prefix sums: tracker[i] becomes the count of elements <= i,
    # i.e. one past the last output slot for value i
    for i in range(len(tracker)):
        if i > 0:
            tracker[i] += tracker[i - 1]
    # place each element at its final position (iterating the input in
    # reverse here would make the sort stable)
    for num in l:
        if tracker[num] > 0:
            final[tracker[num] - 1] = num
            tracker[num] -= 1
    return final
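# A minimal usage sketch (illustrative values):
if __name__ == "__main__":
    assert counting_sort([4, 2, 2, 8, 3, 3, 1]) == [1, 2, 2, 3, 3, 4, 8]
    assert counting_sort([5, -1, 2]) == []  # negative input is rejected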
|
11575610
|
from django.core.management.base import BaseCommand
from waldur_mastermind.marketplace.models import (
Attribute,
AttributeOption,
Category,
Section,
)
def get_category_prefix(category):
if category.sections.exists():
        # if at least one section exists, use the first section's key prefix as the category prefix
return category.sections.first().key.split('_')[0]
else:
# cleanup whitespaces from the title
return category.title.strip().replace(' ', '')
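# Example (hypothetical data): a category whose first section key is
# "vpc_general" yields the prefix "vpc"; a sectionless category titled
# "Private Clouds" falls back to "PrivateClouds".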
class Command(BaseCommand):
    help = 'Copy section and attribute structure from one Marketplace category to another'
def add_arguments(self, parser):
parser.add_argument(
'source_category_uuid',
nargs=1,
type=str,
help='UUID of a category to copy metadata from',
)
parser.add_argument(
'target_category_uuid',
nargs=1,
type=str,
help='UUID of a category to copy metadata to',
)
def handle(self, *args, **options):
source_category_uuid = options['source_category_uuid'][0]
target_category_uuid = options['target_category_uuid'][0]
try:
source_category = Category.objects.get(uuid=source_category_uuid)
except Category.DoesNotExist:
self.stdout.write(
self.style.ERROR(
'Source category %s was not found.' % source_category_uuid
)
)
exit(1)
try:
target_category = Category.objects.get(uuid=target_category_uuid)
except Category.DoesNotExist:
self.stdout.write(
self.style.ERROR(
                    'Target category %s was not found.' % target_category_uuid
)
)
exit(1)
source_prefix = get_category_prefix(source_category)
target_prefix = get_category_prefix(target_category)
# Copy metadata
for source_section in source_category.sections.all():
section_source_prefix = source_section.key.split('_')[0]
# assert that convention is respected
if section_source_prefix != source_prefix:
self.stdout.write(
self.style.ERROR(
'Prefixes mismatch: %s (from category) and %s (from section)'
% (source_prefix, section_source_prefix)
)
)
section_prefix = '_'.join(
[target_prefix] + source_section.key.split('_')[1:]
)
target_section, _ = Section.objects.get_or_create(
key=section_prefix,
title=source_section.title,
category=target_category,
is_standalone=source_section.is_standalone,
)
# copy attributes
for source_attribute in source_section.attributes.all():
attribute_target_key = (
target_prefix
+ source_attribute.key[
source_attribute.key.find(source_prefix) + len(source_prefix) :
]
)
attr, _ = Attribute.objects.get_or_create(
key=attribute_target_key,
title=source_attribute.title,
type=source_attribute.type,
section=target_section,
)
for source_option in source_attribute.options.all():
option_target_key = (
target_prefix
+ source_option.key[
source_option.key.find(source_prefix) + len(source_prefix) :
]
)
AttributeOption.objects.get_or_create(
attribute=attr,
key=option_target_key,
title=source_option.title,
)
self.stdout.write(
self.style.SUCCESS(
'Target category %s was successfully populated.' % target_category
)
)
|
11575659
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Test module used to create some initial site data for experimentation and manual testing
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
import annalist
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordview import RecordView
from annalist.models.recordlist import RecordList
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitydata import EntityData
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
test_layout,
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
copySitedata,
init_annalist_test_site, init_annalist_test_coll, resetSitedata
)
from .entity_testutils import collection_create_values
from .entity_testtypedata import recordtype_create_values
from .entity_testviewdata import recordview_create_values
from .entity_testlistdata import recordlist_create_values
# -----------------------------------------------------------------------------
#
# Helper functions
#
# -----------------------------------------------------------------------------
def entitydata_create_values(coll, etype, entity_id, update="Entity"):
"""
Data used when creating entity test data
"""
return (
{ 'rdfs:label': '%s %s/%s/%s'%(update, coll._entityid, etype._entityid, entity_id)
, 'rdfs:comment': '%s coll %s, type %s, entity %s'%(update, coll._entityid, etype._entityid, entity_id)
})
def site_create_data(site_base_uri, target_subdir):
"""
Create site data in `target_subdir`...
    @@NOTE: due to the way EntityRoot is defensively coded, all test data is created
under settings.BASE_SITE_DIR, and the supplied 'target_subdir' parameter is ignored.
"""
# target_dir = os.path.join(settings.SAMPLEDATA_DIR, target_subdir)
target_dir = settings.BASE_SITE_DIR
site = Site.initialize_site_data(
site_base_uri, target_dir,
settings.SITE_SRC_ROOT + "/annalist/data/sitedata",
label="Annalist data notebook test site",
description="Annalist test site metadata and site-wide values."
)
return site
def coll123_create_data(site):
coll1 = Collection.create(site, "coll1", collection_create_values("coll1"))
coll2 = Collection.create(site, "coll2", collection_create_values("coll2"))
coll3 = Collection.create(site, "coll3", collection_create_values("coll3"))
#
for coll in [coll1, coll2, coll3]:
type1 = RecordType.create(coll, "type1", recordtype_create_values(coll._entityid, "type1"))
view1 = RecordView.create(coll, "view1", recordview_create_values(coll._entityid, "view1"))
list1 = RecordList.create(coll, "list1", recordlist_create_values(coll._entityid, "list1"))
data1 = RecordTypeData.create(coll, "type1", {})
type2 = RecordType.create(coll, "type2", recordtype_create_values(coll._entityid, "type2"))
view2 = RecordView.create(coll, "view2", recordview_create_values(coll._entityid, "view2"))
list2 = RecordList.create(coll, "list2", recordlist_create_values(coll._entityid, "list2"))
data2 = RecordTypeData.create(coll, "type2", {})
#
for t,d in [(type1,data1),(type2,data2)]:
for eid in ["entity1", "entity2", "entity3"]:
e = EntityData.create(d, eid, entitydata_create_values(coll,t,eid))
return
def collbib_create_data(site):
bibcoll = Site.initialize_bib_data(site,
settings.SITE_SRC_ROOT+"/annalist/data/bibdata",
# label="Bibliographic definitions",
# description="Bibliographic definitions for testing"
)
return
# -----------------------------------------------------------------------------
#
# CreateSiteData
#
# -----------------------------------------------------------------------------
class CreateSiteData(AnnalistTestCase):
"""
Tests for Site object interface
"""
def setUp(self):
return
def tearDown(self):
return
# -----------------------------------------------------------------------------
# Create site data
# -----------------------------------------------------------------------------
def make_CreateDevelSiteData(self):
        # Note: copySitedata also copies data from the source tree
# def copySitedata(src, sitedatasrc, tgt):
copySitedata(
settings.SITE_SRC_ROOT+"/devel/"+test_layout.SITE_DIR,
settings.SITE_SRC_ROOT+"/annalist/data/sitedata",
TestBaseDir)
# Use localhost base URI for devel site
develsite = site_create_data("http://localhost:8000/annalist/", "devel")
coll123_create_data(develsite)
return
def test_CreateTestSiteData(self):
testsite = site_create_data(TestBaseUri, "testinit")
coll123_create_data(testsite)
return
#@@
# def test_CreateBibTestSiteData(self):
# testsite = site_create_data(TestBaseUri, "bibtestinit")
# collbib_create_data(testsite)
# coll123_create_data(testsite)
# return
#@@
def test_CreateEmptySiteData(self):
emptysite = site_create_data(TestBaseUri, "empty")
return
# End.
|
11575678
|
from bisect import bisect_left
from typing import List
class Solution:
def maxEnvelopes(self, envelopes: List[List[int]]) -> int:
envelopes.sort(key=lambda x: [x[0], -x[1]])
        def lis(nums):  # length of the longest strictly increasing subsequence
cells = []
for idx, val in enumerate(nums):
if not cells or val > cells[-1]:
cells.append(val)
continue
cells[bisect_left(cells, val)] = val
return len(cells)
nums = [_[1] for _ in envelopes]
        return lis(nums)
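# A minimal usage sketch (illustrative values): after sorting widths ascending
# and heights descending on ties, the answer is the LIS over heights.
if __name__ == "__main__":
    assert Solution().maxEnvelopes([[5, 4], [6, 4], [6, 7], [2, 3]]) == 3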
|
11575680
|
from relay_ftdi import *
import time
import sys
RELEASE_TIME = .5
ON_TIME = 1
OFF_TIME = 6
def initialize_power():
#open_serial()
#reset_device()
pass
def power_on(device):
print "powering on device %d..." % device
close_relay(device)
time.sleep(RELEASE_TIME)
open_relay(device)
time.sleep(ON_TIME)
close_relay(device)
print "...done"
def power_off(device):
print "shutting down device %d..." % device
close_relay(device)
time.sleep(RELEASE_TIME)
open_relay(device)
time.sleep(OFF_TIME)
close_relay(device)
print "...done"
if __name__ == "__main__":
initialize_power()
power_on(int(sys.argv[1]))
time.sleep(5)
power_off(int(sys.argv[1]))
|
11575707
|
import sys
import os
from subprocess import check_output
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.join("..", "..")
),
)
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx_click"]
project = "Testplan"
copyright = "2018, <NAME>"
author = ""
master_doc = "index"
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
NEWS_FILE = "news.rst"
GENERATE_NEWS_COMMAND = "releaseherald generate "
def generate_news():
news_content = check_output(GENERATE_NEWS_COMMAND, shell=True)
with open(NEWS_FILE, "wb") as news_file:
news_file.write(news_content)
def setup(app):
app.add_stylesheet("icon.css")
generate_news()
|
11575711
|
from datetime import datetime
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
class ExecutionLoggerManager(object):
"""Logger to log all the ir commands with all the parameters. """
FILE_ARGUMENTS = ['--from-file']
def __init__(self,
ansible_config_path,
log_name="ir-commands",
log_file='ir-commands.log',
log_level=logging.INFO):
self.log = logging.getLogger(log_name)
is_log_present = os.path.isfile(log_file)
self.log.addHandler(RotatingFileHandler(
log_file, maxBytes=5 * 1024 * 1024, backupCount=1))
self.log.setLevel(log_level)
# add extra line if log file is new
if not is_log_present:
self.log.info(
"# infrared setup instruction: "
"http://infrared.readthedocs.io/en/latest/bootstrap.html"
"#setup\n")
self.log_file(ansible_config_path)
def command(self):
"""Saves current ir command with arguments to the log. """
self.log.info("# executed at %s", datetime.now())
# ensure we see the content of the answers file
for file_option in self.FILE_ARGUMENTS:
if file_option in sys.argv:
file_index = sys.argv.index(file_option)
if file_index and len(sys.argv) >= file_index + 2:
self.log_file(sys.argv[file_index + 1])
self.log.info("infrared %s", " ".join(sys.argv[1:]).replace(
' -', ' \\\n -'))
self.log.info("")
def log_file(self, file_name):
"""Logs the file to be used with the infrared. """
if os.path.isfile(file_name):
file_dir = os.path.dirname(file_name)
if file_dir:
self.log.info("mkdir -p %s", file_dir)
with open(file_name) as conf_file:
self.log.info(
"# create file\n"
"cat << EOF > %s\n"
"%s"
"\nEOF\n", file_name, conf_file.read())
|
11575718
|
import re
import string
def normalize_span(s):
if not s:
return ""
# Lower text and remove punctuation, articles and extra whitespace.
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
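# A minimal usage sketch (illustrative values):
if __name__ == "__main__":
    assert normalize_span("The  Quick, Brown Fox!") == "quick brown fox"
    assert normalize_span("") == ""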
|
11575750
|
from magichour.api.local.modelgen import preprocess
from magichour.api.local.util.log import get_logger, log_time
logger = get_logger(__name__)
def read_transforms_substep(transforms_file):
# These transforms are tailored to this dataset.
# You will likely need to write your own transforms for your own data.
logger.info("Reading transforms from file: %s" % transforms_file)
transforms = preprocess.get_transforms(transforms_file)
return transforms
def read_lines_substep(log_file, *args, **kwargs):
logger.info("Reading log lines from file: %s" % log_file)
if kwargs.get('gettime_auditd'):
# Read timestamp in auditd format
preprocess_read_log_function = preprocess.read_auditd_file
else:
preprocess_read_log_function = preprocess.read_log_file
lines = preprocess_read_log_function(log_file, *args, **kwargs)
return lines
def transform_lines_substep(lines, transforms):
logger.info("Transforming log lines...")
transformed_lines = preprocess.transform_lines(lines, transforms)
return transformed_lines
def _transformed_lines_to_list_substep(transformed_lines):
return [line for line in transformed_lines]
@log_time
def preprocess_step(
log_file,
transforms_file=None,
        _transforms_cache={},  # intentional mutable default: per-process cache of parsed transforms
*args,
**kwargs):
lines = read_lines_substep(log_file, *args, **kwargs)
if transforms_file:
if transforms_file in _transforms_cache:
transforms = _transforms_cache[transforms_file]
else:
transforms = _transforms_cache[
transforms_file] = read_transforms_substep(transforms_file)
transformed_lines = transform_lines_substep(lines, transforms)
else:
transformed_lines = lines
    # transform_lines returns a generator; this converts it to a list.
transformed_lines = _transformed_lines_to_list_substep(transformed_lines)
return transformed_lines
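# A minimal usage sketch (hypothetical file paths; transforms are cached per
# process via the mutable default above):
# lines = preprocess_step("auth.log", transforms_file="transforms.cfg")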
|
11575808
|
from subprocess import run
# test snpeff annotation
command = 'python ../helpers/snpeff.py -i sample.1000.vcf'
#run(command, shell=True)
|
11575820
|
import sys
# we need Python 3.4+ for __del__ to work with circular references
assert sys.hexversion >= 0x03040000
from .idldsl import define_winrt_com_method, funcwrap, _new_rtobj
from .winstring import HSTRING
from .types import *
# unknwn
class IUnknown(c_void_p):
IID = '00000000-0000-0000-C000-000000000046'
QueryInterface = funcwrap(WINFUNCTYPE(check_hresult, REFGUID, VOIDPP)(0, "QueryInterface"))
AddRef = funcwrap(WINFUNCTYPE(ULONG)(1, "AddRef"))
Release = funcwrap(WINFUNCTYPE(ULONG)(2, "Release"))
_vtblend = 2
_own_object = True
def _detach(self):
newptr = cast(self, c_void_p)
self.value = None
return newptr
def __del__(self):
if self._own_object and self.value is not None:
# print('IUnknown_Release', self.value)
self.Release()
def astype(self, interface_type):
iid = GUID(interface_type.IID)
obj = _new_rtobj(interface_type)
self.QueryInterface(byref(iid), byref(obj))
return obj
# inspectable
class TrustLevel:
_enum_type_ = INT
BaseTrust = 0
PartialTrust = 1
FullTrust = 2
class IInspectable(IUnknown):
IID = 'AF86E2E0-B12D-4c6a-9C5A-D7AA65101E90'
define_winrt_com_method(IInspectable, 'GetIids', POINTER(ULONG), POINTER(REFGUID), vtbl=3)
define_winrt_com_method(IInspectable, 'GetRuntimeClassName', retval=HSTRING, vtbl=4)
define_winrt_com_method(IInspectable, 'GetTrustLevel', retval=TrustLevel._enum_type_, vtbl=5)
# activation
class IActivationFactory(IInspectable):
IID = '00000035-0000-0000-c000-000000000046'
define_winrt_com_method(IActivationFactory, 'ActivateInstance', retval=IInspectable, vtbl=6)
|
11575822
|
import json
from pathlib import Path
__all__ = ["__version__", "__js__"]
__js__ = json.loads(
(Path(__file__).parent.resolve() / "labextension/package.json").read_bytes()
)
__version__ = __js__["version"]
|
11575842
|
import argparse
import sys
from custom_image_cli import __version__
# Parses command line arguments and assigns values to global variables
class ArgsParser(argparse.ArgumentParser):
def error(self, msg):
sys.stderr.write('Error: %s \n' % msg)
self.print_help()
sys.exit(2)
def parse_commandline_arguments(args=None):
if args is None:
args = sys.argv[1:]
main_parser = ArgsParser(prog="emr-on-eks-custom-image",
formatter_class=argparse.RawTextHelpFormatter)
main_parser.add_argument('--version', action='version',
version='Amazon EMR on EKS Custom Image CLI '
'\nVersion: {version}'.format(version=__version__))
subparsers = main_parser.add_subparsers(dest="command")
validate_image_parser = parse_validate_image(subparsers)
main_parser_args = main_parser.parse_args(args)
return main_parser_args
def parse_validate_image(subparsers):
validate_image_parser = subparsers.add_parser(name="validate-image",
formatter_class=argparse.RawTextHelpFormatter)
validate_image_parser.add_argument('--version', action='version',
version='%(prog)s \nVersion: {version}'.format(version=__version__))
validate_image_parser.add_argument("-i", "--local-image-uri",
help="specifies the name of image uri",
required=True)
validate_image_parser.add_argument("-r", "--release-name",
help="specifies the release name of the image. e.g. emr-5.32.0",
required=True)
validate_image_parser.add_argument("-t", "--image-type",
help="specifies the image runtime type. e.g. spark \ndefault runtime type is "
"spark")
return validate_image_parser
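# A minimal usage sketch (illustrative arguments):
# args = parse_commandline_arguments(
#     ["validate-image", "-i", "local/emr-image:latest", "-r", "emr-5.32.0"])
# args.command == "validate-image"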
|
11575854
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "ELI"
addresses_name = "2021-03-29T14:54:03.784744/Democracy_Club__06May2021.csv"
stations_name = "2021-03-29T14:54:03.784744/Democracy_Club__06May2021.csv"
elections = ["2021-05-06"]
csv_delimiter = ","
def station_record_to_dict(self, record):
# St Marys Church Church Lane Fotherby Louth
if record.polling_place_id == "9064":
record = record._replace(polling_place_easting="531703")
record = record._replace(polling_place_northing="391688")
# Church Institute Church Lane South Elkington LN11 OSA
if record.polling_place_id == "9060":
record = record._replace(polling_place_postcode="LN11 0SA")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "10008528388": # AMERICA FARM, HANNAH, ALFORD
record = record._replace(addressline6="LN13 9QP")
if uprn in [
"100030775814", # SEA SHADOW, CHURCHILL LANE, THEDDLETHORPE, MABLETHORPE
"200001828818", # GRANGE FARM, WELTON-LE-MARSH, SPILSBY
"10024297692", # LAKE VIEW STATION ROAD, LITTLE STEEPING
"100032167500", # WINDSOR COTTAGE, HAKERLEY BRIDGE, FRITHVILLE, BOSTON
"200002780835", # BARBRIDGE HOUSE, MAIN ROAD, SIBSEY, BOSTON
"100030754138", # NORTHERN LODGE, MAIN ROAD, SIBSEY, BOSTON
]:
return None
if record.addressline6 in [
"PE24 5RE",
"PE24 5UT",
"LN9 5JP",
"LN11 0EG",
"LN11 8DW",
"LN12 2HX",
"PE25 2PX",
"PE25 3BS",
"PE22 8DQ",
"PE25 1SH",
]:
return None
return super().address_record_to_dict(record)
|