code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import argparse
import json
import re
import subprocess
import sys
from time import sleep
from typing import Any
def run_command(cmd: str, print_all: bool = False) -> str:
    """Run a shell command and stream its stdout line by line.

    :param cmd: shell command line, executed with ``shell=True``.
    :param print_all: when True, echo each output line as it arrives.
    :return: the command's complete stdout decoded as UTF-8.
    """
    process = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
    chunks = []
    # readline yields b"" once the process closes its stdout.
    for line in iter(process.stdout.readline, b""):  # mypy: ignore[union-attr]
        decoded = line.decode("utf-8")
        chunks.append(decoded)
        if print_all:
            print(decoded, end="")
    # Reap the child to avoid leaving a zombie process behind.
    process.wait()
    # BUGFIX: the old code appended an extra "\n" per line even though each
    # decoded line already ends with one, doubling every newline in the result.
    return "".join(chunks)
def cleanup() -> None:
    """Destroy every currently rented vast.ai instance."""
    instances = json.loads(run_command("./vast.py show instances --raw"))
    for server in instances:
        print(f"Start destroying {server['id']}")
        result = run_command(f"./vast.py destroy instance {server['id']}")
        print(result)
def startup() -> tuple[str, int]:
    """Rent the cheapest suitable vast.ai GPU instance and wait for boot.

    Searches the offer list with the hard requirements (single reliable GPU,
    fast/cheap network, enough disk, price cap), creates an instance from the
    best offer and polls every 5 s until it reports "running".

    :return: ``(ssh_host, ssh_port)`` of the started instance.
    """
    out = run_command(
        "./vast.py search offers 'reliability > 0.98 num_gpus==1 rentable==True"
        " inet_down > 100 disk_space > 30 dph_total < 0.25 inet_down_cost < 0.021"
        " inet_up_cost < 0.021 cuda_vers >= 11.2' -o 'dph_total' --storage=32 --raw"
    )
    dict_out = json.loads(out)
    print("Starting best server")
    if len(dict_out) == 0:
        print("NO SERVER FOUND")
        sys.exit(1)
    print(dict_out[0])
    out = run_command(
        f"./vast.py create instance {dict_out[0]['id']} "
        "--image joennlae/halutmatmul-conda-gpu:latest --disk 32"
    )
    print(out)
    starting = True
    counter = 1
    ssh_host = ""
    ssh_port = 0
    while starting:
        print(f"Starting {counter}")
        sleep(5)
        # f-prefix removed: the string contains no placeholders.
        out = run_command("./vast.py show instances --raw")
        out_dict = json.loads(out)
        if len(out_dict):
            print(out_dict[0]["status_msg"])
            if ssh_port == 0:
                # Remember the SSH endpoint the first time it is reported.
                ssh_host = out_dict[0]["ssh_host"]
                ssh_port = out_dict[0]["ssh_port"]
            if out_dict[0]["actual_status"] == "running":
                starting = False
        counter += 1
    return ssh_host, ssh_port
def run_ssh_commands(ssh_host: str, ssh_port: int, debug: bool = False) -> int:
    """Run the GPU test suite on the remote instance over SSH.

    The remote session moves the preinstalled venv aside, activates it,
    clones the repository and runs the GPU-marked pytest suite, echoing the
    shell exit code so it can be scraped from the captured output.

    :param ssh_host: hostname of the rented instance.
    :param ssh_port: SSH port of the rented instance.
    :param debug: when True, pass an explicit SSH identity file.
    :return: exit code reported by the remote test run.
    """
    commands = 'cd /; mv /venv/ /venv2; source /venv2/bin/activate; \
        git clone https://github.com/joennlae/halutmatmul.git; \
        cd halutmatmul; pytest -n0 -srPA -k "gpu"; \
        echo "ERROR CODE: $?";'
    identity_flag = "-i .ssh/id_rsa" if debug else ""
    print("SSH host", ssh_host)
    print("SSH port", ssh_port)
    out = run_command(
        f"ssh -o StrictHostKeyChecking=no {identity_flag} "
        f'-p {ssh_port} root@{ssh_host} "{commands}"',
        print_all=True,
    )
    # Scrape the echoed exit code out of the remote output.
    matches = re.findall(r"(?<=ERROR CODE: )\d+", out)
    print("ERROR CODE: ", matches)
    return int(matches[0])
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Vast.ai helper")
    parser.add_argument("--cleanup", "-c", action="store_true", help="run only cleanup")
    parser.add_argument(
        "--debug", "-d", action="store_true", help="set ssh key offline"
    )
    args = parser.parse_args()
    print(args)
    if args.cleanup:
        cleanup()
    else:
        # Full run: start from a clean slate, rent a machine, run the remote
        # test suite, then always tear the instance down again.
        cleanup()
        ssh_host, ssh_port = startup()
        # ssh_host = "ssh4.vast.ai"
        # ssh_port = 11182
        sleep(5)
        error_code = run_ssh_commands(ssh_host, ssh_port, args.debug)
        cleanup()
        # Propagate the remote pytest exit code to the caller/CI.
        sys.exit(error_code)
| [
"json.loads",
"argparse.ArgumentParser",
"subprocess.Popen",
"time.sleep",
"sys.exit",
"re.findall"
] | [((300, 359), 'subprocess.Popen', 'subprocess.Popen', (['[cmd]'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '([cmd], stdout=subprocess.PIPE, shell=True)\n', (316, 359), False, 'import subprocess\n'), ((810, 825), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (820, 825), False, 'import json\n'), ((1349, 1364), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (1359, 1364), False, 'import json\n'), ((3203, 3243), 're.findall', 're.findall', (['"""(?<=ERROR CODE: )\\\\d+"""', 'out'], {}), "('(?<=ERROR CODE: )\\\\d+', out)\n", (3213, 3243), False, 'import re\n'), ((3355, 3408), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Vast.ai helper"""'}), "(description='Vast.ai helper')\n", (3378, 3408), False, 'import argparse\n'), ((1468, 1479), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1476, 1479), False, 'import sys\n'), ((1811, 1819), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (1816, 1819), False, 'from time import sleep\n'), ((1900, 1915), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (1910, 1915), False, 'import json\n'), ((3827, 3835), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (3832, 3835), False, 'from time import sleep\n'), ((3932, 3952), 'sys.exit', 'sys.exit', (['error_code'], {}), '(error_code)\n', (3940, 3952), False, 'import sys\n')] |
# Copyright (c) <NAME>. All Rights Reserved.
import torch
import torch.nn as nn
from .backbones import get_backbone
# Mapping from backbone family to the module names that make up its stem and
# four feature blocks; consumed by build_blocks() and build_feature_hooks().
BLOCKNAMES = {
    "resnet": {
        "stem": ["conv1", "bn1", "relu", "maxpool"],
        "block1": ["layer1"],
        "block2": ["layer2"],
        "block3": ["layer3"],
        "block4": ["layer4"],
    },
    "clipresnet": {
        "stem": ["conv1", "bn1", "conv2", "bn2", "conv3", "bn3", "relu", "avgpool"],
        "block1": ["layer1"],
        "block2": ["layer2"],
        "block3": ["layer3"],
        "block4": ["layer4"],
    },
    "clipvit": {  # vit-base
        "stem": ["conv1"],
        "block1": ["transformer.resblocks.0", "transformer.resblocks.1", "transformer.resblocks.2"],
        "block2": ["transformer.resblocks.3", "transformer.resblocks.4", "transformer.resblocks.5"],
        "block3": ["transformer.resblocks.6", "transformer.resblocks.7", "transformer.resblocks.8"],
        "block4": ["transformer.resblocks.9", "transformer.resblocks.10", "transformer.resblocks.11"],
    },
    "regnety": {
        "stem": ["stem"],
        "block1": ["trunk_output.block1"],
        "block2": ["trunk_output.block2"],
        "block3": ["trunk_output.block3"],
        "block4": ["trunk_output.block4"]
    },
}
def get_module(module, name):
    """Return the submodule of *module* whose qualified name equals *name*.

    Returns None when no submodule matches.
    """
    matches = (m for n, m in module.named_modules() if n == name)
    return next(matches, None)
def build_blocks(model, block_name_dict):
    """Group the model's submodules into one ModuleList per block.

    The outer container is a plain list rather than a ModuleList because a
    saved model can be broken otherwise (per the original note).
    """
    blocks = []
    for name_list in block_name_dict.values():
        members = [get_module(model, module_name) for module_name in name_list]
        blocks.append(nn.ModuleList(members))
    return blocks
def freeze_(model):
    """Disable gradient tracking for every parameter of *model*.

    Note that this function does not control BN.
    """
    for param in model.parameters():
        param.requires_grad = False
class URResNet(torch.nn.Module):
    """ResNet + FrozenBN + IntermediateFeatures

    Wraps a backbone network, registers forward hooks that collect
    intermediate features during each forward pass, optionally freezes
    leading blocks, and keeps all BatchNorm layers in eval mode.
    """
    def __init__(self, input_shape, hparams, preserve_readout=False, freeze=None, feat_layers=None):
        """
        :param input_shape: must be (3, 224, 224).
        :param hparams: hyper-parameter container; ``model``, ``pretrained``
            and ``resnet_dropout`` are read here.
        :param preserve_readout: keep the backbone's readout head (then
            dropout must be disabled).
        :param freeze: None, "all", or a block index; blocks up to that
            index are frozen.
        :param feat_layers: None, "stem_block" or "block" — which layers get
            feature-collection hooks.
        """
        assert input_shape == (3, 224, 224), input_shape
        super().__init__()
        self.network, self.n_outputs = get_backbone(hparams.model, preserve_readout, hparams.pretrained)
        if hparams.model == "resnet18":
            block_names = BLOCKNAMES["resnet"]
        elif hparams.model.startswith("resnet50"):
            block_names = BLOCKNAMES["resnet"]
        elif hparams.model.startswith("clip_resnet"):
            block_names = BLOCKNAMES["clipresnet"]
        elif hparams.model.startswith("clip_vit"):
            block_names = BLOCKNAMES["clipvit"]
        elif hparams.model == "swag_regnety_16gf":
            block_names = BLOCKNAMES["regnety"]
        elif hparams.model.startswith("vit"):
            # NOTE(review): BLOCKNAMES has no "vit" entry in this file —
            # confirm it is defined elsewhere or this branch will KeyError.
            block_names = BLOCKNAMES["vit"]
        else:
            raise ValueError(hparams.model)
        self._features = []
        self.feat_layers = self.build_feature_hooks(feat_layers, block_names)
        self.blocks = build_blocks(self.network, block_names)
        self.freeze(freeze)
        if not preserve_readout:
            self.dropout = nn.Dropout(hparams["resnet_dropout"])
        else:
            self.dropout = nn.Identity()
            assert hparams["resnet_dropout"] == 0.0
        self.hparams = hparams
        self.freeze_bn()

    def freeze(self, freeze):
        """Freeze the whole backbone ("all") or blocks [0..freeze]."""
        if freeze is not None:
            if freeze == "all":
                freeze_(self.network)
            else:
                for block in self.blocks[:freeze+1]:
                    freeze_(block)

    def hook(self, module, input, output):
        # Forward hook: stash this layer's output for later retrieval.
        self._features.append(output)

    def build_feature_hooks(self, feats, block_names):
        """Register forward hooks on the requested layers.

        :param feats: None (no hooks), "stem_block" (stem tail + block
            tails) or "block" (block tails only).
        :return: list of module names that received hooks.
        """
        # BUGFIX: the None check must come before the membership assert;
        # previously the assert always fired for the default feat_layers=None.
        if feats is None:
            return []
        assert feats in ["stem_block", "block"]
        # build feat layers
        if feats.startswith("stem"):
            last_stem_name = block_names["stem"][-1]
            feat_layers = [last_stem_name]
        else:
            feat_layers = []
        # Hook the last module of each non-stem block.
        for name, module_names in block_names.items():
            if name == "stem":
                continue
            module_name = module_names[-1]
            feat_layers.append(module_name)
        for n, m in self.network.named_modules():
            if n in feat_layers:
                m.register_forward_hook(self.hook)
        return feat_layers

    def forward(self, x, ret_feats=False):
        """Encode x into a feature vector of size n_outputs."""
        self.clear_features()
        out = self.dropout(self.network(x))
        if ret_feats:
            return out, self._features
        else:
            return out

    def clear_features(self):
        # Drop features collected by the hooks during the previous forward.
        self._features.clear()

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super().train(mode)
        self.freeze_bn()

    def freeze_bn(self):
        # Keep BatchNorm running statistics frozen regardless of train/eval.
        for m in self.network.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
def URFeaturizer(input_shape, hparams, **kwargs):
    """Auto-select an appropriate featurizer for the given input shape."""
    if input_shape[1:3] != (224, 224):
        raise NotImplementedError(f"Input shape {input_shape} is not supported")
    return URResNet(input_shape, hparams, **kwargs)
| [
"torch.nn.ModuleList",
"torch.nn.Identity",
"torch.nn.Dropout"
] | [((1561, 1576), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1574, 1576), True, 'import torch.nn as nn\n'), ((3190, 3227), 'torch.nn.Dropout', 'nn.Dropout', (["hparams['resnet_dropout']"], {}), "(hparams['resnet_dropout'])\n", (3200, 3227), True, 'import torch.nn as nn\n'), ((3269, 3282), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (3280, 3282), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from setuptools import setup, find_packages
except ImportError:
    # BUGFIX: the old bare `except:` fell back to importing find_packages
    # from distutils.core, which does not provide it and raised ImportError
    # anyway. Use a minimal stand-in so setup() can still run.
    from distutils.core import setup

    def find_packages():
        return []

# Read the long description up front so the file handle is closed promptly.
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name='yaml-resume',
    version='0.0.1',
    description='Generates multiple resume/CV formats from a single YAML source.',
    long_description=long_description,
    keywords='yaml, resume',
    author='<NAME>',
    author_email='<EMAIL>',
    license='GPLv2',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
    ]
)
| [
"distutils.core.find_packages"
] | [((470, 485), 'distutils.core.find_packages', 'find_packages', ([], {}), '()\n', (483, 485), False, 'from distutils.core import setup, find_packages\n')] |
#
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from core.vtg.emg.common import get_conf_property
from core.vtg.emg.common.c.types import Declaration, Function, Array, Pointer, Primitive
from core.vtg.emg.processGenerator.linuxModule.interface import Resource, Callback, StructureContainer, \
FunctionInterface
def yield_categories(collection):
    """
    Analyze all new types found by SA component and yield final set of interface categories built from manually prepared
    interface specifications and global variables. All new categories and interfaces are added directly to the
    InterfaceCategoriesSpecification object. Also all types declarations are updated according with new imported C
    types. However, there are still unused interfaces present in the collection after this function termination.

    :param collection: InterfaceCategoriesSpecification object; its ``conf``
                       attribute supplies the configuration properties.
    :return: None
    """
    # Add resources
    if get_conf_property(collection.conf, "generate new resource interfaces"):
        __populate_resources(collection)
    # Complement interface references
    __complement_interfaces(collection)
    return
def __populate_resources(collection):
    """
    Introduce new Resource interfaces for callback parameters that occur more
    than once within a category but could not be matched (even weakly) to any
    existing interface.

    :param collection: InterfaceCategoriesSpecification object.
    :return: None
    """
    # Iterate over categories
    for category in collection.categories:
        # Count unmatched parameter declarations per identifier.
        usage = dict()
        # Extract callbacks
        for callback in collection.callbacks(category):
            # Consider only Declaration parameters without an already
            # assigned parameter interface at the same position.
            for parameter in (p for i, p in enumerate(callback.declaration.points.parameters)
                              if isinstance(p, Declaration) and
                              not (len(callback.param_interfaces) > i and callback.param_interfaces[i])):
                if parameter.identifier in usage:
                    usage[parameter.identifier]["counter"] += 1
                else:
                    # Try to resolve interface
                    intfs = collection.resolve_interface_weakly(parameter, category=callback.category, use_cache=False)
                    if len(intfs) == 0:
                        # Only unmatched resources should be introduced
                        usage[parameter.identifier] = {
                            "counter": 1,
                            "declaration": parameter
                        }
        # Introduce new resources for declarations seen at least twice.
        for declaration in (usage[i]["declaration"] for i in usage if usage[i]["counter"] > 1):
            # Prefer the plain pretty name; fall back to an 'ldv_' prefix if
            # the plain identifier is already taken within the category.
            if "{}.{}".format(category, declaration.pretty_name) not in collection.interfaces:
                identifier = declaration.pretty_name
            elif "{}.{}".format(category, 'ldv_' + declaration.pretty_name) not in collection.interfaces:
                identifier = 'ldv_' + declaration.pretty_name
            else:
                raise RuntimeError("Cannot yield identifier for callback {!r} of category {!r}".
                                   format(declaration.identifier, category))
            interface = Resource(category, identifier)
            interface.declaration = declaration
            collection.set_intf(interface)
    return
def fulfill_function_interfaces(collection, interface, category=None):
    """
    Check an interface declaration (function or function pointer) and try to match its return value type and
    parameters arguments types with existing interfaces. The algorithm should be the following:

    * Match explicitly stated interface References (only if they meet given category).
    * Match rest parameters:
      - Avoid matching primitives and arrays and pointers of primitives;
      - Match interfaces from given category or from the category of already matched interfaces by interface
        references;
      - If more than one category is matched - do not match to avoid mistakes.

    :param collection: InterfaceCategoriesSpecification object.
    :param interface: Interface object: KernelFunction or Callback.
    :param category: Category filter.
    :return: None.
    """
    def is_primitive_or_void(decl):
        """
        Return True if given declaration object has type of Primitive or pointer(* and **) to Primitive.

        :param decl: Declaration object
        :return: True - it is primitive, False - otherwise
        """
        # todo: Implement check against arrays of primitives
        if isinstance(decl, Primitive) or (isinstance(decl, Pointer) and isinstance(decl.points, Primitive)) or \
                decl.identifier in {'void *', 'void **'}:
            return True
        else:
            return False
    collection.logger.debug("Try to match collateral interfaces for function '{!r}'".format(interface.identifier))
    # Check declaration type: Callbacks store a pointer-to-function, so the
    # actual function declaration sits behind ``.points``.
    if isinstance(interface, Callback):
        declaration = interface.declaration.points
    elif isinstance(interface, FunctionInterface):
        declaration = interface.declaration
    else:
        raise TypeError('Expect pointer to function or function declaration but got {!r}'.
                        format(str(type(interface.declaration).__name__)))
    # Second match rest types: try strict resolution first, then weak.
    if not interface.rv_interface and declaration.return_value and not is_primitive_or_void(declaration.return_value):
        rv_interface = collection.resolve_interface(declaration.return_value, category, False)
        if len(rv_interface) == 0:
            rv_interface = collection.resolve_interface_weakly(declaration.return_value, category, False)
        if len(rv_interface) == 1:
            interface.rv_interface = rv_interface[-1]
        elif len(rv_interface) > 1:
            # Ambiguous match: log a warning and leave rv_interface unset.
            collection.logger.warning(
                'Interface {!r} return value signature {!r} can be match with several following interfaces: {}'.
                format(interface.identifier, declaration.return_value.identifier,
                       ', '.join((i.identifier for i in rv_interface))))
    for index in range(len(declaration.parameters)):
        # Skip positions already matched, string placeholders and primitives.
        if not (len(interface.param_interfaces) > index and interface.param_interfaces[index]) and \
                not isinstance(declaration.parameters[index], str) and \
                not is_primitive_or_void(declaration.parameters[index]):
            p_interface = collection.resolve_interface(declaration.parameters[index], category, False)
            if len(p_interface) == 0:
                p_interface = collection.resolve_interface_weakly(declaration.parameters[index], category, False)
            if len(p_interface) == 1:
                p_interface = p_interface[0]
            elif len(p_interface) == 0:
                p_interface = None
            else:
                # Ambiguous parameter match: warn and do not match at all.
                collection.logger.warning(
                    'Interface {!r} parameter in the position {} with signature {!r} can be match with several '
                    'following interfaces: {}'.format(interface.identifier,
                                                      index, declaration.parameters[index].identifier,
                                                      ', '.join((i.identifier for i in p_interface))))
                p_interface = None
            interface.set_param_interface(index, p_interface)
            # Narrow the category filter using the first successful match.
            if p_interface and not category:
                category = p_interface.category
def __complement_interfaces(collection):
    """
    Resolve remaining interface references: callback and kernel-function
    parameters, array element interfaces, and structure field interfaces
    (including aligning callback declarations stored in structure fields).

    :param collection: InterfaceCategoriesSpecification object.
    :return: None
    """
    def __match_interface_for_container(signature, category, id_match):
        # Prefer a unique weak match; fall back to strict resolution and, if
        # several strict candidates exist, disambiguate by short identifier.
        candidates = collection.resolve_interface_weakly(signature, category, use_cache=False)
        if len(candidates) == 1:
            return candidates[0]
        elif len(candidates) == 0:
            return None
        else:
            strict_candidates = collection.resolve_interface(signature, category, use_cache=False)
            if len(strict_candidates) == 1:
                return strict_candidates[0]
            elif len(strict_candidates) > 1 and id_match:
                id_candidates = [i for i in strict_candidates if i.short_identifier == id_match]
                if len(id_candidates) == 1:
                    return id_candidates[0]
                else:
                    return None
            # Reached when several strict candidates exist but there is no
            # identifier to disambiguate by.
            if len(strict_candidates) > 1:
                raise RuntimeError('There are several interfaces with the same declaration {}'.
                                   format(signature.to_string('a')))
            # Filter of resources
            candidates = [i for i in candidates if not isinstance(i, Resource)]
            if len(candidates) == 1:
                return candidates[0]
            else:
                return None
    # Resolve callback parameters
    for callback in collection.callbacks():
        fulfill_function_interfaces(collection, callback, callback.category)
    # Resolve kernel function parameters
    for func in collection.function_interfaces:
        fulfill_function_interfaces(collection, func)
    # todo: Remove dirty declarations in container references and add additional clean one
    # Resolve array elements
    for container in (cnt for cnt in collection.containers() if cnt.declaration and
                      isinstance(cnt.declaration, Array) and not cnt.element_interface):
        intf = __match_interface_for_container(container.declaration.element, container.category, None)
        if intf:
            container.element_interface = intf
    # Resolve structure interfaces
    for container in (cnt for cnt in collection.containers() if cnt.declaration and
                      isinstance(cnt, StructureContainer)):
        for field in container.declaration.fields:
            if field not in container.field_interfaces:
                intf = __match_interface_for_container(container.declaration.fields[field], container.category,
                                                       field)
                if intf:
                    container.field_interfaces[field] = intf
            # For callback-typed fields whose declarations are both
            # pointer-to-function, adopt the structure's field declaration.
            if field in container.field_interfaces and isinstance(container.field_interfaces[field], Callback) and \
                    isinstance(container.declaration.fields[field], Pointer) and \
                    isinstance(container.declaration.fields[field].points, Function) and \
                    isinstance(container.field_interfaces[field].declaration, Pointer) and \
                    isinstance(container.field_interfaces[field].declaration.points, Function):
                # Track implementations from structures if types slightly differs and attached to structure variable
                container.field_interfaces[field].declaration = container.declaration.fields[field]
    return
| [
"core.vtg.emg.processGenerator.linuxModule.interface.Resource",
"core.vtg.emg.common.get_conf_property"
] | [((1668, 1738), 'core.vtg.emg.common.get_conf_property', 'get_conf_property', (['collection.conf', '"""generate new resource interfaces"""'], {}), "(collection.conf, 'generate new resource interfaces')\n", (1685, 1738), False, 'from core.vtg.emg.common import get_conf_property\n'), ((3613, 3643), 'core.vtg.emg.processGenerator.linuxModule.interface.Resource', 'Resource', (['category', 'identifier'], {}), '(category, identifier)\n', (3621, 3643), False, 'from core.vtg.emg.processGenerator.linuxModule.interface import Resource, Callback, StructureContainer, FunctionInterface\n')] |
import re
# Compiled matcher for simple lowercase e-mail addresses: a local part of at
# least two characters with an optional single inner '.' or '_', then
# "@domain.tld".
# BUGFIX: the annotation previously referenced an undefined name 'Pattern';
# re.Pattern is the actual runtime type of a compiled expression.
email_pattern: re.Pattern = re.compile(r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+$')
| [
"re.compile"
] | [((39, 94), 're.compile', 're.compile', (['"""^[a-z0-9]+[\\\\._]?[a-z0-9]+[@]\\\\w+[.]\\\\w+$"""'], {}), "('^[a-z0-9]+[\\\\._]?[a-z0-9]+[@]\\\\w+[.]\\\\w+$')\n", (49, 94), False, 'import re\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 19:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.9): adds DataPoint.collected_at as a
    # non-null DateTimeField backfilled with the generation timestamp (the
    # one-off default is not preserved), and makes DataPoint.extra optional.
    dependencies = [
        ('datapoint', '0002_auto_20160203_1929'),
    ]
    operations = [
        migrations.AddField(
            model_name='datapoint',
            name='collected_at',
            field=models.DateTimeField(default=datetime.datetime(2016, 2, 3, 19, 43, 23, 995365, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='datapoint',
            name='extra',
            field=models.TextField(blank=True),
        ),
    ]
| [
"datetime.datetime",
"django.db.models.TextField"
] | [((714, 742), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (730, 742), False, 'from django.db import migrations, models\n'), ((492, 553), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(2)', '(3)', '(19)', '(43)', '(23)', '(995365)'], {'tzinfo': 'utc'}), '(2016, 2, 3, 19, 43, 23, 995365, tzinfo=utc)\n', (509, 553), False, 'import datetime\n')] |
from keras.models import load_model
import numpy as np
import cv2
import pickle
from image_segmentation import segment_image
from neural_network import resize_to_fit
# Trained Keras model and the pickled label binarizer produced at training time.
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
def solve_captcha(image):
    """Segment *image* into characters, classify each with the trained model,
    and return the annotated image plus the predicted CAPTCHA text.

    :param image: captcha image; assumed single-channel (grayscale) since it
        is merged into 3 channels for drawing — TODO confirm at call sites.
    :return: ``(annotated_image, text)`` on success, ``(None, '')`` when no
        characters were segmented.
    """
    # Load up the model labels
    with open(MODEL_LABELS_FILENAME, "rb") as f:
        lb = pickle.load(f)
    # Load up the trained model
    model = load_model(MODEL_FILENAME)
    # We do not know the number of characters here
    chars = segment_image(image, -1)
    if len(chars) > 0:
        output = cv2.merge([image] * 3)
        predictions = []
        # Loop over the characters
        for bounding_box in chars:
            x, y, w, h = bounding_box
            # Extract the char from the input image (2px margin each side)
            char_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
            # Re-size the letter image to 60x60 pixels to match training data
            char_image = resize_to_fit(char_image, 60, 60)
            if char_image is not None:
                # Expand dimensions: add channel axis, then batch axis
                char_image = np.expand_dims(char_image, axis=2)
                char_image = np.expand_dims(char_image, axis=0)
                # Use the model to make a prediction
                prediction = model.predict(char_image)
                # Convert the encoded prediction to specific label
                label = lb.inverse_transform(prediction)[0]
                predictions.append(label)
                # draw the prediction on the output image
                cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
                cv2.putText(output, label, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
        # Print captcha
        captcha_text = "".join(predictions)
        print("CAPTCHA is: {}".format(captcha_text))
        return output, captcha_text
    return None, ''
| [
"cv2.rectangle",
"cv2.merge",
"keras.models.load_model",
"pickle.load",
"cv2.putText",
"numpy.expand_dims",
"image_segmentation.segment_image",
"neural_network.resize_to_fit"
] | [((429, 455), 'keras.models.load_model', 'load_model', (['MODEL_FILENAME'], {}), '(MODEL_FILENAME)\n', (439, 455), False, 'from keras.models import load_model\n'), ((520, 544), 'image_segmentation.segment_image', 'segment_image', (['image', '(-1)'], {}), '(image, -1)\n', (533, 544), False, 'from image_segmentation import segment_image\n'), ((369, 383), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (380, 383), False, 'import pickle\n'), ((586, 608), 'cv2.merge', 'cv2.merge', (['([image] * 3)'], {}), '([image] * 3)\n', (595, 608), False, 'import cv2\n'), ((963, 996), 'neural_network.resize_to_fit', 'resize_to_fit', (['char_image', '(60)', '(60)'], {}), '(char_image, 60, 60)\n', (976, 996), False, 'from neural_network import resize_to_fit\n'), ((1102, 1136), 'numpy.expand_dims', 'np.expand_dims', (['char_image'], {'axis': '(2)'}), '(char_image, axis=2)\n', (1116, 1136), True, 'import numpy as np\n'), ((1166, 1200), 'numpy.expand_dims', 'np.expand_dims', (['char_image'], {'axis': '(0)'}), '(char_image, axis=0)\n', (1180, 1200), True, 'import numpy as np\n'), ((1555, 1632), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 2, y - 2)', '(x + w + 4, y + h + 4)', '(0, 255, 0)', '(1)'], {}), '(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)\n', (1568, 1632), False, 'import cv2\n'), ((1649, 1743), 'cv2.putText', 'cv2.putText', (['output', 'label', '(x - 5, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.55)', '(0, 255, 0)', '(2)'], {}), '(output, label, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55,\n (0, 255, 0), 2)\n', (1660, 1743), False, 'import cv2\n')] |
import wx
# Minimal wxPython "hello world": App(False) disables stdout/stderr
# redirection; the frame is an empty top-level window.
app = wx.App(False)
frame = wx.Frame(None, wx.ID_ANY, "Hello, World")
frame.Show(True)
app.MainLoop()  # blocks until the last top-level window closes
| [
"wx.Frame",
"wx.App"
] | [((17, 30), 'wx.App', 'wx.App', (['(False)'], {}), '(False)\n', (23, 30), False, 'import wx\n'), ((39, 80), 'wx.Frame', 'wx.Frame', (['None', 'wx.ID_ANY', '"""Hello, World"""'], {}), "(None, wx.ID_ANY, 'Hello, World')\n", (47, 80), False, 'import wx\n')] |
import matplotlib.pyplot as plt
def apply():
    """Apply all MPL settings: the notebook style, a high open-figure
    warning threshold, and a wide default figure size."""
    plt.style.use('seaborn-notebook')
    plt.rcParams['figure.max_open_warning'] = 1000
    plt.rcParams['figure.figsize'] = (12, 6)
| [
"matplotlib.pyplot.style.use"
] | [((161, 194), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-notebook"""'], {}), "('seaborn-notebook')\n", (174, 194), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import discord, configparser, random, re, requests, random
from time import sleep
class InvalidDieException(Exception):
    """Raised when a die specification cannot be rolled.

    ``error`` selects which message ``str()`` produces.
    """
    def __init__(self, die, error='invalid'):
        self.die = die
        self.error = error
    def __str__(self):
        special_messages = {
            'too many die': "Error: Too many die. Slow your roll.",
            'too many sides': "Error: IT'S OVER 9000!!!!1!11!11!one!!11",
        }
        default_message = "Error: Invalid Die: " + self.die
        return special_messages.get(self.error, default_message)
class DiscordBot:
    def __init__(self, configFile):
        # Read settings first, then load commands/channels/admins/ignore lists.
        self._loadConfig(configFile)
        self.cReload()
def _loadConfig(self, configFile):
self.config = configparser.ConfigParser()
self.config.read(configFile)
self.commandString = self.config.get(
'settings', 'commandString', fallback='!'
)
def cReload(self):
self.loadCommands()
self.loadChannels()
self.loadAdmins()
self.loadIgnore()
def loadCommands(self):
self.commands = {}
commandsFile = self.config.get(
'files', 'commands', fallback='commands.txt'
)
f = open(commandsFile, 'r')
commandGroup = ''
for line in f:
# Detect beginning of new commandGroup, "[[name]]"
m = re.match('\[\[(.*)\]\]', line)
if m:
commandGroup = m.group(1)
self.commands[commandGroup] = {}
continue
# If no current commandGroup is set, ignore this line
if not commandGroup:
continue
(command, response) = line.split("\t", 1)
if command in self.commands[commandGroup]:
self.commands[commandGroup][command].append(response.strip())
else:
self.commands[commandGroup][command]=[response.strip()]
f.close()
def loadChannels(self):
channelsFile = self.config.get(
'files', 'channels', fallback='channels.txt'
)
f = open(channelsFile, 'r')
self.commandGroups = {}
for line in f:
(channelId, commandGroups) = line.split("\t", 1)
self.commandGroups[channelId] = commandGroups.strip().split(",")
f.close()
def loadAdmins(self):
self.admins=[]
adminsFile = self.config.get(
'files', 'admins', fallback='admins.txt'
)
f = open(adminsFile, 'r')
for line in f:
id = line.strip()
if id != '': self.admins.append(id)
f.close()
def loadIgnore(self):
self.ignore=[]
ignoreFile = self.config.get(
'files', 'ignore', fallback='ignore.txt'
)
f = open(ignoreFile, 'r')
for line in f:
id = line.strip()
if id != '': self.ignore.append(id)
f.close()
    def handleLogin(self, user):
        # Remember our own user object so we can avoid replying to ourselves.
        print('Logged in as {}'.format(user.name))
        self.user = user
    def connect(self):
        """Create the discord client and log in with the configured credentials.

        NOTE(review): positional username/password login targets an old
        discord.py API — confirm against the installed library version.
        """
        self.client = discord.Client()
        self.client.login(
            self.config['authentication']['username'],
            self.config['authentication']['password']
        )
        return self.client
def isAdmin(self, user):
return user.id in self.admins
def isIgnored(self, user):
return (user.id in self.ignore or user == self.user)
    async def say(self, channel, message):
        """Send *message* to *channel*, then pause briefly for rate limiting."""
        print('\033[1;34m[\033[31m' + str(channel) + '\033[34m]\033[32m Replying ...\033[0m')
        # print('\033[1;34m[\033[0;31;1m' + str(channel) + '\033[1;34m]\033[0;32m Replying\033[0;33;1m: \033[0;32;1m' + str(message) + '\033[0;32m')
        await self.client.send_message(channel, message)
        # NOTE(review): time.sleep blocks the whole event loop; asyncio.sleep
        # would be the non-blocking equivalent — confirm intent.
        sleep(1)
    async def handleCommand(self, channel, message, sender):
        """Find the longest command prefix of *message* and send its response.

        Words are stripped from the right of the message one at a time and
        treated as parameters until a matching command is found in one of
        the channel's command groups.
        """
        # Are we listening in this channel?
        if channel.id not in self.commandGroups:
            return
        # Get the list of commandGroups for this channel
        commandGroups = self.commandGroups[channel.id]
        # Working backwards from the end of the string, remove
        # words until a command is found
        cmd = message.strip()
        params = ''
        response = False  # NOTE(review): unused; rawResponse carries the result
        while True:
            rawResponse = self.getRawCommandResponse(commandGroups, cmd.strip(), params.strip())
            if rawResponse != False:
                break
            spl = cmd.rsplit(' ',1)
            if len(spl) == 1:
                # Nothing left to strip — no command matched.
                break
            cmd = spl[0]
            params = spl[1] + ' ' + params
        if rawResponse != False:
            await self.processCommandResponse(channel, rawResponse, sender, params.strip())
    def getRawCommandResponse(self, commandGroups, cmd, params):
        """Return a random response template for *cmd*, or False if no match.

        A bare command matches only without params; a ``"cmd *"`` entry
        matches only when params are present.
        """
        # Single-spacify the command
        cmd = ' '.join(cmd.split()).lower()
        # Iterate over all commandGroups for the current channel
        for g in commandGroups:
            if g in self.commands:
                if cmd in self.commands[g] and params == '':
                    # Exact command match with no params
                    return random.choice(self.commands[g][cmd])
                elif (cmd + ' *') in self.commands[g] and params != '':
                    return random.choice(self.commands[g][cmd + ' *'])
        # We got to here with no result, so there is no matching command
        return False
    async def processCommandResponse(self, channel, response, sender, params):
        """Expand template placeholders in *response* and send the result.

        Supported placeholders: %LIST%, %SENDER%, %INPUT%, %CHOICE%, %ROLL%,
        %XKCD%, %RANDOM_XKCD%, %RANDOM_SONIC%, %RANDOM_FI%, %RANDOM_ELIAS%.
        """
        if "%LIST%" in response:
            # Need to get a list of subkeys. Out of scope right now.
            response = response.replace(
                "%LIST%", "This function is not yet implemented"
            )
        if "%SENDER%" in response:
            response = response.replace("%SENDER%", sender.name)
        if "%INPUT%" in response:
            response = response.replace("%INPUT%", params)
        if "%CHOICE%" in response:
            print(params)
            # Pick one option from a comma-separated params list.
            response = response.replace(
                "%CHOICE%",
                random.choice(params.split(',')).strip()
            )
        if "%ROLL%" in response:
            try:
                response = response.replace(
                    "%ROLL%",
                    self.diceRoll(params)
                )
            except InvalidDieException as e:
                # Die specs are user input; surface the error text instead.
                response = str(e)
        if "%XKCD%" in response:
            response = response.replace("%XKCD%", self.getXkcd(params))
        if "%RANDOM_XKCD%" in response:
            response = response.replace("%RANDOM_XKCD%", self.getRandomXkcd())
        if "%RANDOM_SONIC%" in response:
            response = response.replace("%RANDOM_SONIC%", self.getRandomSonic())
        if "%RANDOM_FI%" in response:
            response = response.replace("%RANDOM_FI%", self.getRandomFi())
        if "%RANDOM_ELIAS%" in response:
            response = response.replace("%RANDOM_ELIAS%", self.getRandomElias())
        await self.say(channel, response)
def diceRoll(self, dice):
dice = dice.split()
rolls = []
for die in dice:
dieDef = die.lower().split('d')
if len(dieDef) != 2:
raise InvalidDieException(die)
try:
if dieDef[0] == '':
number = 1
else:
number = int(dieDef[0])
sides = int(dieDef[1])
except ValueError:
raise InvalidDieException(die)
if number < 1 or sides < 2:
raise InvalidDieException(die)
if number > 20:
raise InvalidDieException(die, 'too many die')
if sides > 9000:
raise InvalidDieException(die, 'too many sides')
for i in range(number):
rolls.append(random.randint(1,sides))
return " + ".join(str(n) for n in rolls) + (" = " + str(sum(rolls)) if len(rolls) > 1 else '')
def getXkcd(self,number):
try:
r = requests.get("http://xkcd.com/{}/info.0.json".format(number))
except:
return "Sorry, I couldn't reach XKCD"
try:
title = r.json()['safe_title']
except:
return("Comic {} not found".format(number))
return("http://xkcd.com/{} (\"{}\")".format(number, title))
def getRandomXkcd(self):
try:
r = requests.get("http://xkcd.com/info.0.json")
latest = r.json()['num']
except:
return "Sorry, I couldn't reach XKCD"
return self.getXkcd(random.randint(1, latest))
def getRandomSonic(self):
responses = [
'<:sonic:281605314111995905> GOTTA GO FAST',
'<:sonic:281605314111995905> YOU\'RE TOO SLOW',
'<:sonic:281605314111995905> COME ON, STEP IT UP',
'<:sonic:281605314111995905> NOW I\'LL SHOW YOU',
'<:sonic:281605314111995905> TRUE FAST ISN\'T MEASURED IN MILES, IT COMES FROM THE HEART',
'<:sonic:281605314111995905> SONIC\'S THE NAME, SPEED\'S MY GAME',
'<:sonic:281605314111995905> YOOLO SWAG',
]
return responses[random.randint(0, (len(responses) - 1))]
def getRandomFi(self):
responses = [
'<:fi:281601396581597204> I predict an 85% chance you will find my results helpful.',
'<:fi:281601396581597204> Signs indicate a 75% chance that it is unlikely.',
'<:fi:281601396581597204> I calculate an 85% chance you can obtain information about it by asking my master instead.',
'<:fi:281601396581597204> 80% of the room itself is covered in a blanket of sand.',
'<:fi:281601396581597204> I calculate a 75% probability that visiting the fortune-teller will be helpful.',
'<:fi:281601396581597204> There is a 95% probability that you will discover something unexpected.',
'<:fi:281601396581597204> There is an 85% probability that any eventual results will be of use, Master.',
'<:fi:281601396581597204> I estimate you have less than a 1% chance of completing your quest.',
'<:fi:281601396581597204> I judge the probability of defeat at 30%.',
'<:fi:281601396581597204> He increased his muscle mass by 500%.',
'<:fi:281601396581597204> Over 80% of ghosts are said to harbor some kind of unfulfilled desire.',
'<:fi:281601396581597204> Sensitivity to human presence: 60%.',
'<:fi:281601396581597204> I project a 50% probability of failing.',
'<:fi:281601396581597204> You can expect at least an 85% chance that it will attempt to stab you.',
'<:fi:281601396581597204> I calculate the probability of your intense aggravation at 100%.',
'<:fi:281601396581597204> I calculate a 100% failure rate.',
'<:fi:281601396581597204> There is an 85% chance that something very important will occur here.',
'<:fi:281601396581597204> I calculate a 60% chance that a portion of the information contained in my analysis will be new to you.',
'<:fi:281601396581597204> I calculate a 95% chance your clothing will immediately combust upon entrance.',
'<:fi:281601396581597204> I can verify with only 40% accuracy that this person is a plant.',
]
return responses[random.randint(0, (len(responses) - 1))]
def getRandomElias(self):
responses = [
'<:elias:268180756306722832> There will be cake',
'<:elias:268180756306722832> Twilight Princess is a Wii game',
]
return responses[random.randint(0, (len(responses) - 1))]
    async def handleSystemCommand(self, channel, message, sender):
        """Handle a "system" command (double command prefix).

        'whoami' is available to everyone; 'reload', 'stop' and 'channels'
        require the sender to pass self.isAdmin().
        """
        print(self.isAdmin(sender))  # debug output, left in by the author
        print(channel.is_private)
        # if not channel.is_private: return
        # Split into the command word and everything after the first space;
        # the appended ' ' guarantees split() always yields two parts.
        (cmd, params) = (message.strip() + ' ').split(' ', 1)
        cmd = cmd.lower()
        # General commands
        if cmd == 'whoami':
            await self.say(channel, 'Your name is {} and your id is {}'
                .format(sender.name, sender.id))
        # Admin commands
        if not self.isAdmin(sender): return
        if cmd == 'reload':
            self.cReload()
            await self.say(channel, 'Reloaded!')
        if cmd == 'stop':
            await self.say(channel, 'Shutting down.')
            self.client.logout()
        if cmd == 'channels':
            # Dump every text channel of every server, noting which command
            # groups (if any) each channel belongs to.
            await self.say(channel, 'Channel list:')
            for s in self.client.servers:
                await self.say(channel, 'Server: {}\n'.format(s.name))
                for c in s.channels:
                    if str(c.type) == 'text':
                        r = "-- {} (id: {})\n".format(
                            c.name, c.id
                        )
                        if c.id in self.commandGroups:
                            r += "---- In groups: {}\n".format(
                                ', '.join(self.commandGroups[c.id])
                            )
                        else:
                            r += "---- (Channel not monitored)\n"
                        await self.say(channel, r)
# --- Bot bootstrap: load config, connect, register event handlers -------
rfwbot = DiscordBot('config/rfwbot.conf')
client = rfwbot.connect();
@client.async_event
async def on_message(message):
    # Log every incoming message, then dispatch commands / table-flip fixes.
    print('\033[1;34m[\033[31m' + str(message.channel) + '\033[34m]\033[36m ' + str(message.author) + '\033[33m: \033[0m' + str(message.content))
    # say = input('--> ')
    if not rfwbot.isIgnored(message.author):
        if message.content.startswith(rfwbot.commandString):
            command = message.content[len(rfwbot.commandString):]
            print('\033[1;34m[\033[31m' + str(message.channel) + '\033[34m]\033[31m Command Detected\033[033m: \033[0;33m' + command + '\033[0m')
            if command.startswith(rfwbot.commandString):
                # System commands start with !!
                command = command[len(rfwbot.commandString):]
                await rfwbot.handleSystemCommand(message.channel, command, message.author)
            else:
                await rfwbot.handleCommand(message.channel, command, message.author)
        # Table Flip Correction
        # NOTE(review): several variants below render identically; presumably
        # they differ only in the whitespace codepoint -- verify before editing.
        elif '︵ ┻━┻' in message.content or \
            '︵ ┻━┻' in message.content or \
            '┻━┻ ︵' in message.content or \
            '┻━┻ ︵' in message.content or \
            '︵ ┻─┻' in message.content or \
            '︵ ┻─┻' in message.content or \
            '┻─┻ ︵' in message.content or \
            '┻─┻ ︵' in message.content or \
            '︵ ┴━┴' in message.content or \
            '︵ ┴━┴' in message.content or \
            '┴━┴ ︵' in message.content or \
            '┴━┴ ︵' in message.content or \
            '︵ ┴─┴' in message.content or \
            '︵ ┴─┴' in message.content or \
            '┴─┴ ︵' in message.content or \
            '┴─┴ ︵' in message.content:
            print('\033[1;34m[\033[31m' + str(message.channel) + '\033[34m]\033[31m Table Flip Detected\033[0m')
            msg = ''
            # One un-flip emoticon per flipped table found in the message.
            for x in range(0, sum(message.content.count(y) for y in ('︵ ┻━┻', '︵ ┻━┻', '┻━┻ ︵', '┻━┻ ︵', '︵ ┻─┻', '︵ ┻─┻', '┻─┻ ︵', '┻─┻ ︵', '︵ ┴━┴', '︵ ┴━┴', '┴━┴ ︵', '┴━┴ ︵', '︵ ┴─┴', '︵ ┴─┴', '┴─┴ ︵', '┴─┴ ︵'))):
                if x != 0:
                    msg += ' '
                msg += '┳━┳ノ(°-°ノ)'
            await rfwbot.say(message.channel, msg)
@client.async_event
def on_ready():
    # Called once the websocket session is established.
    rfwbot.handleLogin(client.user)
client.run(rfwbot.config['authentication']['password'])
| [
"random.choice",
"configparser.ConfigParser",
"re.match",
"requests.get",
"time.sleep",
"discord.Client",
"random.randint"
] | [((712, 739), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (737, 739), False, 'import discord, configparser, random, re, requests, random\n'), ((3099, 3115), 'discord.Client', 'discord.Client', ([], {}), '()\n', (3113, 3115), False, 'import discord, configparser, random, re, requests, random\n'), ((3805, 3813), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3810, 3813), False, 'from time import sleep\n'), ((1350, 1384), 're.match', 're.match', (['"""\\\\[\\\\[(.*)\\\\]\\\\]"""', 'line'], {}), "('\\\\[\\\\[(.*)\\\\]\\\\]', line)\n", (1358, 1384), False, 'import discord, configparser, random, re, requests, random\n'), ((8451, 8494), 'requests.get', 'requests.get', (['"""http://xkcd.com/info.0.json"""'], {}), "('http://xkcd.com/info.0.json')\n", (8463, 8494), False, 'import discord, configparser, random, re, requests, random\n'), ((8627, 8652), 'random.randint', 'random.randint', (['(1)', 'latest'], {}), '(1, latest)\n', (8641, 8652), False, 'import discord, configparser, random, re, requests, random\n'), ((5167, 5203), 'random.choice', 'random.choice', (['self.commands[g][cmd]'], {}), '(self.commands[g][cmd])\n', (5180, 5203), False, 'import discord, configparser, random, re, requests, random\n'), ((7875, 7899), 'random.randint', 'random.randint', (['(1)', 'sides'], {}), '(1, sides)\n', (7889, 7899), False, 'import discord, configparser, random, re, requests, random\n'), ((5304, 5347), 'random.choice', 'random.choice', (["self.commands[g][cmd + ' *']"], {}), "(self.commands[g][cmd + ' *'])\n", (5317, 5347), False, 'import discord, configparser, random, re, requests, random\n')] |
from bs4 import BeautifulSoup as bs
import threading
import time
import numpy as np
import sys
from io import StringIO
import scrapeconfig as cng
import consoleconfig as ccng
import os
def print_html(html_test):
    '''Pretty-print a BeautifulSoup node; fall back to plain str() for
    objects without a working prettify(). Returns the printed string.'''
    try:
        strhtml = str(html_test.prettify())
    except Exception:
        # Best-effort fallback; `except Exception` (instead of the original
        # bare `except:`) no longer swallows KeyboardInterrupt/SystemExit.
        strhtml = str(html_test)
    print(strhtml)
    return strhtml
def join_threads(threads: list, verbose: bool = False, blink_interval: int = cng.BLINK_INTERVAL):
    '''
    Join the given threading.Thread objects.

    With verbose=True, shows a live "Active threads: N" counter on stdout
    until only the main thread remains, refreshing every blink_interval
    seconds.
    '''
    if verbose:
        space = ' '
        backspace = '\b'
        basemsg = "Active threads: "
        basemsglen = len(basemsg)
        sys.stdout.write(basemsg)
        # Poll until only the main thread is left (count > 1 => workers alive).
        # threading.active_count() is the documented name; activeCount() is a
        # deprecated alias.
        while threading.active_count() > 1:
            countstring = str(threading.active_count()-1)
            countlen = len(countstring)
            sys.stdout.write(countstring)
            sys.stdout.flush()
            time.sleep(blink_interval)
            # Clears current number of threads from terminal and "resets" cursor
            sys.stdout.write(backspace*countlen + space*countlen + backspace*countlen)
            sys.stdout.flush()
            time.sleep(blink_interval)
        sys.stdout.write(f'\r{space*basemsglen}\r')
        sys.stdout.write('All threads done!')
    # Plain loop: joining for its side effect, not to build a list.
    for worker in threads:
        worker.join()
    return
def case_decorator(func):
    '''Decorator to enforce common behavior for cases: the screen is cleared
    before the case runs and execution pauses briefly after it returns.'''
    import functools

    # functools.wraps inherits __doc__, __name__, __module__, etc. in one go
    # (the original hand-copied only __doc__, losing the name in tracebacks).
    @functools.wraps(func)
    def wrapboi(*args, **kwargs):
        clear_screen()
        retobj = func(*args, **kwargs)
        time.sleep(ccng.CASE_EXIT_WAIT_TIME)
        return retobj
    return wrapboi
if __name__ == '__main__':
    def test_join_threads():
        '''Smoke-test join_threads using dummy worker threads'''
        def dummywaiter(maxwait: int=10):
            '''Dummy thread target: sleeps a random 1..maxwait-1 seconds
            (np.random.randint's upper bound is exclusive)'''
            time.sleep(np.random.randint(1, maxwait))
            return
        workers = [threading.Thread(target=dummywaiter) for i in range(500)]
        # Plain loop: starting threads for the side effect, not to build a list.
        for worker in workers:
            worker.start()
        join_threads(workers, verbose=True)
    test_join_threads()
"threading.activeCount",
"time.sleep",
"numpy.random.randint",
"threading.Thread",
"sys.stdout.flush",
"sys.stdout.write"
] | [((820, 845), 'sys.stdout.write', 'sys.stdout.write', (['basemsg'], {}), '(basemsg)\n', (836, 845), False, 'import sys\n'), ((1388, 1433), 'sys.stdout.write', 'sys.stdout.write', (["f'\\r{space * basemsglen}\\r'"], {}), "(f'\\r{space * basemsglen}\\r')\n", (1404, 1433), False, 'import sys\n'), ((1441, 1478), 'sys.stdout.write', 'sys.stdout.write', (['"""All threads done!"""'], {}), "('All threads done!')\n", (1457, 1478), False, 'import sys\n'), ((1732, 1768), 'time.sleep', 'time.sleep', (['ccng.CASE_EXIT_WAIT_TIME'], {}), '(ccng.CASE_EXIT_WAIT_TIME)\n', (1742, 1768), False, 'import time\n'), ((861, 884), 'threading.activeCount', 'threading.activeCount', ([], {}), '()\n', (882, 884), False, 'import threading\n'), ((1002, 1031), 'sys.stdout.write', 'sys.stdout.write', (['countstring'], {}), '(countstring)\n', (1018, 1031), False, 'import sys\n'), ((1045, 1063), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1061, 1063), False, 'import sys\n'), ((1079, 1105), 'time.sleep', 'time.sleep', (['blink_interval'], {}), '(blink_interval)\n', (1089, 1105), False, 'import time\n'), ((1216, 1301), 'sys.stdout.write', 'sys.stdout.write', (['(backspace * countlen + space * countlen + backspace * countlen)'], {}), '(backspace * countlen + space * countlen + backspace * countlen\n )\n', (1232, 1301), False, 'import sys\n'), ((1304, 1322), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1320, 1322), False, 'import sys\n'), ((1350, 1376), 'time.sleep', 'time.sleep', (['blink_interval'], {}), '(blink_interval)\n', (1360, 1376), False, 'import time\n'), ((2221, 2257), 'threading.Thread', 'threading.Thread', ([], {'target': 'dummywaiter'}), '(target=dummywaiter)\n', (2237, 2257), False, 'import threading\n'), ((2148, 2177), 'numpy.random.randint', 'np.random.randint', (['(1)', 'maxwait'], {}), '(1, maxwait)\n', (2165, 2177), True, 'import numpy as np\n'), ((921, 944), 'threading.activeCount', 'threading.activeCount', ([], {}), '()\n', (942, 944), False, 
'import threading\n')] |
from PIL import Image
import random

# Image dimensions in pixels.
width = 200
height = 200

# One random 8-bit RGB triple per pixel, row-major order as putdata expects
# (comprehension replaces the manual nested append loop; range(0, n, 1) is
# just range(n)).
newImage = [
    (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    for _ in range(width * height)
]

newIm = Image.new('RGB', (width, height))
newIm.putdata(newImage)
newIm.show()
| [
"PIL.Image.new",
"random.randint"
] | [((237, 270), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {}), "('RGB', (width, height))\n", (246, 270), False, 'from PIL import Image\n'), ((156, 178), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (170, 178), False, 'import random\n'), ((180, 202), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (194, 202), False, 'import random\n'), ((204, 226), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (218, 226), False, 'import random\n')] |
from xgboost import XGBClassifier
class SimpleModel():
    """Thin wrapper around XGBClassifier with early stopping on a held-out
    validation set."""
    def __init__(self):
        # Default-configured XGBoost classifier; no hyperparameters tuned here.
        self.model = XGBClassifier()
    def fit(self, x, y, val_x, val_y):
        """Train on (x, y); stop early when validation logloss has not
        improved for 10 consecutive rounds."""
        eval_set = [(val_x,val_y)]
        self.model.fit(x, y, early_stopping_rounds=10, eval_metric="logloss", eval_set=eval_set, verbose=True)
    def predict(self, x):
        """Return class predictions for feature matrix x."""
        return self.model.predict(x)
| [
"xgboost.XGBClassifier"
] | [((110, 125), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (123, 125), False, 'from xgboost import XGBClassifier\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
=================================================
@Project :span-aste
@IDE :PyCharm
@Author :<NAME>
@Date :2022/1/19 9:10
@Desc :
==================================================
"""
import argparse
import torch
from torch.utils.data import DataLoader
from models.collate import collate_fn
from models.tokenizers.tokenizer import BasicTokenizer
from models.embedding.word2vector import GloveWord2Vector
from models.model import SpanAsteModel
from trainer import SpanAsteTrainer
from utils.dataset import CustomDataset
from utils.tager import SpanLabel, RelationLabel
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"using device:{device}")
# Fix seeds for reproducible evaluation (cuda seeding is a no-op on CPU).
SEED = 1024
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", required=True, type=str, default="output/checkpoint.pkl",
                    help="the model of span-aste output")
parser.add_argument("-w", "--glove_word2vector", required=True, type=str, default="vector_cache/42B_w2v.txt",
                    help="the glove word2vector file path")
parser.add_argument("-d", "--dataset", required=True, type=str, default="data/ASTE-Data-V2-EMNLP2020/15res/",
                    help="the dataset for test")
parser.add_argument("-b", "--batch_size", type=int, default=8, help="number of batch_size")
parser.add_argument("--lstm_hidden", type=int, default=300, help="hidden size of BiLstm model")
parser.add_argument("--lstm_layers", type=int, default=1, help="number of BiLstm layers")
args = parser.parse_args()
print("Loading GloVe word2vector...", args.glove_word2vector)
tokenizer = BasicTokenizer()
glove_w2v = GloveWord2Vector(args.glove_word2vector)
print("Loading test Dataset...", args.dataset)
# Load dataset
test_dataset = CustomDataset(
    args.dataset + "test_triplets.txt",
    tokenizer, glove_w2v
)
print("Construct Dataloader...")
# Batch size comes from the CLI only; the old hard-coded `batch_size = 16`
# near the top was dead code (overwritten here before any use) and removed.
batch_size = args.batch_size
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
print("Building SPAN-ASTE model...")
# get dimension of target and relation
target_dim, relation_dim = len(SpanLabel), len(RelationLabel)
# get the dimension of glove vector
input_dim = glove_w2v.glove_model.vector_size
# build span-aste model
model = SpanAsteModel(
    input_dim,
    target_dim,
    relation_dim,
    lstm_layer=args.lstm_layers,
    lstm_hidden_dim=args.lstm_hidden
)
model.to(device)
# SpanAsteTrainer's constructor requires an optimizer even though this
# script only evaluates (trainer.test below).
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
print("Creating SPAN-ASTE Trainer...")
trainer = SpanAsteTrainer(model, optimizer, device)
print("Loading model state from output...", args.model)
model.load_state_dict(torch.load(args.model)["model_state_dict"])
trainer.test(test_dataloader)
| [
"torch.manual_seed",
"utils.dataset.CustomDataset",
"models.model.SpanAsteModel",
"argparse.ArgumentParser",
"torch.load",
"torch.cuda.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"models.tokenizers.tokenizer.BasicTokenizer",
"trainer.SpanAsteTrainer",
"models.embeddin... | [((751, 774), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (768, 774), False, 'import torch\n'), ((775, 803), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (797, 803), False, 'import torch\n'), ((814, 839), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (837, 839), False, 'import argparse\n'), ((1703, 1719), 'models.tokenizers.tokenizer.BasicTokenizer', 'BasicTokenizer', ([], {}), '()\n', (1717, 1719), False, 'from models.tokenizers.tokenizer import BasicTokenizer\n'), ((1732, 1772), 'models.embedding.word2vector.GloveWord2Vector', 'GloveWord2Vector', (['args.glove_word2vector'], {}), '(args.glove_word2vector)\n', (1748, 1772), False, 'from models.embedding.word2vector import GloveWord2Vector\n'), ((1851, 1922), 'utils.dataset.CustomDataset', 'CustomDataset', (["(args.dataset + 'test_triplets.txt')", 'tokenizer', 'glove_w2v'], {}), "(args.dataset + 'test_triplets.txt', tokenizer, glove_w2v)\n", (1864, 1922), False, 'from utils.dataset import CustomDataset\n'), ((2014, 2103), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), '(test_dataset, batch_size=batch_size, shuffle=True, collate_fn=\n collate_fn)\n', (2024, 2103), False, 'from torch.utils.data import DataLoader\n'), ((2352, 2470), 'models.model.SpanAsteModel', 'SpanAsteModel', (['input_dim', 'target_dim', 'relation_dim'], {'lstm_layer': 'args.lstm_layers', 'lstm_hidden_dim': 'args.lstm_hidden'}), '(input_dim, target_dim, relation_dim, lstm_layer=args.\n lstm_layers, lstm_hidden_dim=args.lstm_hidden)\n', (2365, 2470), False, 'from models.model import SpanAsteModel\n'), ((2614, 2655), 'trainer.SpanAsteTrainer', 'SpanAsteTrainer', (['model', 'optimizer', 'device'], {}), '(model, optimizer, device)\n', (2629, 2655), False, 'from trainer import SpanAsteTrainer\n'), ((653, 678), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (676, 678), False, 'import torch\n'), ((2735, 2757), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (2745, 2757), False, 'import torch\n')] |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
from django.contrib.gis.db import models
class AlertaHexgis(models.Model):
    """Deforestation-alert polygon mapped onto the existing database table
    'alerta_hexgis' (inspectdb-generated; managed = False, so Django never
    creates or migrates this table)."""
    # NOTE(review): max_length=-1 is an inspectdb artifact for an unbounded
    # varchar -- confirm before using this model for writes.
    id = models.CharField(primary_key=True, max_length=-1)
    # Alert geometry, SRID 4674.
    geom = models.MultiPolygonField(srid=4674, blank=True, null=True)
    img_n_ex = models.CharField(max_length=40, blank=True, null=True)
    img_n_data = models.DateField(blank=True, null=True)
    img_ex = models.CharField(max_length=40, blank=True, null=True)
    img_data = models.DateField(blank=True, null=True)
    area_ha = models.FloatField(blank=True, null=True)
    area_km2 = models.FloatField(blank=True, null=True)
    intervalo = models.IntegerField(blank=True, null=True)
    municipio = models.CharField(max_length=40, blank=True, null=True)
    estado = models.CharField(max_length=20, blank=True, null=True)
    id_des = models.CharField(max_length=20, blank=True, null=True)
    id_assoc = models.CharField(max_length=20, blank=True, null=True)
    estagio = models.CharField(max_length=24, blank=True, null=True)
    lat = models.FloatField(blank=True, null=True)
    long = models.FloatField(blank=True, null=True)
    data_ana = models.DateField(blank=True, null=True)
    objects = models.GeoManager()
    class Meta:
        managed = False
        db_table = 'alerta_hexgis'
    def dia(self):
        """Day component of img_data."""
        return self.img_data.day
    def mes(self):
        """Month component of img_data."""
        return self.img_data.month
    def ano(self):
        """Year component of img_data."""
        return self.img_data.year
    def total(self):
        """Alert area in hectares (area_ha)."""
        return self.area_ha
    def interval(self):
        """Days elapsed between img_n_data and img_data."""
        return (self.img_data - self.img_n_data).days
| [
"django.contrib.gis.db.models.CharField",
"django.contrib.gis.db.models.IntegerField",
"django.contrib.gis.db.models.GeoManager",
"django.contrib.gis.db.models.MultiPolygonField",
"django.contrib.gis.db.models.FloatField",
"django.contrib.gis.db.models.DateField"
] | [((658, 707), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(-1)'}), '(primary_key=True, max_length=-1)\n', (674, 707), False, 'from django.contrib.gis.db import models\n'), ((719, 777), 'django.contrib.gis.db.models.MultiPolygonField', 'models.MultiPolygonField', ([], {'srid': '(4674)', 'blank': '(True)', 'null': '(True)'}), '(srid=4674, blank=True, null=True)\n', (743, 777), False, 'from django.contrib.gis.db import models\n'), ((793, 847), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)', 'null': '(True)'}), '(max_length=40, blank=True, null=True)\n', (809, 847), False, 'from django.contrib.gis.db import models\n'), ((865, 904), 'django.contrib.gis.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (881, 904), False, 'from django.contrib.gis.db import models\n'), ((918, 972), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)', 'null': '(True)'}), '(max_length=40, blank=True, null=True)\n', (934, 972), False, 'from django.contrib.gis.db import models\n'), ((988, 1027), 'django.contrib.gis.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1004, 1027), False, 'from django.contrib.gis.db import models\n'), ((1042, 1082), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1059, 1082), False, 'from django.contrib.gis.db import models\n'), ((1098, 1138), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1115, 1138), False, 'from django.contrib.gis.db import models\n'), ((1155, 1197), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': 
'(True)'}), '(blank=True, null=True)\n', (1174, 1197), False, 'from django.contrib.gis.db import models\n'), ((1214, 1268), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)', 'null': '(True)'}), '(max_length=40, blank=True, null=True)\n', (1230, 1268), False, 'from django.contrib.gis.db import models\n'), ((1282, 1336), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1298, 1336), False, 'from django.contrib.gis.db import models\n'), ((1350, 1404), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1366, 1404), False, 'from django.contrib.gis.db import models\n'), ((1420, 1474), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'null': '(True)'}), '(max_length=20, blank=True, null=True)\n', (1436, 1474), False, 'from django.contrib.gis.db import models\n'), ((1489, 1543), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(24)', 'blank': '(True)', 'null': '(True)'}), '(max_length=24, blank=True, null=True)\n', (1505, 1543), False, 'from django.contrib.gis.db import models\n'), ((1554, 1594), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1571, 1594), False, 'from django.contrib.gis.db import models\n'), ((1606, 1646), 'django.contrib.gis.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1623, 1646), False, 'from django.contrib.gis.db import models\n'), ((1662, 1701), 'django.contrib.gis.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1678, 1701), 
False, 'from django.contrib.gis.db import models\n'), ((1716, 1735), 'django.contrib.gis.db.models.GeoManager', 'models.GeoManager', ([], {}), '()\n', (1733, 1735), False, 'from django.contrib.gis.db import models\n')] |
# Copyright 2018 Deep Air. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser to generate flat json file"""
import json
import os
from os.path import basename
def _mangle(s):
return s.strip()[1:-1]
def _keyNameFix(dictionary, filename):
'''
Rename all the first level keys in the dictionary to
filename_dictionary.
inputs:
dictionary: a python dict()
filename: basepath of a file (string)
'''
changed = []
for key in dictionary.keys():
if (key not in changed) and (filename not in key):
# 'os.path.splitext(filename)[0]' will remove extension from name
new_key = os.path.splitext(filename)[0] + '_' + key
changed.append(new_key)
dictionary[new_key] = dictionary.pop(key)
def _generateTempFile(infile):
    '''
    Write a copy of the JSON file *infile* to ./temp with its top-level
    keys renamed via _keyNameFix.
    inputs:
        infile: file name (string)
    '''
    with open(infile) as src:
        payload = json.load(src)
    _keyNameFix(payload, basename(infile))
    with open('temp', 'w') as dst:
        json.dump(payload, dst, indent=2)
def flat_json(output_filename, input_filenames):
    '''
    Merge several JSON files into one JSON object written to
    *output_filename*.
    output_filename: file name for output file (string)
    input_filenames: list of names to merge (list)
    '''
    with open(output_filename, "w") as merged:
        first = True
        for name in input_filenames:
            # Round-trip each input through ./temp with its keys prefixed.
            _generateTempFile(name)
            with open('temp') as fixed:
                # '{' opens the object before the first file; ',' separates
                # the rest.  Each file contributes its brace-stripped body.
                merged.write('{' if first else ',')
                first = False
                merged.write(_mangle(fixed.read()))
            os.remove('temp')
        merged.write('}')
| [
"os.path.splitext",
"os.path.basename",
"json.load",
"json.dump",
"os.remove"
] | [((1537, 1549), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1546, 1549), False, 'import json\n'), ((1573, 1589), 'os.path.basename', 'basename', (['infile'], {}), '(infile)\n', (1581, 1589), False, 'from os.path import basename\n'), ((1632, 1660), 'json.dump', 'json.dump', (['data', 'f'], {'indent': '(2)'}), '(data, f, indent=2)\n', (1641, 1660), False, 'import json\n'), ((2328, 2345), 'os.remove', 'os.remove', (['"""temp"""'], {}), "('temp')\n", (2337, 2345), False, 'import os\n'), ((1196, 1222), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1212, 1222), False, 'import os\n')] |
# Generated by Django 3.1.1 on 2020-09-25 11:37
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many 'dislikes' field (to the user model) on Post."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0002_post_likes'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='dislikes',
            field=models.ManyToManyField(related_name='post_dislikes', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField"
] | [((194, 251), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (225, 251), False, 'from django.db import migrations, models\n'), ((424, 510), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""post_dislikes"""', 'to': 'settings.AUTH_USER_MODEL'}), "(related_name='post_dislikes', to=settings.\n AUTH_USER_MODEL)\n", (446, 510), False, 'from django.db import migrations, models\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk')
palette = sns.color_palette()
# Mean inter-arrival time (beta) and the corresponding rate lambda = 1/beta.
beta = 1
landa = 1./beta
reps = 25
# Poisson process: i.i.d. exponential gaps accumulated into arrival times.
pois = np.random.exponential(beta, reps)
pois = pois.cumsum()
# Pre-scheduled random arrivals: slot k*beta plus exponential jitter, minus
# landa, then sorted into arrival order.
# NOTE(review): with beta = 1, landa == beta, so it is ambiguous here whether
# the offset is meant to be the rate or the jitter's mean -- verify.
psra = np.arange(reps)*beta + np.random.exponential(beta, reps) - landa
psra.sort()
f, ax = plt.subplots(1, 2, sharex=True, figsize=(24, 10))
# Cumulative arrival count after each event (1..reps).
yy = np.arange(reps) + 1
for x, y in zip(pois, yy):
    # Dashed drop-line from the time axis up to the step height.
    ax[0].plot([x, x], [0, y], c=palette[0], ls='--', lw=2)
ax[0].step(pois, yy, lw=5)
ax[0].scatter(pois, np.zeros(reps))
ax[0].set_title(r'Poisson arrivals, $\lambda$ = {:.1f}'.format(landa))
ax[0].set_xlabel('time')
ax[0].set_ylabel('count')
for x, y in zip(psra, yy):
    ax[1].plot([x, x], [0, y], c=palette[0], ls='--', lw=2)
ax[1].step(psra, yy, lw=5)
ax[1].scatter(psra, np.zeros(reps))
title = r'Pre-scheduled random arrivals, $\sigma$ = {:.1f}'.format(landa)
ax[1].set_title(title)
ax[1].set_xlabel('time')
plt.savefig('pois_psra.png')
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"numpy.random.exponential",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((74, 97), 'seaborn.set', 'sns.set', ([], {'context': '"""talk"""'}), "(context='talk')\n", (81, 97), True, 'import seaborn as sns\n'), ((108, 127), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (125, 127), True, 'import seaborn as sns\n'), ((172, 205), 'numpy.random.exponential', 'np.random.exponential', (['beta', 'reps'], {}), '(beta, reps)\n', (193, 205), True, 'import numpy as np\n'), ((321, 370), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'figsize': '(24, 10)'}), '(1, 2, sharex=True, figsize=(24, 10))\n', (333, 370), True, 'import matplotlib.pyplot as plt\n'), ((944, 972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pois_psra.png"""'], {}), "('pois_psra.png')\n", (955, 972), True, 'import matplotlib.pyplot as plt\n'), ((377, 392), 'numpy.arange', 'np.arange', (['reps'], {}), '(reps)\n', (386, 392), True, 'import numpy as np\n'), ((532, 546), 'numpy.zeros', 'np.zeros', (['reps'], {}), '(reps)\n', (540, 546), True, 'import numpy as np\n'), ((805, 819), 'numpy.zeros', 'np.zeros', (['reps'], {}), '(reps)\n', (813, 819), True, 'import numpy as np\n'), ((258, 291), 'numpy.random.exponential', 'np.random.exponential', (['beta', 'reps'], {}), '(beta, reps)\n', (279, 291), True, 'import numpy as np\n'), ((235, 250), 'numpy.arange', 'np.arange', (['reps'], {}), '(reps)\n', (244, 250), True, 'import numpy as np\n')] |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# Locate and import the compiled SWIG extension module ``_modulated``.
# The lookup strategy depends on the interpreter version: >= 2.7 uses
# importlib, 2.6 uses the deprecated ``imp`` module, and anything older
# falls back to a plain import.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Try the extension as a submodule of this package first, then as
        # a top-level ``_modulated`` module.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_modulated')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_modulated')
    _modulated = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Search next to this file for the compiled extension.
            fp, pathname, description = imp.find_module('_modulated', [dirname(__file__)])
        except ImportError:
            import _modulated
            return _modulated
        try:
            _mod = imp.load_module('_modulated', fp, pathname, description)
        finally:
            # imp.find_module may return an open file handle; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _modulated = swig_import_helper()
    del swig_import_helper
else:
    import _modulated
del _swig_python_version_info
# 'property' exists from Python 2.2 onward; keep a module-local alias so
# the generated proxy classes can reference it uniformly.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
# Alias the builtins module under its Python 2 name; referenced throughout
# the generated classes as ``__builtin__.Exception``.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Probe for new-style class support (Python >= 2.2).  ``_newclass`` is
# consulted by _swig_setattr_nondynamic above; ``_object`` is the base
# class used by the non-stream design classes below.
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
import btk20
from btk20 import stream
# Legacy import snippet kept by the SWIG template; unused at runtime.
oldimport = """
import btk20.stream
"""
class NormalFFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
    """SWIG proxy for the C++ ``NormalFFTAnalysisBank`` complex feature stream.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, NormalFFTAnalysisBankPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, NormalFFTAnalysisBankPtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_NormalFFTAnalysisBankPtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.NormalFFTAnalysisBankPtr___iter__(self)
    def __deref__(self):
        return _modulated.NormalFFTAnalysisBankPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_NormalFFTAnalysisBankPtr
    __del__ = lambda self: None
    def next(self, frameX=-5):
        return _modulated.NormalFFTAnalysisBankPtr_next(self, frameX)
    def reset(self):
        return _modulated.NormalFFTAnalysisBankPtr_reset(self)
    def fftlen(self):
        return _modulated.NormalFFTAnalysisBankPtr_fftlen(self)
    def name(self):
        return _modulated.NormalFFTAnalysisBankPtr_name(self)
    def size(self):
        return _modulated.NormalFFTAnalysisBankPtr_size(self)
    def current(self):
        return _modulated.NormalFFTAnalysisBankPtr_current(self)
NormalFFTAnalysisBankPtr_swigregister = _modulated.NormalFFTAnalysisBankPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
NormalFFTAnalysisBankPtr_swigregister(NormalFFTAnalysisBankPtr)
class OverSampledDFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
    """SWIG proxy for the C++ ``OverSampledDFTAnalysisBank`` filter bank.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, OverSampledDFTAnalysisBankPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, OverSampledDFTAnalysisBankPtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_OverSampledDFTAnalysisBankPtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.OverSampledDFTAnalysisBankPtr___iter__(self)
    def __deref__(self):
        return _modulated.OverSampledDFTAnalysisBankPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_OverSampledDFTAnalysisBankPtr
    __del__ = lambda self: None
    def polyphase(self, m, n):
        return _modulated.OverSampledDFTAnalysisBankPtr_polyphase(self, m, n)
    def next(self, frameX=-5):
        return _modulated.OverSampledDFTAnalysisBankPtr_next(self, frameX)
    def reset(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_reset(self)
    def is_end(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_is_end(self)
    def fftlen(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_fftlen(self)
    def shiftlen(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_shiftlen(self)
    def frame_no(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_frame_no(self)
    def name(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_name(self)
    def size(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_size(self)
    def current(self):
        return _modulated.OverSampledDFTAnalysisBankPtr_current(self)
OverSampledDFTAnalysisBankPtr_swigregister = _modulated.OverSampledDFTAnalysisBankPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
OverSampledDFTAnalysisBankPtr_swigregister(OverSampledDFTAnalysisBankPtr)
class OverSampledDFTSynthesisBankPtr(btk20.stream.VectorFloatFeatureStreamPtr):
    """SWIG proxy for the C++ ``OverSampledDFTSynthesisBank`` filter bank.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, OverSampledDFTSynthesisBankPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, OverSampledDFTSynthesisBankPtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_OverSampledDFTSynthesisBankPtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.OverSampledDFTSynthesisBankPtr___iter__(self)
    def __deref__(self):
        return _modulated.OverSampledDFTSynthesisBankPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_OverSampledDFTSynthesisBankPtr
    __del__ = lambda self: None
    def polyphase(self, m, n):
        return _modulated.OverSampledDFTSynthesisBankPtr_polyphase(self, m, n)
    def next(self, frameX=-5):
        return _modulated.OverSampledDFTSynthesisBankPtr_next(self, frameX)
    def reset(self):
        return _modulated.OverSampledDFTSynthesisBankPtr_reset(self)
    def input_source_vector(self, block):
        return _modulated.OverSampledDFTSynthesisBankPtr_input_source_vector(self, block)
    def no_stream_feature(self, flag=True):
        return _modulated.OverSampledDFTSynthesisBankPtr_no_stream_feature(self, flag)
    def name(self):
        return _modulated.OverSampledDFTSynthesisBankPtr_name(self)
    def size(self):
        return _modulated.OverSampledDFTSynthesisBankPtr_size(self)
    def is_end(self):
        return _modulated.OverSampledDFTSynthesisBankPtr_is_end(self)
    def current(self):
        return _modulated.OverSampledDFTSynthesisBankPtr_current(self)
OverSampledDFTSynthesisBankPtr_swigregister = _modulated.OverSampledDFTSynthesisBankPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
OverSampledDFTSynthesisBankPtr_swigregister(OverSampledDFTSynthesisBankPtr)
class PerfectReconstructionFFTAnalysisBankPtr(btk20.stream.VectorComplexFeatureStreamPtr):
    """SWIG proxy for the C++ ``PerfectReconstructionFFTAnalysisBank``.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, PerfectReconstructionFFTAnalysisBankPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, PerfectReconstructionFFTAnalysisBankPtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_PerfectReconstructionFFTAnalysisBankPtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr___iter__(self)
    def __deref__(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_PerfectReconstructionFFTAnalysisBankPtr
    __del__ = lambda self: None
    def polyphase(self, m, n):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_polyphase(self, m, n)
    def next(self, frameX=-5):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_next(self, frameX)
    def reset(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_reset(self)
    def name(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_name(self)
    def size(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_size(self)
    def current(self):
        return _modulated.PerfectReconstructionFFTAnalysisBankPtr_current(self)
PerfectReconstructionFFTAnalysisBankPtr_swigregister = _modulated.PerfectReconstructionFFTAnalysisBankPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
PerfectReconstructionFFTAnalysisBankPtr_swigregister(PerfectReconstructionFFTAnalysisBankPtr)
class PerfectReconstructionFFTSynthesisBankPtr(btk20.stream.VectorFloatFeatureStreamPtr):
    """SWIG proxy for the C++ ``PerfectReconstructionFFTSynthesisBank``.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, PerfectReconstructionFFTSynthesisBankPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorFloatFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, PerfectReconstructionFFTSynthesisBankPtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_PerfectReconstructionFFTSynthesisBankPtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr___iter__(self)
    def __deref__(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_PerfectReconstructionFFTSynthesisBankPtr
    __del__ = lambda self: None
    def next(self, frameX=-5):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_next(self, frameX)
    def reset(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_reset(self)
    def polyphase(self, m, n):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_polyphase(self, m, n)
    def name(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_name(self)
    def size(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_size(self)
    def is_end(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_is_end(self)
    def current(self):
        return _modulated.PerfectReconstructionFFTSynthesisBankPtr_current(self)
PerfectReconstructionFFTSynthesisBankPtr_swigregister = _modulated.PerfectReconstructionFFTSynthesisBankPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
PerfectReconstructionFFTSynthesisBankPtr_swigregister(PerfectReconstructionFFTSynthesisBankPtr)
class DelayFeaturePtr(btk20.stream.VectorComplexFeatureStreamPtr):
    """SWIG proxy for the C++ ``DelayFeature`` complex feature stream.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, DelayFeaturePtr, name, value)
    __swig_getmethods__ = {}
    for _s in [btk20.stream.VectorComplexFeatureStreamPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, DelayFeaturePtr, name)
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Construct the underlying C++ object and attach its pointer."""
        this = _modulated.new_DelayFeaturePtr(*args, **kwargs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __iter__(self):
        return _modulated.DelayFeaturePtr___iter__(self)
    def __deref__(self):
        return _modulated.DelayFeaturePtr___deref__(self)
    __swig_destroy__ = _modulated.delete_DelayFeaturePtr
    __del__ = lambda self: None
    def set_time_delay(self, time_delay):
        return _modulated.DelayFeaturePtr_set_time_delay(self, time_delay)
    def next(self, frameX=-5):
        return _modulated.DelayFeaturePtr_next(self, frameX)
    def reset(self):
        return _modulated.DelayFeaturePtr_reset(self)
    def name(self):
        return _modulated.DelayFeaturePtr_name(self)
    def size(self):
        return _modulated.DelayFeaturePtr_size(self)
    def current(self):
        return _modulated.DelayFeaturePtr_current(self)
DelayFeaturePtr_swigregister = _modulated.DelayFeaturePtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
DelayFeaturePtr_swigregister(DelayFeaturePtr)
class CosineModulatedPrototypeDesign(_object):
    """SWIG proxy for the C++ cosine-modulated prototype filter designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, CosineModulatedPrototypeDesign, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, CosineModulatedPrototypeDesign, name)
    __repr__ = _swig_repr
    def __init__(self, M=256, N=3072, fs=1.0):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_CosineModulatedPrototypeDesign(M, N, fs)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _modulated.delete_CosineModulatedPrototypeDesign
    __del__ = lambda self: None
    def fcn(self, x, f):
        return _modulated.CosineModulatedPrototypeDesign_fcn(self, x, f)
    def grad(self, x, g):
        return _modulated.CosineModulatedPrototypeDesign_grad(self, x, g)
    def M(self):
        return _modulated.CosineModulatedPrototypeDesign_M(self)
    def N(self):
        return _modulated.CosineModulatedPrototypeDesign_N(self)
    def m(self):
        return _modulated.CosineModulatedPrototypeDesign_m(self)
    def J(self):
        return _modulated.CosineModulatedPrototypeDesign_J(self)
    def proto(self):
        return _modulated.CosineModulatedPrototypeDesign_proto(self)
CosineModulatedPrototypeDesign_swigregister = _modulated.CosineModulatedPrototypeDesign_swigregister
# Register the proxy with the extension so C++ objects map to this class.
CosineModulatedPrototypeDesign_swigregister(CosineModulatedPrototypeDesign)
def design_f(v, params):
    """Python wrapper; immediately replaced by the raw C entry point below."""
    return _modulated.design_f(v, params)
# SWIG pattern: rebind the name directly to the extension function.
design_f = _modulated.design_f
def design_df(v, params, df):
    """Python wrapper; immediately replaced by the raw C entry point below."""
    return _modulated.design_df(v, params, df)
# SWIG pattern: rebind the name directly to the extension function.
design_df = _modulated.design_df
def design_fdf(v, params, f, df):
    """Python wrapper; immediately replaced by the raw C entry point below."""
    return _modulated.design_fdf(v, params, f, df)
# SWIG pattern: rebind the name directly to the extension function.
design_fdf = _modulated.design_fdf
def write_gsl_format(fileName, prototype):
    """Python wrapper; immediately replaced by the raw C entry point below."""
    return _modulated.write_gsl_format(fileName, prototype)
# SWIG pattern: rebind the name directly to the extension function.
write_gsl_format = _modulated.write_gsl_format
class AnalysisOversampledDFTDesignPtr(_object):
    """SWIG proxy for the C++ oversampled-DFT analysis prototype designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AnalysisOversampledDFTDesignPtr, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AnalysisOversampledDFTDesignPtr, name)
    __repr__ = _swig_repr
    def __init__(self, M=256, m=4, r=1, wp=1.0, tau_h=-1):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_AnalysisOversampledDFTDesignPtr(M, m, r, wp, tau_h)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __deref__(self):
        return _modulated.AnalysisOversampledDFTDesignPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_AnalysisOversampledDFTDesignPtr
    __del__ = lambda self: None
    def design(self, tolerance=2.2204e-16):
        return _modulated.AnalysisOversampledDFTDesignPtr_design(self, tolerance)
    def save(self, fileName):
        return _modulated.AnalysisOversampledDFTDesignPtr_save(self, fileName)
    def calcError(self, doPrint=True):
        return _modulated.AnalysisOversampledDFTDesignPtr_calcError(self, doPrint)
AnalysisOversampledDFTDesignPtr_swigregister = _modulated.AnalysisOversampledDFTDesignPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
AnalysisOversampledDFTDesignPtr_swigregister(AnalysisOversampledDFTDesignPtr)
class SynthesisOversampledDFTDesignPtr(_object):
    """SWIG proxy for the C++ oversampled-DFT synthesis prototype designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SynthesisOversampledDFTDesignPtr, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SynthesisOversampledDFTDesignPtr, name)
    __repr__ = _swig_repr
    def __init__(self, h, M=256, m=4, r=1, v=1.0, wp=1.0, tau_T=-1):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_SynthesisOversampledDFTDesignPtr(h, M, m, r, v, wp, tau_T)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __deref__(self):
        return _modulated.SynthesisOversampledDFTDesignPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_SynthesisOversampledDFTDesignPtr
    __del__ = lambda self: None
    def design(self, tolerance=2.2204e-16):
        return _modulated.SynthesisOversampledDFTDesignPtr_design(self, tolerance)
    def save(self, fileName):
        return _modulated.SynthesisOversampledDFTDesignPtr_save(self, fileName)
    def calcError(self, doPrint=True):
        return _modulated.SynthesisOversampledDFTDesignPtr_calcError(self, doPrint)
SynthesisOversampledDFTDesignPtr_swigregister = _modulated.SynthesisOversampledDFTDesignPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
SynthesisOversampledDFTDesignPtr_swigregister(SynthesisOversampledDFTDesignPtr)
class AnalysisNyquistMDesignPtr(AnalysisOversampledDFTDesignPtr):
    """SWIG proxy for the C++ Nyquist(M) analysis prototype designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [AnalysisOversampledDFTDesignPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, AnalysisNyquistMDesignPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [AnalysisOversampledDFTDesignPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, AnalysisNyquistMDesignPtr, name)
    __repr__ = _swig_repr
    def __init__(self, M=512, m=2, r=1, wp=1.0):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_AnalysisNyquistMDesignPtr(M, m, r, wp)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __deref__(self):
        return _modulated.AnalysisNyquistMDesignPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_AnalysisNyquistMDesignPtr
    __del__ = lambda self: None
    def design(self, tolerance=2.2204e-16):
        return _modulated.AnalysisNyquistMDesignPtr_design(self, tolerance)
    def save(self, fileName):
        return _modulated.AnalysisNyquistMDesignPtr_save(self, fileName)
    def calcError(self, doPrint=True):
        return _modulated.AnalysisNyquistMDesignPtr_calcError(self, doPrint)
AnalysisNyquistMDesignPtr_swigregister = _modulated.AnalysisNyquistMDesignPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
AnalysisNyquistMDesignPtr_swigregister(AnalysisNyquistMDesignPtr)
class SynthesisNyquistMDesignPtr(SynthesisOversampledDFTDesignPtr):
    """SWIG proxy for the C++ Nyquist(M) synthesis prototype designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [SynthesisOversampledDFTDesignPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SynthesisNyquistMDesignPtr, name, value)
    __swig_getmethods__ = {}
    for _s in [SynthesisOversampledDFTDesignPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SynthesisNyquistMDesignPtr, name)
    __repr__ = _swig_repr
    def __init__(self, h, M=512, m=2, r=1, wp=1.0):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_SynthesisNyquistMDesignPtr(h, M, m, r, wp)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __deref__(self):
        return _modulated.SynthesisNyquistMDesignPtr___deref__(self)
    __swig_destroy__ = _modulated.delete_SynthesisNyquistMDesignPtr
    __del__ = lambda self: None
    def design(self, tolerance=2.2204e-16):
        return _modulated.SynthesisNyquistMDesignPtr_design(self, tolerance)
    def save(self, fileName):
        return _modulated.SynthesisNyquistMDesignPtr_save(self, fileName)
    def calcError(self, doPrint=True):
        return _modulated.SynthesisNyquistMDesignPtr_calcError(self, doPrint)
SynthesisNyquistMDesignPtr_swigregister = _modulated.SynthesisNyquistMDesignPtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
SynthesisNyquistMDesignPtr_swigregister(SynthesisNyquistMDesignPtr)
class SynthesisNyquistMDesignCompositeResponsePtr(SynthesisNyquistMDesignPtr):
    """SWIG proxy for the composite-response Nyquist(M) synthesis designer.

    All methods delegate to the compiled ``_modulated`` extension module.
    """
    __swig_setmethods__ = {}
    # Merge the SWIG setter/getter tables of the base proxy class.
    for _s in [SynthesisNyquistMDesignPtr]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SynthesisNyquistMDesignCompositeResponsePtr, name, value)
    __swig_getmethods__ = {}
    for _s in [SynthesisNyquistMDesignPtr]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SynthesisNyquistMDesignCompositeResponsePtr, name)
    __repr__ = _swig_repr
    def __init__(self, h, M=512, m=2, r=1, wp=1.0):
        """Construct the underlying C++ designer and attach its pointer."""
        this = _modulated.new_SynthesisNyquistMDesignCompositeResponsePtr(h, M, m, r, wp)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __deref__(self):
        return _modulated.SynthesisNyquistMDesignCompositeResponsePtr___deref__(self)
    __swig_destroy__ = _modulated.delete_SynthesisNyquistMDesignCompositeResponsePtr
    __del__ = lambda self: None
    def design(self, tolerance=2.2204e-16):
        return _modulated.SynthesisNyquistMDesignCompositeResponsePtr_design(self, tolerance)
    def save(self, fileName):
        return _modulated.SynthesisNyquistMDesignCompositeResponsePtr_save(self, fileName)
    def calcError(self, doPrint=True):
        return _modulated.SynthesisNyquistMDesignCompositeResponsePtr_calcError(self, doPrint)
SynthesisNyquistMDesignCompositeResponsePtr_swigregister = _modulated.SynthesisNyquistMDesignCompositeResponsePtr_swigregister
# Register the proxy with the extension so C++ objects map to this class.
SynthesisNyquistMDesignCompositeResponsePtr_swigregister(SynthesisNyquistMDesignCompositeResponsePtr)
def get_window(winType, winLen):
    """Python wrapper; immediately replaced by the raw C entry point below."""
    return _modulated.get_window(winType, winLen)
# SWIG pattern: rebind the name directly to the extension function.
get_window = _modulated.get_window
# This file is compatible with both classic and new-style classes.
| [
"_modulated.OverSampledDFTAnalysisBankPtr_name",
"_modulated.PerfectReconstructionFFTAnalysisBankPtr_current",
"_modulated.new_OverSampledDFTAnalysisBankPtr",
"_modulated.DelayFeaturePtr___deref__",
"_modulated.NormalFFTAnalysisBankPtr___iter__",
"_modulated.design_f",
"_modulated.PerfectReconstructionF... | [((16883, 16913), '_modulated.design_f', '_modulated.design_f', (['v', 'params'], {}), '(v, params)\n', (16902, 16913), False, 'import _modulated\n'), ((16987, 17022), '_modulated.design_df', '_modulated.design_df', (['v', 'params', 'df'], {}), '(v, params, df)\n', (17007, 17022), False, 'import _modulated\n'), ((17102, 17141), '_modulated.design_fdf', '_modulated.design_fdf', (['v', 'params', 'f', 'df'], {}), '(v, params, f, df)\n', (17123, 17141), False, 'import _modulated\n'), ((17232, 17280), '_modulated.write_gsl_format', '_modulated.write_gsl_format', (['fileName', 'prototype'], {}), '(fileName, prototype)\n', (17259, 17280), False, 'import _modulated\n'), ((24890, 24928), '_modulated.get_window', '_modulated.get_window', (['winType', 'winLen'], {}), '(winType, winLen)\n', (24911, 24928), False, 'import _modulated\n'), ((3657, 3713), '_modulated.new_NormalFFTAnalysisBankPtr', '_modulated.new_NormalFFTAnalysisBankPtr', (['*args'], {}), '(*args, **kwargs)\n', (3696, 3713), False, 'import _modulated\n'), ((3869, 3919), '_modulated.NormalFFTAnalysisBankPtr___iter__', '_modulated.NormalFFTAnalysisBankPtr___iter__', (['self'], {}), '(self)\n', (3913, 3919), False, 'import _modulated\n'), ((3961, 4012), '_modulated.NormalFFTAnalysisBankPtr___deref__', '_modulated.NormalFFTAnalysisBankPtr___deref__', (['self'], {}), '(self)\n', (4006, 4012), False, 'import _modulated\n'), ((4158, 4212), '_modulated.NormalFFTAnalysisBankPtr_next', '_modulated.NormalFFTAnalysisBankPtr_next', (['self', 'frameX'], {}), '(self, frameX)\n', (4198, 4212), False, 'import _modulated\n'), ((4250, 4297), '_modulated.NormalFFTAnalysisBankPtr_reset', '_modulated.NormalFFTAnalysisBankPtr_reset', (['self'], {}), '(self)\n', (4291, 4297), False, 'import _modulated\n'), ((4336, 4384), '_modulated.NormalFFTAnalysisBankPtr_fftlen', '_modulated.NormalFFTAnalysisBankPtr_fftlen', (['self'], {}), '(self)\n', (4378, 4384), False, 'import _modulated\n'), ((4421, 4467), 
'_modulated.NormalFFTAnalysisBankPtr_name', '_modulated.NormalFFTAnalysisBankPtr_name', (['self'], {}), '(self)\n', (4461, 4467), False, 'import _modulated\n'), ((4504, 4550), '_modulated.NormalFFTAnalysisBankPtr_size', '_modulated.NormalFFTAnalysisBankPtr_size', (['self'], {}), '(self)\n', (4544, 4550), False, 'import _modulated\n'), ((4590, 4639), '_modulated.NormalFFTAnalysisBankPtr_current', '_modulated.NormalFFTAnalysisBankPtr_current', (['self'], {}), '(self)\n', (4633, 4639), False, 'import _modulated\n'), ((5488, 5549), '_modulated.new_OverSampledDFTAnalysisBankPtr', '_modulated.new_OverSampledDFTAnalysisBankPtr', (['*args'], {}), '(*args, **kwargs)\n', (5532, 5549), False, 'import _modulated\n'), ((5705, 5760), '_modulated.OverSampledDFTAnalysisBankPtr___iter__', '_modulated.OverSampledDFTAnalysisBankPtr___iter__', (['self'], {}), '(self)\n', (5754, 5760), False, 'import _modulated\n'), ((5802, 5858), '_modulated.OverSampledDFTAnalysisBankPtr___deref__', '_modulated.OverSampledDFTAnalysisBankPtr___deref__', (['self'], {}), '(self)\n', (5852, 5858), False, 'import _modulated\n'), ((6009, 6071), '_modulated.OverSampledDFTAnalysisBankPtr_polyphase', '_modulated.OverSampledDFTAnalysisBankPtr_polyphase', (['self', 'm', 'n'], {}), '(self, m, n)\n', (6059, 6071), False, 'import _modulated\n'), ((6119, 6178), '_modulated.OverSampledDFTAnalysisBankPtr_next', '_modulated.OverSampledDFTAnalysisBankPtr_next', (['self', 'frameX'], {}), '(self, frameX)\n', (6164, 6178), False, 'import _modulated\n'), ((6216, 6268), '_modulated.OverSampledDFTAnalysisBankPtr_reset', '_modulated.OverSampledDFTAnalysisBankPtr_reset', (['self'], {}), '(self)\n', (6262, 6268), False, 'import _modulated\n'), ((6307, 6360), '_modulated.OverSampledDFTAnalysisBankPtr_is_end', '_modulated.OverSampledDFTAnalysisBankPtr_is_end', (['self'], {}), '(self)\n', (6354, 6360), False, 'import _modulated\n'), ((6399, 6452), '_modulated.OverSampledDFTAnalysisBankPtr_fftlen', 
'_modulated.OverSampledDFTAnalysisBankPtr_fftlen', (['self'], {}), '(self)\n', (6446, 6452), False, 'import _modulated\n'), ((6493, 6548), '_modulated.OverSampledDFTAnalysisBankPtr_shiftlen', '_modulated.OverSampledDFTAnalysisBankPtr_shiftlen', (['self'], {}), '(self)\n', (6542, 6548), False, 'import _modulated\n'), ((6589, 6644), '_modulated.OverSampledDFTAnalysisBankPtr_frame_no', '_modulated.OverSampledDFTAnalysisBankPtr_frame_no', (['self'], {}), '(self)\n', (6638, 6644), False, 'import _modulated\n'), ((6681, 6732), '_modulated.OverSampledDFTAnalysisBankPtr_name', '_modulated.OverSampledDFTAnalysisBankPtr_name', (['self'], {}), '(self)\n', (6726, 6732), False, 'import _modulated\n'), ((6769, 6820), '_modulated.OverSampledDFTAnalysisBankPtr_size', '_modulated.OverSampledDFTAnalysisBankPtr_size', (['self'], {}), '(self)\n', (6814, 6820), False, 'import _modulated\n'), ((6860, 6914), '_modulated.OverSampledDFTAnalysisBankPtr_current', '_modulated.OverSampledDFTAnalysisBankPtr_current', (['self'], {}), '(self)\n', (6908, 6914), False, 'import _modulated\n'), ((7780, 7842), '_modulated.new_OverSampledDFTSynthesisBankPtr', '_modulated.new_OverSampledDFTSynthesisBankPtr', (['*args'], {}), '(*args, **kwargs)\n', (7825, 7842), False, 'import _modulated\n'), ((7998, 8054), '_modulated.OverSampledDFTSynthesisBankPtr___iter__', '_modulated.OverSampledDFTSynthesisBankPtr___iter__', (['self'], {}), '(self)\n', (8048, 8054), False, 'import _modulated\n'), ((8096, 8153), '_modulated.OverSampledDFTSynthesisBankPtr___deref__', '_modulated.OverSampledDFTSynthesisBankPtr___deref__', (['self'], {}), '(self)\n', (8147, 8153), False, 'import _modulated\n'), ((8305, 8368), '_modulated.OverSampledDFTSynthesisBankPtr_polyphase', '_modulated.OverSampledDFTSynthesisBankPtr_polyphase', (['self', 'm', 'n'], {}), '(self, m, n)\n', (8356, 8368), False, 'import _modulated\n'), ((8416, 8476), '_modulated.OverSampledDFTSynthesisBankPtr_next', '_modulated.OverSampledDFTSynthesisBankPtr_next', 
(['self', 'frameX'], {}), '(self, frameX)\n', (8462, 8476), False, 'import _modulated\n'), ((8514, 8567), '_modulated.OverSampledDFTSynthesisBankPtr_reset', '_modulated.OverSampledDFTSynthesisBankPtr_reset', (['self'], {}), '(self)\n', (8561, 8567), False, 'import _modulated\n'), ((8626, 8700), '_modulated.OverSampledDFTSynthesisBankPtr_input_source_vector', '_modulated.OverSampledDFTSynthesisBankPtr_input_source_vector', (['self', 'block'], {}), '(self, block)\n', (8687, 8700), False, 'import _modulated\n'), ((8761, 8832), '_modulated.OverSampledDFTSynthesisBankPtr_no_stream_feature', '_modulated.OverSampledDFTSynthesisBankPtr_no_stream_feature', (['self', 'flag'], {}), '(self, flag)\n', (8820, 8832), False, 'import _modulated\n'), ((8869, 8921), '_modulated.OverSampledDFTSynthesisBankPtr_name', '_modulated.OverSampledDFTSynthesisBankPtr_name', (['self'], {}), '(self)\n', (8915, 8921), False, 'import _modulated\n'), ((8958, 9010), '_modulated.OverSampledDFTSynthesisBankPtr_size', '_modulated.OverSampledDFTSynthesisBankPtr_size', (['self'], {}), '(self)\n', (9004, 9010), False, 'import _modulated\n'), ((9049, 9103), '_modulated.OverSampledDFTSynthesisBankPtr_is_end', '_modulated.OverSampledDFTSynthesisBankPtr_is_end', (['self'], {}), '(self)\n', (9097, 9103), False, 'import _modulated\n'), ((9143, 9198), '_modulated.OverSampledDFTSynthesisBankPtr_current', '_modulated.OverSampledDFTSynthesisBankPtr_current', (['self'], {}), '(self)\n', (9192, 9198), False, 'import _modulated\n'), ((10101, 10172), '_modulated.new_PerfectReconstructionFFTAnalysisBankPtr', '_modulated.new_PerfectReconstructionFFTAnalysisBankPtr', (['*args'], {}), '(*args, **kwargs)\n', (10155, 10172), False, 'import _modulated\n'), ((10328, 10393), '_modulated.PerfectReconstructionFFTAnalysisBankPtr___iter__', '_modulated.PerfectReconstructionFFTAnalysisBankPtr___iter__', (['self'], {}), '(self)\n', (10387, 10393), False, 'import _modulated\n'), ((10435, 10501), 
'_modulated.PerfectReconstructionFFTAnalysisBankPtr___deref__', '_modulated.PerfectReconstructionFFTAnalysisBankPtr___deref__', (['self'], {}), '(self)\n', (10495, 10501), False, 'import _modulated\n'), ((10662, 10734), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_polyphase', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_polyphase', (['self', 'm', 'n'], {}), '(self, m, n)\n', (10722, 10734), False, 'import _modulated\n'), ((10782, 10851), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_next', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_next', (['self', 'frameX'], {}), '(self, frameX)\n', (10837, 10851), False, 'import _modulated\n'), ((10889, 10951), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_reset', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_reset', (['self'], {}), '(self)\n', (10945, 10951), False, 'import _modulated\n'), ((10988, 11049), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_name', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_name', (['self'], {}), '(self)\n', (11043, 11049), False, 'import _modulated\n'), ((11086, 11147), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_size', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_size', (['self'], {}), '(self)\n', (11141, 11147), False, 'import _modulated\n'), ((11187, 11251), '_modulated.PerfectReconstructionFFTAnalysisBankPtr_current', '_modulated.PerfectReconstructionFFTAnalysisBankPtr_current', (['self'], {}), '(self)\n', (11245, 11251), False, 'import _modulated\n'), ((12187, 12259), '_modulated.new_PerfectReconstructionFFTSynthesisBankPtr', '_modulated.new_PerfectReconstructionFFTSynthesisBankPtr', (['*args'], {}), '(*args, **kwargs)\n', (12242, 12259), False, 'import _modulated\n'), ((12415, 12481), '_modulated.PerfectReconstructionFFTSynthesisBankPtr___iter__', '_modulated.PerfectReconstructionFFTSynthesisBankPtr___iter__', (['self'], {}), '(self)\n', (12475, 12481), False, 'import _modulated\n'), ((12523, 12590), 
'_modulated.PerfectReconstructionFFTSynthesisBankPtr___deref__', '_modulated.PerfectReconstructionFFTSynthesisBankPtr___deref__', (['self'], {}), '(self)\n', (12584, 12590), False, 'import _modulated\n'), ((12752, 12822), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_next', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_next', (['self', 'frameX'], {}), '(self, frameX)\n', (12808, 12822), False, 'import _modulated\n'), ((12860, 12923), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_reset', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_reset', (['self'], {}), '(self)\n', (12917, 12923), False, 'import _modulated\n'), ((12971, 13044), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_polyphase', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_polyphase', (['self', 'm', 'n'], {}), '(self, m, n)\n', (13032, 13044), False, 'import _modulated\n'), ((13081, 13143), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_name', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_name', (['self'], {}), '(self)\n', (13137, 13143), False, 'import _modulated\n'), ((13180, 13242), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_size', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_size', (['self'], {}), '(self)\n', (13236, 13242), False, 'import _modulated\n'), ((13281, 13345), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_is_end', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_is_end', (['self'], {}), '(self)\n', (13339, 13345), False, 'import _modulated\n'), ((13385, 13450), '_modulated.PerfectReconstructionFFTSynthesisBankPtr_current', '_modulated.PerfectReconstructionFFTSynthesisBankPtr_current', (['self'], {}), '(self)\n', (13444, 13450), False, 'import _modulated\n'), ((14321, 14368), '_modulated.new_DelayFeaturePtr', '_modulated.new_DelayFeaturePtr', (['*args'], {}), '(*args, **kwargs)\n', (14351, 14368), False, 'import _modulated\n'), ((14524, 14565), '_modulated.DelayFeaturePtr___iter__', 
'_modulated.DelayFeaturePtr___iter__', (['self'], {}), '(self)\n', (14559, 14565), False, 'import _modulated\n'), ((14607, 14649), '_modulated.DelayFeaturePtr___deref__', '_modulated.DelayFeaturePtr___deref__', (['self'], {}), '(self)\n', (14643, 14649), False, 'import _modulated\n'), ((14797, 14856), '_modulated.DelayFeaturePtr_set_time_delay', '_modulated.DelayFeaturePtr_set_time_delay', (['self', 'time_delay'], {}), '(self, time_delay)\n', (14838, 14856), False, 'import _modulated\n'), ((14904, 14949), '_modulated.DelayFeaturePtr_next', '_modulated.DelayFeaturePtr_next', (['self', 'frameX'], {}), '(self, frameX)\n', (14935, 14949), False, 'import _modulated\n'), ((14987, 15025), '_modulated.DelayFeaturePtr_reset', '_modulated.DelayFeaturePtr_reset', (['self'], {}), '(self)\n', (15019, 15025), False, 'import _modulated\n'), ((15062, 15099), '_modulated.DelayFeaturePtr_name', '_modulated.DelayFeaturePtr_name', (['self'], {}), '(self)\n', (15093, 15099), False, 'import _modulated\n'), ((15136, 15173), '_modulated.DelayFeaturePtr_size', '_modulated.DelayFeaturePtr_size', (['self'], {}), '(self)\n', (15167, 15173), False, 'import _modulated\n'), ((15213, 15253), '_modulated.DelayFeaturePtr_current', '_modulated.DelayFeaturePtr_current', (['self'], {}), '(self)\n', (15247, 15253), False, 'import _modulated\n'), ((15770, 15825), '_modulated.new_CosineModulatedPrototypeDesign', '_modulated.new_CosineModulatedPrototypeDesign', (['M', 'N', 'fs'], {}), '(M, N, fs)\n', (15815, 15825), False, 'import _modulated\n'), ((16086, 16143), '_modulated.CosineModulatedPrototypeDesign_fcn', '_modulated.CosineModulatedPrototypeDesign_fcn', (['self', 'x', 'f'], {}), '(self, x, f)\n', (16131, 16143), False, 'import _modulated\n'), ((16186, 16244), '_modulated.CosineModulatedPrototypeDesign_grad', '_modulated.CosineModulatedPrototypeDesign_grad', (['self', 'x', 'g'], {}), '(self, x, g)\n', (16232, 16244), False, 'import _modulated\n'), ((16278, 16327), 
'_modulated.CosineModulatedPrototypeDesign_M', '_modulated.CosineModulatedPrototypeDesign_M', (['self'], {}), '(self)\n', (16321, 16327), False, 'import _modulated\n'), ((16361, 16410), '_modulated.CosineModulatedPrototypeDesign_N', '_modulated.CosineModulatedPrototypeDesign_N', (['self'], {}), '(self)\n', (16404, 16410), False, 'import _modulated\n'), ((16444, 16493), '_modulated.CosineModulatedPrototypeDesign_m', '_modulated.CosineModulatedPrototypeDesign_m', (['self'], {}), '(self)\n', (16487, 16493), False, 'import _modulated\n'), ((16527, 16576), '_modulated.CosineModulatedPrototypeDesign_J', '_modulated.CosineModulatedPrototypeDesign_J', (['self'], {}), '(self)\n', (16570, 16576), False, 'import _modulated\n'), ((16614, 16667), '_modulated.CosineModulatedPrototypeDesign_proto', '_modulated.CosineModulatedPrototypeDesign_proto', (['self'], {}), '(self)\n', (16661, 16667), False, 'import _modulated\n'), ((17741, 17807), '_modulated.new_AnalysisOversampledDFTDesignPtr', '_modulated.new_AnalysisOversampledDFTDesignPtr', (['M', 'm', 'r', 'wp', 'tau_h'], {}), '(M, m, r, wp, tau_h)\n', (17787, 17807), False, 'import _modulated\n'), ((17964, 18022), '_modulated.AnalysisOversampledDFTDesignPtr___deref__', '_modulated.AnalysisOversampledDFTDesignPtr___deref__', (['self'], {}), '(self)\n', (18016, 18022), False, 'import _modulated\n'), ((18188, 18254), '_modulated.AnalysisOversampledDFTDesignPtr_design', '_modulated.AnalysisOversampledDFTDesignPtr_design', (['self', 'tolerance'], {}), '(self, tolerance)\n', (18237, 18254), False, 'import _modulated\n'), ((18301, 18364), '_modulated.AnalysisOversampledDFTDesignPtr_save', '_modulated.AnalysisOversampledDFTDesignPtr_save', (['self', 'fileName'], {}), '(self, fileName)\n', (18348, 18364), False, 'import _modulated\n'), ((18420, 18487), '_modulated.AnalysisOversampledDFTDesignPtr_calcError', '_modulated.AnalysisOversampledDFTDesignPtr_calcError', (['self', 'doPrint'], {}), '(self, doPrint)\n', (18472, 18487), False, 'import 
_modulated\n'), ((19096, 19169), '_modulated.new_SynthesisOversampledDFTDesignPtr', '_modulated.new_SynthesisOversampledDFTDesignPtr', (['h', 'M', 'm', 'r', 'v', 'wp', 'tau_T'], {}), '(h, M, m, r, v, wp, tau_T)\n', (19143, 19169), False, 'import _modulated\n'), ((19326, 19385), '_modulated.SynthesisOversampledDFTDesignPtr___deref__', '_modulated.SynthesisOversampledDFTDesignPtr___deref__', (['self'], {}), '(self)\n', (19379, 19385), False, 'import _modulated\n'), ((19552, 19619), '_modulated.SynthesisOversampledDFTDesignPtr_design', '_modulated.SynthesisOversampledDFTDesignPtr_design', (['self', 'tolerance'], {}), '(self, tolerance)\n', (19602, 19619), False, 'import _modulated\n'), ((19666, 19730), '_modulated.SynthesisOversampledDFTDesignPtr_save', '_modulated.SynthesisOversampledDFTDesignPtr_save', (['self', 'fileName'], {}), '(self, fileName)\n', (19714, 19730), False, 'import _modulated\n'), ((19786, 19854), '_modulated.SynthesisOversampledDFTDesignPtr_calcError', '_modulated.SynthesisOversampledDFTDesignPtr_calcError', (['self', 'doPrint'], {}), '(self, doPrint)\n', (19839, 19854), False, 'import _modulated\n'), ((20698, 20751), '_modulated.new_AnalysisNyquistMDesignPtr', '_modulated.new_AnalysisNyquistMDesignPtr', (['M', 'm', 'r', 'wp'], {}), '(M, m, r, wp)\n', (20738, 20751), False, 'import _modulated\n'), ((20908, 20960), '_modulated.AnalysisNyquistMDesignPtr___deref__', '_modulated.AnalysisNyquistMDesignPtr___deref__', (['self'], {}), '(self)\n', (20954, 20960), False, 'import _modulated\n'), ((21120, 21180), '_modulated.AnalysisNyquistMDesignPtr_design', '_modulated.AnalysisNyquistMDesignPtr_design', (['self', 'tolerance'], {}), '(self, tolerance)\n', (21163, 21180), False, 'import _modulated\n'), ((21227, 21284), '_modulated.AnalysisNyquistMDesignPtr_save', '_modulated.AnalysisNyquistMDesignPtr_save', (['self', 'fileName'], {}), '(self, fileName)\n', (21268, 21284), False, 'import _modulated\n'), ((21340, 21401), 
'_modulated.AnalysisNyquistMDesignPtr_calcError', '_modulated.AnalysisNyquistMDesignPtr_calcError', (['self', 'doPrint'], {}), '(self, doPrint)\n', (21386, 21401), False, 'import _modulated\n'), ((22226, 22283), '_modulated.new_SynthesisNyquistMDesignPtr', '_modulated.new_SynthesisNyquistMDesignPtr', (['h', 'M', 'm', 'r', 'wp'], {}), '(h, M, m, r, wp)\n', (22267, 22283), False, 'import _modulated\n'), ((22440, 22493), '_modulated.SynthesisNyquistMDesignPtr___deref__', '_modulated.SynthesisNyquistMDesignPtr___deref__', (['self'], {}), '(self)\n', (22487, 22493), False, 'import _modulated\n'), ((22654, 22715), '_modulated.SynthesisNyquistMDesignPtr_design', '_modulated.SynthesisNyquistMDesignPtr_design', (['self', 'tolerance'], {}), '(self, tolerance)\n', (22698, 22715), False, 'import _modulated\n'), ((22762, 22820), '_modulated.SynthesisNyquistMDesignPtr_save', '_modulated.SynthesisNyquistMDesignPtr_save', (['self', 'fileName'], {}), '(self, fileName)\n', (22804, 22820), False, 'import _modulated\n'), ((22876, 22938), '_modulated.SynthesisNyquistMDesignPtr_calcError', '_modulated.SynthesisNyquistMDesignPtr_calcError', (['self', 'doPrint'], {}), '(self, doPrint)\n', (22923, 22938), False, 'import _modulated\n'), ((23800, 23874), '_modulated.new_SynthesisNyquistMDesignCompositeResponsePtr', '_modulated.new_SynthesisNyquistMDesignCompositeResponsePtr', (['h', 'M', 'm', 'r', 'wp'], {}), '(h, M, m, r, wp)\n', (23858, 23874), False, 'import _modulated\n'), ((24031, 24101), '_modulated.SynthesisNyquistMDesignCompositeResponsePtr___deref__', '_modulated.SynthesisNyquistMDesignCompositeResponsePtr___deref__', (['self'], {}), '(self)\n', (24095, 24101), False, 'import _modulated\n'), ((24279, 24357), '_modulated.SynthesisNyquistMDesignCompositeResponsePtr_design', '_modulated.SynthesisNyquistMDesignCompositeResponsePtr_design', (['self', 'tolerance'], {}), '(self, tolerance)\n', (24340, 24357), False, 'import _modulated\n'), ((24404, 24479), 
'_modulated.SynthesisNyquistMDesignCompositeResponsePtr_save', '_modulated.SynthesisNyquistMDesignCompositeResponsePtr_save', (['self', 'fileName'], {}), '(self, fileName)\n', (24463, 24479), False, 'import _modulated\n'), ((24535, 24614), '_modulated.SynthesisNyquistMDesignCompositeResponsePtr_calcError', '_modulated.SynthesisNyquistMDesignCompositeResponsePtr_calcError', (['self', 'doPrint'], {}), '(self, doPrint)\n', (24599, 24614), False, 'import _modulated\n'), ((492, 522), 'importlib.import_module', 'importlib.import_module', (['mname'], {}), '(mname)\n', (515, 522), False, 'import importlib\n'), ((570, 607), 'importlib.import_module', 'importlib.import_module', (['"""_modulated"""'], {}), "('_modulated')\n", (593, 607), False, 'import importlib\n'), ((1045, 1101), 'imp.load_module', 'imp.load_module', (['"""_modulated"""', 'fp', 'pathname', 'description'], {}), "('_modulated', fp, pathname, description)\n", (1060, 1101), False, 'import imp\n'), ((905, 922), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (912, 922), False, 'from os.path import dirname\n')] |
"""
SunPy
=====
An open-source Python library for Solar Physics data analysis.
Web Links
---------
Homepage: http://sunpy.org
Documentation: http://docs.sunpy.org/en/stable/
"""
from __future__ import absolute_import
try:
from .version import version as __version__
except ImportError:
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
__githash__ = ''
import os
from sunpy.util.config import load_config, print_config
from sunpy.util import system_info
from sunpy.tests.runner import SunPyTestRunner
self_test = SunPyTestRunner.make_test_runner_in(os.path.dirname(__file__))
# Load user configuration
config = load_config()
__all__ = ['config', 'self_test', 'system_info']
| [
"os.path.dirname",
"sunpy.util.config.load_config"
] | [((670, 683), 'sunpy.util.config.load_config', 'load_config', ([], {}), '()\n', (681, 683), False, 'from sunpy.util.config import load_config, print_config\n'), ((607, 632), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (622, 632), False, 'import os\n')] |
import classy.datasets
from .Struct import Struct
import numpy as np
from numpy import sqrt,sum,exp,pi,min,max,linspace
def normal(x,mu,sd):
return 1.0/sqrt(2*pi*sd**2)*exp(-(x-mu)**2/(2*sd**2))
def overlap(means_,covars_):
# http://en.wikipedia.org/wiki/Bhattacharyya_distance
# overlap is a dot product
s1,s2=covars_
m1,m2=means_
minx=min([m1-4*s1,m2-4*s2])
maxx=min([m1+4*s1,m2+4*s2])
x=linspace(minx,maxx,1000)
dx=x[1]-x[0]
BC=sum(dx*sqrt(normal(x,m1,s1)*normal(x,m2,s2)))
return BC
def GMM_features_from_1D_vectors2(origdata,number_of_gaussians_list,verbose=True):
from sklearn.mixture import GMM
data=Struct(origdata)
data.vectors=[]
data.feature_names=[]
for M in number_of_gaussians_list:
for G in range(M):
data.feature_names+=['M%d mu%d' % (M,G+1),'M%d sd%d' % (M,G+1)]
for X in origdata.vectors:
vec=[]
for M in number_of_gaussians_list:
model = GMM(M).fit(X)
means=model.means_.ravel()
stddevs=model.covars_.ravel()
for m,s in zip(means,stddevs):
vec.append(m)
vec.append(s)
data.vectors.append(vec)
data.vectors=np.array(data.vectors)
if verbose:
classy.datasets.summary(data)
return data
def GMM_features_from_1D_vectors(origdata,number_of_gaussians,verbose=True):
from sklearn.mixture import GMM
data=Struct(origdata)
data.vectors=[]
data.feature_names=[]
for i in range(number_of_gaussians):
data.feature_names+=['mu%d' % (i+1),'sd%d' % (i+1)]
L=number_of_gaussians
for i in range(L):
for j in range(i+1,L):
data.feature_names+=['overlap %d-%d' % (i+1,j+1)]
for X in origdata.vectors:
model = GMM(number_of_gaussians).fit(X)
means=model.means_.ravel()
stddevs=model.covars_.ravel()
vec=[]
for m,s in zip(means,stddevs):
vec.append(m)
vec.append(s)
L=number_of_gaussians
for i in range(L):
for j in range(i+1,L):
vec.append(overlap([means[i],means[j]],[stddevs[i],stddevs[j]]))
data.vectors.append(vec)
data.vectors=np.array(data.vectors)
if verbose:
classy.datasets.summary(data)
return data
| [
"numpy.sqrt",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.min",
"sklearn.mixture.GMM"
] | [((365, 396), 'numpy.min', 'min', (['[m1 - 4 * s1, m2 - 4 * s2]'], {}), '([m1 - 4 * s1, m2 - 4 * s2])\n', (368, 396), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((397, 428), 'numpy.min', 'min', (['[m1 + 4 * s1, m2 + 4 * s2]'], {}), '([m1 + 4 * s1, m2 + 4 * s2])\n', (400, 428), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((431, 457), 'numpy.linspace', 'linspace', (['minx', 'maxx', '(1000)'], {}), '(minx, maxx, 1000)\n', (439, 457), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((1276, 1298), 'numpy.array', 'np.array', (['data.vectors'], {}), '(data.vectors)\n', (1284, 1298), True, 'import numpy as np\n'), ((2329, 2351), 'numpy.array', 'np.array', (['data.vectors'], {}), '(data.vectors)\n', (2337, 2351), True, 'import numpy as np\n'), ((174, 209), 'numpy.exp', 'exp', (['(-(x - mu) ** 2 / (2 * sd ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sd ** 2))\n', (177, 209), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((157, 179), 'numpy.sqrt', 'sqrt', (['(2 * pi * sd ** 2)'], {}), '(2 * pi * sd ** 2)\n', (161, 179), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((1871, 1895), 'sklearn.mixture.GMM', 'GMM', (['number_of_gaussians'], {}), '(number_of_gaussians)\n', (1874, 1895), False, 'from sklearn.mixture import GMM\n'), ((1013, 1019), 'sklearn.mixture.GMM', 'GMM', (['M'], {}), '(M)\n', (1016, 1019), False, 'from sklearn.mixture import GMM\n')] |
"""
Setup script for the Gimbal package
"""
from setuptools import setup
from setuptools import find_packages
def readme():
"""Returns the contents of the README without the header image."""
header = '======\nGimbal\n======\n'
with open('README.rst', 'r') as f:
f.readline()
return header + f.read()
def requirements():
"""Returns the requirement list."""
with open('requirements.txt', 'r') as f:
return [line.strip() for line in f.readlines()]
# read the current version number
exec(open('gimbal/_version.py').read())
setup(
name='gimbal',
version=__version__,
description=('Tools for importing, creating, editing and querying ' +
'molecular geometries'),
long_description=readme(),
long_description_content_type='text/x-rst',
keywords='gimbal molecule geometry displacement transformation 3D',
url='https://github.com/ryjmacdonell/gimbal',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
scripts=['bin/convgeom', 'bin/measure', 'bin/nudge', 'bin/subst'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Chemistry'
],
install_requires=requirements()
)
| [
"setuptools.find_packages"
] | [((1020, 1035), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1033, 1035), False, 'from setuptools import find_packages\n')] |
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
import seaborn as sns
def plot_conservation(out_path):
"""
Plotting the fraction of conserved binding sites for Brn2, Ebf2 and
Onecut2, based on multiGPS and edgeR results from Aydin et al., 2019
(Nature Neurosciece: PMID 31086315)
Parameters:
out_path: Filepath prefix for output bar plots (Manuscript Fig. 6A)
Returns: None
"""
# Defining the dataFrames using multiGPS and edgeR results \
# from Aydin et al., (2019) Nat. Neuroscience.
# Brn2
brn2 = pd.DataFrame([['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]],
columns=['category', '#'])
brn2['#'] = brn2['#']/np.sum(brn2['#'])
# Ebf2
ebf2 = pd.DataFrame([['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]],
columns=['category', '#'])
ebf2['#'] = ebf2['#']/np.sum(ebf2['#'])
# Onecut2
onecut2 = pd.DataFrame([['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]],
columns=['category', '#'])
onecut2['#'] = onecut2['#']/np.sum(onecut2['#'])
# plot bar plots
sns.set_style('ticks')
fig, ax = plt.subplots()
plt.subplot(1, 3, 1)
plt.bar([0, 1, 2], onecut2['#'], width=0.5, color='#687466')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 2)
plt.bar([0, 1, 2], brn2['#'], width=0.5, color='#cd8d7b')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 3)
plt.bar([0, 1, 2], ebf2['#'], width=0.5, color='#fbc490')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
sns.despine()
fig.tight_layout()
fig.set_size_inches(6, 4)
plt.savefig(out_path + 'Fig_6a.pdf')
def plot_embeddings(data_path, outpath):
"""
Plot 2-D latent embeddings for Brn2, Ebf2 and Onecut2.
Parameters:
data_path: Input file paths (N rows * 2 columns) storing the 2-D co-ordinates
for each binding site in the latent space. The embeddings must be derived
using latent_embeddings/get_latent_embeddings.py
Note: This function assumes that the files are saved with an \
".embedding.txt" extension. Provide only the prefix as an argument.
For example, if the 2-D embedding is stored in "~/Prefix/Oct4.embedding.txt",
call function as: plot_embeddings("~/Prefix/Oct4")
outpath: Output file path.
Returns: None
"""
transcription_factors = ['Brn2', 'Ebf2', 'Onecut2']
for tf in transcription_factors:
dat = np.loadtxt(data_path + tf + '.embedding.txt')
plt.scatter(dat[:, 0], dat[:, 1], s=3, alpha=0.3)
plt.savefig(outpath)
def plot_correlation(data_path, outpath):
"""
Plotting the correlation between ATAC-seq data at individual sites and the
associated chromatin sub-network (Bichrom-CHR) scores.
Parameters:
data_path: Prefix for the ".bound.chromtracks.npy" file. This file stores the
chromatin data at each binding site.
outpath: Output file path.
Returns: None
"""
sns.set_style('whitegrid')
fig, axs = plt.subplots()
for idx, tf in enumerate(['Onecut2', 'Brn2', 'Ebf2']):
# load chromatin data
chrom_data = np.load(data_path + tf + '.bound.chromtracks.npy')
chrom_sum = np.sum(chrom_data, axis=1)
# load scores
embedding = np.loadtxt(data_path + tf + '.embedding.txt')
chrom_score = embedding[:, 1]
plt.subplot(1, 3, idx+1)
plt.scatter(chrom_sum, chrom_score, color='#084177', s=1,
alpha=0.05)
fig.set_size_inches(6, 2)
plt.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.95)
plt.savefig(outpath + 'fig_b.png', dpi=960, layout='tight')
def plot_motif_heatmaps(out_path):
"""
Run MEME-ChIP & FIMO to get the number of motifs enriched at \
chromatin predicted (CP) and sequence predicted (SP) sites.
Parameters:
out_path: Output file path
"""
# Brn2
fig, ax = plt.subplots()
brn2 =np.array([[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]])
brn2[:, 0] = brn2[:, 0]/933.0 # Total # of sites: 933
brn2[:, 1] = brn2[:, 1]/1055.0 # Total # of sites: 1055
sns.heatmap(brn2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c1.pdf')
# Ebf2
fig, ax = plt.subplots()
ebf2 = np.array([[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496],
[2882, 2124], [104, 1214]])
ebf2[:, 0] = ebf2[:, 0] / 4146.0 # Total # of sites: 4146
ebf2[:, 1] = ebf2[:, 1] / 3469.0 # Total # of sites: 3469
sns.heatmap(ebf2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c2.pdf')
# Onecut2
fig, ax = plt.subplots()
oc2 =np.array([[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372],
[1266, 10067]])
oc2[:, 0] = oc2[:, 0]/5771.0 # Total # of sites: 5771
oc2[:, 1] = oc2[:, 1]/4627.0 # Total # of sites: 4627
sns.heatmap(oc2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c3.pdf')
def plot_ebo_boxplots(data_path, outpath):
"""
Plot violin plots (manuscript figure 6) for the iAscl1 TFs.
Parameters:
metrics_path: Path the directory which contains TF.iA.summary files
For example, the GATA summary file looks as follows:
...
bichrom, GATA, 0.49097278959035834
bichrom, GATA, 0.515491844830841
bichrom, GATA, 0.572293273059536
bichrom, GATA, 0.4909197931794813
bichrom, GATA, 0.519433898153947
seq, GATA, 0.40140515853838615
seq, GATA, 0.4071458624248806
seq, GATA, 0.4944029049796368
seq, GATA, 0.3942885914448734
seq, GATA, 0.4207938581419808
...
Note that seq refers to a sequence-only model.
outpath: Output file path.
Returns:
None
"""
sns.set_style('darkgrid')
fig, ax = plt.subplots()
for idx, tf in enumerate(['Brn2', 'Ebf2', 'Onecut2']):
dat = pd.read_csv(data_path + tf + '.iA.summary', sep=',', header=None,
names=['condition', 'tf', 'auprc'])
plt.subplot(1, 3, idx+1)
sns.violinplot(x=dat['condition'], y=dat['auprc'],
palette=('#ecce6d', '#5b8c85'),
order=['seq', 'bichrom'], cut=0)
plt.ylim(0, 1)
plt.xlabel("")
plt.ylabel("")
fig.set_size_inches(6, 3)
plt.savefig(data_path + 'violinplots.pdf')
if __name__ == "__main__":
out_path = sys.argv[1]
data_path = sys.argv[2]
plot_conservation(out_path)
plot_embeddings(data_path=data_path, outpath=out_path)
plot_correlation(data_path=data_path, outpath=out_path)
plot_motif_heatmaps(out_path=out_path)
plot_ebo_boxplots(data_path=data_path, outpath=out_path)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.array",
"seaborn.violinplot",
"seaborn.despine",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"seaborn... | [((589, 687), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]]"], {'columns': "['category', '#']"}), "([['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]], columns=\n ['category', '#'])\n", (601, 687), True, 'import pandas as pd\n'), ((774, 873), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]]"], {'columns': "['category', '#']"}), "([['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]],\n columns=['category', '#'])\n", (786, 873), True, 'import pandas as pd\n'), ((967, 1066), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]]"], {'columns': "['category', '#']"}), "([['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]], columns\n =['category', '#'])\n", (979, 1066), True, 'import pandas as pd\n'), ((1168, 1190), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (1181, 1190), True, 'import seaborn as sns\n'), ((1205, 1219), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1244), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1235, 1244), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1309), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "onecut2['#']"], {'width': '(0.5)', 'color': '"""#687466"""'}), "([0, 1, 2], onecut2['#'], width=0.5, color='#687466')\n", (1256, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1337), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1324, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1356), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1350, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1387), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1378, 1387), True, 'import matplotlib.pyplot as 
plt\n'), ((1392, 1449), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "brn2['#']"], {'width': '(0.5)', 'color': '"""#cd8d7b"""'}), "([0, 1, 2], brn2['#'], width=0.5, color='#cd8d7b')\n", (1399, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1454, 1477), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1464, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1496), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1490, 1496), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1527), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1518, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1589), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "ebf2['#']"], {'width': '(0.5)', 'color': '"""#fbc490"""'}), "([0, 1, 2], ebf2['#'], width=0.5, color='#fbc490')\n", (1539, 1589), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1617), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1604, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1636), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1630, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1660), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1658, 1660), True, 'import seaborn as sns\n'), ((1718, 1754), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'Fig_6a.pdf')"], {}), "(out_path + 'Fig_6a.pdf')\n", (1729, 1754), True, 'import matplotlib.pyplot as plt\n'), ((3102, 3128), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (3115, 3128), True, 'import seaborn as sns\n'), ((3144, 3158), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3156, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3722), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.2)', 'right': '(0.95)', 'top': 
'(0.95)'}), '(left=0.1, bottom=0.2, right=0.95, top=0.95)\n', (3678, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3786), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outpath + 'fig_b.png')"], {'dpi': '(960)', 'layout': '"""tight"""'}), "(outpath + 'fig_b.png', dpi=960, layout='tight')\n", (3738, 3786), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4061), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4144), 'numpy.array', 'np.array', (['[[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]]'], {}), '([[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]])\n', (4080, 4144), True, 'import numpy as np\n'), ((4270, 4377), 'seaborn.heatmap', 'sns.heatmap', (['brn2'], {'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(brn2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (4281, 4377), True, 'import seaborn as sns\n'), ((4394, 4458), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (4413, 4458), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4529), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c1.pdf')"], {}), "(out_path + 'fig_c1.pdf')\n", (4504, 4529), True, 'import matplotlib.pyplot as plt\n'), ((4556, 4570), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4568, 4570), True, 'import matplotlib.pyplot as plt\n'), ((4582, 4681), 'numpy.array', 'np.array', (['[[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496], [2882, 2124], [\n 104, 1214]]'], {}), '([[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496], [2882, \n 2124], [104, 1214]])\n', (4590, 4681), True, 'import numpy as np\n'), ((4827, 4934), 'seaborn.heatmap', 'sns.heatmap', (['ebf2'], 
{'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(ebf2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (4838, 4934), True, 'import seaborn as sns\n'), ((4951, 5015), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (4970, 5015), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5086), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c2.pdf')"], {}), "(out_path + 'fig_c2.pdf')\n", (5061, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5116, 5130), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5128, 5130), True, 'import matplotlib.pyplot as plt\n'), ((5140, 5228), 'numpy.array', 'np.array', (['[[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372], [1266, 10067]]'], {}), '([[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372], [1266, \n 10067]])\n', (5148, 5228), True, 'import numpy as np\n'), ((5364, 5470), 'seaborn.heatmap', 'sns.heatmap', (['oc2'], {'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(oc2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (5375, 5470), True, 'import seaborn as sns\n'), ((5487, 5551), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (5506, 5551), True, 'import matplotlib.pyplot as plt\n'), ((5586, 5622), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c3.pdf')"], {}), "(out_path + 'fig_c3.pdf')\n", (5597, 5622), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6469), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", 
(6457, 6469), True, 'import seaborn as sns\n'), ((6484, 6498), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7048), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_path + 'violinplots.pdf')"], {}), "(data_path + 'violinplots.pdf')\n", (7017, 7048), True, 'import matplotlib.pyplot as plt\n'), ((733, 750), 'numpy.sum', 'np.sum', (["brn2['#']"], {}), "(brn2['#'])\n", (739, 750), True, 'import numpy as np\n'), ((920, 937), 'numpy.sum', 'np.sum', (["ebf2['#']"], {}), "(ebf2['#'])\n", (926, 937), True, 'import numpy as np\n'), ((1121, 1141), 'numpy.sum', 'np.sum', (["onecut2['#']"], {}), "(onecut2['#'])\n", (1127, 1141), True, 'import numpy as np\n'), ((2567, 2612), 'numpy.loadtxt', 'np.loadtxt', (["(data_path + tf + '.embedding.txt')"], {}), "(data_path + tf + '.embedding.txt')\n", (2577, 2612), True, 'import numpy as np\n'), ((2621, 2670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dat[:, 0]', 'dat[:, 1]'], {'s': '(3)', 'alpha': '(0.3)'}), '(dat[:, 0], dat[:, 1], s=3, alpha=0.3)\n', (2632, 2670), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2699), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outpath'], {}), '(outpath)\n', (2690, 2699), True, 'import matplotlib.pyplot as plt\n'), ((3270, 3320), 'numpy.load', 'np.load', (["(data_path + tf + '.bound.chromtracks.npy')"], {}), "(data_path + tf + '.bound.chromtracks.npy')\n", (3277, 3320), True, 'import numpy as np\n'), ((3341, 3367), 'numpy.sum', 'np.sum', (['chrom_data'], {'axis': '(1)'}), '(chrom_data, axis=1)\n', (3347, 3367), True, 'import numpy as np\n'), ((3410, 3455), 'numpy.loadtxt', 'np.loadtxt', (["(data_path + tf + '.embedding.txt')"], {}), "(data_path + tf + '.embedding.txt')\n", (3420, 3455), True, 'import numpy as np\n'), ((3502, 3528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(idx + 1)'], {}), '(1, 3, idx + 1)\n', (3513, 3528), True, 'import matplotlib.pyplot as plt\n'), ((3535, 3604), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['chrom_sum', 'chrom_score'], {'color': '"""#084177"""', 's': '(1)', 'alpha': '(0.05)'}), "(chrom_sum, chrom_score, color='#084177', s=1, alpha=0.05)\n", (3546, 3604), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6678), 'pandas.read_csv', 'pd.read_csv', (["(data_path + tf + '.iA.summary')"], {'sep': '""","""', 'header': 'None', 'names': "['condition', 'tf', 'auprc']"}), "(data_path + tf + '.iA.summary', sep=',', header=None, names=[\n 'condition', 'tf', 'auprc'])\n", (6583, 6678), True, 'import pandas as pd\n'), ((6708, 6734), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(idx + 1)'], {}), '(1, 3, idx + 1)\n', (6719, 6734), True, 'import matplotlib.pyplot as plt\n'), ((6741, 6860), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': "dat['condition']", 'y': "dat['auprc']", 'palette': "('#ecce6d', '#5b8c85')", 'order': "['seq', 'bichrom']", 'cut': '(0)'}), "(x=dat['condition'], y=dat['auprc'], palette=('#ecce6d',\n '#5b8c85'), order=['seq', 'bichrom'], cut=0)\n", (6755, 6860), True, 'import seaborn as sns\n'), ((6911, 6925), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (6919, 6925), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (6944, 6948), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (6967, 6971), True, 'import matplotlib.pyplot as plt\n')] |
import pyperclip
import unittest
from credential import Credential
class TestCredentials(unittest.TestCase):
    """Unit tests for the Credential class behaviour."""

    def setUp(self):
        """Create a fresh credential object before every test."""
        self.new_credential = Credential("Instagram", "mimoh", "<PASSWORD>")

    def tearDown(self):
        """Reset the shared credential list so tests stay independent."""
        Credential.credential_list = []

    def test_init(self):
        """Attributes are initialised from the constructor arguments."""
        self.assertEqual(self.new_credential.account_name, "Instagram")
        self.assertEqual(self.new_credential.username, "mimoh")
        self.assertEqual(self.new_credential.account_password, "<PASSWORD>")

    def test_save_credential(self):
        """save_credential() appends the object to the class-level list."""
        self.new_credential.save_credential()
        self.assertEqual(len(Credential.credential_list), 1)

    def test_save_multiple_credentials(self):
        """Several credentials can be stored side by side."""
        self.new_credential.save_credential()
        extra = Credential("Twitter", "ruth", "cheliR")
        extra.save_credential()
        self.assertEqual(len(Credential.credential_list), 2)

    def test_confirm_credential_exists(self):
        """credential_exists() finds a saved credential by account name."""
        self.new_credential.save_credential()
        extra = Credential("Twitter", "ruth", "cheliR")
        extra.save_credential()
        self.assertTrue(Credential.credential_exists("Twitter"))

    def test_display_credentials(self):
        """display_credentials() returns the full credential list."""
        self.assertEqual(
            Credential.display_credentials(), Credential.credential_list
        )
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"credential.Credential.credential_exists",
"credential.Credential",
"credential.Credential.display_credentials"
] | [((1904, 1919), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1917, 1919), False, 'import unittest\n'), ((223, 269), 'credential.Credential', 'Credential', (['"""Instagram"""', '"""mimoh"""', '"""<PASSWORD>"""'], {}), "('Instagram', 'mimoh', '<PASSWORD>')\n", (233, 269), False, 'from credential import Credential\n'), ((1155, 1194), 'credential.Credential', 'Credential', (['"""Twitter"""', '"""ruth"""', '"""cheliR"""'], {}), "('Twitter', 'ruth', 'cheliR')\n", (1165, 1194), False, 'from credential import Credential\n'), ((1482, 1521), 'credential.Credential', 'Credential', (['"""Twitter"""', '"""ruth"""', '"""cheliR"""'], {}), "('Twitter', 'ruth', 'cheliR')\n", (1492, 1521), False, 'from credential import Credential\n'), ((1591, 1630), 'credential.Credential.credential_exists', 'Credential.credential_exists', (['"""Twitter"""'], {}), "('Twitter')\n", (1619, 1630), False, 'from credential import Credential\n'), ((1811, 1843), 'credential.Credential.display_credentials', 'Credential.display_credentials', ([], {}), '()\n', (1841, 1843), False, 'from credential import Credential\n')] |
from kektrade.exceptions import UnsupportedExchange
from kektrade.exchange.backtest_inverse import BacktestInverse
from kektrade.exchange.backtest_linear import BacktestLinear
from kektrade.exchange.dryrun_inverse import DryrunInverse
from kektrade.exchange.dryrun_linear import DryrunLinear
from kektrade.misc import EnumString
class ExchangeEndpoint(EnumString):
    """String identifiers for every exchange backend known to the resolver.

    Only the backtest/dryrun endpoints have implementations wired up in
    ExchangeResolver; the live exchange entries are identifiers only here.
    """

    BinanceSpot = 'binance_spot'
    BinanceFutures = 'binance_futures'
    BinanceFuturesCoin = 'binance_futures_coin'
    BybitFutures = 'bybit_futures'
    BybitFuturesInverse = 'bybit_futures_inverse'
    BacktestLinear = 'backtest_linear'
    BacktestInverse = 'backtest_inverse'
    DryrunLinear = "dryrun_linear"
    DryrunInverse = "dryrun_inverse"
class ExchangeResolver():
    """Resolves an exchange endpoint name to a concrete exchange instance."""

    @classmethod
    def load_exchange(cls, exchange_name: ExchangeEndpoint):
        """Instantiate the exchange implementation for *exchange_name*.

        Raises:
            UnsupportedExchange: if the endpoint has no implementation.
        """
        exchange = ExchangeEndpoint.from_str(exchange_name)
        for endpoint, implementation in (
            (ExchangeEndpoint.BacktestInverse, BacktestInverse),
            (ExchangeEndpoint.BacktestLinear, BacktestLinear),
            (ExchangeEndpoint.DryrunLinear, DryrunLinear),
            (ExchangeEndpoint.DryrunInverse, DryrunInverse),
        ):
            if exchange == endpoint:
                return implementation()
        raise UnsupportedExchange()
| [
"kektrade.exchange.backtest_linear.BacktestLinear",
"kektrade.exceptions.UnsupportedExchange",
"kektrade.exchange.dryrun_inverse.DryrunInverse",
"kektrade.exchange.dryrun_linear.DryrunLinear",
"kektrade.exchange.backtest_inverse.BacktestInverse"
] | [((970, 987), 'kektrade.exchange.backtest_inverse.BacktestInverse', 'BacktestInverse', ([], {}), '()\n', (985, 987), False, 'from kektrade.exchange.backtest_inverse import BacktestInverse\n'), ((1065, 1081), 'kektrade.exchange.backtest_linear.BacktestLinear', 'BacktestLinear', ([], {}), '()\n', (1079, 1081), False, 'from kektrade.exchange.backtest_linear import BacktestLinear\n'), ((1157, 1171), 'kektrade.exchange.dryrun_linear.DryrunLinear', 'DryrunLinear', ([], {}), '()\n', (1169, 1171), False, 'from kektrade.exchange.dryrun_linear import DryrunLinear\n'), ((1248, 1263), 'kektrade.exchange.dryrun_inverse.DryrunInverse', 'DryrunInverse', ([], {}), '()\n', (1261, 1263), False, 'from kektrade.exchange.dryrun_inverse import DryrunInverse\n'), ((1296, 1317), 'kektrade.exceptions.UnsupportedExchange', 'UnsupportedExchange', ([], {}), '()\n', (1315, 1317), False, 'from kektrade.exceptions import UnsupportedExchange\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 14:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Resource.event_date to event_time; schema/state change only,
    # no data transformation involved.

    dependencies = [
        ('web', '0010_auto_20160203_1406'),
    ]
    operations = [
        migrations.RenameField(
            model_name='resource',
            old_name='event_date',
            new_name='event_time',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((287, 382), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""resource"""', 'old_name': '"""event_date"""', 'new_name': '"""event_time"""'}), "(model_name='resource', old_name='event_date',\n new_name='event_time')\n", (309, 382), False, 'from django.db import migrations\n')] |
"""
refer to https://github.com/jfzhang95/pytorch-deeplab-xception/blob/master/utils/metrics.py
"""
import numpy as np
__all__ = ['SegmentationMetric']
"""
confusionMetric
P\L P N
P TP FP
N FN TN
"""
class SegmentationMetric(object):
    """Accumulates a confusion matrix over batches and derives accuracy
    and IoU style metrics from it.

    Matrix layout: rows index the ground-truth class, columns the
    predicted class, so the diagonal holds the true positives.
    """

    def __init__(self, numClass):
        self.numClass = numClass
        self.confusionMatrix = np.zeros((self.numClass,) * 2)

    def pixelAccuracy(self):
        """Overall accuracy: correctly classified pixels / all pixels."""
        correct = np.diag(self.confusionMatrix).sum()
        total = self.confusionMatrix.sum()
        return correct / total

    def classPixelAccuracy(self):
        """Per-class TP / (TP + FN), i.e. recall of each class.

        Entries are NaN for classes absent from the ground truth.
        """
        true_positives = np.diag(self.confusionMatrix)
        per_class_totals = self.confusionMatrix.sum(axis=1)
        return true_positives / per_class_totals

    def meanPixelAccuracy(self):
        """Mean of the per-class accuracies, ignoring NaN entries."""
        return np.nanmean(self.classPixelAccuracy())

    def meanIntersectionOverUnion(self):
        """Mean IoU: average of TP / (TP + FP + FN) over all classes."""
        tp = np.diag(self.confusionMatrix)
        union = (
            self.confusionMatrix.sum(axis=1)
            + self.confusionMatrix.sum(axis=0)
            - tp
        )
        return np.nanmean(tp / union)

    def genConfusionMatrix(self, imgPredict, imgLabel):
        """Build a numClass x numClass confusion matrix for one pair."""
        # Keep only pixels whose label is a valid class id; labels outside
        # [0, numClass) mark unlabeled regions and are dropped.
        valid = (imgLabel >= 0) & (imgLabel < self.numClass)
        combined = self.numClass * imgLabel[valid] + imgPredict[valid]
        counts = np.bincount(combined, minlength=self.numClass ** 2)
        return counts.reshape(self.numClass, self.numClass)

    def Frequency_Weighted_Intersection_over_Union(self):
        """Per-class IoU weighted by that class's pixel frequency."""
        freq = self.confusionMatrix.sum(axis=1) / self.confusionMatrix.sum()
        tp = np.diag(self.confusionMatrix)
        iu = tp / (
            self.confusionMatrix.sum(axis=1)
            + self.confusionMatrix.sum(axis=0)
            - tp
        )
        return (freq[freq > 0] * iu[freq > 0]).sum()

    def addBatch(self, imgPredict, imgLabel):
        """Accumulate one batch; prediction and label must share a shape."""
        assert imgPredict.shape == imgLabel.shape
        self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.confusionMatrix = np.zeros((self.numClass, self.numClass))
if __name__ == '__main__':
imgPredict = np.array([0, 0, 1, 1, 2, 2])
imgLabel = np.array([0, 0, 1, 1, 2, 2])
metric = SegmentationMetric(3)
metric.addBatch(imgPredict, imgLabel)
acc = metric.pixelAccuracy()
mIoU = metric.meanIntersectionOverUnion()
print(acc, mIoU) | [
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.nanmean",
"numpy.sum",
"numpy.bincount"
] | [((2630, 2658), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (2638, 2658), True, 'import numpy as np\n'), ((2674, 2702), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (2682, 2702), True, 'import numpy as np\n'), ((359, 389), 'numpy.zeros', 'np.zeros', (['((self.numClass,) * 2)'], {}), '((self.numClass,) * 2)\n', (367, 389), True, 'import numpy as np\n'), ((975, 995), 'numpy.nanmean', 'np.nanmean', (['classAcc'], {}), '(classAcc)\n', (985, 995), True, 'import numpy as np\n'), ((1169, 1198), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (1176, 1198), True, 'import numpy as np\n'), ((1386, 1401), 'numpy.nanmean', 'np.nanmean', (['IoU'], {}), '(IoU)\n', (1396, 1401), True, 'import numpy as np\n'), ((1692, 1740), 'numpy.bincount', 'np.bincount', (['label'], {'minlength': '(self.numClass ** 2)'}), '(label, minlength=self.numClass ** 2)\n', (1703, 1740), True, 'import numpy as np\n'), ((2543, 2583), 'numpy.zeros', 'np.zeros', (['(self.numClass, self.numClass)'], {}), '((self.numClass, self.numClass))\n', (2551, 2583), True, 'import numpy as np\n'), ((789, 818), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (796, 818), True, 'import numpy as np\n'), ((1293, 1322), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (1300, 1322), True, 'import numpy as np\n'), ((1985, 2021), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), '(self.confusionMatrix, axis=1)\n', (1991, 2021), True, 'import numpy as np\n'), ((2024, 2052), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2030, 2052), True, 'import numpy as np\n'), ((2066, 2095), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2073, 2095), True, 'import numpy as np\n'), ((1215, 1251), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), 
'(self.confusionMatrix, axis=1)\n', (1221, 1251), True, 'import numpy as np\n'), ((1254, 1290), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(0)'}), '(self.confusionMatrix, axis=0)\n', (1260, 1290), True, 'import numpy as np\n'), ((2210, 2239), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2217, 2239), True, 'import numpy as np\n'), ((532, 561), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (539, 561), True, 'import numpy as np\n'), ((2116, 2152), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), '(self.confusionMatrix, axis=1)\n', (2122, 2152), True, 'import numpy as np\n'), ((2155, 2191), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(0)'}), '(self.confusionMatrix, axis=0)\n', (2161, 2191), True, 'import numpy as np\n')] |
from app import app
from flask_socketio import SocketIO
from flask_failsafe import failsafe
# Module-level Socket.IO server bound to the Flask application at import time.
socketio = SocketIO(app)
@failsafe
def create_app():
    """App factory; flask-failsafe shows a traceback page if startup fails."""
    return app
import eventlet
# Monkey-patch the stdlib for cooperative (green-thread) networking before
# the Socket.IO server starts handling connections.
eventlet.monkey_patch()
if __name__ == '__main__':
socketio.run(create_app(), debug=True, use_reloader=True, port=app.config['PORT']) | [
"flask_socketio.SocketIO",
"eventlet.monkey_patch"
] | [((103, 116), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {}), '(app)\n', (111, 116), False, 'from flask_socketio import SocketIO\n'), ((178, 201), 'eventlet.monkey_patch', 'eventlet.monkey_patch', ([], {}), '()\n', (199, 201), False, 'import eventlet\n')] |
import orjson
import pytest
from faker import Faker
from restapi.env import Env
from restapi.services.authentication import BaseAuthentication
from restapi.tests import API_URI, AUTH_URI, BaseTests, FlaskClient
from restapi.utilities.logs import Events, log
class TestApp(BaseTests):
    """End-to-end tests for /admin/groups CRUD and the audit events it emits.

    test_events_file depends on the exact sequence of operations performed
    by test_admin_groups, so the two tests must run in this order.
    """

    def test_admin_groups(self, client: FlaskClient, faker: Faker) -> None:
        """Exercise group create/modify/delete plus assigning a group to the main user."""
        if not Env.get_bool("MAIN_LOGIN_ENABLE") or not Env.get_bool("AUTH_ENABLE"):
            log.warning("Skipping admin/users tests")
            return
        headers, _ = self.do_login(client, None, None)
        r = client.get(f"{API_URI}/admin/groups", headers=headers)
        assert r.status_code == 200
        schema = self.getDynamicInputSchema(client, "admin/groups", headers)
        data = self.buildData(schema)
        # Event 1: create
        r = client.post(f"{API_URI}/admin/groups", data=data, headers=headers)
        assert r.status_code == 200
        uuid = self.get_content(r)
        assert isinstance(uuid, str)
        r = client.get(f"{API_URI}/admin/groups", headers=headers)
        assert r.status_code == 200
        groups = self.get_content(r)
        assert isinstance(groups, list)
        assert len(groups) > 0
        assert "uuid" in groups[0]
        assert "shortname" in groups[0]
        assert "fullname" in groups[0]
        assert "members" in groups[0]
        assert len(groups[0]["members"]) > 0
        assert "coordinators" in groups[0]
        fullname = None
        for g in groups:
            if g.get("uuid") == uuid:
                fullname = g.get("fullname")
                break
        else:  # pragma: no cover
            pytest.fail("Group not found")
        assert fullname is not None
        newdata = {
            "shortname": faker.company(),
            "fullname": faker.company(),
        }
        # Test the differences between post and put schema
        post_schema = {s["key"]: s for s in schema}
        tmp_schema = self.getDynamicInputSchema(
            client, f"admin/groups/{uuid}", headers, method="put"
        )
        put_schema = {s["key"]: s for s in tmp_schema}
        assert "shortname" in post_schema
        assert post_schema["shortname"]["required"]
        assert "shortname" in put_schema
        assert put_schema["shortname"]["required"]
        assert "fullname" in post_schema
        assert post_schema["fullname"]["required"]
        assert "fullname" in put_schema
        assert put_schema["fullname"]["required"]
        # Event 2: modify
        r = client.put(f"{API_URI}/admin/groups/{uuid}", data=newdata, headers=headers)
        assert r.status_code == 204
        r = client.get(f"{API_URI}/admin/groups", headers=headers)
        assert r.status_code == 200
        groups = self.get_content(r)
        assert isinstance(groups, list)
        for g in groups:
            if g.get("uuid") == uuid:
                assert g.get("fullname") == newdata.get("fullname")
                assert g.get("fullname") != data.get("fullname")
                assert g.get("fullname") != fullname
        r = client.put(f"{API_URI}/admin/groups/xyz", data=data, headers=headers)
        assert r.status_code == 404
        # Event 3: delete
        r = client.delete(f"{API_URI}/admin/groups/{uuid}", headers=headers)
        assert r.status_code == 204
        r = client.get(f"{API_URI}/admin/groups", headers=headers)
        assert r.status_code == 200
        groups = self.get_content(r)
        assert isinstance(groups, list)
        for g in groups:
            if g.get("uuid") == uuid:  # pragma: no cover
                pytest.fail("Group not deleted!")
        r = client.delete(f"{API_URI}/admin/groups/xyz", headers=headers)
        assert r.status_code == 404
        # Create a group and assign it to the main user
        # Profile and AdminUsers will react to this change
        # Very important: admin_groups must be tested before admin_users and profile
        r = client.get(f"{AUTH_URI}/profile", headers=headers)
        assert r.status_code == 200
        content = self.get_content(r)
        assert isinstance(content, dict)
        user_uuid = content.get("uuid")
        data = {
            "fullname": "<NAME>",
            "shortname": faker.company(),
        }
        # Event 4: create
        uuid, _ = self.create_group(client, data=data)
        data = {
            "group": uuid,
            # very important, otherwise the default user will lose its admin role
            "roles": orjson.dumps(["admin_root"]).decode("UTF8"),
        }
        headers, _ = self.do_login(client, None, None)
        # Event 5: modify
        r = client.put(f"{API_URI}/admin/users/{user_uuid}", data=data, headers=headers)
        assert r.status_code == 204

    def test_events_file(self) -> None:
        """Verify the audit-event log entries produced by test_admin_groups."""
        if not Env.get_bool("MAIN_LOGIN_ENABLE") or not Env.get_bool("AUTH_ENABLE"):
            log.warning("Skipping admin/users tests")
            return
        events = self.get_last_events(4, filters={"target_type": "Group"})
        # A new group is created
        INDEX = 0
        assert events[INDEX].event == Events.create.value
        assert events[INDEX].user == BaseAuthentication.default_user
        assert events[INDEX].target_type == "Group"
        assert events[INDEX].url == "/api/admin/groups"
        assert "fullname" in events[INDEX].payload
        assert "shortname" in events[INDEX].payload
        # Group modified (same target_id as above)
        INDEX = 1
        assert events[INDEX].event == Events.modify.value
        assert events[INDEX].user == BaseAuthentication.default_user
        assert events[INDEX].target_type == "Group"
        assert events[INDEX].target_id == events[0].target_id
        assert events[INDEX].url == f"/api/admin/groups/{events[0].target_id}"
        assert "fullname" in events[INDEX].payload
        assert "shortname" in events[INDEX].payload
        # Group is deleted (same target_id as above)
        INDEX = 2
        assert events[INDEX].event == Events.delete.value
        assert events[INDEX].user == BaseAuthentication.default_user
        assert events[INDEX].target_type == "Group"
        assert events[INDEX].target_id == events[0].target_id
        assert events[INDEX].url == f"/api/admin/groups/{events[0].target_id}"
        assert len(events[INDEX].payload) == 0
        # A new group is created
        INDEX = 3
        assert events[INDEX].event == Events.create.value
        assert events[INDEX].user == BaseAuthentication.default_user
        assert events[INDEX].target_type == "Group"
        assert events[INDEX].target_id != events[0].target_id
        assert events[INDEX].url == "/api/admin/groups"
        assert "fullname" in events[INDEX].payload
        assert "shortname" in events[INDEX].payload
        group_uuid = events[INDEX].target_id
        events = self.get_last_events(1, filters={"target_type": "User"})
        # User modified, payload contains the created group
        INDEX = 0
        assert events[INDEX].event == Events.modify.value
        assert events[INDEX].user == BaseAuthentication.default_user
        assert events[INDEX].target_type == "User"
        assert events[INDEX].url == f"/api/admin/users/{events[INDEX].target_id}"
        assert "fullname" not in events[INDEX].payload
        assert "shortname" not in events[INDEX].payload
        assert "group" in events[INDEX].payload
        assert events[INDEX].payload["group"] == group_uuid
| [
"pytest.fail",
"restapi.env.Env.get_bool",
"restapi.utilities.logs.log.warning",
"orjson.dumps"
] | [((461, 502), 'restapi.utilities.logs.log.warning', 'log.warning', (['"""Skipping admin/users tests"""'], {}), "('Skipping admin/users tests')\n", (472, 502), False, 'from restapi.utilities.logs import Events, log\n'), ((1667, 1697), 'pytest.fail', 'pytest.fail', (['"""Group not found"""'], {}), "('Group not found')\n", (1678, 1697), False, 'import pytest\n'), ((4932, 4973), 'restapi.utilities.logs.log.warning', 'log.warning', (['"""Skipping admin/users tests"""'], {}), "('Skipping admin/users tests')\n", (4943, 4973), False, 'from restapi.utilities.logs import Events, log\n'), ((379, 412), 'restapi.env.Env.get_bool', 'Env.get_bool', (['"""MAIN_LOGIN_ENABLE"""'], {}), "('MAIN_LOGIN_ENABLE')\n", (391, 412), False, 'from restapi.env import Env\n'), ((420, 447), 'restapi.env.Env.get_bool', 'Env.get_bool', (['"""AUTH_ENABLE"""'], {}), "('AUTH_ENABLE')\n", (432, 447), False, 'from restapi.env import Env\n'), ((3633, 3666), 'pytest.fail', 'pytest.fail', (['"""Group not deleted!"""'], {}), "('Group not deleted!')\n", (3644, 3666), False, 'import pytest\n'), ((4850, 4883), 'restapi.env.Env.get_bool', 'Env.get_bool', (['"""MAIN_LOGIN_ENABLE"""'], {}), "('MAIN_LOGIN_ENABLE')\n", (4862, 4883), False, 'from restapi.env import Env\n'), ((4891, 4918), 'restapi.env.Env.get_bool', 'Env.get_bool', (['"""AUTH_ENABLE"""'], {}), "('AUTH_ENABLE')\n", (4903, 4918), False, 'from restapi.env import Env\n'), ((4532, 4560), 'orjson.dumps', 'orjson.dumps', (["['admin_root']"], {}), "(['admin_root'])\n", (4544, 4560), False, 'import orjson\n')] |
import re
from collections import defaultdict
def bitand(x, y):
    """AND gate: bitwise conjunction of the two wire signals."""
    return x & y
def bitor(x, y):
    """OR gate: bitwise disjunction of the two wire signals."""
    return x | y
def lshift(x, y):
    """LSHIFT gate: shift signal *x* left by *y* bits."""
    return x << y
def rshift(x, y):
    """RSHIFT gate: shift signal *x* right by *y* bits."""
    return x >> y
def bitnot(x):
    """NOT gate: invert a signal within its 16-bit wire width."""
    return 0xFFFF ^ x
# Dispatch tables mapping gate mnemonics to their implementing functions.
BIN_OPERATORS = {"AND": bitand, "OR": bitor, "LSHIFT": lshift, "RSHIFT": rshift}
UN_OPERATORS = {"NOT": bitnot}
class Container:
    """Mutable cell for a wire: allows a rule to reference a wire before
    the rule defining that wire has been parsed (content is set later)."""

    # Filled in with this wire's Link once its defining rule is processed.
    content = None
class Link:
    """Base node of the wire graph.

    Subclasses implement ``_get_value``; ``get_value`` memoizes the result
    so each wire is evaluated at most once.
    """

    @staticmethod
    def get_link_from_value(x):
        """Wrap *x* as a ValueLink if it is a literal, else follow the wire."""
        try:
            value = int(x)
            return ValueLink(value)
        except ValueError:
            pass
        return DirectLink(WIRING[x])

    @staticmethod
    def create_link(rule):
        """Build the Link node described by a parsed rule tail."""
        if len(rule) == 1:
            return Link.get_link_from_value(rule[0])
        if rule[0] in BIN_OPERATORS:
            return BinaryLink(
                rule[0],
                Link.get_link_from_value(rule[1]),
                Link.get_link_from_value(rule[2]),
            )
        if rule[0] in UN_OPERATORS:
            return UnaryLink(rule[0], Link.get_link_from_value(rule[1]))

    def __init__(self, operator=None):
        self._cached_result = None
        self._operator = operator

    def get_value(self):
        # BUG FIX: the old check `if not self._cached_result` treated a
        # cached value of 0 as "not computed yet", re-evaluating that whole
        # subtree on every call (potentially exponential blow-up).  Compare
        # against None so 0 is cached like any other value.
        if self._cached_result is None:
            self._cached_result = self._get_value()
        return self._cached_result
class ValueLink(Link):
    """A Link that yields a fixed integer signal."""

    def __init__(self, value):
        super().__init__()
        self._value = value

    def _get_value(self):
        return self._value

    def set_value(self, value):
        """Replace the stored signal (used for the part-2 'b' override)."""
        self._value = value
class DirectLink(Link):
    """Resolves to whatever Link the referenced Container currently holds."""

    def __init__(self, op1):
        super().__init__()
        self._op1 = op1

    def _get_value(self):
        # Follow the container indirection only at evaluation time, so a
        # wire may be referenced before its defining rule was parsed.
        return self._op1.content.get_value()
class BinaryLink(Link):
    """Applies a two-operand gate (AND/OR/LSHIFT/RSHIFT) to its inputs."""

    def __init__(self, operator, op1, op2):
        super().__init__(operator)
        self._op1 = op1
        self._op2 = op2

    def _get_value(self):
        left = self._op1.get_value()
        right = self._op2.get_value()
        return BIN_OPERATORS[self._operator](left, right)
class UnaryLink(Link):
    """Applies a single-operand gate (NOT) to its input link."""

    def __init__(self, operator, op1):
        super().__init__(operator)
        self._op1 = op1

    def _get_value(self):
        operand = self._op1.get_value()
        return UN_OPERATORS[self._operator](operand)
# Global wire registry: name -> Container holding that wire's Link.
# defaultdict(Container) auto-creates a placeholder on first reference.
WIRING = defaultdict(Container)
def parse(i):
    """Split one circuit instruction into ``[destination, *operation]``.

    Examples:
        "123 -> x"     -> ["x", "123"]
        "NOT e -> f"   -> ["f", "NOT", "e"]
        "x AND y -> d" -> ["d", "AND", "x", "y"]

    Replaces the previous regex-substring scanning, which repeatedly ran
    uncompiled searches and would mis-parse any wire whose name contained
    an operator mnemonic; direct tokenisation is exact and faster.
    """
    expression, destination = i.split(" -> ", 1)
    tokens = expression.split()
    if len(tokens) == 3:  # binary gate: "<a> <OP> <b>"
        return [destination, tokens[1], tokens[0], tokens[2]]
    if len(tokens) == 2:  # unary gate: "NOT <a>"
        return [destination, tokens[0], tokens[1]]
    return [destination, tokens[0]]  # bare signal: "<value>"
def inner_1(lista):
    """Build the wire graph from instruction strings and evaluate all wires.

    Returns a dict mapping wire name -> resolved signal value.
    """
    global WIRING
    WIRING = defaultdict(Container)
    for rule in (parse(line) for line in lista):
        WIRING[rule[0]].content = Link.create_link(rule[1:])
    return {name: holder.content.get_value() for name, holder in WIRING.items()}
def calculate_1(x: list) -> int:
    """Part 1: wire up the circuit and report the signal on wire 'a'."""
    return inner_1(x)["a"]
def inner_2(lista):
    """Rebuild the circuit, force wire 'b' to 46065, then evaluate all wires."""
    global WIRING
    WIRING = defaultdict(Container)
    for rule in (parse(line) for line in lista):
        WIRING[rule[0]].content = Link.create_link(rule[1:])
    # Part 2 override: feed the part-1 answer back into wire 'b'.
    WIRING["b"].content.set_value(46065)
    return {name: holder.content.get_value() for name, holder in WIRING.items()}
def calculate_2(x: list) -> int:
    """Part 2: re-run the circuit with wire 'b' overridden; return wire 'a'."""
    result = inner_2(x)
    return result["a"]
| [
"collections.defaultdict",
"re.search"
] | [((2304, 2326), 'collections.defaultdict', 'defaultdict', (['Container'], {}), '(Container)\n', (2315, 2326), False, 'from collections import defaultdict\n'), ((3076, 3098), 'collections.defaultdict', 'defaultdict', (['Container'], {}), '(Container)\n', (3087, 3098), False, 'from collections import defaultdict\n'), ((3437, 3459), 'collections.defaultdict', 'defaultdict', (['Container'], {}), '(Container)\n', (3448, 3459), False, 'from collections import defaultdict\n'), ((2355, 2374), 're.search', 're.search', (['""" ->"""', 'i'], {}), "(' ->', i)\n", (2364, 2374), False, 'import re\n'), ((2500, 2526), 're.search', 're.search', (['f""" {opcode}"""', 'x'], {}), "(f' {opcode}', x)\n", (2509, 2526), False, 'import re\n'), ((2993, 3012), 're.search', 're.search', (['""" ->"""', 'i'], {}), "(' ->', i)\n", (3002, 3012), False, 'import re\n'), ((2953, 2972), 're.search', 're.search', (['""" ->"""', 'i'], {}), "(' ->', i)\n", (2962, 2972), False, 'import re\n'), ((2555, 2580), 're.search', 're.search', (['f"""{opcode}"""', 'x'], {}), "(f'{opcode}', x)\n", (2564, 2580), False, 'import re\n'), ((2629, 2648), 're.search', 're.search', (['""" ->"""', 'i'], {}), "(' ->', i)\n", (2638, 2648), False, 'import re\n'), ((2808, 2833), 're.search', 're.search', (['f"""{opcode}"""', 'x'], {}), "(f'{opcode}', x)\n", (2817, 2833), False, 'import re\n'), ((2881, 2900), 're.search', 're.search', (['""" ->"""', 'i'], {}), "(' ->', i)\n", (2890, 2900), False, 'import re\n')] |
from django.shortcuts import render,redirect
from django.http import JsonResponse
from .models import *
from django.views import View
from django.db.models import Q
from django.forms import model_to_dict
from django.contrib.auth import get_user_model
# Create your views here.
from .models import *
from .forms import *
from ajax_datatable.views import AjaxDatatableView
def home_page(request):
    """Render the static landing page."""
    return render(request, 'home.html')
#region ########### Indent ###########
class indent_table(AjaxDatatableView):
    """AJAX datatable listing the indents of one work order.

    The work order is selected via the 'wo_id' request parameter; rows
    render action links (GRN entry, edit, delete checkbox) inline.
    """

    model = indent
    title = 'Indent'
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["recived","asc"]]
    search_values_separator = " "
    column_defs = [
        AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'pk',
            'visible': True,
            'searchable': False,
            'orderable': True,
            'title': 'Indt.',
        }, # pk
        {
            'name': 'material_shape',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Shape',
        }, # material_shape
        {
            'name': 'Description',
            'foreign_field': 'item_description__description',
            'visible': True,
            'searchable': True,
            'placeholder':'description',
            'title':'Item Description',
        }, # Item Description
        {
            'name': 'description',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Description',
        }, # description
        {
            'name': 'weight',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Weight',
        }, # weight
        {
            'name': 'size',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Size',
        }, # size
        {
            'name': 'thickness',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'THK',
        }, # thickness
        {
            'name': 'quantity',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Qut',
        }, # quantity
        {
            'name': 'value',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Value',
            'className': 'currency',
        }, # value
        {
            'name': 'net_value',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Total Value',
            'className': 'currency',
        }, # net_value
        {
            'name': 'recived',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Recived',
            'className':"is_completed",
        }, # recived
        {'name': 'Add GRN', 'visible': True,'searchable': False, 'orderable': False},
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False,
            "title":"DEL"
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        # Restrict rows to the work order passed as the 'wo_id' parameter.
        wo_id=request.REQUEST.get('wo_id')
        queryset = self.model.objects.all()
        queryset = queryset.filter(WO__id=wo_id)
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        get_str = lambda x: x if x else "--"
        row['net_value'] = f''' {obj.net_value()}'''
        row['size'] = get_str(obj.size)
        row['thickness'] = get_str(obj.thickness)
        row['weight'] = f''' {obj.get_weight()}'''
        row['Add GRN'] = f'''<td class="">
                            <a href="/indent/{obj.pk}/grn/form/" target="_blank">
                            <img src="../../../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
                            </a>
                            </td>'''
        # Locked indents get a lock icon that triggers a confirmation dialog
        # instead of a direct edit link.
        if obj.locked:
            row['Edit'] = f'''<td class="border-0">
                        <a data-id="{obj.pk}" onclick="edit_locked('/wo/{obj.WO.pk}/indent/form/{obj.pk}')"><img src="../../../../../static/Images/lock.png" style="width:17px;height:17px" alt="edit"></a>
                    </td>'''
        else:
            row['Edit'] = f'''<td class="border-0">
                        <a href="/wo/{obj.WO.pk}/indent/form/{obj.pk}"><img src="../../../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                    </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                            <input class="form-check-input del_input" type="checkbox"
                            name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    def render_row_details(self, pk, request=None):
        """Render the expandable detail table shown under one indent row."""
        obj = self.model.objects.get(pk=pk)
        fields = {
            "Recived": obj.recived_quantity,
            'Material Type':obj.material_type,
            "Size":obj.size,
            "Thickness":obj.thickness,
            "Width":obj.width,
            "Internal Diameter":obj.internal_diameter,
            'Tax':str(obj.tax)+"%",
            "Comment":obj.comment,
            "Has PO": True if obj.PO else False,
        }
        currency={
            'Discount':obj.discount,
            'Other Expanses':obj.other_expanses,
        }
        # Hide unset attributes so the detail table stays compact.
        fields = {k: v for k, v in fields.items() if v != None}
        fields = {k: v for k, v in fields.items() if v != ""}
        html = '<table class="table-bordered" style="width:60%">'
        if obj.PO:
            html += '<tr><td class="">PO Number</td><td class=""><a href = "/po/table/">%s</a></td></tr>' % (obj.PO)
        for key in fields:
            html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        for key in currency:
            html += '<tr><td class="">%s</td><td class="currency">%s</td></tr>' % (key, currency[key])
        html += '</table>'
        return html
class indent_table_page(View):
    """Page listing all indents of one work order, with bulk delete via POST."""
    template_name = "indent/indent_table.html"

    def get(self, request, wo_id):
        """Render the indent table for work order *wo_id*."""
        wo = work_order.objects.filter(pk=wo_id).first()
        context = {
            "update": [],
            'all_indents': indent.objects.all().filter(WO__id=wo_id),
            'wo': wo,
            'wo_id': wo_id,
        }
        return render(request, self.template_name, context)

    def post(self, request, wo_id):
        """Bulk-delete the indents whose primary keys were posted as pks[]."""
        pks = request.POST.getlist("pks[]")
        # BUG FIX: the old loop did filter(pk=i).first() and could get None
        # (already-deleted id), crashing on .delete().  Fetching pk__in=pks
        # skips missing ids; per-instance delete() keeps any model-level
        # delete behaviour.
        for obj in indent.objects.filter(pk__in=pks):
            obj.delete()
        return JsonResponse({"deleted": True})
class indent_form(View):
    """Create / update form for an indent attached to a work order.

    GET renders an empty form (or a pre-filled one when ``indent_id`` is
    given); POST validates and saves, re-rendering the form with either a
    success flag or the validation errors.
    """
    template_name = "indent/indent_form.html"

    def get(self, request, wo_id=None, indent_id=None):
        """Render the form; pre-populate it when editing an existing indent."""
        self.context = {
            "update": [],
            'all_indents': indent.objects.all(),
            "all_item_description": item_description.objects.all(),
            'wo': work_order.objects.get(pk=wo_id),
        }
        if indent_id:
            # Editing: expose the existing instance to the template.
            self.context['update'] = indent.objects.get(pk=indent_id)
            self.context['success'] = False
        return render(request, self.template_name, self.context)

    def post(self, request, wo_id=None, indent_id=None):
        """Validate the submitted data and create or update the indent."""
        wo = work_order.objects.filter(pk=wo_id).first()
        self.context = {
            "update": [],
            'all_indents': indent.objects.all(),
            "all_item_description": item_description.objects.all(),
            'wo': wo,
        }
        tempdict = self.request.POST.copy()
        # Normalise the currency field: strip thousands separators and the
        # rupee sign before form validation.
        tempdict['value'] = tempdict['value'].replace(',', '').replace("₹", "")
        if indent_id:
            instance = indent.objects.get(pk=indent_id)
            form = add_indent(tempdict, instance=instance)
            if not wo:
                # Fall back to the instance's work order when none was given.
                wo = instance.WO
        else:
            form = add_indent(tempdict)
        if form.is_valid():
            temp = form.save(commit=False)
            temp.WO = wo
            item_desc, _ = item_description.objects.get_or_create(
                description=tempdict.get("item_description")
            )
            temp.item_description = item_desc
            self.context['update'] = form.instance
            self.context['success'] = True
            temp.save()
        else:
            self.context['errors'] = form.errors.as_ul()
        return render(request, self.template_name, self.context)
class all_indents_datatable(AjaxDatatableView):
    """Ajax datatable of not-yet-received indents with expandable detail rows."""
    model = indent
    title = 'Indent'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["WO","asc"]]
    search_values_separator = " "
    column_defs = [
        AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'pk',
            'visible': True,
            'searchable': False,
            'orderable': True,
            'title': 'Indent No.',
        }, # pk
        {
            'name': 'WO',
            'foreign_field': 'WO__wo_number',
            'visible': True,
            'searchable': True,
            'placeholder':'WO'
        }, # WO
        {
            'name': 'PO',
            'foreign_field': 'PO__po_number',
            'visible': True,
            'searchable': True,
            'placeholder':'WO'
        }, # PO
        {
            'name': 'material_shape',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Shape',
        }, # material_shape
        {
            'name': 'Description',
            'foreign_field': 'item_description__description',
            'visible': True,
            'searchable': True,
            'placeholder':'description'
        }, # Description
        {
            'name': 'weight',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Weight',
        }, # weight
        {
            'name': 'size',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Size',
        }, # size
        {
            'name': 'thickness',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'THK',
        }, # thickness
        {
            'name': 'quantity',
            'visible': True,
            'searchable': False,
            'orderable': False,
            'title': 'Qut',
        }, # quantity
        {
            'name': 'net_value',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Net Val',
            'className': 'currency',
        }, # net_value
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
    ]
    def get_initial_queryset(self, request=None):
        """Return the base queryset: indents not yet received."""
        # NOTE(review): wo_id is read from the request but never applied —
        # looks like a per-work-order filter was intended; confirm.
        wo_id=request.REQUEST.get('wo_id')
        queryset = self.model.objects.all()
        queryset = queryset.filter(recived=False)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        get_str = lambda x: x if x else "--"
        row['net_value'] = f''' {obj.net_value()}'''
        row['size'] = get_str(obj.size)
        row['PO'] = get_str(obj.PO.po_number) if obj.PO else "----"
        row['thickness'] = get_str(obj.thickness)
        row['WO'] = f'<a href="/wo/{obj.WO.pk}/indent/table/">{obj.WO}</a>'
        row['weight'] = f''' {obj.get_weight()}'''
        # Locked indents get a lock icon + confirmation dialog instead of a
        # direct edit link.
        if obj.locked:
            row['Edit'] = f'''<td class="border-0">
                    <a data-id="{obj.pk}" onclick="edit_locked('/wo/{obj.WO.pk}/indent/form/{obj.pk}')"><img src="../../../../../static/Images/lock.png" style="width:17px;height:17px" alt="edit"></a>
                </td>'''
        else:
            row['Edit'] = f'''<td class="border-0">
                        <a href="/wo/{obj.WO.pk}/indent/form/{obj.pk}"><img src="../../../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                    </td>'''
        return
    def render_row_details(self, pk, request=None):
        """Build the expanded-row HTML: plain fields plus currency fields."""
        obj = self.model.objects.get(pk=pk)
        # fields = [f for f in self.model._meta.get_fields() if f.concrete]
        fields = {
            "Recived": obj.recived_quantity,
            'Material Type':obj.material_type,
            'Item Description':obj.item_description,
            "Size":obj.size,
            "Thickness":obj.thickness,
            "Width":obj.width,
            "Internal Diameter":obj.internal_diameter,
            'Description':obj.description,
            'Tax':str(obj.tax)+"%",
            "Comment":obj.comment,
            "Has PO": True if obj.PO else False,
        }
        currency={
            'Value':obj.value,
            'Discount':obj.discount,
            'Other Expanses':obj.other_expanses,
        }
        # Drop unset / empty values so the detail table stays compact.
        fields = {k: v for k, v in fields.items() if v != None}
        fields = {k: v for k, v in fields.items() if v != ""}
        # print(student_details.Division_id.Semester_id)
        html = '<table class="table-bordered" style="width:60%">'
        if obj.PO:
            html += '<tr><td class="">PO Number</td><td class=""><a href = "/po/table/">%s</a></td></tr>' % (obj.PO)
        for key in fields:
            html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        for key in currency:
            html += '<tr><td class="">%s</td><td class="currency">%s</td></tr>' % (key, currency[key])
        html += '</table>'
        return html
class all_indent_table(View):
    """Page hosting the 'all indents' datatable (populated client-side)."""
    template_name = 'indent/all_indent.html'

    def get(self, request):
        # No context needed — the table fetches its rows over Ajax.
        return render(request, self.template_name)
#endregion
#region ########### Purchase Order ###########
class PO_datatable(AjaxDatatableView):
    """Ajax datatable of purchase orders with per-row actions and indent details."""
    model = purchase_order
    title = 'Purchase Order'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["po_number","asc"]]
    search_values_separator = " "
    column_defs = [
        AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        },
        {
            'name': 'po_number',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'PO Number',
        }, # po number
        {
            'name': 'Vendor',
            'foreign_field': 'vendor_id__vendor_name',
            'visible': True,
            'searchable': True,
            'placeholder':'Vendor'
        }, # vendor
        {
            'name': 'remaining_quantity',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Remaining Quantity',
        }, # quantity
        {
            'name': 'po_date',
            'visible': True,
            'searchable': False,
            'orderable': True,
            'title': 'PO Date',
        }, # po date
        {
            'name': 'net_value',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Net Value',
            'className': 'currency',
        },# net_value
        {
            'name': 'is_complete',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Completed',
            'className':"is_completed",
        },
        {'name': 'Print', 'visible': True,'searchable': False, 'orderable': False},
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        # Aggregate value and quantity totals over the PO's indents.
        net_value = 0
        total_quantity,remaining_quantity = 0,0
        for indent in obj.indent_set.all():
            net_value += indent.net_value()
            remaining_quantity += indent.get_remaining_quantity()
            total_quantity += indent.quantity
        row['po_date'] = obj.get_date()
        row['net_value'] = f'{round(net_value,2)}'
        row["remaining_quantity"] = f'{int(remaining_quantity)} out of {int(total_quantity)}'
        row['Print'] = f'''<td class="">
                            <a href="../report_input/{obj.pk}" >
                            <img src="../../../static/Images/print.png" style="width:17px;height:17px" alt="print"></a>
                            </td>'''
        row['Edit'] = f'''<td class="">
                            <a href="../form/{obj.pk}" >
                            <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                            </td>'''
        row['Indent List'] = f'''<td class="">
                                <a href="indent/table/{obj.pk}" >
                                <img src="../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
                                </a>
                            </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                                <input class="form-check-input del_input" type="checkbox"
                                name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    def render_row_details(self, pk, request=None):
        """Build the expanded-row HTML listing the PO's indents and balances."""
        obj = self.model.objects.get(pk=pk)
        # fields = [f for f in self.model._meta.get_fields() if f.concrete]
        fields = {
        }
        fields = {k: v for k, v in fields.items() if v != None}
        fields = {k: v for k, v in fields.items() if v != ""}
        indent_list_html = '<table class="table-bordered" style="width:100%">'
        indent_list_html += f'<tr><th class="d-flex justify-content-center">Indent</td><td class="">Balance</td></tr>'
        for indent in obj.indent_set.all():
            # Compact dimension string; "None" axes are stripped out.
            dimentions = f"{indent.size} X {indent.thickness} X {indent.width} X {indent.internal_diameter}".replace(" X None","").replace("None","")
            indent_list_html += f'<tr><td class="d-flex justify-content-left">{indent.pk} -- <a href="/wo/{indent.WO.pk}/indent/table" >{indent.WO}</a> [{indent.item_description} ({dimentions})]</td><td class="">&nbsp;&nbsp;{indent.get_remaining_quantity()} out of {int(indent.quantity)}</td></tr>'
        indent_list_html += '</table>'
        # print(student_details.Division_id.Semester_id)
        html = '<table class="table-bordered" style="width:80%">'
        for key in fields:
            html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        html += '<tr><td class="">Indent List</td><td class="m-0 p-0">%s</td></tr>' % (indent_list_html)
        html += '</table>'
        return html
def update_indent_PO(indent_list, PO):
    """Synchronise which indents are attached to *PO*.

    Indents currently on the PO but absent from ``indent_list`` are
    detached (their PO cleared); newly listed indents are attached.
    """
    current = set(PO.indent_set.all())
    wanted = set(indent.objects.all().filter(pk__in=indent_list))
    for stale in current - wanted:
        stale.PO = None
        stale.save()
    for fresh in wanted - current:
        fresh.PO = PO
        fresh.save()
class PO_form(View):
    """Create/update form for a purchase order and its linked indents."""
    template_name = "po/PO_form.html"
    def get(self, request,po_id=None):
        """Render the PO form; pre-populate it when ``po_id`` is given."""
        self.context= {
            "update":[],
            'all_vendors':vendor_details.objects.all(),
            # Only indents not yet on any PO are offered for selection.
            'all_indent':list(indent.objects.all().filter(PO=None).order_by("WO")),
        }
        if po_id:
            instance = purchase_order.objects.get(pk=po_id)
            my_indents = instance.indent_set.all()
            self.context['update'] = instance
            self.context['indent_list'] = my_indents
            # Also offer the indents already attached to this PO.
            self.context['all_indent'] += list(my_indents)
            self.context['success'] = False
        return render(request,self.template_name,self.context)
    def post(self, request,po_id=None):
        """Validate and save the PO, then resync its indent links."""
        self.context= {
            "update":[],
            'all_vendors':vendor_details.objects.all(),
            'all_indent':list(indent.objects.all().filter(PO=None).order_by("WO")),
        }
        if po_id:
            instance = purchase_order.objects.get(pk=po_id)
            form = update_PO(request.POST,instance=instance)
        else:
            form = add_PO(request.POST)
        if form.is_valid():
            a = form.save()
            print(a)
            # Attach/detach indents to match the submitted selection.
            indent_list = request.POST.getlist('indent_list')
            update_indent_PO(indent_list,a)
            my_indents = a.indent_set.all()
            self.context['update'] = form.instance
            self.context['indent_list'] = my_indents
            self.context['all_indent'] += list(my_indents)
            # De-duplicate the combined offer list.
            self.context['all_indent'] = set(self.context['all_indent'])
            self.context['success'] = True
        else:
            self.context['errors'] = form.errors.as_ul()
            print(form.errors)
        # self.context['update'] = form.instance
        return render(request,self.template_name,self.context)
class PO_table(View):
    """Page listing every purchase order."""
    template_name = "po/PO_table.html"

    def get(self, request):
        ctx = {
            "update": [],
            'all_PO': purchase_order.objects.all(),
        }
        return render(request, self.template_name, ctx)

    def post(self, request):
        # POST is intentionally a no-op for this page.
        pass
def po_print_inputs(request, po_id):
    """Show the form collecting the extra header inputs for a PO print-out."""
    purchase = purchase_order.objects.get(pk=po_id)
    return render(request, "po/po_print_input.html", {'my_po': purchase})
def print_report(request, po_id):
    """Render the printable purchase-order report.

    Aggregates net value, quantity, tax, weight and gross value over every
    indent attached to the PO, and forwards the report header fields taken
    from the query string.
    """
    po = purchase_order.objects.get(pk=po_id)
    po_indents = indent.objects.all().filter(PO=po)

    net = qty = tax = weight = gross = 0
    for row in po_indents:
        net += row.net_value()
        qty += row.quantity
        tax += row.tax_amount()
        weight += row.get_weight()
        gross += row.gross_value()

    context = {
        "my_po": po,
        "all_indents": po_indents,
        "total_net_value": round(net, 2),
        "total_quantity": round(qty, 2),
        "total_tax_value": round(tax, 2),
        "total_weight": round(weight, 3),
        "total_gross_value": round(gross, 2),
        "delivery_day": request.GET['delivery_day'],
        "payment_term": request.GET['payment_term'],
        "freight_charges": request.GET['freight_charges'],
        "com_name": request.GET['com_name'],
    }
    return render(request, "po/report.html", context)
def lock_po_indent(request, po_id):
    """Mark every indent on the given PO as locked; respond with JSON."""
    po = purchase_order.objects.get(pk=po_id)
    # Save each row individually (rather than a bulk update) so any
    # per-instance save behaviour still runs.
    for row in indent.objects.all().filter(PO=po):
        row.locked = True
        row.save()
    return JsonResponse({"done":True})
#endregion
#region ########### Work-Order ###########
def show_stock(request):
    """Redirect to the indent table of the special 'STOCK' work order."""
    # .get() queries directly; the redundant .all() chain was dropped.
    stock_wo = work_order.objects.get(wo_number="STOCK")
    return redirect(f"/wo/{stock_wo.pk}/indent/table/")
class WO_datatable(AjaxDatatableView):
    """Ajax datatable of work orders (excluding STOCK) with row actions."""
    model = work_order
    title = 'work_order'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["is_complete","asc"]]
    search_values_separator = " "
    column_defs = [
        AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        },
        {
            'name': 'wo_number',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'WO Number',
        }, # wo_number
        {
            'name': 'description',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Description',
        }, # description
        {
            'name': 'quantity',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Quantity',
        }, # quantity
        {
            'name': 'net_value',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Net Value',
            'className': 'currency',
        }, # net_value
        {
            'name': 'is_complete',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Completed',
            'className':"is_completed",
        }, # is_complete
        {'name': 'Indent List', 'visible': True,'searchable': False, 'orderable': False},
        {'name': 'Print', 'visible': True,'searchable': False, 'orderable': False},
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        """Return all work orders except the special STOCK one."""
        # po_id=request.REQUEST.get('po_id')
        queryset = self.model.objects.all().exclude(wo_number="STOCK")
        # queryset = queryset.filter(PO__id=po_id)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        row['net_value'] = f''' {obj.net_value()}'''
        row['Print'] = f'''<td class="">
                            <a href="../../wo/print_indents/{obj.pk}/" >
                            <img src="../../../static/Images/print.png" style="width:17px;height:17px" alt="print"></a>
                            </td>'''
        row['Edit'] = f'''<td class="">
                            <a href="../form/{obj.pk}" >
                            <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                            </td>'''
        row['Indent List'] = f'''<td class="">
                                <a href="/wo/{obj.pk}/indent/table/" >
                                <img src="../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
                                </a>
                            </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                                <input class="form-check-input del_input" type="checkbox"
                                name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    def render_row_details(self, pk, request=None):
        """Build the expanded-row HTML: WO fields plus a costing summary."""
        obj = self.model.objects.get(pk=pk)
        # fields = [f for f in self.model._meta.get_fields() if f.concrete]
        fields = {
            'Vendor':obj.vendor_id,
            'Comment':obj.comment,
            'PO Number':obj.incoming_po_number,
            'PO Date':obj.incoming_po_date.strftime("%d-%m-%Y") if obj.incoming_po_date else "-----",
            'Tax':str(obj.tax)+"%",
        }
        # Total cost of all indents on this WO, for the profit/loss rows.
        net_cost = round(sum([indent.net_value() for indent in obj.indent_set.all()]),2)
        currency={
            'Value':obj.value,
            'Discount':obj.discount,
            'Other Expanses':obj.other_expanses,
            "Net Costing": net_cost
        }
        if obj.net_value() >= net_cost:
            currency['Profit'] = obj.net_value() - net_cost
        else:
            currency['Loss'] = net_cost - obj.net_value()
        # Drop unset / empty values so the detail table stays compact.
        fields = {k: v for k, v in fields.items() if v != None}
        fields = {k: v for k, v in fields.items() if v != ""}
        # print(student_details.Division_id.Semester_id)
        html = '<table class="table-bordered" style="width:60%">'
        for key in fields:
            html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        for key in currency:
            if key == "Loss":
                html += '<tr><td class="">%s</td><td class="currency loss">₹ %s</td></tr>' % (key, currency[key])
            elif key == "Profit":
                html += '<tr><td class="">%s</td><td class="currency profit">₹ %s</td></tr>' % (key, currency[key])
            else:
                html += '<tr><td class="">%s</td><td class="currency">₹ %s</td></tr>' % (key, currency[key])
        html += '</table>'
        return html
class WO_table(View):
    """Page hosting the work-order datatable; POST bulk-deletes rows."""
    template_name = "WO/wo_table.html"
    context = {}

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        """Delete the work orders whose primary keys are posted as ``pks[]``."""
        pks = request.POST.getlist("pks[]")
        for pk in pks:
            # filter(...)[0] raised IndexError for a stale/unknown pk;
            # .first() + guard makes the bulk delete tolerant of that.
            obj = work_order.objects.filter(pk=pk).first()
            if obj is not None:
                obj.delete()
        return JsonResponse({"deleted": True})
class WO_form(View):
    """Create/update form for a work order."""
    template_name = "WO/wo_form.html"
    def get(self, request,wo_id=None, *args, **kwargs):
        """Render the WO form; pre-populate it when ``wo_id`` is given."""
        self.context= {
            "update":[],
            'all_vendors':vendor_details.objects.all(),
            "all_item_description":item_description.objects.all(),
        }
        if wo_id:
            instance = work_order.objects.get(pk=wo_id)
            self.context['update'] = instance
            self.context['success'] = False
            return render(request,self.template_name,self.context)
        else:
            self.context['update'] = []
            return render(request,self.template_name,self.context)
    def post(self, request,wo_id=None, *args, **kwargs):
        """Validate and save the submitted work order (create or update)."""
        self.context= {
            "update":[],
            'all_vendors':vendor_details.objects.all(),
            "all_item_description":item_description.objects.all(),
        }
        # Strip thousands separators and the rupee sign so the numeric
        # 'value' field validates.
        tempdict = self.request.POST.copy()
        tempdict['value'] = tempdict['value'].replace(',', '').replace("₹","")
        if wo_id:
            instance = work_order.objects.get(pk=wo_id)
            form = add_WO(tempdict,instance=instance)
        else:
            form = add_WO(tempdict)
        if form.is_valid():
            form.save()
            self.context['update'] = form.instance
            self.context['success'] = True
        else:
            self.context['errors'] = form.errors.as_ul()
            print(form.instance.value)
            print(form.errors)
        # self.context['update'] = form.instance
        return render(request,self.template_name,self.context)
def print_wo_indents(request, wo_id):
    """Render the printable list of indents for one work order.

    Aggregates net value, quantity, tax, weight and gross value over every
    indent on the WO before handing the totals to the template.
    """
    wo = work_order.objects.get(pk=wo_id)
    wo_indents = indent.objects.all().filter(WO=wo)

    net = qty = tax = weight = gross = 0
    for row in wo_indents:
        net += row.net_value()
        qty += row.quantity
        tax += row.tax_amount()
        weight += row.get_weight()
        gross += row.gross_value()

    context = {
        "my_wo": wo,
        "all_indents": wo_indents,
        "total_net_value": round(net, 2),
        "total_quantity": round(qty, 2),
        "total_tax_value": round(tax, 2),
        "total_weight": round(weight, 3),
        "total_gross_value": round(gross, 2),
    }
    return render(request, "wo/print_indents.html", context)
#endregion
#region ########### Vendor ###########
class vendor_datatable(AjaxDatatableView):
    """Ajax datatable of vendors with edit/delete row actions."""
    model = vendor_details
    title = 'vendor'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["vendor_name","asc"]]
    search_values_separator = " "
    column_defs = [
        AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        },
        {
            'name': 'vendor_name',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Name',
        }, # name
        {
            'name': 'contact_person',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Contact Person',
        }, # contact_person
        {
            'name': 'email',
            'visible': True,
            'orderable': True,
            'searchable': True,
            'title': 'Email',
        }, # email
        {
            'name': 'contact_number',
            'visible': True,
            'orderable': False,
            'searchable': False,
            'title': 'Contact Number',
        }, # contact_number
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        """Return all vendors."""
        # po_id=request.REQUEST.get('po_id')
        queryset = self.model.objects.all()
        # queryset = queryset.filter(PO__id=po_id)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        # Empty contact fields display as "--".
        replace_empty = lambda x: x if x else "--"
        row['contact_person'] = replace_empty(row['contact_person'])
        row['contact_number'] = replace_empty(row['contact_number'])
        row['email'] = replace_empty(row['email'])
        row['Edit'] = f'''<td class="">
                            <a href="../form/{obj.pk}" >
                            <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                            </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                                <input class="form-check-input del_input" type="checkbox"
                                name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    def render_row_details(self, pk, request=None):
        """Build the expanded-row HTML with banking and contact details."""
        obj = self.model.objects.get(pk=pk)
        # fields = [f for f in self.model._meta.get_fields() if f.concrete]
        fields = {
            'Address':obj.address,
            'GST No.':obj.gst_no,
            'Name of Bank':obj.name_of_bank,
            'Account Number':obj.acc_no,
            'IFSC Code':obj.ifsc_code,
            'Branch':obj.branch,
            "Comment":obj.comment,
        }
        # Drop unset / empty values so the detail table stays compact.
        fields = {k: v for k, v in fields.items() if v != None}
        fields = {k: v for k, v in fields.items() if v != ""}
        # print(student_details.Division_id.Semester_id)
        html = '<table class="table-bordered" style="width:60%">'
        for key in fields:
            html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        html += '</table>'
        return html
class vendor_table(View):
    """Page hosting the vendor datatable; POST bulk-deletes rows."""
    template_name = "vendor/vendor_table.html"
    context = {}

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        """Delete the vendors whose primary keys are posted as ``pks[]``."""
        pks = request.POST.getlist("pks[]")
        for pk in pks:
            # filter(...)[0] raised IndexError for a stale/unknown pk;
            # .first() + guard makes the bulk delete tolerant of that.
            obj = vendor_details.objects.filter(pk=pk).first()
            if obj is not None:
                obj.delete()
        return JsonResponse({"deleted": True})
class vendor_form(View):
    """Create/update form for a vendor."""
    template_name = "vendor/vendor_form.html"
    # Kept for backward compatibility; each request now rebinds self.context
    # so the class-level dict is no longer mutated (the previous in-place
    # mutation leaked state between requests).
    context = {
        "update": []
    }

    def get(self, request, vendor_id=None, *args, **kwargs):
        """Render the vendor form; pre-populate it when ``vendor_id`` is given."""
        self.context = {"update": []}
        if vendor_id:
            instance = vendor_details.objects.get(pk=vendor_id)
            self.context['update'] = instance
            self.context['success'] = False
            return render(request, self.template_name, self.context)
        else:
            self.context['update'] = []
            return render(request, self.template_name, self.context)

    def post(self, request, vendor_id=None, *args, **kwargs):
        """Validate and save the submitted vendor (create or update)."""
        self.context = {"update": []}
        tempdict = self.request.POST
        # print(tempdict['value'])
        if vendor_id:
            instance = vendor_details.objects.get(pk=vendor_id)
            form = add_vendor(tempdict, instance=instance)
        else:
            form = add_vendor(tempdict)
        if form.is_valid():
            form.save()
            self.context['update'] = form.instance
            self.context['success'] = True
        else:
            self.context['errors'] = form.errors.as_ul()
            # NOTE(review): the old debug print accessed form.instance.value,
            # a field name copied from WO_form that vendor_details may not
            # define — dropped so a failed validation can't crash here.
            print(form.errors)
        # self.context['update'] = form.instance
        return render(request, self.template_name, self.context)
#endregion
#region ########### GRN ###########
class grn_datatable(AjaxDatatableView):
    """Ajax datatable of goods-receipt notes (GRNs) with edit/delete actions."""
    model = grn
    title = 'grn'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["grn_date","asc"]]
    search_values_separator = " "
    column_defs = [
        # AjaxDatatableView.render_row_tools_column_def(),
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        },
        {
            'name': 'Vendor',
            'foreign_field': 'vendor_id__vendor_name',
            'visible': True,
            'searchable': True,
            'placeholder':'Vendor'
        }, # vendor
        {
            'name': 'invoice_no',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Invoice Number',
        }, # invoice
        {
            'name': "indent_id",
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Indent',
        }, # indent
        {
            'name': 'quantity',
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'Quantity',
        }, # email
        {
            'name': "grn_date",
            'visible': True,
            'orderable': True,
            'searchable': False,
            'title': 'GRN Date',
        }, # grn_date
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        """Return all GRNs."""
        # po_id=request.REQUEST.get('po_id')
        queryset = self.model.objects.all()
        # queryset = queryset.filter(PO__id=po_id)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        # Optional foreign keys and dates render as dashes when unset.
        row['indent_id'] = str(obj.indent_id) if obj.indent_id else "---------"
        row['grn_date'] = obj.grn_date.strftime("%d-%m-%Y") if obj.grn_date else "-----"
        row['Vendor'] = obj.vendor_id.vendor_name if obj.vendor_id else "-------"
        row['Edit'] = f'''<td class="">
                            <a href="../form/{obj.pk}" >
                            <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                            </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                                <input class="form-check-input del_input" type="checkbox"
                                name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    def render_row_details(self, pk, request=None):
        """Expanded-row details are currently disabled; returns empty HTML."""
        # obj = self.model.objects.get(pk=pk)
        # # fields = [f for f in self.model._meta.get_fields() if f.concrete]
        # fields = {
        #     'Address':obj.address,
        #     'GST No.':obj.gst_no,
        #     'Name of Bank':obj.name_of_bank,
        #     'Account Number':obj.acc_no,
        #     'IFSC Code':obj.ifsc_code,
        #     'Branch':obj.branch,
        #     "Comment":obj.comment,
        # }
        # fields = {k: v for k, v in fields.items() if v != None}
        # fields = {k: v for k, v in fields.items() if v != ""}
        # # print(student_details.Division_id.Semester_id)
        # html = '<table class="table-bordered" style="width:60%">'
        # for key in fields:
        #     html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
        # html += '</table>'
        html = ""
        return html
class grn_table(View):
    """Page hosting the GRN datatable; POST bulk-deletes rows."""
    template_name = "grn/grn_table.html"
    context = {}

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        """Delete the GRNs whose primary keys are posted as ``pks[]``."""
        pks = request.POST.getlist("pks[]")
        for pk in pks:
            # filter(...)[0] raised IndexError for a stale/unknown pk;
            # .first() + guard makes the bulk delete tolerant of that.
            obj = grn.objects.filter(pk=pk).first()
            if obj is not None:
                obj.delete()
        return JsonResponse({"deleted": True})
class grn_form(View):
    """Create/update form for a GRN, reconciling received quantities.

    Receiving more than an indent's remaining quantity spills the excess
    into a matching indent on the special 'STOCK' work order.
    """
    template_name = "grn/grn_form.html"
    def get(self, request,indent_id=None,grn_id=None, *args, **kwargs):
        """Render the GRN form, optionally pre-filled from a GRN or an indent."""
        self.context= {
            "update":[],
            "all_vendors":vendor_details.objects.all(),
        }
        if grn_id:
            # Editing: offer unreceived indents plus the GRN's own indent.
            instance = grn.objects.get(pk=grn_id)
            self.context["all_indent"]=indent.objects.all().filter(Q(recived=False)|Q(id=instance.indent_id.id))
            self.context['update'] = instance
            self.context['success'] = False
            return render(request,self.template_name,self.context)
        else:
            self.context['update'] = []
            self.context["all_indent"]=indent.objects.all().filter(Q(recived=False))
            if indent_id:
                # Pre-fill a new GRN from the chosen indent.
                my_indent = indent.objects.filter(id=indent_id).first()
                self.context['update'] = {"indent_id":my_indent,"quantity":my_indent.get_weight(),"tax":my_indent.tax}
                self.context["all_indent"]=indent.objects.all().filter(Q(recived=False)|Q(id=my_indent.id))
            return render(request,self.template_name,self.context)
    def post(self, request,grn_id=None, *args, **kwargs):
        """Validate the GRN, then update indent/stock received quantities."""
        self.context= {
            "update":[],
            "all_indent":indent.objects.all().filter(recived=False)
        }
        # Strip thousands separators and the rupee sign so the numeric
        # 'value' field validates.
        tempdict = self.request.POST.copy()
        tempdict['value'] = tempdict['value'].replace(',', '').replace("₹","")
        if grn_id:
            instance = grn.objects.get(pk=grn_id)
            form = add_grn(tempdict,instance=instance)
        else:
            form = add_grn(tempdict)
        if form.is_valid():
            # NOTE: indent_id here is the related indent OBJECT, not an int pk.
            new_grn = form.instance
            indent_id = new_grn.indent_id
            print(new_grn.indent_id)
            # indent_id = form.indent_id
            if grn_id:
                # Editing an existing GRN: first roll back the quantity
                # effects of the old GRN before applying the new one.
                old_grn = grn.objects.get(pk=grn_id)
                stock_wo = work_order.objects.all().get(wo_number="STOCK")
                kwargs = model_to_dict(indent_id, exclude=['id',"order_ptr","WO","PO"])
                kwargs["item_description_id"] = kwargs.pop("item_description")
                # The STOCK indent matching this item/shape, if any.
                stock_indent = indent.objects.filter(
                    item_description_id=kwargs["item_description_id"],
                    material_shape=kwargs["material_shape"],
                    WO = stock_wo
                ).first()
                old_val = old_grn.quantity
                new_val = new_grn.quantity
                # NOTE(review): stock_indent can be None here, in which case
                # the attribute access below would raise — confirm whether an
                # edited GRN always has a matching STOCK indent.
                if stock_indent.quantity:
                    print(stock_indent.quantity)
                    if stock_indent.quantity >= old_val:
                        # Stock fully absorbs the rollback.
                        stock_indent.quantity -= old_val
                        stock_indent.save()
                    else:
                        # Stock partially absorbs it; the rest comes off the
                        # indent's received quantity and the stock row goes.
                        delta = old_val - stock_indent.quantity
                        stock_indent.quantity = 0
                        print(delta)
                        indent_id.recived_quantity -= delta
                        stock_indent.delete()
                        indent_id.save()
                else:
                    # No stock to roll back against: adjust the indent only,
                    # clamping at zero.
                    if indent_id.recived_quantity >= old_val:
                        indent_id.recived_quantity -= old_val
                    else:
                        indent_id.recived_quantity = 0
                    indent_id.save()
                # delta = new_grn.
                # if indent_id.recived_quantity > instance.quantity :
                #     # if a less quantity was received then the indent required
                #     indent_id.recived_quantity -= instance.quantity
                # else:
                #     # if more quantity was received then the indent required
                #     if stock_indent:
                #         print("here at stock_indent")
                #         print(f"stock_indent.quantity - {stock_indent.quantity}")
                #         print(f'indent_id.recived_quantity - {indent_id.recived_quantity}')
                #         print(f"instance.quantity - {instance.quantity}")
                #         stock_indent.quantity -= instance.quantity - indent_id.recived_quantity
                #         print(f"stock_indent.quantity - {stock_indent.quantity}")
                #         stock_indent.save()
                #     indent_id.recived_quantity = 0
                #     indent_id.save()
            if new_grn.quantity > indent_id.get_remaining_quantity():
                # if the quantity received is more then the indent
                # then create an indent and save it in the STOCK wo
                stock_wo = work_order.objects.all().get(wo_number="STOCK")
                kwargs = model_to_dict(indent_id, exclude=['id',"order_ptr","WO","PO"])
                kwargs["item_description_id"] = kwargs.pop("item_description")
                stock_indent = indent.objects.filter(
                    item_description_id=kwargs["item_description_id"],
                    material_shape=kwargs["material_shape"],
                    WO = stock_wo
                ).first()
                extra_quantity = new_grn.quantity - indent_id.get_remaining_quantity()
                if stock_indent:
                    # if same indent is in stock
                    print("same found",stock_indent)
                    stock_indent.quantity += extra_quantity
                    stock_indent.save()
                else:
                    # if same indent is not in stock
                    new_indent = indent(**kwargs)
                    new_indent.WO = stock_wo
                    new_indent.quantity = extra_quantity
                    new_indent.save()
                    print(f"New Stock indent saved with {extra_quantity} quantity")
                # The original indent is now fully received.
                indent_id.recived_quantity += indent_id.get_remaining_quantity()
                indent_id.save()
            else:
                print("Here in else- ",new_grn.quantity," ",indent_id.recived_quantity)
                indent_id.recived_quantity += new_grn.quantity
                indent_id.save()
            self.context['save_message'] = form.instance.get_save_messages(tempdict['quantity'])
            form.save()
            print(tempdict['quantity'])
            self.context['update'] = form.instance
            self.context['success'] = True
        else:
            self.context['errors'] = form.errors.as_ul()
            print(form.instance.value)
            print(form.errors)
        # return redirect("/grn/table")
        return render(request,self.template_name,self.context)
#endregion
#region ########### Assembly ###########
class assembly_datatable(AjaxDatatableView):
    """Ajax datatable of assemblies with item-list/edit/delete row actions."""
    model = assembly
    title = 'assembly'
    # Page-size choices; -1 / 'all' shows every row.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["name","asc"]]
    search_values_separator = " "
    column_defs = [
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        },
        {
            'name': 'name',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Name',
        }, # Name
        {
            'name': 'estimate_value',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'className':"currency",
            'title': 'Estimate',
        }, # estimate
        {
            'name': 'description',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Description',
        }, # description
        {'name': 'Item List', 'visible': True,'searchable': False, 'orderable': False},
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        """Return all assemblies."""
        # po_id=request.REQUEST.get('po_id')
        queryset = self.model.objects.all()
        # queryset = queryset.filter(PO__id=po_id)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        # 'row' is a dictionary representing the current row, and 'obj' is the current object.
        row['Edit'] = f'''<td class="">
                            <a href="../form/{obj.pk}" >
                            <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                            </td>'''
        row['Item List'] = f'''<td class="">
                                <a href="/assembly/form/{obj.id}/" >
                                <img src="../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
                                </a>
                            </td>'''
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                                <input class="form-check-input del_input" type="checkbox"
                                name="del" value="{obj.pk}" input_name="{obj}">
                            </div>'''
        return
    # def render_row_details(self, pk, request=None):
    #     html = ""
    #     obj = self.model.objects.get(pk=pk)
    #     item_list_html = '<table class="table-bordered">'
    #     item_list_html += f'<tr><th class="">Sr.no</td><td class="">Items</td><td class="">Estimated Value</td></tr>'
    #     for i,item in enumerate(obj.items.all()):
    #         item_list_html += f'<tr><td class="d-flex justify-content-left">{i}</td><td class="">{item}</td><td class="currency">{item.get_estimated_value()}</td></tr>'
    #     item_list_html += '</table>'
    #     html += item_list_html
    #     # return html
class assembly_table(View):
    """Page hosting the assembly datatable; POST bulk-deletes rows."""
    template_name = "assembly/assembly_table.html"
    context = {}

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        """Delete the assemblies whose primary keys are posted as ``pks[]``."""
        pks = request.POST.getlist("pks[]")
        for pk in pks:
            # filter(...)[0] raised IndexError for a stale/unknown pk;
            # .first() + guard makes the bulk delete tolerant of that.
            obj = assembly.objects.filter(pk=pk).first()
            if obj is not None:
                obj.delete()
        return JsonResponse({"deleted": True})
class assembly_form(View):
    """Create/update form for an assembly, including its per-item breakdown."""
    template_name = "assembly/assembly_form.html"
    context = {}

    def get_context(self, request):
        """Base template context: no instance selected, plus the pickable items."""
        return {
            "update": [],
            "all_items": item_description.objects.all().exclude(estimated_value=0),
        }

    def get(self, request, assembly_id=None):
        self.context = self.get_context(request)
        if not assembly_id:
            # Creation mode: nothing to prefill.
            self.context['update'] = []
            return render(request, self.template_name, self.context)
        # Update mode: prefill the form from the stored assembly.
        instance = assembly.objects.get(pk=assembly_id)
        self.context['update'] = instance
        self.context['estimate_value'] = instance.estimate_value
        rows = []
        for item in instance.items.all():
            # Per-item numbers are stored as strings in item_json, keyed by pk.
            spec = instance.item_json[f'{item.pk}']
            rows.append({
                'pk': item.id,
                'item_name': item.description,
                'estimated_value': item.estimated_value,
                'quantity': float(spec['quantity']),
                'size': float(spec['size']),
                'thickness': float(spec['thickness']),
                'width': float(spec['width']),
                'internal_diameter': float(spec['internal_diameter']),
            })
        self.context['items_tr'] = rows
        self.context['success'] = False
        return render(request, self.template_name, self.context)

    def post(self, request, assembly_id=None):
        self.context = self.get_context(request)
        if assembly_id:
            instance = assembly.objects.get(pk=assembly_id)
            form = add_assembly(request.POST, instance=instance)
        else:
            form = add_assembly(request.POST)
        if form.is_valid():
            if assembly_id:
                form.save(commit=False).save()
            else:
                form.save()
        else:
            self.context['errors'] = form.errors.as_ul()
        return render(request, self.template_name, self.context)
#endregion
#region ########### Plan ###########
class plan_datatable(AjaxDatatableView):
    # Server-side datatable configuration for the `plan` model,
    # rendered client-side by ajax_datatable.
    model = plan
    title = 'plan'
    # First entry (-1 / 'all') lets the user show every row at once.
    length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
    initial_order = [["name","asc"]]
    search_values_separator = " "
    # Column layout. 'Edit' and 'Delete' are synthetic columns filled by
    # customize_row() below, not model fields.
    column_defs = [
        {
            'name': 'id',
            'visible': False,
            'searchable': False,
        }, # pk
        {
            'name': 'name',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Name',
        }, # Name
        {
            'name': 'estimate_value',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'className':"currency",
            'title': 'Estimate',
        }, # estimate
        {
            'name': 'description',
            'visible': True,
            'searchable': True,
            'orderable': True,
            'title': 'Description',
        }, # description
        {'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
        {
            'name':'Delete',
            'visible': True,
            'searchable': False,
            'orderable': False
        }, # delete field
    ]
    def get_initial_queryset(self, request=None):
        """Return the base queryset (all plans); no request-based filtering."""
        # po_id=request.REQUEST.get('po_id')
        queryset = self.model.objects.all()
        # queryset = queryset.filter(PO__id=po_id)
        # queryset = self.model.objects.all()
        return queryset
    def customize_row(self, row, obj):
        """Fill the synthetic 'Edit' and 'Delete' cells with HTML for *obj*."""
        row['Edit'] = f'''<td class="">
                        <a href="../form/{obj.pk}" >
                        <img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
                    </td>'''
        # Checkbox presumably collected client-side for bulk delete — confirm against table JS.
        row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
                            <input class="form-check-input del_input" type="checkbox"
                            name="del" value="{obj.pk}" input_name="{obj}">
                        </div>'''
        return
class plan_table(View):
    """Page plus bulk-delete endpoint for the plan listing table."""
    template_name = "plan/plan_table.html"
    context = {}

    def get(self, request, *args, **kwargs):
        """Render the plan table page (rows are fetched client-side)."""
        return render(request, self.template_name, self.context)

    def post(self, request, *args, **kwargs):
        """Delete every plan whose pk arrives in the 'pks[]' POST list."""
        for pk in request.POST.getlist("pks[]"):
            plan.objects.filter(pk=pk)[0].delete()
        return JsonResponse({"deleted": True})
class plan_form(View):
    """Create/update form for a plan and its assembly breakdown."""
    template_name = "plan/plan_form.html"
    context = {}

    def get_context(self, request):
        """Base template context: no plan selected, plus every pickable assembly."""
        return {
            "update": [],
            "all_assemblies": assembly.objects.all(),
        }

    def get(self, request, plan_id=None):
        self.context = self.get_context(request)
        if not plan_id:
            # Creation mode: nothing to prefill.
            self.context['update'] = []
            return render(request, self.template_name, self.context)
        # Update mode: prefill from the stored plan.
        instance = plan.objects.get(pk=plan_id)
        self.context['update'] = instance
        self.context['estimate_value'] = instance.estimate_value
        rows = []
        for member in instance.assemblies.all():
            # Per-assembly quantities are stored as strings in assembly_json, keyed by pk.
            spec = instance.assembly_json[f'{member.pk}']
            rows.append({
                'pk': member.id,
                'item_name': member.name,
                'estimated_value': member.estimate_value,
                'quantity': float(spec['quantity']),
            })
        self.context['items_tr'] = rows
        self.context['success'] = False
        return render(request, self.template_name, self.context)

    def post(self, request, assembly_id=None):
        # NOTE(review): appears to be a stub — it re-renders whatever is in
        # self.context without processing the submitted form; confirm intent.
        return render(request, self.template_name, self.context)
#endregion
| [
"django.shortcuts.render",
"django.http.JsonResponse",
"ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def",
"django.shortcuts.redirect",
"django.db.models.Q",
"django.forms.model_to_dict"
] | [((404, 432), 'django.shortcuts.render', 'render', (['request', '"""home.html"""'], {}), "(request, 'home.html')\n", (410, 432), False, 'from django.shortcuts import render, redirect\n'), ((18501, 18551), 'django.shortcuts.render', 'render', (['request', '"""po/po_print_input.html"""', 'context'], {}), "(request, 'po/po_print_input.html', context)\n", (18507, 18551), False, 'from django.shortcuts import render, redirect\n'), ((19709, 19751), 'django.shortcuts.render', 'render', (['request', '"""po/report.html"""', 'context'], {}), "(request, 'po/report.html', context)\n", (19715, 19751), False, 'from django.shortcuts import render, redirect\n'), ((19995, 20023), 'django.http.JsonResponse', 'JsonResponse', (["{'done': True}"], {}), "({'done': True})\n", (20007, 20023), False, 'from django.http import JsonResponse\n'), ((20173, 20217), 'django.shortcuts.redirect', 'redirect', (['f"""/wo/{stock_wo.pk}/indent/table/"""'], {}), "(f'/wo/{stock_wo.pk}/indent/table/')\n", (20181, 20217), False, 'from django.shortcuts import render, redirect\n'), ((26901, 26950), 'django.shortcuts.render', 'render', (['request', '"""wo/print_indents.html"""', 'context'], {}), "(request, 'wo/print_indents.html', context)\n", (26907, 26950), False, 'from django.shortcuts import render, redirect\n'), ((688, 735), 'ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def', 'AjaxDatatableView.render_row_tools_column_def', ([], {}), '()\n', (733, 735), False, 'from ajax_datatable.views import AjaxDatatableView\n'), ((5661, 5705), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'context'], {}), '(request, self.template_name, context)\n', (5667, 5705), False, 'from django.shortcuts import render, redirect\n'), ((5896, 5927), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (5908, 5927), False, 'from django.http import JsonResponse\n'), ((7642, 7691), 'django.shortcuts.render', 'render', (['request', 
'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (7648, 7691), False, 'from django.shortcuts import render, redirect\n'), ((7910, 7957), 'ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def', 'AjaxDatatableView.render_row_tools_column_def', ([], {}), '()\n', (7955, 7957), False, 'from ajax_datatable.views import AjaxDatatableView\n'), ((11972, 12007), 'django.shortcuts.render', 'render', (['request', 'self.template_name'], {}), '(request, self.template_name)\n', (11978, 12007), False, 'from django.shortcuts import render, redirect\n'), ((12299, 12346), 'ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def', 'AjaxDatatableView.render_row_tools_column_def', ([], {}), '()\n', (12344, 12346), False, 'from ajax_datatable.views import AjaxDatatableView\n'), ((17150, 17199), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (17156, 17199), False, 'from django.shortcuts import render, redirect\n'), ((18091, 18140), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (18097, 18140), False, 'from django.shortcuts import render, redirect\n'), ((18307, 18351), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'context'], {}), '(request, self.template_name, context)\n', (18313, 18351), False, 'from django.shortcuts import render, redirect\n'), ((20446, 20493), 'ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def', 'AjaxDatatableView.render_row_tools_column_def', ([], {}), '()\n', (20491, 20493), False, 'from ajax_datatable.views import AjaxDatatableView\n'), ((24504, 24553), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (24510, 24553), False, 'from django.shortcuts import render, redirect\n'), 
((24719, 24750), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (24731, 24750), False, 'from django.http import JsonResponse\n'), ((26011, 26060), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (26017, 26060), False, 'from django.shortcuts import render, redirect\n'), ((27233, 27280), 'ajax_datatable.views.AjaxDatatableView.render_row_tools_column_def', 'AjaxDatatableView.render_row_tools_column_def', ([], {}), '()\n', (27278, 27280), False, 'from ajax_datatable.views import AjaxDatatableView\n'), ((29937, 29986), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (29943, 29986), False, 'from django.shortcuts import render, redirect\n'), ((30172, 30203), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (30184, 30203), False, 'from django.http import JsonResponse\n'), ((31210, 31259), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (31216, 31259), False, 'from django.shortcuts import render, redirect\n'), ((34412, 34461), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (34418, 34461), False, 'from django.shortcuts import render, redirect\n'), ((34636, 34667), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (34648, 34667), False, 'from django.http import JsonResponse\n'), ((39649, 39698), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (39655, 39698), False, 'from django.shortcuts import render, redirect\n'), ((42382, 42431), 'django.shortcuts.render', 
'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (42388, 42431), False, 'from django.shortcuts import render, redirect\n'), ((42611, 42642), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (42623, 42642), False, 'from django.http import JsonResponse\n'), ((44327, 44376), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (44333, 44376), False, 'from django.shortcuts import render, redirect\n'), ((46111, 46160), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (46117, 46160), False, 'from django.shortcuts import render, redirect\n'), ((46320, 46351), 'django.http.JsonResponse', 'JsonResponse', (["{'deleted': True}"], {}), "({'deleted': True})\n", (46332, 46351), False, 'from django.http import JsonResponse\n'), ((47315, 47364), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (47321, 47364), False, 'from django.shortcuts import render, redirect\n'), ((6407, 6456), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (6413, 6456), False, 'from django.shortcuts import render, redirect\n'), ((6551, 6600), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (6557, 6600), False, 'from django.shortcuts import render, redirect\n'), ((25145, 25194), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (25151, 25194), False, 'from django.shortcuts import render, redirect\n'), ((25242, 25291), 'django.shortcuts.render', 
'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (25248, 25291), False, 'from django.shortcuts import render, redirect\n'), ((30512, 30561), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (30518, 30561), False, 'from django.shortcuts import render, redirect\n'), ((30609, 30658), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (30615, 30658), False, 'from django.shortcuts import render, redirect\n'), ((35123, 35172), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (35129, 35172), False, 'from django.shortcuts import render, redirect\n'), ((35576, 35625), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (35582, 35625), False, 'from django.shortcuts import render, redirect\n'), ((43622, 43671), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (43628, 43671), False, 'from django.shortcuts import render, redirect\n'), ((43719, 43768), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (43725, 43768), False, 'from django.shortcuts import render, redirect\n'), ((47117, 47166), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', (47123, 47166), False, 'from django.shortcuts import render, redirect\n'), ((47214, 47263), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'self.context'], {}), '(request, self.template_name, self.context)\n', 
(47220, 47263), False, 'from django.shortcuts import render, redirect\n'), ((35268, 35284), 'django.db.models.Q', 'Q', ([], {'recived': '(False)'}), '(recived=False)\n', (35269, 35284), False, 'from django.db.models import Q\n'), ((36301, 36366), 'django.forms.model_to_dict', 'model_to_dict', (['indent_id'], {'exclude': "['id', 'order_ptr', 'WO', 'PO']"}), "(indent_id, exclude=['id', 'order_ptr', 'WO', 'PO'])\n", (36314, 36366), False, 'from django.forms import model_to_dict\n'), ((38244, 38309), 'django.forms.model_to_dict', 'model_to_dict', (['indent_id'], {'exclude': "['id', 'order_ptr', 'WO', 'PO']"}), "(indent_id, exclude=['id', 'order_ptr', 'WO', 'PO'])\n", (38257, 38309), False, 'from django.forms import model_to_dict\n'), ((34995, 35011), 'django.db.models.Q', 'Q', ([], {'recived': '(False)'}), '(recived=False)\n', (34996, 35011), False, 'from django.db.models import Q\n'), ((35012, 35039), 'django.db.models.Q', 'Q', ([], {'id': 'instance.indent_id.id'}), '(id=instance.indent_id.id)\n', (35013, 35039), False, 'from django.db.models import Q\n'), ((35529, 35545), 'django.db.models.Q', 'Q', ([], {'recived': '(False)'}), '(recived=False)\n', (35530, 35545), False, 'from django.db.models import Q\n'), ((35546, 35564), 'django.db.models.Q', 'Q', ([], {'id': 'my_indent.id'}), '(id=my_indent.id)\n', (35547, 35564), False, 'from django.db.models import Q\n')] |
'''
@inproceedings{golestaneh2017spatially,
title={Spatially-Varying Blur Detection Based on Multiscale Fused and Sorted Transform Coefficients of Gradient Magnitudes},
author={<NAME> and Karam, <NAME>},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
year={2017}
}
'''
import cv2
import numpy as np
import os
from skimage.filters.rank import entropy
from skimage.morphology import square
import copy
import time
class BlurDetector(object):
    """Spatially-varying blur detector.

    Implements Golestaneh & Karam (CVPR 2017): high-frequency DCT
    coefficients of the gradient magnitude are collected over a pyramid of
    patch scales, fused and sorted, max-pooled, entropy-weighted and finally
    smoothed with an edge-aware recursive domain-transform (RF) filter.

    Fixes vs. the original implementation:
    * ``np.int`` (removed in NumPy >= 1.24; it was a plain alias of the
      builtin ``int``) replaced by ``int`` — behavior-identical.
    * per-call state is reset at the top of :meth:`detectBlur`, so one
      detector instance can now process several images (previously a second
      call raised ``TypeError`` from ``__createDCT_Matrices``).
    * dead code removed (unused normalization in :meth:`computeScore`,
      unused min/max tracking in the pooling loop).
    """

    def __init__(self, downsampling_factor=4, num_scales=4, scale_start=3,
                 entropy_filt_kernel_sze=7, sigma_s_RF_filter=15,
                 sigma_r_RF_filter=0.25, num_iterations_RF_filter=3):
        self.downsampling_factor = downsampling_factor
        self.num_scales = num_scales
        self.scale_start = scale_start
        self.entropy_filt_kernel_sze = entropy_filt_kernel_sze
        self.sigma_s_RF_filter = sigma_s_RF_filter
        self.sigma_r_RF_filter = sigma_r_RF_filter
        self.num_iterations_RF_filter = num_iterations_RF_filter
        self.scales = self.createScalePyramid()
        self.__freqBands = []
        self.__dct_matrices = []
        self.freq_index = []

    def disp_progress(self, i, rows, old_progress):
        """Print a coarse 10%-step progress bar; return the new progress value.

        Side effect: clears the terminal whenever the rounded percentage
        crosses a new 10% step.
        """
        progress_dict = {10:'[|                    ] 10%',
                         20:'[| |                  ] 20%',
                         30:'[| | |                ] 30%',
                         40:'[| | | |              ] 40%',
                         50:'[| | | | |            ] 50%',
                         60:'[| | | | | |          ] 60%',
                         70:'[| | | | | | |        ] 70%',
                         80:'[| | | | | | | |      ] 80%',
                         90:'[| | | | | | | | |    ] 90%',
                         100:'[| | | | | | | | | |] 100%'}
        i_done = i / rows * 100
        p_done = round(i_done / 10) * 10
        if p_done != old_progress:
            os.system('cls' if os.name == 'nt' else 'clear')
            print(progress_dict[p_done])
            old_progress = p_done
        return p_done

    def createScalePyramid(self):
        """Return the patch sizes 2**scale_start - 1, ... e.g. 7, 15, 31, 63."""
        return [(2 ** (self.scale_start + i)) - 1 for i in range(self.num_scales)]

    def computeImageGradientMagnitude(self, img):
        """Return the Sobel gradient magnitude of ``img`` (reflect-padded)."""
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, borderType=cv2.BORDER_REFLECT)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, borderType=cv2.BORDER_REFLECT)
        return np.sqrt(sobelx ** 2.0 + sobely ** 2.0)

    def __computeFrequencyBands(self):
        """Build, per scale, a label matrix over DCT positions.

        Labels: 0 = high-frequency band, 1/2 = lower bands, 3 = DC term.
        Only the 0-labelled (high-frequency) positions are used later.
        """
        for current_scale in self.scales:
            matrixInds = np.zeros((current_scale, current_scale))
            for i in range(current_scale):
                matrixInds[0: max(0, int(((current_scale - 1) / 2) - i + 1)), i] = 1
            for i in range(current_scale):
                if (current_scale - ((current_scale - 1) / 2) - i) <= 0:
                    matrixInds[0:current_scale - i - 1, i] = 2
                else:
                    matrixInds[int(current_scale - ((current_scale - 1) / 2) - i - 1):
                               int(current_scale - i - 1), i] = 2
            matrixInds[0, 0] = 3  # DC coefficient
            self.__freqBands.append(matrixInds)

    def __dctmtx(self, n):
        """Return the n x n orthonormal DCT-II transform matrix."""
        mesh_cols, mesh_rows = np.meshgrid(np.linspace(0, n - 1, n), np.linspace(0, n - 1, n))
        dct_matrix = np.sqrt(2 / n) * np.cos(np.pi * np.multiply(2 * mesh_cols + 1, mesh_rows) / (2 * n))
        dct_matrix[0, :] = dct_matrix[0, :] / np.sqrt(2)
        return dct_matrix

    def __createDCT_Matrices(self):
        """Precompute one DCT matrix per scale (saves work in the inner loop)."""
        if len(self.__dct_matrices) > 0:
            raise TypeError("dct matrices are already defined. Redefinition is not allowed.")
        for curr_scale in self.scales:
            self.__dct_matrices.append(self.__dctmtx(curr_scale))

    def __getDCTCoefficients(self, img_blk, ind):
        """Return D @ block @ D.T using the precomputed DCT matrix for scale ``ind``."""
        D = self.__dct_matrices[ind]
        return np.matmul(np.matmul(D, img_blk), np.transpose(D))

    def entropyFilt(self, img):
        """Local entropy of ``img`` over a square neighbourhood (skimage)."""
        return entropy(img, square(self.entropy_filt_kernel_sze))

    def computeScore(self, weighted_local_entropy, T_max):
        """Return the global sharpness score: the median of the weighted
        local-entropy map.

        ``T_max`` is unused but kept for interface compatibility.
        """
        return np.median(weighted_local_entropy)

    def TransformedDomainRecursiveFilter_Horizontal(self, I, D, sigma):
        """One horizontal pass (left->right, then right->left) of the RF filter."""
        # Feedback coefficient (appendix of the domain-transform paper).
        a = np.exp(-np.sqrt(2) / sigma)
        F = copy.deepcopy(I)
        V = a ** D
        rows, cols = np.shape(I)
        # Left --> right pass.
        for i in range(1, cols):
            F[:, i] = F[:, i] + np.multiply(V[:, i], (F[:, i - 1] - F[:, i]))
        # Right --> left pass.
        for i in range(cols - 2, 1, -1):
            F[:, i] = F[:, i] + np.multiply(V[:, i + 1], (F[:, i + 1] - F[:, i]))
        return F

    def RF(self, img, joint_img):
        """Edge-aware recursive domain-transform filter of ``img`` guided by ``joint_img``.

        An empty ``joint_img`` means self-guided filtering.
        """
        if len(joint_img) == 0:
            joint_img = img
        joint_img = joint_img.astype('float64')
        joint_img = joint_img / 255
        if len(np.shape(joint_img)) == 2:
            cols, rows = np.shape(joint_img)
            channels = 1
        elif len(np.shape(joint_img)) == 3:
            cols, rows, channels = np.shape(joint_img)
        # Horizontal / vertical partial derivatives via finite differences.
        dIcdx = np.diff(joint_img, n=1, axis=1)
        dIcdy = np.diff(joint_img, n=1, axis=0)
        dIdx = np.zeros((cols, rows))
        dIdy = np.zeros((cols, rows))
        # l1-norm distance of neighbouring pixels.
        dIdx[:, 1::] = abs(dIcdx)
        dIdy[1::, :] = abs(dIcdy)
        dHdx = (1 + self.sigma_s_RF_filter / self.sigma_r_RF_filter * dIdx)
        dVdy = (1 + self.sigma_s_RF_filter / self.sigma_r_RF_filter * dIdy)
        # Vertical filtering is done as a horizontal pass on the transpose.
        dVdy = np.transpose(dVdy)
        N = self.num_iterations_RF_filter
        F = copy.deepcopy(img)
        for i in range(self.num_iterations_RF_filter):
            # Sigma for this iteration (Eq. 14 of the domain-transform paper).
            sigma_H_i = self.sigma_s_RF_filter * np.sqrt(3) * 2 ** (N - (i + 1)) / np.sqrt(4 ** N - 1)
            F = self.TransformedDomainRecursiveFilter_Horizontal(F, dHdx, sigma_H_i)
            F = np.transpose(F)
            F = self.TransformedDomainRecursiveFilter_Horizontal(F, dVdy, sigma_H_i)
            F = np.transpose(F)
        return F

    def detectBlur(self, img):
        """Return a per-pixel blur map of ``img`` (same height/width as input,
        normalized to a maximum of 1).
        """
        # Reset per-call state so one instance can process multiple images.
        self.__freqBands = []
        self.__dct_matrices = []
        self.freq_index = []
        ori_rows, ori_cols = np.shape(img)
        # Initial Gaussian smoothing.
        InputImageGaus = cv2.GaussianBlur(img, (3, 3), sigmaX=0.5, sigmaY=0.5)
        gradient_image = self.computeImageGradientMagnitude(InputImageGaus)
        total_num_layers = 1 + sum(self.scales)
        # Precompute DCT matrices and frequency-band labels for every scale.
        self.__createDCT_Matrices()
        self.__computeFrequencyBands()
        # Indices of the high-frequency content inside each frequency band.
        for i in range(self.num_scales):
            self.freq_index.append(np.where(self.__freqBands[i] == 0))
        half = int(max(self.scales) / 2)
        padded_image = np.pad(gradient_image, int(np.floor(max(self.scales) / 2)), mode='constant')
        rows, cols = np.shape(padded_image)
        row_range = range(half, rows - half, self.downsampling_factor)
        col_range = range(half, cols - half, self.downsampling_factor)
        total_num_points = len(row_range) * len(col_range)
        L = np.zeros((total_num_points, total_num_layers))
        sample_idx = 0
        n = 0
        old_progress = 0
        for i in row_range:
            old_progress = self.disp_progress(i, rows, old_progress)
            m = 0
            n += 1
            for j in col_range:
                m += 1
                high_freq_components = []
                for ind, curr_scale in enumerate(self.scales):
                    half_scale = int(curr_scale / 2)
                    patch = padded_image[i - half_scale: i + half_scale + 1,
                                         j - half_scale: j + half_scale + 1]
                    dct_coefficients = np.abs(self.__getDCTCoefficients(patch, ind))
                    # Keep all high-frequency components of this scale.
                    high_freq_components.append(dct_coefficients[self.freq_index[ind]])
                # Take the `total_num_layers` smallest values without a full
                # sort (argpartition is O(n), sorting would be O(n log n)).
                high_freq_components = np.hstack(high_freq_components)
                result = np.argpartition(high_freq_components, total_num_layers)
                L[sample_idx, :] = high_freq_components[result[:total_num_layers]]
                sample_idx += 1
        # Normalize each layer to its own maximum.
        L = L / L.max(axis=0)
        # Max-pool across layers; sample order is row-major, matching reshape.
        T_max = L.max(axis=1).reshape(n, m)
        # Final map and post-processing.
        local_entropy = self.entropyFilt(T_max)
        weighted_local_entropy = np.multiply(local_entropy, T_max)
        rows, cols = np.shape(weighted_local_entropy)
        # Resize the input to match the local-entropy map for joint filtering.
        resized_input_image = cv2.resize(InputImageGaus, (cols, rows))
        aSmooth = cv2.GaussianBlur(resized_input_image, (3, 3), sigmaX=1, sigmaY=1)
        final_map = self.RF(weighted_local_entropy, aSmooth)
        # Back to the original resolution, normalized to [0, 1].
        final_map = cv2.resize(final_map, (ori_cols, ori_rows))
        final_map = final_map / np.max(final_map)
        return final_map
| [
"numpy.sqrt",
"numpy.hstack",
"numpy.array",
"copy.deepcopy",
"numpy.multiply",
"numpy.where",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.matmul",
"skimage.morphology.square",
"cv2.resize",
"cv2.GaussianBlur",
"numpy.shape",
"numpy.transpose",
"numpy.int",
"numpy.median",
... | [((2361, 2424), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'borderType': 'cv2.BORDER_REFLECT'}), '(img, cv2.CV_64F, 1, 0, borderType=cv2.BORDER_REFLECT)\n', (2370, 2424), False, 'import cv2\n'), ((2470, 2533), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'borderType': 'cv2.BORDER_REFLECT'}), '(img, cv2.CV_64F, 0, 1, borderType=cv2.BORDER_REFLECT)\n', (2479, 2533), False, 'import cv2\n'), ((2591, 2633), 'numpy.sqrt', 'np.sqrt', (['(__sobelx ** 2.0 + __sobely ** 2.0)'], {}), '(__sobelx ** 2.0 + __sobely ** 2.0)\n', (2598, 2633), True, 'import numpy as np\n'), ((4029, 4046), 'numpy.shape', 'np.shape', (['img_blk'], {}), '(img_blk)\n', (4037, 4046), True, 'import numpy as np\n'), ((4643, 4676), 'numpy.median', 'np.median', (['weighted_local_entropy'], {}), '(weighted_local_entropy)\n', (4652, 4676), True, 'import numpy as np\n'), ((4879, 4895), 'copy.deepcopy', 'copy.deepcopy', (['I'], {}), '(I)\n', (4892, 4895), False, 'import copy\n'), ((4936, 4947), 'numpy.shape', 'np.shape', (['I'], {}), '(I)\n', (4944, 4947), True, 'import numpy as np\n'), ((5760, 5791), 'numpy.diff', 'np.diff', (['joint_img'], {'n': '(1)', 'axis': '(1)'}), '(joint_img, n=1, axis=1)\n', (5767, 5791), True, 'import numpy as np\n'), ((5808, 5839), 'numpy.diff', 'np.diff', (['joint_img'], {'n': '(1)', 'axis': '(0)'}), '(joint_img, n=1, axis=0)\n', (5815, 5839), True, 'import numpy as np\n'), ((5856, 5878), 'numpy.zeros', 'np.zeros', (['(cols, rows)'], {}), '((cols, rows))\n', (5864, 5878), True, 'import numpy as np\n'), ((5895, 5917), 'numpy.zeros', 'np.zeros', (['(cols, rows)'], {}), '((cols, rows))\n', (5903, 5917), True, 'import numpy as np\n'), ((6218, 6236), 'numpy.transpose', 'np.transpose', (['dVdy'], {}), '(dVdy)\n', (6230, 6236), True, 'import numpy as np\n'), ((6292, 6310), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (6305, 6310), False, 'import copy\n'), ((6871, 6884), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', 
(6879, 6884), True, 'import numpy as np\n'), ((6956, 7009), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)'], {'sigmaX': '(0.5)', 'sigmaY': '(0.5)'}), '(img, (3, 3), sigmaX=0.5, sigmaY=0.5)\n', (6972, 7009), False, 'import cv2\n'), ((7706, 7730), 'numpy.shape', 'np.shape', (['__padded_image'], {}), '(__padded_image)\n', (7714, 7730), True, 'import numpy as np\n'), ((8006, 8052), 'numpy.zeros', 'np.zeros', (['(total_num_points, total_num_layers)'], {}), '((total_num_points, total_num_layers))\n', (8014, 8052), True, 'import numpy as np\n'), ((9389, 9400), 'numpy.array', 'np.array', (['L'], {}), '(L)\n', (9397, 9400), True, 'import numpy as np\n'), ((9647, 9663), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (9655, 9663), True, 'import numpy as np\n'), ((10066, 10099), 'numpy.multiply', 'np.multiply', (['local_entropy', 'T_max'], {}), '(local_entropy, T_max)\n', (10077, 10099), True, 'import numpy as np\n'), ((10187, 10219), 'numpy.shape', 'np.shape', (['weighted_local_entropy'], {}), '(weighted_local_entropy)\n', (10195, 10219), True, 'import numpy as np\n'), ((10326, 10366), 'cv2.resize', 'cv2.resize', (['InputImageGaus', '(cols, rows)'], {}), '(InputImageGaus, (cols, rows))\n', (10336, 10366), False, 'import cv2\n'), ((10385, 10450), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['resized_input_image', '(3, 3)'], {'sigmaX': '(1)', 'sigmaY': '(1)'}), '(resized_input_image, (3, 3), sigmaX=1, sigmaY=1)\n', (10401, 10450), False, 'import cv2\n'), ((10585, 10628), 'cv2.resize', 'cv2.resize', (['final_map', '(ori_cols, ori_rows)'], {}), '(final_map, (ori_cols, ori_rows))\n', (10595, 10628), False, 'import cv2\n'), ((1921, 1969), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (1930, 1969), False, 'import os\n'), ((2769, 2809), 'numpy.zeros', 'np.zeros', (['(current_scale, current_scale)'], {}), '((current_scale, current_scale))\n', (2777, 2809), True, 'import numpy as np\n'), 
((3406, 3430), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', 'n'], {}), '(0, n - 1, n)\n', (3417, 3430), True, 'import numpy as np\n'), ((3430, 3454), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', 'n'], {}), '(0, n - 1, n)\n', (3441, 3454), True, 'import numpy as np\n'), ((3475, 3489), 'numpy.sqrt', 'np.sqrt', (['(2 / n)'], {}), '(2 / n)\n', (3482, 3489), True, 'import numpy as np\n'), ((3605, 3615), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3612, 3615), True, 'import numpy as np\n'), ((4148, 4169), 'numpy.matmul', 'np.matmul', (['D', 'img_blk'], {}), '(D, img_blk)\n', (4157, 4169), True, 'import numpy as np\n'), ((4171, 4186), 'numpy.transpose', 'np.transpose', (['D'], {}), '(D)\n', (4183, 4186), True, 'import numpy as np\n'), ((4275, 4311), 'skimage.morphology.square', 'square', (['self.entropy_filt_kernel_sze'], {}), '(self.entropy_filt_kernel_sze)\n', (4281, 4311), False, 'from skimage.morphology import square\n'), ((5510, 5529), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5518, 5529), True, 'import numpy as np\n'), ((6655, 6670), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (6667, 6670), True, 'import numpy as np\n'), ((6773, 6788), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (6785, 6788), True, 'import numpy as np\n'), ((10690, 10707), 'numpy.max', 'np.max', (['final_map'], {}), '(final_map)\n', (10696, 10707), True, 'import numpy as np\n'), ((5046, 5089), 'numpy.multiply', 'np.multiply', (['V[:, i]', '(F[:, i - 1] - F[:, i])'], {}), '(V[:, i], F[:, i - 1] - F[:, i])\n', (5057, 5089), True, 'import numpy as np\n'), ((5194, 5241), 'numpy.multiply', 'np.multiply', (['V[:, i + 1]', '(F[:, i + 1] - F[:, i])'], {}), '(V[:, i + 1], F[:, i + 1] - F[:, i])\n', (5205, 5241), True, 'import numpy as np\n'), ((5457, 5476), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5465, 5476), True, 'import numpy as np\n'), ((5635, 5654), 'numpy.shape', 'np.shape', (['joint_img'], {}), 
'(joint_img)\n', (5643, 5654), True, 'import numpy as np\n'), ((6534, 6553), 'numpy.sqrt', 'np.sqrt', (['(4 ** N - 1)'], {}), '(4 ** N - 1)\n', (6541, 6553), True, 'import numpy as np\n'), ((7550, 7579), 'numpy.where', 'np.where', (['(curr_freq_band == 0)'], {}), '(curr_freq_band == 0)\n', (7558, 7579), True, 'import numpy as np\n'), ((9159, 9190), 'numpy.hstack', 'np.hstack', (['high_freq_components'], {}), '(high_freq_components)\n', (9168, 9190), True, 'import numpy as np\n'), ((9216, 9271), 'numpy.argpartition', 'np.argpartition', (['high_freq_components', 'total_num_layers'], {}), '(high_freq_components, total_num_layers)\n', (9231, 9271), True, 'import numpy as np\n'), ((4847, 4857), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4854, 4857), True, 'import numpy as np\n'), ((5572, 5591), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5580, 5591), True, 'import numpy as np\n'), ((3505, 3546), 'numpy.multiply', 'np.multiply', (['(2 * mesh_cols + 1)', 'mesh_rows'], {}), '(2 * mesh_cols + 1, mesh_rows)\n', (3516, 3546), True, 'import numpy as np\n'), ((6500, 6510), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (6507, 6510), True, 'import numpy as np\n'), ((8611, 8633), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8617, 8633), True, 'import numpy as np\n'), ((8664, 8686), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8670, 8686), True, 'import numpy as np\n'), ((8636, 8658), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8642, 8658), True, 'import numpy as np\n'), ((8689, 8711), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8695, 8711), True, 'import numpy as np\n')] |
import os
from rlib.streamio import open_file_as_stream
from rlib.string_stream import StringStream
from som.compiler.class_generation_context import ClassGenerationContext
from som.interp_type import is_ast_interpreter
if is_ast_interpreter():
from som.compiler.ast.parser import Parser
else:
from som.compiler.bc.parser import Parser
def compile_class_from_file(path, filename, system_class, universe):
    """Parse and assemble the SOM class stored in <path>/<filename>.som.

    Raises IOError when the source file cannot be read, and terminates the
    universe (exit code 1) when the class name found inside the file does
    not match the file name.
    """
    fname = path + os.sep + filename + ".som"
    try:
        input_file = open_file_as_stream(fname, "r")
        try:
            parser = Parser(input_file, fname, universe)
            result = _compile(parser, system_class, universe)
        finally:
            # Always release the stream, even when parsing fails.
            input_file.close()
    except OSError:
        # NOTE(review): re-raised without the original error details; the
        # rlib/RPython toolchain may require this exact form -- confirm
        # before changing.
        raise IOError()
    cname = result.get_name()
    cname_str = cname.get_embedded_string()
    if filename != cname_str:
        # Imported locally, presumably to avoid a module-level import
        # cycle with som.vm.universe -- confirm.
        from som.vm.universe import error_println
        error_println(
            "File name %s does not match class name %s." % (filename, cname_str)
        )
        universe.exit(1)
    return result
def compile_class_from_string(stream, system_class, universe):
    """Parse and assemble a SOM class definition given as an in-memory string."""
    source = StringStream(stream)
    return _compile(Parser(source, "$str", universe), system_class, universe)
def _compile(parser, system_class, universe):
    """Run the parser over one class definition and assemble the class.

    When ``system_class`` is given, the definition is assembled into that
    existing class; otherwise a brand-new class object is produced.
    """
    cgc = ClassGenerationContext(universe)
    parser.classdef(cgc)
    if system_class:
        cgc.assemble_system_class(system_class)
        return system_class
    return cgc.assemble()
| [
"som.vm.universe.error_println",
"som.interp_type.is_ast_interpreter",
"som.compiler.bc.parser.Parser",
"som.compiler.class_generation_context.ClassGenerationContext",
"rlib.string_stream.StringStream",
"rlib.streamio.open_file_as_stream"
] | [((225, 245), 'som.interp_type.is_ast_interpreter', 'is_ast_interpreter', ([], {}), '()\n', (243, 245), False, 'from som.interp_type import is_ast_interpreter\n'), ((1320, 1352), 'som.compiler.class_generation_context.ClassGenerationContext', 'ClassGenerationContext', (['universe'], {}), '(universe)\n', (1342, 1352), False, 'from som.compiler.class_generation_context import ClassGenerationContext\n'), ((494, 525), 'rlib.streamio.open_file_as_stream', 'open_file_as_stream', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (513, 525), False, 'from rlib.streamio import open_file_as_stream\n'), ((915, 1002), 'som.vm.universe.error_println', 'error_println', (["('File name %s does not match class name %s.' % (filename, cname_str))"], {}), "('File name %s does not match class name %s.' % (filename,\n cname_str))\n", (928, 1002), False, 'from som.vm.universe import error_println\n'), ((1150, 1170), 'rlib.string_stream.StringStream', 'StringStream', (['stream'], {}), '(stream)\n', (1162, 1170), False, 'from rlib.string_stream import StringStream\n'), ((560, 595), 'som.compiler.bc.parser.Parser', 'Parser', (['input_file', 'fname', 'universe'], {}), '(input_file, fname, universe)\n', (566, 595), False, 'from som.compiler.bc.parser import Parser\n')] |
import matplotlib.pyplot as plt
import matplotlib.axes
class PlotCreator:
    """Renders experiment-result matrices as labelled line plots."""

    def __init__(self):
        # Monochrome line styles, currently unused by create().
        self.linestyle = ('k-', 'k--', 'k-.', 'k:', 'ko', 'k^', 'kv')
        # Per-row line colours, indexed by row number.
        self.linecolor = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')

    def create(self, matrix, outputfile, xbase=0, ybase=0, xlabel='x axis', ylabel='y axis'):
        """Plot every data row of *matrix* against its first row.

        Row layout: ``matrix[0][1:]`` is the shared x scale, ``matrix[r][0]``
        is row r's label and ``matrix[r][1:]`` its y values.  The figure is
        saved to *outputfile*; an ``xbase``/``ybase`` greater than 1 switches
        the corresponding axis to a log scale with that base.
        """
        row_count = len(matrix)
        labels = [row[0] for row in matrix]
        series = [row[1:] for row in matrix]
        for idx in range(1, row_count):
            plt.plot(series[0], series[idx],
                     label=labels[idx], color=self.linecolor[idx])
        if xbase > 1:
            plt.gca().set_xscale('log', basex=xbase)
        if ybase > 1:
            plt.gca().set_yscale('log', basey=ybase)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.legend()
        plt.savefig(outputfile)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend"
] | [((1364, 1382), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1374, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1391, 1409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1401, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1431), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1429, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1441, 1464), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputfile'], {}), '(outputfile)\n', (1452, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1091), 'matplotlib.pyplot.plot', 'plt.plot', (['data[0]', 'data[graph]'], {'label': 'labels[graph]', 'color': 'self.linecolor[graph]'}), '(data[0], data[graph], label=labels[graph], color=self.linecolor[graph]\n )\n', (1014, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1248), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1246, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1323), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1321, 1323), True, 'import matplotlib.pyplot as plt\n')] |
#! /usr/bin/env python3
"""A module which provides a unified CLI to all the functionality.
This module serves as a single entry point for a user. It exposes a CLI
which can be use to call the different commands and set their properties.
This CLI replaces `__main__.py` files scattered around the project with
a single, unified interface for running everything.
"""
from typing import List, Set
import pathlib as pl
from sys import stderr
import click
import evacsim.bench as bench
import evacsim.expansion as expansion
import evacsim.lcmae as lcmae
import evacsim.plots as plots
from .level import Level
# grid is imported only when it's required for the application to work.
# That's because GitLab CI doesn't have OpenGL libraries installed and
# will fail if we try to import arcade, even indirectly.
@click.group()
def cli():
    # Root command group; subcommands attach themselves via @cli.command().
    # NOTE(review): intentionally left without a docstring -- a docstring
    # here would become the program's --help text and change CLI output.
    pass
@cli.command()
@click.option("--algorithm",
              type=click.Choice(("lcmae", "postmae", "flow")),
              default="lcmae",
              help="Algorithm to use when planning")
@click.option("--visualize/--no-visualize",
              default=False,
              help="Start visualization after creating the plan")
@click.option("--debug/--no-debug",
              default=False,
              help="Print planning algorithm's debug output")
@click.argument("map_path",
                type=click.Path(exists=True, dir_okay=False))
@click.argument("scenario_path",
                type=click.Path(exists=True, dir_okay=False))
def plan(map_path, scenario_path, algorithm, visualize, debug):
    "Create an evacuation plan for a map and a scenario"
    lvl = Level(map_path, scenario_path)
    # Exit code 2: no route from the agents to safety exists at all.
    if not lvl.frontier:
        print("No passage to safety exists!", file=stderr)
        exit(2)
    paths: List[List[int]] = []
    if algorithm == "lcmae":
        paths = lcmae.plan_evacuation(lvl, debug=debug)
    else:
        # NOTE(review): the "flow" choice also lands here and runs the
        # expansion planner (postprocess=False) instead of a dedicated
        # network-flow solver -- confirm this is intended.
        paths = expansion.plan_evacuation(lvl,
                                  postprocess=(algorithm == "postmae"),
                                  debug=debug)
    if visualize:
        # Imported lazily: arcade/OpenGL may be unavailable (e.g. in CI),
        # see the note next to the top-of-file imports.
        import evacsim.grid as grid
        grid.start(lvl, map_path, paths)
    else:
        print(paths_to_str(paths))
@cli.command()
@click.argument("benchfile", type=click.File("r"))
@click.option("--processes",
              type=click.INT,
              help="Number of processors to use while benchmarking")
@click.option("-f", "--format",
              type=click.Choice(("text", "json")),
              default="text",
              help="Benchmark results format")
@click.option("--flow/--no-flow",
              default=False,
              help="Also run the network flow algorithm on eligible scenarios")
@click.option("-p", "--plot-dest", 'plot_dest',
              type=click.Path(exists=True, file_okay=False),
              help="Directory into which agent safety plots should be saved")
@click.option("-d", "--path-dest", 'path_dest',
              type=click.Path(exists=True, file_okay=False),
              help="Directory into which evacuation plan paths should be saved")
def benchmark(benchfile, processes, format, flow, plot_dest, path_dest):
    """Evaluate benchmark algorithms' performance

    Plans evacuations for multiple maps and scenarios (optionally in parallel)
    and then displays stats about them.

    By default, all processor cores on the system will be used to speed up
    benchmarking.

    When the --flow option is passed, the network flow algorithm is ran on
    some of the scenarios and its results are reported. This only works on
    scenarios which only have retargeting agents.

    CAUTION: The network flow algorithm may take up massive amounts of memory.
    """
    # processes may be None; presumably the pool then defaults to all
    # cores (matches the docstring) -- confirm in bench.BenchResults.
    bench_cases = bench.parse_benchfile(benchfile)
    results = bench.BenchResults.for_benchfile(bench_cases, processes, flow)
    # Render results on stdout in the requested format.
    if format == "text":
        print(results.as_text())
    else:
        print(results.as_json())
    # Optional artifacts: safety plots and per-scenario path files.
    if plot_dest:
        plots.generate_plots(results, pl.Path(plot_dest))
    if path_dest:
        path = pl.Path(path_dest)
        for name, result in results.results.items():
            write_paths(path.joinpath(f"{name}.out"), result.paths)
@cli.command()
@click.option("-s", "--square-size",
              default=15,
              help="Size of a single square on a map, in pixels")
@click.option("-b", "--border-size",
              default=0,
              help="Size of the border between squares, in pixels")
@click.argument("map_path",
                type=click.Path(exists=True, dir_okay=False))
@click.argument("scenario_path",
                type=click.Path(exists=True, dir_okay=False))
@click.argument("solution_path",
                required=False,
                type=click.Path(exists=True, dir_okay=False))
def gui(map_path, scenario_path, solution_path, square_size, border_size):
    """Show a GUI for plan visualization and editing."""
    # Imported lazily: arcade/OpenGL may be unavailable (e.g. in CI),
    # see the note next to the top-of-file imports.
    import evacsim.grid as grid
    level = Level(map_path, scenario_path)
    # The solution file is optional; without one the grid is opened with
    # no precomputed paths.
    if solution_path:
        paths = parse_paths(solution_path)
    else:
        paths = None
    grid.start(level, map_path, paths, cell_size=square_size, border=border_size)
@cli.command()
@click.argument("map_path",
                type=click.Path(exists=True, dir_okay=False))
@click.argument("scenario_path",
                type=click.Path(exists=True, dir_okay=False))
@click.argument("solution_path",
                required=False,
                type=click.Path(exists=True, dir_okay=False))
def check(map_path, scenario_path, solution_path):
    """Check the validity of given solution."""
    # Violations are reported by check_paths via print(); only the
    # unequal-path-lengths case terminates with a non-zero exit status.
    level = Level(map_path, scenario_path)
    paths = parse_paths(solution_path)
    check_paths(paths, level)
def paths_to_str(paths: List[List[int]]) -> str:
    """Format agent paths, one line per agent, as zero-padded node ids."""
    return "\n".join(
        " ".join("{:02d}".format(step) for step in path) for path in paths
    )
def write_paths(filename: str, paths: List[List[int]]):
    """Write the given agent paths into a file, in the common tool format."""
    with open(filename, "w") as out:
        out.write(paths_to_str(paths) + "\n")
def parse_paths(path: str) -> List[List[int]]:
    """Parse an evacuation-plan file into per-agent lists of node ids.

    Each line holds one agent's path as space-separated integers.  A
    trailing empty line (e.g. from a file ending in a blank line) is
    dropped instead of crashing the integer conversion.
    """
    with open(path) as f:
        lines = f.readlines()
    # BUG FIX: the original compared a string against the list [""], which
    # is always False, so a trailing blank line made int("") raise below.
    if lines and lines[-1].strip() == "":
        lines = lines[:-1]
    result = []
    for line in lines:
        result.append(list(map(int, line.strip().split(" "))))
    return result
def check_paths(paths: List[List[int]], level: Level):
    """Validate an evacuation plan against a level, printing violations.

    Checks that all paths have equal length (exits with status 1 if not),
    that no two agents collide or directly trail one another in the same
    time step, that each agent starts at its scenario origin, and that
    every move is between adjacent (or identical) cells.
    """
    # NOTE(review): raises IndexError on an empty paths list (paths[0]);
    # presumably callers always pass at least one agent -- confirm.
    path_len = len(paths[0])
    for p in paths:
        if len(p) != path_len:
            print("Not all paths have equal sizes")
            exit(1)
    previous: Set[int] = set()
    for t in range(path_len):
        current = set()
        for p in paths:
            current.add(p[t])
            # An agent moving into a cell that was occupied in the previous
            # step is flagged as direct trailing.
            if p[t] in previous and p[t] != p[t-1]:
                print(f"Direct trailing at time {t}")
        # Fewer distinct cells than agents means two agents share a cell.
        if len(current) != len(paths):
            print(f"Collision at time {t}")
        previous = current
    for agent, p in enumerate(paths):
        if level.scenario.agents[agent].origin != p[0]:
            print("Agent starts at a point different from the scenario")
        for i in range(1, len(p)):
            # Assumes level.g maps a cell to its neighbours -- TODO confirm.
            # Staying in place (p[i-1] == p[i]) is always allowed.
            if p[i - 1] not in level.g[p[i]] and p[i - 1] != p[i]:
                print(f"Non-adjacent movement of agent {agent} at time {i}")
# Invoke the click command group when run as a script.
if __name__ == "__main__":
    cli()
| [
"click.Choice",
"evacsim.bench.BenchResults.for_benchfile",
"click.group",
"click.option",
"evacsim.expansion.plan_evacuation",
"pathlib.Path",
"click.File",
"evacsim.grid.start",
"evacsim.bench.parse_benchfile",
"evacsim.lcmae.plan_evacuation",
"click.Path"
] | [((811, 824), 'click.group', 'click.group', ([], {}), '()\n', (822, 824), False, 'import click\n'), ((1039, 1153), 'click.option', 'click.option', (['"""--visualize/--no-visualize"""'], {'default': '(False)', 'help': '"""Start visualization after creating the plan"""'}), "('--visualize/--no-visualize', default=False, help=\n 'Start visualization after creating the plan')\n", (1051, 1153), False, 'import click\n'), ((1178, 1280), 'click.option', 'click.option', (['"""--debug/--no-debug"""'], {'default': '(False)', 'help': '"""Print planning algorithm\'s debug output"""'}), '(\'--debug/--no-debug\', default=False, help=\n "Print planning algorithm\'s debug output")\n', (1190, 1280), False, 'import click\n'), ((2269, 2372), 'click.option', 'click.option', (['"""--processes"""'], {'type': 'click.INT', 'help': '"""Number of processors to use while benchmarking"""'}), "('--processes', type=click.INT, help=\n 'Number of processors to use while benchmarking')\n", (2281, 2372), False, 'import click\n'), ((2557, 2675), 'click.option', 'click.option', (['"""--flow/--no-flow"""'], {'default': '(False)', 'help': '"""Also run the network flow algorithm on eligible scenarios"""'}), "('--flow/--no-flow', default=False, help=\n 'Also run the network flow algorithm on eligible scenarios')\n", (2569, 2675), False, 'import click\n'), ((4198, 4302), 'click.option', 'click.option', (['"""-s"""', '"""--square-size"""'], {'default': '(15)', 'help': '"""Size of a single square on a map, in pixels"""'}), "('-s', '--square-size', default=15, help=\n 'Size of a single square on a map, in pixels')\n", (4210, 4302), False, 'import click\n'), ((4327, 4432), 'click.option', 'click.option', (['"""-b"""', '"""--border-size"""'], {'default': '(0)', 'help': '"""Size of the border between squares, in pixels"""'}), "('-b', '--border-size', default=0, help=\n 'Size of the border between squares, in pixels')\n", (4339, 4432), False, 'import click\n'), ((3720, 3752), 'evacsim.bench.parse_benchfile', 
'bench.parse_benchfile', (['benchfile'], {}), '(benchfile)\n', (3741, 3752), True, 'import evacsim.bench as bench\n'), ((3767, 3829), 'evacsim.bench.BenchResults.for_benchfile', 'bench.BenchResults.for_benchfile', (['bench_cases', 'processes', 'flow'], {}), '(bench_cases, processes, flow)\n', (3799, 3829), True, 'import evacsim.bench as bench\n'), ((5075, 5152), 'evacsim.grid.start', 'grid.start', (['level', 'map_path', 'paths'], {'cell_size': 'square_size', 'border': 'border_size'}), '(level, map_path, paths, cell_size=square_size, border=border_size)\n', (5085, 5152), True, 'import evacsim.grid as grid\n'), ((1828, 1867), 'evacsim.lcmae.plan_evacuation', 'lcmae.plan_evacuation', (['lvl'], {'debug': 'debug'}), '(lvl, debug=debug)\n', (1849, 1867), True, 'import evacsim.lcmae as lcmae\n'), ((1894, 1973), 'evacsim.expansion.plan_evacuation', 'expansion.plan_evacuation', (['lvl'], {'postprocess': "(algorithm == 'postmae')", 'debug': 'debug'}), "(lvl, postprocess=algorithm == 'postmae', debug=debug)\n", (1919, 1973), True, 'import evacsim.expansion as expansion\n'), ((2122, 2154), 'evacsim.grid.start', 'grid.start', (['lvl', 'map_path', 'paths'], {}), '(lvl, map_path, paths)\n', (2132, 2154), True, 'import evacsim.grid as grid\n'), ((910, 952), 'click.Choice', 'click.Choice', (["('lcmae', 'postmae', 'flow')"], {}), "(('lcmae', 'postmae', 'flow'))\n", (922, 952), False, 'import click\n'), ((1353, 1392), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (1363, 1392), False, 'import click\n'), ((1448, 1487), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (1458, 1487), False, 'import click\n'), ((4040, 4058), 'pathlib.Path', 'pl.Path', (['path_dest'], {}), '(path_dest)\n', (4047, 4058), True, 'import pathlib as pl\n'), ((2251, 2266), 'click.File', 'click.File', (['"""r"""'], {}), "('r')\n", (2261, 2266), False, 'import click\n'), ((2447, 
2477), 'click.Choice', 'click.Choice', (["('text', 'json')"], {}), "(('text', 'json'))\n", (2459, 2477), False, 'import click\n'), ((2766, 2806), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)'}), '(exists=True, file_okay=False)\n', (2776, 2806), False, 'import click\n'), ((2953, 2993), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)'}), '(exists=True, file_okay=False)\n', (2963, 2993), False, 'import click\n'), ((4505, 4544), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (4515, 4544), False, 'import click\n'), ((4600, 4639), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (4610, 4639), False, 'import click\n'), ((4727, 4766), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (4737, 4766), False, 'import click\n'), ((5219, 5258), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (5229, 5258), False, 'import click\n'), ((5314, 5353), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (5324, 5353), False, 'import click\n'), ((5441, 5480), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (5451, 5480), False, 'import click\n'), ((3987, 4005), 'pathlib.Path', 'pl.Path', (['plot_dest'], {}), '(plot_dest)\n', (3994, 4005), True, 'import pathlib as pl\n')] |
#!/usr/bin/env python3
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An experiment to help determine the best way to use music21 objects.
The music21 libaries have a lot of purposes beyond what I need so for now I
think all I need is to know how to access the note pitches and their positions
and durations within the work. From those three bits of info I can then
construct a waveform representing that music given a tempo to define the length
of a quarter note.
"""
import numpy
from music21 import corpus
from potty_oh.common import get_cmd_line_parser
from potty_oh.common import call_main
from potty_oh.common import ParserArguments
from potty_oh.common import defaults
from potty_oh.wav_file import wav_file_context
from potty_oh.waveform import Waveform
from potty_oh.waveform import seconds_to_frame
from potty_oh.waveform import quarter_note_length
from potty_oh.signal_generator import Generator
from potty_oh.audify import audify_to_file
def main() -> int:
    """Render a random music21 corpus work to a WAV file.

    Parses the CLI arguments, picks a random work from the core corpus,
    and either delegates to the audify pipeline (--best) or synthesizes
    each note as a constant sine tone.  Returns 0 on success; a
    KeyboardInterrupt only stops note generation early and still writes
    the partial song.
    """
    parser = get_cmd_line_parser(description=__doc__)
    # Register the standard potty_oh argument groups on the parser.
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    # Global framerate consumed by the signal/waveform helpers.
    defaults.framerate = args.framerate
    print('Generating Signal:')
    sig_gen = Generator()
    song = Waveform([])
    # Seconds per quarter note at the requested tempo.
    qnl = quarter_note_length(args.tempo)
    # work = corpus.parse(numpy.random.choice(corpus.getComposer('bach')))
    # Pick a random work from the whole music21 core corpus.
    work = corpus.parse(numpy.random.choice(corpus.getCorePaths()))
    notes = work.flat.notes
    if args.best:
        # Delegate the whole rendering to the audify pipeline.
        audify_to_file(notes, args.tempo, args.filename, args.verbose)
        return 0
    note_count = len(notes)
    try:
        for count, note in enumerate(notes):
            print('{}/{}: {} [{}]: {} {}'.format(
                count, note_count, note.offset, note.duration.quarterLength,
                note.pitch, note.pitch.frequency))
            # Note offsets/durations are measured in quarter notes.
            note_length = qnl * note.quarterLength
            start = seconds_to_frame(qnl * note.offset)
            print(' inserting {} seconds into frame {}'.format(
                note_length, start))
            # Mix a constant sine tone at the note's pitch into the song.
            song = song.insert(
                start, sig_gen.sin_constant(note.pitch.frequency,
                                            length=note_length))
    except KeyboardInterrupt:
        print('Stopping song generating here...')
    print('Writing Song {} to file {}...'.format(
        work.corpusFilepath, args.filename))
    with wav_file_context(args.filename) as fout:
        fout.write_frames(song.frames)
    return 0
# call_main wraps main (presumably handling exit-code plumbing);
# see potty_oh.common.
if __name__ == "__main__":
    call_main(main)
| [
"potty_oh.common.get_cmd_line_parser",
"potty_oh.common.ParserArguments.tempo",
"potty_oh.common.ParserArguments.filename",
"potty_oh.waveform.Waveform",
"potty_oh.common.call_main",
"potty_oh.common.ParserArguments.set_defaults",
"potty_oh.signal_generator.Generator",
"potty_oh.waveform.quarter_note_... | [((1528, 1568), 'potty_oh.common.get_cmd_line_parser', 'get_cmd_line_parser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (1547, 1568), False, 'from potty_oh.common import get_cmd_line_parser\n'), ((1573, 1605), 'potty_oh.common.ParserArguments.filename', 'ParserArguments.filename', (['parser'], {}), '(parser)\n', (1597, 1605), False, 'from potty_oh.common import ParserArguments\n'), ((1610, 1639), 'potty_oh.common.ParserArguments.tempo', 'ParserArguments.tempo', (['parser'], {}), '(parser)\n', (1631, 1639), False, 'from potty_oh.common import ParserArguments\n'), ((1644, 1677), 'potty_oh.common.ParserArguments.framerate', 'ParserArguments.framerate', (['parser'], {}), '(parser)\n', (1669, 1677), False, 'from potty_oh.common import ParserArguments\n'), ((1682, 1718), 'potty_oh.common.ParserArguments.set_defaults', 'ParserArguments.set_defaults', (['parser'], {}), '(parser)\n', (1710, 1718), False, 'from potty_oh.common import ParserArguments\n'), ((1723, 1751), 'potty_oh.common.ParserArguments.best', 'ParserArguments.best', (['parser'], {}), '(parser)\n', (1743, 1751), False, 'from potty_oh.common import ParserArguments\n'), ((1870, 1881), 'potty_oh.signal_generator.Generator', 'Generator', ([], {}), '()\n', (1879, 1881), False, 'from potty_oh.signal_generator import Generator\n'), ((1893, 1905), 'potty_oh.waveform.Waveform', 'Waveform', (['[]'], {}), '([])\n', (1901, 1905), False, 'from potty_oh.waveform import Waveform\n'), ((1916, 1947), 'potty_oh.waveform.quarter_note_length', 'quarter_note_length', (['args.tempo'], {}), '(args.tempo)\n', (1935, 1947), False, 'from potty_oh.waveform import quarter_note_length\n'), ((3171, 3186), 'potty_oh.common.call_main', 'call_main', (['main'], {}), '(main)\n', (3180, 3186), False, 'from potty_oh.common import call_main\n'), ((2146, 2208), 'potty_oh.audify.audify_to_file', 'audify_to_file', (['notes', 'args.tempo', 'args.filename', 'args.verbose'], {}), '(notes, 
args.tempo, args.filename, args.verbose)\n', (2160, 2208), False, 'from potty_oh.audify import audify_to_file\n'), ((3044, 3075), 'potty_oh.wav_file.wav_file_context', 'wav_file_context', (['args.filename'], {}), '(args.filename)\n', (3060, 3075), False, 'from potty_oh.wav_file import wav_file_context\n'), ((2068, 2089), 'music21.corpus.getCorePaths', 'corpus.getCorePaths', ([], {}), '()\n', (2087, 2089), False, 'from music21 import corpus\n'), ((2558, 2593), 'potty_oh.waveform.seconds_to_frame', 'seconds_to_frame', (['(qnl * note.offset)'], {}), '(qnl * note.offset)\n', (2574, 2593), False, 'from potty_oh.waveform import seconds_to_frame\n')] |
from flask import Blueprint
# Blueprint for the jobs API; registered with the application elsewhere.
jobs_bp = Blueprint("jobs", __name__)
# Imported after the blueprint is defined -- presumably to avoid a
# circular import (views likely imports jobs_bp); confirm.
from demo.api.jobs import views
| [
"flask.Blueprint"
] | [((39, 66), 'flask.Blueprint', 'Blueprint', (['"""jobs"""', '__name__'], {}), "('jobs', __name__)\n", (48, 66), False, 'from flask import Blueprint\n')] |
"""search.py
인자값을 전달한 키워드 기반으로 상위 n개 링크
웹 브라우저 실행"""
import sys
import requests, webbrowser, bs4
# Search-engine query URLs; the encoded query string is appended directly.
URLS = {
    'google': 'https://google.com/search?q=',
    'duckduckgo': 'https://duckduckgo.com/?q='
}
def parse_args() -> list:
    """Return the command-line search terms; exit with usage if none given."""
    terms = sys.argv[1:]
    if not terms:
        print(f'python {__file__} <search query>')
        sys.exit(1)
    return terms
def get_http_resp(query, url):
    """Fetch the results page for *query*, raising on HTTP error status."""
    print('Searching...')
    # Present a desktop-Firefox user agent so the engine serves full HTML.
    agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0"
    response = requests.get(f'{url}{query}', headers={"user-agent": agent})
    response.raise_for_status()
    return response
def find_google_elems(resp):
    """Extract the result-link anchor elements from a Google results page."""
    page = bs4.BeautifulSoup(resp.text, 'lxml')
    return page.select('.r > a')
def get_duckduckgo_resp(resp):
    """Extract the result-link anchor elements from a DuckDuckGo page.

    NOTE(review): despite the name this returns link elements, not a
    response, mirroring find_google_elems.
    """
    page = bs4.BeautifulSoup(resp.text, 'lxml')
    return page.select('.result__a')
def main():
    """Search Google for the CLI terms and open the top result links."""
    terms = parse_args()
    query = ' '.join(terms).replace(' ', '+')
    response = get_http_resp(query, URLS['google'])
    links = find_google_elems(response)
    # Open at most the first five http(s) links in the default browser.
    for link in links[:min(5, len(links))]:
        href = link.get('href')
        print(href)
        if href.startswith('http'):
            webbrowser.open(link.get('href'))
# Standard script entry point.
if __name__ == "__main__":
    main()
| [
"bs4.BeautifulSoup",
"requests.get",
"sys.exit"
] | [((572, 618), 'requests.get', 'requests.get', (['f"""{url}{query}"""'], {'headers': 'headers'}), "(f'{url}{query}', headers=headers)\n", (584, 618), False, 'import requests, webbrowser, bs4\n'), ((706, 742), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['resp.text', '"""lxml"""'], {}), "(resp.text, 'lxml')\n", (723, 742), False, 'import requests, webbrowser, bs4\n'), ((851, 887), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['resp.text', '"""lxml"""'], {}), "(resp.text, 'lxml')\n", (868, 887), False, 'import requests, webbrowser, bs4\n'), ((316, 327), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (324, 327), False, 'import sys\n')] |
"""
This module contains all the functions needed for extracting satellite-derived
shorelines (SDS)
Author: <NAME>, Water Research Laboratory, University of New South Wales
"""
# load modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pdb
# image processing modules
import skimage.filters as filters
import skimage.measure as measure
import skimage.morphology as morphology
# machine learning modules
import sklearn
if sklearn.__version__[:4] == '0.20':
from sklearn.externals import joblib
else:
import joblib
from shapely.geometry import LineString
# other modules
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.cm as cm
from matplotlib import gridspec
import pickle
from datetime import datetime
from pylab import ginput
# CoastSat modules
from coastsat import SDS_tools, SDS_preprocess
# Silence all NumPy floating-point warnings module-wide (divide-by-zero
# and NaN results are expected during index computations on masked pixels).
np.seterr(all='ignore') # raise/ignore divisions by 0 and nans

# Main function for batch shoreline detection
def extract_shorelines(metadata, settings):
"""
Main function to extract shorelines from satellite images
KV WRL 2018
Arguments:
-----------
metadata: dict
contains all the information about the satellite images that were downloaded
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'cloud_thresh': float
value between 0 and 1 indicating the maximum cloud fraction in
the cropped image that is accepted
'cloud_mask_issue': boolean
True if there is an issue with the cloud mask and sand pixels
are erroneously being masked on the images
'buffer_size': int
size of the buffer (m) around the sandy pixels over which the pixels
are considered in the thresholding algorithm
'min_beach_area': int
minimum allowable object area (in metres^2) for the class 'sand',
the area is converted to number of connected pixels
'min_length_sl': int
minimum length (in metres) of shoreline contour to be valid
'sand_color': str
default', 'dark' (for grey/black sand beaches) or 'bright' (for white sand beaches)
'output_epsg': int
output spatial reference system as EPSG code
'check_detection': bool
if True, lets user manually accept/reject the mapped shorelines
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
'adjust_detection': bool
if True, allows user to manually adjust the detected shoreline
Returns:
-----------
output: dict
contains the extracted shorelines and corresponding dates + metadata
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
filepath_models = os.path.join(os.getcwd(), 'classification', 'models')
# initialise output structure
output = dict([])
# create a subfolder to store the .jpg images showing the detection
filepath_jpg = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
if not os.path.exists(filepath_jpg):
os.makedirs(filepath_jpg)
# close all open figures
plt.close('all')
print('Mapping shorelines:')
# loop through satellite list
for satname in metadata.keys():
# get images
filepath = SDS_tools.get_filepath(settings['inputs'],satname)
filenames = metadata[satname]['filenames']
# initialise the output variables
output_timestamp = [] # datetime at which the image was acquired (UTC time)
output_shoreline = [] # vector of shoreline points
output_filename = [] # filename of the images from which the shorelines where derived
output_cloudcover = [] # cloud cover of the images
output_geoaccuracy = []# georeferencing accuracy of the images
output_idxkeep = [] # index that were kept during the analysis (cloudy images are skipped)
output_t_mndwi = [] # MNDWI threshold used to map the shoreline
# load classifiers (if sklearn version above 0.20, learn the new files)
str_new = ''
if not sklearn.__version__[:4] == '0.20':
str_new = '_new'
if satname in ['L5','L7','L8']:
pixel_size = 15
if settings['sand_color'] == 'dark':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_dark%s.pkl'%str_new))
elif settings['sand_color'] == 'bright':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_bright%s.pkl'%str_new))
else:
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat%s.pkl'%str_new))
elif satname == 'S2':
pixel_size = 10
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_S2%s.pkl'%str_new))
# convert settings['min_beach_area'] and settings['buffer_size'] from metres to pixels
buffer_size_pixels = np.ceil(settings['buffer_size']/pixel_size)
min_beach_area_pixels = np.ceil(settings['min_beach_area']/pixel_size**2)
# loop through the images
for i in range(len(filenames)):
print('\r%s: %d%%' % (satname,int(((i+1)/len(filenames))*100)), end='')
# get image filename
fn = SDS_tools.get_filenames(filenames[i],filepath, satname)
# preprocess image (cloud mask + pansharpening/downsampling)
im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(fn, satname, settings['cloud_mask_issue'])
# get image spatial reference system (epsg code) from metadata dict
image_epsg = metadata[satname]['epsg'][i]
# compute cloud_cover percentage (with no data pixels)
cloud_cover_combined = np.divide(sum(sum(cloud_mask.astype(int))),
(cloud_mask.shape[0]*cloud_mask.shape[1]))
if cloud_cover_combined > 0.99: # if 99% of cloudy pixels in image skip
continue
# remove no data pixels from the cloud mask
# (for example L7 bands of no data should not be accounted for)
cloud_mask_adv = np.logical_xor(cloud_mask, im_nodata)
# compute updated cloud cover percentage (without no data pixels)
cloud_cover = np.divide(sum(sum(cloud_mask_adv.astype(int))),
(sum(sum((~im_nodata).astype(int)))))
# skip image if cloud cover is above user-defined threshold
if cloud_cover > settings['cloud_thresh']:
continue
# calculate a buffer around the reference shoreline (if any has been digitised)
im_ref_buffer = create_shoreline_buffer(cloud_mask.shape, georef, image_epsg,
pixel_size, settings)
# classify image in 4 classes (sand, whitewater, water, other) with NN classifier
im_classif, im_labels = classify_image_NN(im_ms, im_extra, cloud_mask,
min_beach_area_pixels, clf)
# if adjust_detection is True, let the user adjust the detected shoreline
if settings['adjust_detection']:
date = filenames[i][:19]
skip_image, shoreline, t_mndwi = adjust_detection(im_ms, cloud_mask, im_labels,
im_ref_buffer, image_epsg, georef,
settings, date, satname, buffer_size_pixels)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# otherwise map the contours automatically with one of the two following functions:
# if there are pixels in the 'sand' class --> use find_wl_contours2 (enhanced)
# otherwise use find_wl_contours2 (traditional)
else:
try: # use try/except structure for long runs
if sum(sum(im_labels[:,:,0])) < 10 : # minimum number of sand pixels
# compute MNDWI image (SWIR-G)
im_mndwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# find water contours on MNDWI grayscale image
contours_mwi, t_mndwi = find_wl_contours1(im_mndwi, cloud_mask, im_ref_buffer)
else:
# use classification to refine threshold and extract the sand/water interface
contours_mwi, t_mndwi = find_wl_contours2(im_ms, im_labels, cloud_mask,
buffer_size_pixels, im_ref_buffer)
except:
print('Could not map shoreline for this image: ' + filenames[i])
continue
# process the water contours into a shoreline
shoreline = process_shoreline(contours_mwi, cloud_mask, georef, image_epsg, settings)
# visualise the mapped shorelines, there are two options:
# if settings['check_detection'] = True, shows the detection to the user for accept/reject
# if settings['save_figure'] = True, saves a figure for each mapped shoreline
if settings['check_detection'] or settings['save_figure']:
date = filenames[i][:19]
if not settings['check_detection']:
plt.ioff() # turning interactive plotting off
skip_image = show_detection(im_ms, cloud_mask, im_labels, shoreline,
image_epsg, georef, settings, date, satname)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# append to output variables
output_timestamp.append(metadata[satname]['dates'][i])
output_shoreline.append(shoreline)
output_filename.append(filenames[i])
output_cloudcover.append(cloud_cover)
output_geoaccuracy.append(metadata[satname]['acc_georef'][i])
output_idxkeep.append(i)
output_t_mndwi.append(t_mndwi)
# create dictionnary of output
output[satname] = {
'dates': output_timestamp,
'shorelines': output_shoreline,
'filename': output_filename,
'cloud_cover': output_cloudcover,
'geoaccuracy': output_geoaccuracy,
'idx': output_idxkeep,
'MNDWI_threshold': output_t_mndwi,
}
print('')
# close figure window if still open
if plt.get_fignums():
plt.close()
# change the format to have one list sorted by date with all the shorelines (easier to use)
output = SDS_tools.merge_output(output)
# save outputput structure as output.pkl
filepath = os.path.join(filepath_data, sitename)
with open(os.path.join(filepath, sitename + '_output.pkl'), 'wb') as f:
pickle.dump(output, f)
return output
###################################################################################################
# IMAGE CLASSIFICATION FUNCTIONS
###################################################################################################
def calculate_features(im_ms, cloud_mask, im_bool):
    """
    Calculates features on the image that are used for the supervised classification.
    The features include spectral normalized-difference indices and standard
    deviation of the image for all the bands and indices.
    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        RGB + downsampled NIR and SWIR
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    im_bool: np.array
        2D array of boolean indicating where on the image to calculate the features

    Returns:
    -----------
    features: np.array
        matrix containing each feature (columns) calculated for all
        the pixels (rows) indicated in im_bool

    Column order (unchanged from the original implementation): the raw bands,
    then NIR-G, SWIR-G, NIR-R, SWIR-NIR, B-R indices, then the radius-1
    standard deviation of each band and each index.
    """
    # spectral-index band pairs, in the exact order of the original code:
    # NIR-G, SWIR-G, NIR-R, SWIR-NIR, B-R
    index_pairs = [(3, 1), (4, 1), (3, 2), (4, 3), (0, 2)]
    im_bands = [im_ms[:, :, k] for k in range(im_ms.shape[2])]
    im_indices = [SDS_tools.nd_index(im_ms[:, :, b1], im_ms[:, :, b2], cloud_mask)
                  for b1, b2 in index_pairs]
    # build every feature column once, then stack in a single allocation.
    # (the previous implementation grew the matrix with np.append per column,
    # reallocating the whole array each time)
    columns = [im[im_bool] for im in im_bands + im_indices]
    # per-pixel standard deviation (radius-1 window) of each band and index
    columns += [SDS_tools.image_std(im, 1)[im_bool] for im in im_bands + im_indices]
    features = np.column_stack(columns)
    return features
def classify_image_NN(im_ms, im_extra, cloud_mask, min_beach_area, clf):
    """
    Classifies every pixel in the image in one of 4 classes:
        - sand                                          --> label = 1
        - whitewater (breaking waves and swash)         --> label = 2
        - water                                         --> label = 3
        - other (vegetation, buildings, rocks...)       --> label = 0
    The classifier is a Neural Network that is already trained.
    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        Pansharpened RGB + downsampled NIR and SWIR
    im_extra:
        only used for Landsat 7 and 8 where im_extra is the panchromatic band
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    min_beach_area: int
        minimum number of pixels that have to be connected to belong to the SAND class
    clf: joblib object
        pre-trained classifier

    Returns:
    -----------
    im_classif: np.array
        2D image containing labels
    im_labels: np.array of booleans
        3D image containing a boolean image for each class (im_classif == label)
    """
    nrows, ncols = cloud_mask.shape
    # compute the feature matrix over the whole image
    feats = calculate_features(im_ms, cloud_mask, np.ones(cloud_mask.shape).astype(bool))
    # NaN values appear when the local std is too close to 0
    feats[np.isnan(feats)] = 1e-9
    # exclude cloudy pixels and any remaining NaN/inf rows from prediction
    bad_rows = np.logical_or(np.any(np.isnan(feats), axis=1),
                             np.any(np.isinf(feats), axis=1))
    invalid = np.logical_or(cloud_mask.reshape(nrows * ncols), bad_rows)
    predicted = clf.predict(feats[~invalid, :])
    # rebuild the 2D label image; NaN where no prediction was made
    flat_labels = np.nan * np.ones(nrows * ncols)
    flat_labels[~invalid] = predicted
    im_classif = flat_labels.reshape((nrows, ncols))
    # one boolean image per class
    im_sand = im_classif == 1
    im_swash = im_classif == 2
    im_water = im_classif == 3
    # discard small isolated patches of sand/water (usually noise)
    im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_area, connectivity=2)
    im_water = morphology.remove_small_objects(im_water, min_size=min_beach_area, connectivity=2)
    im_labels = np.stack((im_sand, im_swash, im_water), axis=-1)
    return im_classif, im_labels
###################################################################################################
# CONTOUR MAPPING FUNCTIONS
###################################################################################################
def find_wl_contours1(im_ndwi, cloud_mask, im_ref_buffer):
    """
    Traditional method for shoreline detection using a global threshold.
    Finds the water line by thresholding the Normalized Difference Water Index
    and applying the Marching Squares Algorithm to contour the iso-value
    corresponding to the threshold.
    KV WRL 2018

    Arguments:
    -----------
    im_ndwi: np.ndarray
        Image (2D) with the NDWI (water index)
    cloud_mask: np.ndarray
        2D cloud mask with True where cloud pixels are
    im_ref_buffer: np.array
        Binary image containing a buffer around the reference shoreline

    Returns:
    -----------
    contours: list of np.arrays
        contains the coordinates of the contour lines
    t_mwi: float
        Otsu threshold used to map the contours
    """
    # keep only valid pixel values: flatten, drop cloudy pixels, drop NaNs
    valid = im_ndwi.reshape(-1)[~cloud_mask.reshape(-1)]
    valid = valid[~np.isnan(valid)]
    # global Otsu threshold on the valid NDWI values
    t_otsu = filters.threshold_otsu(valid)
    # contour only inside the reference-shoreline buffer: NaN elsewhere
    im_masked = np.copy(im_ndwi)
    im_masked[~im_ref_buffer] = np.nan
    contours = measure.find_contours(im_masked, t_otsu)
    # discard contours that contain NaNs (touching cloud pixels)
    contours = process_contours(contours)
    return contours, t_otsu
def find_wl_contours2(im_ms, im_labels, cloud_mask, buffer_size, im_ref_buffer):
    """
    New robust method for extracting shorelines. Incorporates the classification
    component to refine the treshold and make it specific to the sand/water interface.
    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        RGB + downsampled NIR and SWIR
    im_labels: np.array
        3D image containing a boolean image for each class in the order (sand, swash, water)
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    buffer_size: int
        size of the buffer around the sandy beach over which the pixels are considered in the
        thresholding algorithm.
    im_ref_buffer: np.array
        binary image containing a buffer around the reference shoreline

    Returns:
    -----------
    contours_mwi: list of np.arrays
        contains the coordinates of the contour lines extracted from the
        MNDWI (Modified Normalized Difference Water Index) image
    t_mwi: float
        Otsu sand/water threshold used to map the contours
    """
    nrows, ncols = cloud_mask.shape
    # water indices: MNDWI (SWIR - G) and NDWI (NIR - G)
    im_mwi = SDS_tools.nd_index(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)
    im_wi = SDS_tools.nd_index(im_ms[:, :, 3], im_ms[:, :, 1], cloud_mask)
    # stack both indices as a (npixels, 2) matrix: column 0 = wi, column 1 = mwi
    vec_ind = np.stack((im_wi, im_mwi), axis=-1).reshape(nrows * ncols, 2)
    # flatten the sand and water class masks
    vec_sand = im_labels[:, :, 0].reshape(ncols * nrows)
    vec_water = im_labels[:, :, 2].reshape(ncols * nrows)
    # dilate the sand mask to obtain a buffer zone around the beach
    im_buffer = morphology.binary_dilation(im_labels[:, :, 0], morphology.disk(buffer_size))
    vec_buffer = im_buffer.reshape(nrows * ncols)
    # pixel intensities of water and sand inside the buffer
    int_water = vec_ind[np.logical_and(vec_buffer, vec_water), :]
    int_sand = vec_ind[np.logical_and(vec_buffer, vec_sand), :]
    # balance the two classes by randomly subsampling the larger one
    if len(int_water) > 0 and len(int_sand) > 0:
        if np.argmin([int_sand.shape[0], int_water.shape[0]]) == 1:
            keep = np.random.choice(int_sand.shape[0], int_water.shape[0], replace=False)
            int_sand = int_sand[keep, :]
        else:
            keep = np.random.choice(int_water.shape[0], int_sand.shape[0], replace=False)
            int_water = int_water[keep, :]
    # Otsu threshold of the combined sand/water samples for each index.
    # NOTE(review): column 0 holds the NIR-G index and column 1 the SWIR-G
    # index, yet t_mwi is taken from column 0 and t_wi from column 1 --
    # looks swapped; preserved as-is, confirm against upstream before changing.
    int_all = np.append(int_water, int_sand, axis=0)
    t_mwi = filters.threshold_otsu(int_all[:, 0])
    t_wi = filters.threshold_otsu(int_all[:, 1])
    # contour both index images, restricted to the reference-shoreline buffer
    im_wi_buffer = np.copy(im_wi)
    im_wi_buffer[~im_ref_buffer] = np.nan
    im_mwi_buffer = np.copy(im_mwi)
    im_mwi_buffer[~im_ref_buffer] = np.nan
    contours_wi = measure.find_contours(im_wi_buffer, t_wi)
    contours_mwi = measure.find_contours(im_mwi_buffer, t_mwi)
    # discard contour points that are NaNs (around clouds)
    contours_wi = process_contours(contours_wi)
    contours_mwi = process_contours(contours_mwi)
    # only the MNDWI contours and threshold are returned
    return contours_mwi, t_mwi
###################################################################################################
# SHORELINE PROCESSING FUNCTIONS
###################################################################################################
def create_shoreline_buffer(im_shape, georef, image_epsg, pixel_size, settings):
    """
    Creates a buffer around the reference shoreline. The size of the buffer is
    given by settings['max_dist_ref'].
    KV WRL 2018

    Arguments:
    -----------
    im_shape: np.array
        size of the image (rows,columns)
    georef: np.array
        vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
    image_epsg: int
        spatial reference system of the image from which the contours were extracted
    pixel_size: int
        size of the pixel in metres (15 for Landsat, 10 for Sentinel-2)
    settings: dict with the following keys
        'output_epsg': int
            output spatial reference system
        'reference_shoreline': np.array
            coordinates of the reference shoreline
        'max_dist_ref': int
            maximum distance from the reference shoreline in metres

    Returns:
    -----------
    im_buffer: np.array
        binary image, True where the buffer is, False otherwise
    """
    # without a reference shoreline the whole image is usable
    im_buffer = np.ones(im_shape).astype(bool)
    if 'reference_shoreline' not in settings.keys():
        return im_buffer
    # project the reference shoreline into pixel coordinates (drop z column)
    ref_sl = settings['reference_shoreline']
    ref_sl_conv = SDS_tools.convert_epsg(ref_sl, settings['output_epsg'], image_epsg)[:, :-1]
    pix = np.round(SDS_tools.convert_world2pix(ref_sl_conv, georef)).astype(int)
    # discard points that fall outside the image frame
    inside = np.logical_and(
        np.logical_and(pix[:, 0] > 0, pix[:, 0] < im_shape[1]),
        np.logical_and(pix[:, 1] > 0, pix[:, 1] < im_shape[0]))
    pix = pix[inside, :]
    # rasterise the shoreline points (vectorised instead of a per-point loop)
    im_binary = np.zeros(im_shape)
    im_binary[pix[:, 1], pix[:, 0]] = 1
    im_binary = im_binary.astype(bool)
    # dilate by max_dist_ref (converted from metres to pixels)
    radius = np.ceil(settings['max_dist_ref'] / pixel_size)
    im_buffer = morphology.binary_dilation(im_binary, morphology.disk(radius))
    return im_buffer
def process_contours(contours):
    """
    Remove contours that contain NaNs, usually these are contours that are in contact
    with clouds.
    KV WRL 2020

    Arguments:
    -----------
    contours: list of np.array
        image contours as detected by the function skimage.measure.find_contours

    Returns:
    -----------
    contours: list of np.array
        processed image contours (only the ones that do not contains NaNs)
    """
    cleaned = []
    for contour in contours:
        nan_mask = np.isnan(contour)
        # contours without NaNs are kept untouched
        if not nan_mask.any():
            cleaned.append(contour)
            continue
        # otherwise strip the rows containing NaNs and keep the contour
        # only if at least two points remain (one point is not a line)
        trimmed = np.delete(contour, np.where(nan_mask)[0], axis=0)
        if len(trimmed) > 1:
            cleaned.append(trimmed)
    return cleaned
def process_shoreline(contours, cloud_mask, georef, image_epsg, settings):
    """
    Converts the contours from image coordinates to world coordinates.
    This function also removes the contours that are too small to be a shoreline
    (based on the parameter settings['min_length_sl'])
    KV WRL 2018

    Arguments:
    -----------
    contours: np.array or list of np.array
        image contours as detected by the function find_contours
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    georef: np.array
        vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
    image_epsg: int
        spatial reference system of the image from which the contours were extracted
    settings: dict with the following keys
        'output_epsg': int
            output spatial reference system
        'min_length_sl': float
            minimum length of shoreline contour to be kept (in meters)

    Returns:
    -----------
    shoreline: np.array
        array of points with the X and Y coordinates of the shoreline
    """
    # convert pixel coordinates to world coordinates
    contours_world = SDS_tools.convert_pix2world(contours, georef)
    # convert world coordinates to the desired spatial reference system
    contours_epsg = SDS_tools.convert_epsg(contours_world, image_epsg, settings['output_epsg'])
    # discard contours whose length is below min_length_sl: these very small
    # contours do not correspond to the shoreline.
    # (LineString accepts an (n, 2) array directly; only x/y are used so the
    # length is planar, as in the original per-point tuple construction)
    min_length = settings['min_length_sl']
    contours_long = [wl for wl in contours_epsg
                     if LineString(wl[:, :2]).length >= min_length]
    # stack the x/y points of the remaining contours into one (n, 2) array
    # in a single allocation (previously accumulated with repeated np.append)
    if contours_long:
        shoreline = np.vstack([wl[:, :2] for wl in contours_long])
    else:
        shoreline = np.zeros((0, 2))
    # remove any shoreline point closer than 30 m to a cloud pixel
    if sum(sum(cloud_mask)) > 0:
        # get the coordinates of the cloud pixels
        idx_cloud = np.where(cloud_mask)
        idx_cloud = np.array([(idx_cloud[0][k], idx_cloud[1][k]) for k in range(len(idx_cloud[0]))])
        # convert to world coordinates in the same epsg as the shoreline points
        coords_cloud = SDS_tools.convert_epsg(SDS_tools.convert_pix2world(idx_cloud, georef),
                                              image_epsg, settings['output_epsg'])[:, :-1]
        # only keep the shoreline points that are at least 30m from any cloud pixel
        idx_keep = np.ones(len(shoreline)).astype(bool)
        for k in range(len(shoreline)):
            if np.any(np.linalg.norm(shoreline[k, :] - coords_cloud, axis=1) < 30):
                idx_keep[k] = False
        shoreline = shoreline[idx_keep]
    return shoreline
###################################################################################################
# PLOTTING FUNCTIONS
###################################################################################################
def show_detection(im_ms, cloud_mask, im_labels, shoreline,image_epsg, georef,
                   settings, date, satname):
    """
    Shows the detected shoreline to the user for visual quality control.
    The user can accept/reject the detected shorelines by using keep/skip
    buttons.
    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        RGB + downsampled NIR and SWIR
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    im_labels: np.array
        3D image containing a boolean image for each class in the order (sand, swash, water)
    shoreline: np.array
        array of points with the X and Y coordinates of the shoreline
    image_epsg: int
        spatial reference system of the image from which the contours were extracted
    georef: np.array
        vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
    date: string
        date at which the image was taken
    satname: string
        indicates the satname (L5,L7,L8 or S2)
    settings: dict with the following keys
        'inputs': dict
            input parameters (sitename, filepath, polygon, dates, sat_list)
        'output_epsg': int
            output spatial reference system as EPSG code
        'check_detection': bool
            if True, lets user manually accept/reject the mapped shorelines
        'save_figure': bool
            if True, saves a -jpg file for each mapped shoreline

    Returns:
    -----------
    skip_image: boolean
        True if the user wants to skip the image, False otherwise

    Raises:
    -----------
    StopIteration
        when the user presses <escape> to cancel the checking session
    """
    sitename = settings['inputs']['sitename']
    filepath_data = settings['inputs']['filepath']
    # subfolder where the .jpg file is stored if the user accepts the shoreline detection
    filepath = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
    # display RGB: band order [2,1,0] reorders the multispectral stack to R,G,B
    im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
    # compute classified image
    im_class = np.copy(im_RGB)
    # NOTE(review): cm.get_cmap is deprecated in recent matplotlib releases --
    # confirm the pinned matplotlib version still supports it
    cmap = cm.get_cmap('tab20c')
    colorpalette = cmap(np.arange(0,13,1))
    # colours rows: 0 = sand, 1 = whitewater, 2 = water (RGBA)
    colours = np.zeros((3,4))
    colours[0,:] = colorpalette[5]
    colours[1,:] = np.array([204/255,1,1,1])
    colours[2,:] = np.array([0,91/255,1,1])
    # paint each class colour onto the RGB image
    for k in range(0,im_labels.shape[2]):
        im_class[im_labels[:,:,k],0] = colours[k,0]
        im_class[im_labels[:,:,k],1] = colours[k,1]
        im_class[im_labels[:,:,k],2] = colours[k,2]
    # compute MNDWI grayscale image (SWIR - G)
    im_mwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
    # transform world coordinates of shoreline into pixel coordinates
    # use try/except in case there are no coordinates to be transformed (shoreline = [])
    try:
        sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
                                                                    settings['output_epsg'],
                                                                    image_epsg)[:,[0,1]], georef)
    except:
        # if try fails, just add nan into the shoreline vector so the next parts can still run
        sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
    if plt.get_fignums():
        # get open figure if it exists (the window is reused between images)
        fig = plt.gcf()
        ax1 = fig.axes[0]
        ax2 = fig.axes[1]
        ax3 = fig.axes[2]
    else:
        # else create a new figure
        fig = plt.figure()
        fig.set_size_inches([18, 9])
        mng = plt.get_current_fig_manager()
        mng.window.showMaximized()
        # according to the image shape, decide whether it is better to have the images
        # in vertical subplots or horizontal subplots
        if im_RGB.shape[1] > 2.5*im_RGB.shape[0]:
            # vertical subplots
            gs = gridspec.GridSpec(3, 1)
            gs.update(bottom=0.03, top=0.97, left=0.03, right=0.97)
            ax1 = fig.add_subplot(gs[0,0])
            ax2 = fig.add_subplot(gs[1,0], sharex=ax1, sharey=ax1)
            ax3 = fig.add_subplot(gs[2,0], sharex=ax1, sharey=ax1)
        else:
            # horizontal subplots
            gs = gridspec.GridSpec(1, 3)
            gs.update(bottom=0.05, top=0.95, left=0.05, right=0.95)
            ax1 = fig.add_subplot(gs[0,0])
            ax2 = fig.add_subplot(gs[0,1], sharex=ax1, sharey=ax1)
            ax3 = fig.add_subplot(gs[0,2], sharex=ax1, sharey=ax1)
    # change the color of nans to either black (0.0) or white (1.0) or somewhere in between
    nan_color = 1.0
    im_RGB = np.where(np.isnan(im_RGB), nan_color, im_RGB)
    im_class = np.where(np.isnan(im_class), 1.0, im_class)
    # create image 1 (RGB)
    ax1.imshow(im_RGB)
    ax1.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
    ax1.axis('off')
    ax1.set_title(sitename, fontweight='bold', fontsize=16)
    # create image 2 (classification)
    ax2.imshow(im_class)
    ax2.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
    ax2.axis('off')
    orange_patch = mpatches.Patch(color=colours[0,:], label='sand')
    white_patch = mpatches.Patch(color=colours[1,:], label='whitewater')
    blue_patch = mpatches.Patch(color=colours[2,:], label='water')
    black_line = mlines.Line2D([],[],color='k',linestyle='-', label='shoreline')
    ax2.legend(handles=[orange_patch,white_patch,blue_patch, black_line],
               bbox_to_anchor=(1, 0.5), fontsize=10)
    ax2.set_title(date, fontweight='bold', fontsize=16)
    # create image 3 (MNDWI)
    ax3.imshow(im_mwi, cmap='bwr')
    ax3.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
    ax3.axis('off')
    ax3.set_title(satname, fontweight='bold', fontsize=16)
    # additional options
    #    ax1.set_anchor('W')
    #    ax2.set_anchor('W')
    #    cb = plt.colorbar()
    #    cb.ax.tick_params(labelsize=10)
    #    cb.set_label('MNDWI values')
    #    ax3.set_anchor('W')
    # if check_detection is True, let user manually accept/reject the images
    skip_image = False
    if settings['check_detection']:
        # set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
        # a mutable dict is used so the nested closure can report the pressed
        # key back to this scope after the keypress event
        key_event = {}
        def press(event):
            # store what key was pressed in the dictionary
            key_event['pressed'] = event.key
        # let the user press a key, right arrow to keep the image, left arrow to skip it
        # to break the loop the user can press 'escape'
        while True:
            btn_keep = plt.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
                                transform=ax1.transAxes,
                                bbox=dict(boxstyle="square", ec='k',fc='w'))
            btn_skip = plt.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
                                transform=ax1.transAxes,
                                bbox=dict(boxstyle="square", ec='k',fc='w'))
            btn_esc = plt.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
                               transform=ax1.transAxes,
                               bbox=dict(boxstyle="square", ec='k',fc='w'))
            plt.draw()
            fig.canvas.mpl_connect('key_press_event', press)
            plt.waitforbuttonpress()
            # after button is pressed, remove the buttons
            btn_skip.remove()
            btn_keep.remove()
            btn_esc.remove()
            # keep/skip image according to the pressed key, 'escape' to break the loop
            if key_event.get('pressed') == 'right':
                skip_image = False
                break
            elif key_event.get('pressed') == 'left':
                skip_image = True
                break
            elif key_event.get('pressed') == 'escape':
                plt.close()
                raise StopIteration('User cancelled checking shoreline detection')
            else:
                # unrecognised key: wait for the next press
                plt.waitforbuttonpress()
    # if save_figure is True, save a .jpg under /jpg_files/detection
    if settings['save_figure'] and not skip_image:
        fig.savefig(os.path.join(filepath, date + '_' + satname + '.jpg'), dpi=150)
    # don't close the figure window, but remove all axes and settings, ready for next plot
    for ax in fig.axes:
        ax.clear()
    return skip_image
def adjust_detection(im_ms, cloud_mask, im_labels, im_ref_buffer, image_epsg, georef,
settings, date, satname, buffer_size_pixels):
"""
Advanced version of show detection where the user can adjust the detected
shorelines with a slide bar.
KV WRL 2020
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_labels: np.array
3D image containing a boolean image for each class in the order (sand, swash, water)
im_ref_buffer: np.array
Binary image containing a buffer around the reference shoreline
image_epsg: int
spatial reference system of the image from which the contours were extracted
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
date: string
date at which the image was taken
satname: string
indicates the satname (L5,L7,L8 or S2)
buffer_size_pixels: int
buffer_size converted to number of pixels
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'output_epsg': int
output spatial reference system as EPSG code
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
Returns:
-----------
skip_image: boolean
True if the user wants to skip the image, False otherwise
shoreline: np.array
array of points with the X and Y coordinates of the shoreline
t_mndwi: float
value of the MNDWI threshold used to map the shoreline
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
# subfolder where the .jpg file is stored if the user accepts the shoreline detection
filepath = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
# format date
date_str = datetime.strptime(date,'%Y-%m-%d-%H-%M-%S').strftime('%Y-%m-%d %H:%M:%S')
im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
# compute classified image
im_class = np.copy(im_RGB)
cmap = cm.get_cmap('tab20c')
colorpalette = cmap(np.arange(0,13,1))
colours = np.zeros((3,4))
colours[0,:] = colorpalette[5]
colours[1,:] = np.array([204/255,1,1,1])
colours[2,:] = np.array([0,91/255,1,1])
for k in range(0,im_labels.shape[2]):
im_class[im_labels[:,:,k],0] = colours[k,0]
im_class[im_labels[:,:,k],1] = colours[k,1]
im_class[im_labels[:,:,k],2] = colours[k,2]
# compute MNDWI grayscale image
im_mndwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# buffer MNDWI using reference shoreline
im_mndwi_buffer = np.copy(im_mndwi)
im_mndwi_buffer[~im_ref_buffer] = np.nan
# get MNDWI pixel intensity in each class (for histogram plot)
int_sand = im_mndwi[im_labels[:,:,0]]
int_ww = im_mndwi[im_labels[:,:,1]]
int_water = im_mndwi[im_labels[:,:,2]]
labels_other = np.logical_and(np.logical_and(~im_labels[:,:,0],~im_labels[:,:,1]),~im_labels[:,:,2])
int_other = im_mndwi[labels_other]
# create figure
if plt.get_fignums():
# if it exists, open the figure
fig = plt.gcf()
ax1 = fig.axes[0]
ax2 = fig.axes[1]
ax3 = fig.axes[2]
ax4 = fig.axes[3]
else:
# else create a new figure
fig = plt.figure()
fig.set_size_inches([18, 9])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
gs = gridspec.GridSpec(2, 3, height_ratios=[4,1])
gs.update(bottom=0.05, top=0.95, left=0.03, right=0.97)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[0,1], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[0,2], sharex=ax1, sharey=ax1)
ax4 = fig.add_subplot(gs[1,:])
##########################################################################
# to do: rotate image if too wide
##########################################################################
# change the color of nans to either black (0.0) or white (1.0) or somewhere in between
nan_color = 1.0
im_RGB = np.where(np.isnan(im_RGB), nan_color, im_RGB)
im_class = np.where(np.isnan(im_class), 1.0, im_class)
# plot image 1 (RGB)
ax1.imshow(im_RGB)
ax1.axis('off')
ax1.set_title('%s - %s'%(sitename, satname), fontsize=12)
# plot image 2 (classification)
ax2.imshow(im_class)
ax2.axis('off')
orange_patch = mpatches.Patch(color=colours[0,:], label='sand')
white_patch = mpatches.Patch(color=colours[1,:], label='whitewater')
blue_patch = mpatches.Patch(color=colours[2,:], label='water')
black_line = mlines.Line2D([],[],color='k',linestyle='-', label='shoreline')
ax2.legend(handles=[orange_patch,white_patch,blue_patch, black_line],
bbox_to_anchor=(1.1, 0.5), fontsize=10)
ax2.set_title(date_str, fontsize=12)
# plot image 3 (MNDWI)
ax3.imshow(im_mndwi, cmap='bwr')
ax3.axis('off')
ax3.set_title('MNDWI', fontsize=12)
# plot histogram of MNDWI values
binwidth = 0.01
ax4.set_facecolor('0.75')
ax4.yaxis.grid(color='w', linestyle='--', linewidth=0.5)
ax4.set(ylabel='PDF',yticklabels=[], xlim=[-1,1])
if len(int_sand) > 0 and sum(~np.isnan(int_sand)) > 0:
bins = np.arange(np.nanmin(int_sand), np.nanmax(int_sand) + binwidth, binwidth)
ax4.hist(int_sand, bins=bins, density=True, color=colours[0,:], label='sand')
if len(int_ww) > 0 and sum(~np.isnan(int_ww)) > 0:
bins = np.arange(np.nanmin(int_ww), np.nanmax(int_ww) + binwidth, binwidth)
ax4.hist(int_ww, bins=bins, density=True, color=colours[1,:], label='whitewater', alpha=0.75)
if len(int_water) > 0 and sum(~np.isnan(int_water)) > 0:
bins = np.arange(np.nanmin(int_water), np.nanmax(int_water) + binwidth, binwidth)
ax4.hist(int_water, bins=bins, density=True, color=colours[2,:], label='water', alpha=0.75)
if len(int_other) > 0 and sum(~np.isnan(int_other)) > 0:
bins = np.arange(np.nanmin(int_other), np.nanmax(int_other) + binwidth, binwidth)
ax4.hist(int_other, bins=bins, density=True, color='C4', label='other', alpha=0.5)
# automatically map the shoreline based on the classifier if enough sand pixels
try:
if sum(sum(im_labels[:,:,0])) > 10:
# use classification to refine threshold and extract the sand/water interface
contours_mndwi, t_mndwi = find_wl_contours2(im_ms, im_labels, cloud_mask,
buffer_size_pixels, im_ref_buffer)
else:
# find water contours on MNDWI grayscale image
contours_mndwi, t_mndwi = find_wl_contours1(im_mndwi, cloud_mask, im_ref_buffer)
except:
print('Could not map shoreline so image was skipped')
# clear axes and return skip_image=True, so that image is skipped above
for ax in fig.axes:
ax.clear()
return True,[],[]
# process the water contours into a shoreline
shoreline = process_shoreline(contours_mndwi, cloud_mask, georef, image_epsg, settings)
# convert shoreline to pixels
if len(shoreline) > 0:
sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
settings['output_epsg'],
image_epsg)[:,[0,1]], georef)
else: sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
# plot the shoreline on the images
sl_plot1 = ax1.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
sl_plot2 = ax2.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
sl_plot3 = ax3.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
t_line = ax4.axvline(x=t_mndwi,ls='--', c='k', lw=1.5, label='threshold')
ax4.legend(loc=1)
plt.draw() # to update the plot
# adjust the threshold manually by letting the user change the threshold
ax4.set_title('Click on the plot below to change the location of the threhsold and adjust the shoreline detection. When finished, press <Enter>')
while True:
# let the user click on the threshold plot
pt = ginput(n=1, show_clicks=True, timeout=-1)
# if a point was clicked
if len(pt) > 0:
# if user clicked somewhere wrong and value is not between -1 and 1
if np.abs(pt[0][0]) >= 1: continue
# update the threshold value
t_mndwi = pt[0][0]
# update the plot
t_line.set_xdata([t_mndwi,t_mndwi])
# map contours with new threshold
contours = measure.find_contours(im_mndwi_buffer, t_mndwi)
# remove contours that contain NaNs (due to cloud pixels in the contour)
contours = process_contours(contours)
# process the water contours into a shoreline
shoreline = process_shoreline(contours, cloud_mask, georef, image_epsg, settings)
# convert shoreline to pixels
if len(shoreline) > 0:
sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
settings['output_epsg'],
image_epsg)[:,[0,1]], georef)
else: sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
# update the plotted shorelines
sl_plot1[0].set_data([sl_pix[:,0], sl_pix[:,1]])
sl_plot2[0].set_data([sl_pix[:,0], sl_pix[:,1]])
sl_plot3[0].set_data([sl_pix[:,0], sl_pix[:,1]])
fig.canvas.draw_idle()
else:
ax4.set_title('MNDWI pixel intensities and threshold')
break
# let user manually accept/reject the image
skip_image = False
# set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
# this variable needs to be immuatable so we can access it after the keypress event
key_event = {}
def press(event):
    """Matplotlib key_press_event callback: record which key was pressed.

    Stores the key name in the enclosing (mutable) `key_event` dict so the
    loop that follows the blocking `plt.waitforbuttonpress()` call can read
    it after the event handler has returned.
    """
    # store what key was pressed in the dictionary
    key_event['pressed'] = event.key
# let the user press a key, right arrow to keep the image, left arrow to skip it
# to break the loop the user can press 'escape'
while True:
btn_keep = plt.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_skip = plt.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_esc = plt.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
plt.draw()
fig.canvas.mpl_connect('key_press_event', press)
plt.waitforbuttonpress()
# after button is pressed, remove the buttons
btn_skip.remove()
btn_keep.remove()
btn_esc.remove()
# keep/skip image according to the pressed key, 'escape' to break the loop
if key_event.get('pressed') == 'right':
skip_image = False
break
elif key_event.get('pressed') == 'left':
skip_image = True
break
elif key_event.get('pressed') == 'escape':
plt.close()
raise StopIteration('User cancelled checking shoreline detection')
else:
plt.waitforbuttonpress()
# if save_figure is True, save a .jpg under /jpg_files/detection
if settings['save_figure'] and not skip_image:
fig.savefig(os.path.join(filepath, date + '_' + satname + '.jpg'), dpi=150)
# don't close the figure window, but remove all axes and settings, ready for next plot
for ax in fig.axes:
ax.clear()
return skip_image, shoreline, t_mndwi | [
"coastsat.SDS_tools.get_filepath",
"skimage.filters.threshold_otsu",
"numpy.array",
"coastsat.SDS_preprocess.preprocess_single",
"coastsat.SDS_tools.merge_output",
"numpy.linalg.norm",
"numpy.nanmin",
"matplotlib.lines.Line2D",
"numpy.arange",
"skimage.morphology.binary_dilation",
"os.path.exist... | [((874, 897), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (883, 897), True, 'import numpy as np\n'), ((3131, 3194), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (3143, 3194), False, 'import os\n'), ((3307, 3323), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3316, 3323), True, 'import matplotlib.pyplot as plt\n'), ((11121, 11138), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (11136, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11270, 11300), 'coastsat.SDS_tools.merge_output', 'SDS_tools.merge_output', (['output'], {}), '(output)\n', (11292, 11300), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((11362, 11399), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename'], {}), '(filepath_data, sitename)\n', (11374, 11399), False, 'import os\n'), ((12581, 12622), 'numpy.expand_dims', 'np.expand_dims', (['im_ms[im_bool, 0]'], {'axis': '(1)'}), '(im_ms[im_bool, 0], axis=1)\n', (12595, 12622), True, 'import numpy as np\n'), ((12800, 12862), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 1], cloud_mask)\n', (12818, 12862), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((12972, 13034), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (12990, 13034), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13143, 13205), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 2]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 2], cloud_mask)\n', (13161, 13205), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13319, 13381), 'coastsat.SDS_tools.nd_index', 
'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 3]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 3], cloud_mask)\n', (13337, 13381), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13488, 13550), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 0]', 'im_ms[:, :, 2]', 'cloud_mask'], {}), '(im_ms[:, :, 0], im_ms[:, :, 2], cloud_mask)\n', (13506, 13550), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13936, 13967), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_NIRG', '(1)'], {}), '(im_NIRG, 1)\n', (13955, 13967), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14065, 14097), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_SWIRG', '(1)'], {}), '(im_SWIRG, 1)\n', (14084, 14097), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14195, 14226), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_NIRR', '(1)'], {}), '(im_NIRR, 1)\n', (14214, 14226), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14324, 14358), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_SWIRNIR', '(1)'], {}), '(im_SWIRNIR, 1)\n', (14343, 14358), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14456, 14485), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_BR', '(1)'], {}), '(im_BR, 1)\n', (14475, 14485), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((16849, 16934), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['im_sand'], {'min_size': 'min_beach_area', 'connectivity': '(2)'}), '(im_sand, min_size=min_beach_area,\n connectivity=2)\n', (16880, 16934), True, 'import skimage.morphology as morphology\n'), ((16946, 17032), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['im_water'], {'min_size': 'min_beach_area', 'connectivity': '(2)'}), '(im_water, min_size=min_beach_area,\n connectivity=2)\n', (16977, 17032), True, 'import skimage.morphology as 
morphology\n'), ((17046, 17094), 'numpy.stack', 'np.stack', (['(im_sand, im_swash, im_water)'], {'axis': '(-1)'}), '((im_sand, im_swash, im_water), axis=-1)\n', (17054, 17094), True, 'import numpy as np\n'), ((18461, 18488), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['vec'], {}), '(vec)\n', (18483, 18488), True, 'import skimage.filters as filters\n'), ((18580, 18596), 'numpy.copy', 'np.copy', (['im_ndwi'], {}), '(im_ndwi)\n', (18587, 18596), True, 'import numpy as np\n'), ((18656, 18701), 'skimage.measure.find_contours', 'measure.find_contours', (['im_ndwi_buffer', 't_otsu'], {}), '(im_ndwi_buffer, t_otsu)\n', (18677, 18701), True, 'import skimage.measure as measure\n'), ((20102, 20164), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (20120, 20164), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((20242, 20304), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 1], cloud_mask)\n', (20260, 20304), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((20343, 20377), 'numpy.stack', 'np.stack', (['(im_wi, im_mwi)'], {'axis': '(-1)'}), '((im_wi, im_mwi), axis=-1)\n', (20351, 20377), True, 'import numpy as np\n'), ((20619, 20647), 'skimage.morphology.disk', 'morphology.disk', (['buffer_size'], {}), '(buffer_size)\n', (20634, 20647), True, 'import skimage.morphology as morphology\n'), ((20664, 20714), 'skimage.morphology.binary_dilation', 'morphology.binary_dilation', (['im_labels[:, :, 0]', 'se'], {}), '(im_labels[:, :, 0], se)\n', (20690, 20714), True, 'import skimage.morphology as morphology\n'), ((21433, 21471), 'numpy.append', 'np.append', (['int_water', 'int_sand'], {'axis': '(0)'}), '(int_water, int_sand, axis=0)\n', (21442, 21471), True, 'import numpy as np\n'), ((21483, 21520), 'skimage.filters.threshold_otsu', 
'filters.threshold_otsu', (['int_all[:, 0]'], {}), '(int_all[:, 0])\n', (21505, 21520), True, 'import skimage.filters as filters\n'), ((21531, 21568), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['int_all[:, 1]'], {}), '(int_all[:, 1])\n', (21553, 21568), True, 'import skimage.filters as filters\n'), ((21625, 21639), 'numpy.copy', 'np.copy', (['im_wi'], {}), '(im_wi)\n', (21632, 21639), True, 'import numpy as np\n'), ((21702, 21717), 'numpy.copy', 'np.copy', (['im_mwi'], {}), '(im_mwi)\n', (21709, 21717), True, 'import numpy as np\n'), ((21779, 21820), 'skimage.measure.find_contours', 'measure.find_contours', (['im_wi_buffer', 't_wi'], {}), '(im_wi_buffer, t_wi)\n', (21800, 21820), True, 'import skimage.measure as measure\n'), ((21840, 21883), 'skimage.measure.find_contours', 'measure.find_contours', (['im_mwi_buffer', 't_mwi'], {}), '(im_mwi_buffer, t_mwi)\n', (21861, 21883), True, 'import skimage.measure as measure\n'), ((27022, 27067), 'coastsat.SDS_tools.convert_pix2world', 'SDS_tools.convert_pix2world', (['contours', 'georef'], {}), '(contours, georef)\n', (27049, 27067), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((27156, 27231), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['contours_world', 'image_epsg', "settings['output_epsg']"], {}), "(contours_world, image_epsg, settings['output_epsg'])\n", (27178, 27231), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((27739, 27751), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (27747, 27751), True, 'import numpy as np\n'), ((27767, 27779), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (27775, 27779), True, 'import numpy as np\n'), ((30959, 31022), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (30971, 31022), False, 'import os\n'), ((31037, 31122), 'coastsat.SDS_preprocess.rescale_image_intensity', 
'SDS_preprocess.rescale_image_intensity', (['im_ms[:, :, [2, 1, 0]]', 'cloud_mask', '(99.9)'], {}), '(im_ms[:, :, [2, 1, 0]], cloud_mask, 99.9\n )\n', (31075, 31122), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((31161, 31176), 'numpy.copy', 'np.copy', (['im_RGB'], {}), '(im_RGB)\n', (31168, 31176), True, 'import numpy as np\n'), ((31188, 31209), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20c"""'], {}), "('tab20c')\n", (31199, 31209), True, 'import matplotlib.cm as cm\n'), ((31267, 31283), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (31275, 31283), True, 'import numpy as np\n'), ((31337, 31367), 'numpy.array', 'np.array', (['[204 / 255, 1, 1, 1]'], {}), '([204 / 255, 1, 1, 1])\n', (31345, 31367), True, 'import numpy as np\n'), ((31382, 31411), 'numpy.array', 'np.array', (['[0, 91 / 255, 1, 1]'], {}), '([0, 91 / 255, 1, 1])\n', (31390, 31411), True, 'import numpy as np\n'), ((31655, 31717), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (31673, 31717), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((32331, 32348), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (32346, 32348), True, 'import matplotlib.pyplot as plt\n'), ((34126, 34175), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[0, :]', 'label': '"""sand"""'}), "(color=colours[0, :], label='sand')\n", (34140, 34175), True, 'import matplotlib.patches as mpatches\n'), ((34193, 34248), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[1, :]', 'label': '"""whitewater"""'}), "(color=colours[1, :], label='whitewater')\n", (34207, 34248), True, 'import matplotlib.patches as mpatches\n'), ((34265, 34315), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[2, :]', 'label': '"""water"""'}), "(color=colours[2, :], label='water')\n", (34279, 34315), True, 'import 
matplotlib.patches as mpatches\n'), ((34332, 34398), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'label': '"""shoreline"""'}), "([], [], color='k', linestyle='-', label='shoreline')\n", (34345, 34398), True, 'import matplotlib.lines as mlines\n'), ((39381, 39444), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (39393, 39444), False, 'import os\n'), ((39566, 39651), 'coastsat.SDS_preprocess.rescale_image_intensity', 'SDS_preprocess.rescale_image_intensity', (['im_ms[:, :, [2, 1, 0]]', 'cloud_mask', '(99.9)'], {}), '(im_ms[:, :, [2, 1, 0]], cloud_mask, 99.9\n )\n', (39604, 39651), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((39690, 39705), 'numpy.copy', 'np.copy', (['im_RGB'], {}), '(im_RGB)\n', (39697, 39705), True, 'import numpy as np\n'), ((39717, 39738), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20c"""'], {}), "('tab20c')\n", (39728, 39738), True, 'import matplotlib.cm as cm\n'), ((39796, 39812), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (39804, 39812), True, 'import numpy as np\n'), ((39866, 39896), 'numpy.array', 'np.array', (['[204 / 255, 1, 1, 1]'], {}), '([204 / 255, 1, 1, 1])\n', (39874, 39896), True, 'import numpy as np\n'), ((39911, 39940), 'numpy.array', 'np.array', (['[0, 91 / 255, 1, 1]'], {}), '([0, 91 / 255, 1, 1])\n', (39919, 39940), True, 'import numpy as np\n'), ((40186, 40248), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (40204, 40248), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((40312, 40329), 'numpy.copy', 'np.copy', (['im_mndwi'], {}), '(im_mndwi)\n', (40319, 40329), True, 'import numpy as np\n'), ((40744, 40761), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (40759, 
40761), True, 'import matplotlib.pyplot as plt\n'), ((42135, 42184), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[0, :]', 'label': '"""sand"""'}), "(color=colours[0, :], label='sand')\n", (42149, 42184), True, 'import matplotlib.patches as mpatches\n'), ((42202, 42257), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[1, :]', 'label': '"""whitewater"""'}), "(color=colours[1, :], label='whitewater')\n", (42216, 42257), True, 'import matplotlib.patches as mpatches\n'), ((42274, 42324), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[2, :]', 'label': '"""water"""'}), "(color=colours[2, :], label='water')\n", (42288, 42324), True, 'import matplotlib.patches as mpatches\n'), ((42341, 42407), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'label': '"""shoreline"""'}), "([], [], color='k', linestyle='-', label='shoreline')\n", (42354, 42407), True, 'import matplotlib.lines as mlines\n'), ((45586, 45596), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (45594, 45596), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2954), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2952, 2954), False, 'import os\n'), ((3206, 3234), 'os.path.exists', 'os.path.exists', (['filepath_jpg'], {}), '(filepath_jpg)\n', (3220, 3234), False, 'import os\n'), ((3248, 3273), 'os.makedirs', 'os.makedirs', (['filepath_jpg'], {}), '(filepath_jpg)\n', (3259, 3273), False, 'import os\n'), ((3470, 3521), 'coastsat.SDS_tools.get_filepath', 'SDS_tools.get_filepath', (["settings['inputs']", 'satname'], {}), "(settings['inputs'], satname)\n", (3492, 3521), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((5135, 5180), 'numpy.ceil', 'np.ceil', (["(settings['buffer_size'] / pixel_size)"], {}), "(settings['buffer_size'] / pixel_size)\n", (5142, 5180), True, 'import numpy as np\n'), ((5211, 5264), 'numpy.ceil', 'np.ceil', (["(settings['min_beach_area'] / pixel_size ** 
2)"], {}), "(settings['min_beach_area'] / pixel_size ** 2)\n", (5218, 5264), True, 'import numpy as np\n'), ((11148, 11159), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11157, 11159), True, 'import matplotlib.pyplot as plt\n'), ((11484, 11506), 'pickle.dump', 'pickle.dump', (['output', 'f'], {}), '(output, f)\n', (11495, 11506), False, 'import pickle\n'), ((12677, 12718), 'numpy.expand_dims', 'np.expand_dims', (['im_ms[im_bool, k]'], {'axis': '(1)'}), '(im_ms[im_bool, k], axis=1)\n', (12691, 12718), True, 'import numpy as np\n'), ((12736, 12773), 'numpy.append', 'np.append', (['features', 'feature'], {'axis': '(-1)'}), '(features, feature, axis=-1)\n', (12745, 12773), True, 'import numpy as np\n'), ((12894, 12934), 'numpy.expand_dims', 'np.expand_dims', (['im_NIRG[im_bool]'], {'axis': '(1)'}), '(im_NIRG[im_bool], axis=1)\n', (12908, 12934), True, 'import numpy as np\n'), ((13066, 13107), 'numpy.expand_dims', 'np.expand_dims', (['im_SWIRG[im_bool]'], {'axis': '(1)'}), '(im_SWIRG[im_bool], axis=1)\n', (13080, 13107), True, 'import numpy as np\n'), ((13237, 13277), 'numpy.expand_dims', 'np.expand_dims', (['im_NIRR[im_bool]'], {'axis': '(1)'}), '(im_NIRR[im_bool], axis=1)\n', (13251, 13277), True, 'import numpy as np\n'), ((13413, 13456), 'numpy.expand_dims', 'np.expand_dims', (['im_SWIRNIR[im_bool]'], {'axis': '(1)'}), '(im_SWIRNIR[im_bool], axis=1)\n', (13427, 13456), True, 'import numpy as np\n'), ((13582, 13620), 'numpy.expand_dims', 'np.expand_dims', (['im_BR[im_bool]'], {'axis': '(1)'}), '(im_BR[im_bool], axis=1)\n', (13596, 13620), True, 'import numpy as np\n'), ((13739, 13777), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_ms[:, :, k]', '(1)'], {}), '(im_ms[:, :, k], 1)\n', (13758, 13777), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14003, 14042), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14017, 14042), True, 'import numpy as np\n'), ((14133, 
14172), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14147, 14172), True, 'import numpy as np\n'), ((14262, 14301), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14276, 14301), True, 'import numpy as np\n'), ((14394, 14433), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14408, 14433), True, 'import numpy as np\n'), ((14521, 14560), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14535, 14560), True, 'import numpy as np\n'), ((15897, 15919), 'numpy.isnan', 'np.isnan', (['vec_features'], {}), '(vec_features)\n', (15905, 15919), True, 'import numpy as np\n'), ((16113, 16135), 'numpy.isnan', 'np.isnan', (['vec_features'], {}), '(vec_features)\n', (16121, 16135), True, 'import numpy as np\n'), ((16166, 16188), 'numpy.isinf', 'np.isinf', (['vec_features'], {}), '(vec_features)\n', (16174, 16188), True, 'import numpy as np\n'), ((16241, 16272), 'numpy.logical_or', 'np.logical_or', (['vec_nan', 'vec_inf'], {}), '(vec_nan, vec_inf)\n', (16254, 16272), True, 'import numpy as np\n'), ((16429, 16479), 'numpy.ones', 'np.ones', (['(cloud_mask.shape[0] * cloud_mask.shape[1])'], {}), '(cloud_mask.shape[0] * cloud_mask.shape[1])\n', (16436, 16479), True, 'import numpy as np\n'), ((23754, 23802), 'coastsat.SDS_tools.convert_world2pix', 'SDS_tools.convert_world2pix', (['ref_sl_conv', 'georef'], {}), '(ref_sl_conv, georef)\n', (23781, 23802), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((23979, 24067), 'numpy.logical_and', 'np.logical_and', (['(ref_sl_pix_rounded[:, 0] > 0)', '(ref_sl_pix_rounded[:, 0] < im_shape[1])'], {}), '(ref_sl_pix_rounded[:, 0] > 0, ref_sl_pix_rounded[:, 0] <\n im_shape[1])\n', (23993, 24067), True, 'import numpy as np\n'), ((24080, 24168), 'numpy.logical_and', 'np.logical_and', 
(['(ref_sl_pix_rounded[:, 1] > 0)', '(ref_sl_pix_rounded[:, 1] < im_shape[0])'], {}), '(ref_sl_pix_rounded[:, 1] > 0, ref_sl_pix_rounded[:, 1] <\n im_shape[0])\n', (24094, 24168), True, 'import numpy as np\n'), ((24184, 24216), 'numpy.logical_and', 'np.logical_and', (['idx_row', 'idx_col'], {}), '(idx_row, idx_col)\n', (24198, 24216), True, 'import numpy as np\n'), ((24396, 24414), 'numpy.zeros', 'np.zeros', (['im_shape'], {}), '(im_shape)\n', (24404, 24414), True, 'import numpy as np\n'), ((24698, 24744), 'numpy.ceil', 'np.ceil', (["(settings['max_dist_ref'] / pixel_size)"], {}), "(settings['max_dist_ref'] / pixel_size)\n", (24705, 24744), True, 'import numpy as np\n'), ((24756, 24792), 'skimage.morphology.disk', 'morphology.disk', (['max_dist_ref_pixels'], {}), '(max_dist_ref_pixels)\n', (24771, 24792), True, 'import skimage.morphology as morphology\n'), ((24813, 24854), 'skimage.morphology.binary_dilation', 'morphology.binary_dilation', (['im_binary', 'se'], {}), '(im_binary, se)\n', (24839, 24854), True, 'import skimage.morphology as morphology\n'), ((27553, 27571), 'shapely.geometry.LineString', 'LineString', (['coords'], {}), '(coords)\n', (27563, 27571), False, 'from shapely.geometry import LineString\n'), ((27839, 27882), 'numpy.append', 'np.append', (['x_points', 'contours_long[k][:, 0]'], {}), '(x_points, contours_long[k][:, 0])\n', (27848, 27882), True, 'import numpy as np\n'), ((27900, 27943), 'numpy.append', 'np.append', (['y_points', 'contours_long[k][:, 1]'], {}), '(y_points, contours_long[k][:, 1])\n', (27909, 27943), True, 'import numpy as np\n'), ((27976, 28006), 'numpy.array', 'np.array', (['[x_points, y_points]'], {}), '([x_points, y_points])\n', (27984, 28006), True, 'import numpy as np\n'), ((28215, 28235), 'numpy.where', 'np.where', (['cloud_mask'], {}), '(cloud_mask)\n', (28223, 28235), True, 'import numpy as np\n'), ((31234, 31253), 'numpy.arange', 'np.arange', (['(0)', '(13)', '(1)'], {}), '(0, 13, 1)\n', (31243, 31253), True, 'import 
numpy as np\n'), ((32411, 32420), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32418, 32420), True, 'import matplotlib.pyplot as plt\n'), ((32570, 32582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32580, 32582), True, 'import matplotlib.pyplot as plt\n'), ((32634, 32663), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (32661, 32663), True, 'import matplotlib.pyplot as plt\n'), ((33678, 33694), 'numpy.isnan', 'np.isnan', (['im_RGB'], {}), '(im_RGB)\n', (33686, 33694), True, 'import numpy as np\n'), ((33739, 33757), 'numpy.isnan', 'np.isnan', (['im_class'], {}), '(im_class)\n', (33747, 33757), True, 'import numpy as np\n'), ((39763, 39782), 'numpy.arange', 'np.arange', (['(0)', '(13)', '(1)'], {}), '(0, 13, 1)\n', (39772, 39782), True, 'import numpy as np\n'), ((40602, 40658), 'numpy.logical_and', 'np.logical_and', (['(~im_labels[:, :, 0])', '(~im_labels[:, :, 1])'], {}), '(~im_labels[:, :, 0], ~im_labels[:, :, 1])\n', (40616, 40658), True, 'import numpy as np\n'), ((40826, 40835), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (40833, 40835), True, 'import matplotlib.pyplot as plt\n'), ((41021, 41033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41031, 41033), True, 'import matplotlib.pyplot as plt\n'), ((41085, 41114), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (41112, 41114), True, 'import matplotlib.pyplot as plt\n'), ((41163, 41208), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {'height_ratios': '[4, 1]'}), '(2, 3, height_ratios=[4, 1])\n', (41180, 41208), False, 'from matplotlib import gridspec\n'), ((41807, 41823), 'numpy.isnan', 'np.isnan', (['im_RGB'], {}), '(im_RGB)\n', (41815, 41823), True, 'import numpy as np\n'), ((41868, 41886), 'numpy.isnan', 'np.isnan', (['im_class'], {}), '(im_class)\n', (41876, 41886), True, 'import numpy as np\n'), ((45187, 45233), 'numpy.array', 'np.array', 
(['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (45195, 45233), True, 'import numpy as np\n'), ((45927, 45968), 'pylab.ginput', 'ginput', ([], {'n': '(1)', 'show_clicks': '(True)', 'timeout': '(-1)'}), '(n=1, show_clicks=True, timeout=-1)\n', (45933, 45968), False, 'from pylab import ginput\n'), ((48693, 48703), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (48701, 48703), True, 'import matplotlib.pyplot as plt\n'), ((48769, 48793), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (48791, 48793), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5530), 'coastsat.SDS_tools.get_filenames', 'SDS_tools.get_filenames', (['filenames[i]', 'filepath', 'satname'], {}), '(filenames[i], filepath, satname)\n', (5497, 5530), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((5671, 5746), 'coastsat.SDS_preprocess.preprocess_single', 'SDS_preprocess.preprocess_single', (['fn', 'satname', "settings['cloud_mask_issue']"], {}), "(fn, satname, settings['cloud_mask_issue'])\n", (5703, 5746), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((6390, 6427), 'numpy.logical_xor', 'np.logical_xor', (['cloud_mask', 'im_nodata'], {}), '(cloud_mask, im_nodata)\n', (6404, 6427), True, 'import numpy as np\n'), ((11414, 11462), 'os.path.join', 'os.path.join', (['filepath', "(sitename + '_output.pkl')"], {}), "(filepath, sitename + '_output.pkl')\n", (11426, 11462), False, 'import os\n'), ((13815, 13854), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (13829, 13854), True, 'import numpy as np\n'), ((18433, 18446), 'numpy.isnan', 'np.isnan', (['vec'], {}), '(vec)\n', (18441, 18446), True, 'import numpy as np\n'), ((20850, 20887), 'numpy.logical_and', 'np.logical_and', (['vec_buffer', 'vec_water'], {}), '(vec_buffer, vec_water)\n', (20864, 20887), True, 'import numpy as np\n'), ((20913, 20949), 'numpy.logical_and', 
'np.logical_and', (['vec_buffer', 'vec_sand'], {}), '(vec_buffer, vec_sand)\n', (20927, 20949), True, 'import numpy as np\n'), ((21093, 21143), 'numpy.argmin', 'np.argmin', (['[int_sand.shape[0], int_water.shape[0]]'], {}), '([int_sand.shape[0], int_water.shape[0]])\n', (21102, 21143), True, 'import numpy as np\n'), ((23447, 23464), 'numpy.ones', 'np.ones', (['im_shape'], {}), '(im_shape)\n', (23454, 23464), True, 'import numpy as np\n'), ((23659, 23726), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['ref_sl', "settings['output_epsg']", 'image_epsg'], {}), "(ref_sl, settings['output_epsg'], image_epsg)\n", (23681, 23726), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((25526, 25547), 'numpy.isnan', 'np.isnan', (['contours[k]'], {}), '(contours[k])\n', (25534, 25547), True, 'import numpy as np\n'), ((25637, 25678), 'numpy.delete', 'np.delete', (['contours[k]', 'index_nan'], {'axis': '(0)'}), '(contours[k], index_nan, axis=0)\n', (25646, 25678), True, 'import numpy as np\n'), ((32277, 32323), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (32285, 32323), True, 'import numpy as np\n'), ((32940, 32963), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (32957, 32963), False, 'from matplotlib import gridspec\n'), ((33274, 33297), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {}), '(1, 3)\n', (33291, 33297), False, 'from matplotlib import gridspec\n'), ((36320, 36330), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (36328, 36330), True, 'import matplotlib.pyplot as plt\n'), ((36404, 36428), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (36426, 36428), True, 'import matplotlib.pyplot as plt\n'), ((37248, 37301), 'os.path.join', 'os.path.join', (['filepath', "(date + '_' + satname + '.jpg')"], {}), "(filepath, date + '_' + satname + '.jpg')\n", (37260, 37301), 
False, 'import os\n'), ((39478, 39522), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d-%H-%M-%S"""'], {}), "(date, '%Y-%m-%d-%H-%M-%S')\n", (39495, 39522), False, 'from datetime import datetime\n'), ((42991, 43010), 'numpy.nanmin', 'np.nanmin', (['int_sand'], {}), '(int_sand)\n', (43000, 43010), True, 'import numpy as np\n'), ((43220, 43237), 'numpy.nanmin', 'np.nanmin', (['int_ww'], {}), '(int_ww)\n', (43229, 43237), True, 'import numpy as np\n'), ((43468, 43488), 'numpy.nanmin', 'np.nanmin', (['int_water'], {}), '(int_water)\n', (43477, 43488), True, 'import numpy as np\n'), ((43720, 43740), 'numpy.nanmin', 'np.nanmin', (['int_other'], {}), '(int_other)\n', (43729, 43740), True, 'import numpy as np\n'), ((46373, 46420), 'skimage.measure.find_contours', 'measure.find_contours', (['im_mndwi_buffer', 't_mndwi'], {}), '(im_mndwi_buffer, t_mndwi)\n', (46394, 46420), True, 'import skimage.measure as measure\n'), ((49549, 49602), 'os.path.join', 'os.path.join', (['filepath', "(date + '_' + satname + '.jpg')"], {}), "(filepath, date + '_' + satname + '.jpg')\n", (49561, 49602), False, 'import os\n'), ((15840, 15865), 'numpy.ones', 'np.ones', (['cloud_mask.shape'], {}), '(cloud_mask.shape)\n', (15847, 15865), True, 'import numpy as np\n'), ((23832, 23852), 'numpy.round', 'np.round', (['ref_sl_pix'], {}), '(ref_sl_pix)\n', (23840, 23852), True, 'import numpy as np\n'), ((28460, 28506), 'coastsat.SDS_tools.convert_pix2world', 'SDS_tools.convert_pix2world', (['idx_cloud', 'georef'], {}), '(idx_cloud, georef)\n', (28487, 28506), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((31928, 31998), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', "settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (31950, 31998), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((43012, 43031), 'numpy.nanmax', 'np.nanmax', (['int_sand'], {}), '(int_sand)\n', (43021, 43031), 
True, 'import numpy as np\n'), ((43239, 43256), 'numpy.nanmax', 'np.nanmax', (['int_ww'], {}), '(int_ww)\n', (43248, 43256), True, 'import numpy as np\n'), ((43490, 43510), 'numpy.nanmax', 'np.nanmax', (['int_water'], {}), '(int_water)\n', (43499, 43510), True, 'import numpy as np\n'), ((43742, 43762), 'numpy.nanmax', 'np.nanmax', (['int_other'], {}), '(int_other)\n', (43751, 43762), True, 'import numpy as np\n'), ((44943, 45013), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', "settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (44965, 45013), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((46122, 46138), 'numpy.abs', 'np.abs', (['pt[0][0]'], {}), '(pt[0][0])\n', (46128, 46138), True, 'import numpy as np\n'), ((47107, 47153), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (47115, 47153), True, 'import numpy as np\n'), ((4503, 4576), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat_dark%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat_dark%s.pkl' % str_new)\n", (4515, 4576), False, 'import os\n'), ((4947, 5010), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_S2%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_S2%s.pkl' % str_new)\n", (4959, 5010), False, 'import os\n'), ((21181, 21251), 'numpy.random.choice', 'np.random.choice', (['int_sand.shape[0]', 'int_water.shape[0]'], {'replace': '(False)'}), '(int_sand.shape[0], int_water.shape[0], replace=False)\n', (21197, 21251), True, 'import numpy as np\n'), ((21302, 21372), 'numpy.random.choice', 'np.random.choice', (['int_water.shape[0]', 'int_sand.shape[0]'], {'replace': '(False)'}), '(int_water.shape[0], int_sand.shape[0], replace=False)\n', (21318, 21372), True, 'import numpy as np\n'), ((25583, 25604), 'numpy.isnan', 'np.isnan', (['contours[k]'], {}), '(contours[k])\n', (25591, 
25604), True, 'import numpy as np\n'), ((28801, 28855), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoreline[k, :] - coords_cloud)'], {'axis': '(1)'}), '(shoreline[k, :] - coords_cloud, axis=1)\n', (28815, 28855), True, 'import numpy as np\n'), ((42941, 42959), 'numpy.isnan', 'np.isnan', (['int_sand'], {}), '(int_sand)\n', (42949, 42959), True, 'import numpy as np\n'), ((43172, 43188), 'numpy.isnan', 'np.isnan', (['int_ww'], {}), '(int_ww)\n', (43180, 43188), True, 'import numpy as np\n'), ((43417, 43436), 'numpy.isnan', 'np.isnan', (['int_water'], {}), '(int_water)\n', (43425, 43436), True, 'import numpy as np\n'), ((43669, 43688), 'numpy.isnan', 'np.isnan', (['int_other'], {}), '(int_other)\n', (43677, 43688), True, 'import numpy as np\n'), ((49266, 49277), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (49275, 49277), True, 'import matplotlib.pyplot as plt\n'), ((49383, 49407), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (49405, 49407), True, 'import matplotlib.pyplot as plt\n'), ((4663, 4738), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat_bright%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat_bright%s.pkl' % str_new)\n", (4675, 4738), False, 'import os\n'), ((4790, 4858), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat%s.pkl' % str_new)\n", (4802, 4858), False, 'import os\n'), ((8485, 8547), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (8503, 8547), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((9830, 9840), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (9838, 9840), True, 'import matplotlib.pyplot as plt\n'), ((36953, 36964), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (36962, 36964), True, 'import matplotlib.pyplot as 
plt\n'), ((37082, 37106), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (37104, 37106), True, 'import matplotlib.pyplot as plt\n'), ((46839, 46909), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', "settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (46861, 46909), False, 'from coastsat import SDS_tools, SDS_preprocess\n')] |
from django.db import models
from taggit.models import TagBase, ItemBase, GenericTaggedItemBase
from taggit.managers import TaggableManager
from treebeard.mp_tree import MP_Node
class Site(models.Model):
    """A website that adverts can be served on, identified by its slug."""
    slug = models.SlugField(max_length=250, unique=True)
    # 2083 chars is the traditional maximum URL length (old IE limit).
    url = models.CharField(max_length=2083)
    def __str__(self):
        return self.slug
class Zone(models.Model):
    """A named placement zone on a site, identified by its slug."""
    slug = models.SlugField(max_length=250, unique=True)
    def __str__(self):
        return self.slug
class HierarchicalTag (TagBase, MP_Node):
    # Tag stored as a materialized-path tree node (django-treebeard);
    # siblings are kept ordered alphabetically by name.
    node_order_by = [ 'name' ]
class TaggedAdvert(GenericTaggedItemBase):
    """Through model attaching a HierarchicalTag to an Advert."""
    # on_delete is mandatory on Django >= 2.0; CASCADE matches the old
    # implicit default, so behavior is unchanged on earlier versions.
    tag = models.ForeignKey('HierarchicalTag', related_name='items',
                            on_delete=models.CASCADE)
class Advert(models.Model):
    """A banner advert displayed on a `Site` inside a `Zone`.

    `image` and `click` hold the banner image URL and the click-through
    URL; the advert is shown between `start_date` and `end_date` while
    `active` is True (scheduling logic lives in the calling code).
    """
    name = models.CharField(max_length=250)
    # 2083 chars is the traditional maximum URL length.
    image = models.CharField(max_length=2083)
    click = models.CharField(max_length=2083)
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()
    active = models.BooleanField(default=True)
    # on_delete is mandatory on Django >= 2.0; CASCADE matches the old
    # implicit default, so behavior is unchanged on earlier versions.
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    zone = models.ForeignKey(Zone, on_delete=models.CASCADE)
    tags = TaggableManager(through=TaggedAdvert, blank=True)

    def __str__(self):
        return self.name
| [
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"taggit.managers.TaggableManager",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((217, 262), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(250)', 'unique': '(True)'}), '(max_length=250, unique=True)\n', (233, 262), False, 'from django.db import models\n'), ((273, 306), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2083)'}), '(max_length=2083)\n', (289, 306), False, 'from django.db import models\n'), ((394, 439), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(250)', 'unique': '(True)'}), '(max_length=250, unique=True)\n', (410, 439), False, 'from django.db import models\n'), ((618, 676), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""HierarchicalTag"""'], {'related_name': '"""items"""'}), "('HierarchicalTag', related_name='items')\n", (635, 676), False, 'from django.db import models\n'), ((717, 749), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (733, 749), False, 'from django.db import models\n'), ((762, 795), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2083)'}), '(max_length=2083)\n', (778, 795), False, 'from django.db import models\n'), ((808, 841), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2083)'}), '(max_length=2083)\n', (824, 841), False, 'from django.db import models\n'), ((859, 881), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (879, 881), False, 'from django.db import models\n'), ((897, 919), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (917, 919), False, 'from django.db import models\n'), ((934, 967), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (953, 967), False, 'from django.db import models\n'), ((980, 1003), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Site'], {}), '(Site)\n', (997, 1003), False, 'from django.db import models\n'), ((1015, 1038), 'django.db.models.ForeignKey', 'models.ForeignKey', 
(['Zone'], {}), '(Zone)\n', (1032, 1038), False, 'from django.db import models\n'), ((1051, 1100), 'taggit.managers.TaggableManager', 'TaggableManager', ([], {'through': 'TaggedAdvert', 'blank': '(True)'}), '(through=TaggedAdvert, blank=True)\n', (1066, 1100), False, 'from taggit.managers import TaggableManager\n')] |
"""
Bosons.
Package:
RoadNarrows elemenpy package.
File:
boson.py
Link:
https://github.com/roadnarrows-robotics/
Copyright:
(c) 2019. RoadNarrows LLC
http://www.roadnarrows.com
All Rights Reserved
License:
MIT
"""
from copy import copy
from enum import Enum
from elemenpy.core.common import (isderivedclass)
from elemenpy.core.format import (Format, default_encoder)
from elemenpy.core.prettyprint import (print2cols)
from elemenpy.sm.standardmodel import (StandardModel as sm, SubatomicParticle)
from elemenpy.sm.spin import (SpinQuantumNumber)
from elemenpy.sm.electriccharge import (ElectricCharge)
from elemenpy.sm.colorcharge import (ColorCharge)
# -----------------------------------------------------------------------------
# Boson Base Class
# -----------------------------------------------------------------------------
class Boson(SubatomicParticle):
  """ Boson base class.

  Concrete bosons register themselves via the @Boson.subclass()
  decorator; interdependent attributes (e.g. AntiParticle) are resolved
  afterwards by finalize_boson_family().
  """
  class BosonSubfamily(Enum):
    """ Boson subfamily enumeration. """
    UNKNOWN = 0
    SCALAR = 1 # scalar (spin 0, e.g. Higgs)
    VECTOR = 2 # vector (spin 1)
  # Standard-model classification shared by all bosons.
  Classification = sm.Classification.BOSON
  Family = sm.Family.BOSON
  Statistics = sm.Statistics.BOSONIC
  Name = 'boson'
  Symbol = 'boson'
  Subfamily = BosonSubfamily.UNKNOWN
  # registered boson subclasses by the @Boson.subclass decorator
  Subclasses = {}
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  @classmethod
  def subclass(klass):
    """
    Boson subclass decorator to add a subclass to an internal list.

    Returns:
      Decorator that registers a derived class and returns it unmodified.
    """
    def wrap(D):
      """
      Store derived subclass.

      Parameters:
        D   Prospective derived class.
      """
      # Only classes actually derived from Boson are registered;
      # anything else passes through untouched.
      if isderivedclass(D, klass):
        klass.Subclasses[D.__name__] = D
      return D
    return wrap
  @classmethod
  def finalize_boson_family(klass):
    """
    Finalize all registered boson subclass attributes.

    Bosons are interdependent.
    """
    # Each subclass resolves its AntiParticle reference here, once all
    # subclasses exist.
    for qname, qklass in klass.Subclasses.items():
      qklass.finalize_boson()
  @classmethod
  def boson_family(klass):
    """
    Get the dictionary of all registered boson subclasses.

    Returns:
      {qname: qclass, ...}
    """
    return klass.Subclasses
  @classmethod
  def boson_class(klass, qname):
    """
    Get the boson subclass.

    Parameters:
      qname   Boson subclass name.

    Returns:
      qclass

    Raises:
      KeyError  If no subclass of that name has been registered.
    """
    return klass.Subclasses[qname]
  @classmethod
  def subfamily(klass):
    """ Return boson subfamily. """
    return klass.Subfamily
  @classmethod
  def print_boson_properties(klass, indent=0, **print_kwargs):
    """
    Print fixed boson particle properties to output stream.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    klass.print_subatomic_properties(indent=indent, **print_kwargs)
    #print(f"{'':<{indent+2}}Boson", **print_kwargs)
    print2cols([
      ('Subfamily', klass.Subfamily.name),],
      c1width=16, indent=indent, **print_kwargs)
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Instance Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  def __init__(self):
    """ Boson initializer. """
    SubatomicParticle.__init__(self)
  def __repr__(self):
    return f"{self.__module__}.{self.__class__.__name__}"\
        f"()"
  def __str__(self):
    return self.name
  @property
  def subfamily(self):
    """ Return boson subfamily. """
    return self.Subfamily
  def print_state(self, indent=0, **print_kwargs):
    """
    Print boson state to output stream using default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    SubatomicParticle.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Photon Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class Photon(Boson):
  """ Photon: massless, electrically neutral vector boson.

  The photon is its own antiparticle.
  """
  # -- fixed class properties --
  Pid = sm.ParticleId.PHOTON
  Name = "photon"
  Symbol = default_encoder('$sm(gamma)')
  RestMass = 0.0
  ElecCharge = ElectricCharge(0)
  QSpin = SpinQuantumNumber(1)    # intrinsic spin quantum number
  Subfamily = Boson.BosonSubfamily.VECTOR

  # -- class methods --
  @classmethod
  def finalize_boson(klass):
    """
    Resolve interdependent class attributes.

    Deferred until every boson subclass is registered; the photon's
    antiparticle is the photon itself.
    """
    klass.AntiParticle = klass.boson_class('Photon')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed photon properties to the output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)

  # -- instance methods --
  def __init__(self):
    """ Photon initializer. """
    super().__init__()

  def __repr__(self):
    return '{}.{}()'.format(self.__module__, type(self).__name__)

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print photon state to the output stream using the default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# WBosonN Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class WBosonN(Boson):
  """ W- boson: vector boson with electric charge -1.

  Its antiparticle is the W+ boson.
  """
  # -- fixed class properties --
  Pid = sm.ParticleId.W_BOSON_N
  Name = "W-boson-"
  Symbol = default_encoder('$sm(W-)')
  RestMass = 80.385e3
  ElecCharge = ElectricCharge(-1)
  QSpin = SpinQuantumNumber(1)    # intrinsic spin quantum number
  Subfamily = Boson.BosonSubfamily.VECTOR

  # -- class methods --
  @classmethod
  def finalize_boson(klass):
    """
    Resolve interdependent class attributes.

    Deferred until every boson subclass is registered; the W- boson's
    antiparticle is the W+ boson.
    """
    klass.AntiParticle = klass.boson_class('WBosonP')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed W- boson properties to the output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)

  # -- instance methods --
  def __init__(self):
    """ W- boson initializer. """
    super().__init__()

  def __repr__(self):
    return '{}.{}()'.format(self.__module__, type(self).__name__)

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print W- boson state to the output stream using the default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# WBosonP Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class WBosonP(Boson):
  """ W+ boson: vector boson with electric charge +1.

  Its antiparticle is the W- boson.
  """
  # -- fixed class properties --
  Pid = sm.ParticleId.W_BOSON_P
  Name = "W-boson+"
  Symbol = default_encoder('$sm(W+)')
  RestMass = 80.385e3
  ElecCharge = ElectricCharge(1)
  QSpin = SpinQuantumNumber(1)    # intrinsic spin quantum number
  Subfamily = Boson.BosonSubfamily.VECTOR

  # -- class methods --
  @classmethod
  def finalize_boson(klass):
    """
    Resolve interdependent class attributes.

    Deferred until every boson subclass is registered; the W+ boson's
    antiparticle is the W- boson.
    """
    klass.AntiParticle = klass.boson_class('WBosonN')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed W+ boson properties to the output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)

  # -- instance methods --
  def __init__(self):
    """ W+ boson initializer. """
    super().__init__()

  def __repr__(self):
    return '{}.{}()'.format(self.__module__, type(self).__name__)

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print W+ boson state to the output stream using the default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# ZBoson Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class ZBoson(Boson):
  """ Z boson: electrically neutral vector boson.

  The Z boson is its own antiparticle.
  """
  # -- fixed class properties --
  Pid = sm.ParticleId.Z_BOSON
  Name = "Z-boson"
  Symbol = default_encoder('$sm(Z)')
  RestMass = 91.1875e3
  ElecCharge = ElectricCharge(0)
  QSpin = SpinQuantumNumber(1)    # intrinsic spin quantum number
  Subfamily = Boson.BosonSubfamily.VECTOR

  # -- class methods --
  @classmethod
  def finalize_boson(klass):
    """
    Resolve interdependent class attributes.

    Deferred until every boson subclass is registered; the Z boson's
    antiparticle is the Z boson itself.
    """
    klass.AntiParticle = klass.boson_class('ZBoson')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed Z boson properties to the output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)

  # -- instance methods --
  def __init__(self):
    """ Z boson initializer. """
    super().__init__()

  def __repr__(self):
    return '{}.{}()'.format(self.__module__, type(self).__name__)

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print Z boson state to the output stream using the default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Gluon Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class Gluon(Boson):
  """ Gluon class.

  A gluon carries one primary color charge and one anticolor charge;
  the pair is validated at construction time.
  """
  #
  # Class Fixed Properties
  #
  Pid = sm.ParticleId.GLUON
  Name = "gluon"
  Symbol = default_encoder('$sm(g)')
  RestMass = 0.0
  ElecCharge = ElectricCharge(0)
  QSpin = SpinQuantumNumber(1) # intrinsic spin number
  Subfamily = Boson.BosonSubfamily.VECTOR
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  @classmethod
  def finalize_boson(klass):
    """
    Finalize boson's class attributes.

    Finalization can only proceed when all boson classes have been
    defined due to interdependencies.
    """
    # The antiparticle of a gluon is again a gluon.
    klass.AntiParticle = klass.boson_class('Gluon')
  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed gluon properties to output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  # Class Instance Methods
  # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  def __init__(self, color, anticolor):
    """
    Gluon initializer.

    Parameters:
      color       Primary color charge.
      anticolor   Anticolor charge.

    Raises:
      ValueError  If color is not a primary color, anticolor is not an
                  anticolor, or color equals the complement of anticolor
                  (a colorless, meson-like combination).
    """
    Boson.__init__(self)
    self._color_charge = ColorCharge(color)
    self._anticolor_charge = ColorCharge(anticolor)
    if not self.color_charge.is_primary_color():
      raise ValueError(
        f"{self.name} '{self.color_charge.name}' is not a primary color")
    if not self.anticolor_charge.is_anticolor():
      raise ValueError(
        f"{self.name} '{self.anticolor_charge.name}' is not an anticolor")
    # A color paired with its own anticolor is colorless and describes
    # a meson-like state, not a gluon.
    if self.color_charge == self.anticolor_charge.complement:
      raise ValueError(f"{self.name} " +
        f"'{self.color_charge.name}-{self.anticolor_charge.name}' " +
        "defines a meson")
  def __repr__(self):
    # Fix: the original omitted the '(' after the class name, producing
    # e.g. 'module.Gluonred, antiblue)' instead of a call-like repr as
    # in every sibling boson class.
    return f"{self.__module__}.{self.__class__.__name__}"\
           f"({self.color_charge!r}, {self.anticolor_charge!r})"
  def __str__(self):
    return self.fqname
  def __eq__(self, gluon):
    """
    Equal to. self == gluon.

    Two gluons are considered equal if they are of the same kind.
    That is, gluons with the same color charges.
    """
    # NOTE(review): assumes `gluon` is a Gluon; a non-gluon operand
    # raises AttributeError rather than returning NotImplemented.
    return self.color_charge == gluon.color_charge and \
        self.anticolor_charge == gluon.anticolor_charge
  def __ne__(self, gluon):
    """
    Not equal to. self != gluon.

    Two gluons are considered not equal if they are not of the same kind.
    That is, gluons that do not have the same color charges.
    """
    return self.color_charge != gluon.color_charge or \
        self.anticolor_charge != gluon.anticolor_charge
  @property
  def fqname(self):
    """ Return fully qualified name, e.g. 'red-antiblue gluon'. """
    return f"{self.color_charge.name}-{self.anticolor_charge.name} {self.name}"
  @property
  def color_charge(self):
    """ Return primary color charge. """
    return self._color_charge
  @property
  def anticolor_charge(self):
    """ Return anticolor charge. """
    return self._anticolor_charge
  def print_state(self, indent=0, **print_kwargs):
    """
    Print gluon state to output stream using default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    Boson.print_state(self, indent=indent, **print_kwargs)
    print2cols([
      ('FQ Name', self.fqname),
      ('Color Charge',
        f"{self.color_charge.symbol} {self.color_charge.name}"),
      ('Anticolor Charge',
        f"{self.anticolor_charge.symbol} {self.anticolor_charge.name}"),],
      indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# HiggsBoson Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class HiggsBoson(Boson):
  """ Higgs boson: electrically neutral scalar (spin 0) boson.

  The Higgs boson is its own antiparticle.
  """
  # -- fixed class properties --
  Pid = sm.ParticleId.HIGGS_BOSON
  Name = "higgs-boson"
  Symbol = default_encoder('$sm(H0)')
  RestMass = 125.09e3
  ElecCharge = ElectricCharge(0)
  QSpin = SpinQuantumNumber(0)    # intrinsic spin quantum number
  Subfamily = Boson.BosonSubfamily.SCALAR

  # -- class methods --
  @classmethod
  def finalize_boson(klass):
    """
    Resolve interdependent class attributes.

    Deferred until every boson subclass is registered; the Higgs boson's
    antiparticle is the Higgs boson itself.
    """
    klass.AntiParticle = klass.boson_class('HiggsBoson')

  @classmethod
  def print_properties(klass, indent=0, **print_kwargs):
    """ Print fixed Higgs boson properties to the output stream. """
    klass.print_boson_properties(indent=indent, **print_kwargs)

  # -- instance methods --
  def __init__(self):
    """ Higgs boson initializer. """
    super().__init__()

  def __repr__(self):
    return '{}.{}()'.format(self.__module__, type(self).__name__)

  def __str__(self):
    return self.Name

  def print_state(self, indent=0, **print_kwargs):
    """
    Print Higgs boson state to the output stream using the default encoder.

    Parameters:
      indent        Line indentation.
      print_kwargs  Print control keyword arguments.
    """
    super().print_state(indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# On module load execution
# -----------------------------------------------------------------------------
# Resolve interdependent AntiParticle references now that every boson
# subclass has been registered via the @Boson.subclass decorator.
Boson.finalize_boson_family()
# -----------------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------------
if __name__ == "__main__":
  import sys
  import tests.utboson as ut
  sys.exit(ut.utmain())
| [
"elemenpy.core.format.default_encoder",
"elemenpy.sm.electriccharge.ElectricCharge",
"elemenpy.sm.colorcharge.ColorCharge",
"elemenpy.sm.spin.SpinQuantumNumber",
"elemenpy.core.prettyprint.print2cols",
"elemenpy.sm.standardmodel.SubatomicParticle.print_state",
"tests.utboson.utmain",
"elemenpy.core.co... | [((4312, 4341), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(gamma)"""'], {}), "('$sm(gamma)')\n", (4327, 4341), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((4378, 4395), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(0)'], {}), '(0)\n', (4392, 4395), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((4412, 4432), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(1)'], {}), '(1)\n', (4429, 4432), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((6155, 6181), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(W-)"""'], {}), "('$sm(W-)')\n", (6170, 6181), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((6223, 6241), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(-1)'], {}), '(-1)\n', (6237, 6241), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((6258, 6278), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(1)'], {}), '(1)\n', (6275, 6278), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((8005, 8031), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(W+)"""'], {}), "('$sm(W+)')\n", (8020, 8031), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((8073, 8090), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(1)'], {}), '(1)\n', (8087, 8090), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((8107, 8127), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(1)'], {}), '(1)\n', (8124, 8127), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((9848, 9873), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(Z)"""'], {}), "('$sm(Z)')\n", (9863, 9873), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((9916, 9933), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(0)'], {}), '(0)\n', 
(9930, 9933), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((9950, 9970), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(1)'], {}), '(1)\n', (9967, 9970), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((11681, 11706), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(g)"""'], {}), "('$sm(g)')\n", (11696, 11706), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((11743, 11760), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(0)'], {}), '(0)\n', (11757, 11760), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((11777, 11797), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(1)'], {}), '(1)\n', (11794, 11797), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((15566, 15592), 'elemenpy.core.format.default_encoder', 'default_encoder', (['"""$sm(H0)"""'], {}), "('$sm(H0)')\n", (15581, 15592), False, 'from elemenpy.core.format import Format, default_encoder\n'), ((15634, 15651), 'elemenpy.sm.electriccharge.ElectricCharge', 'ElectricCharge', (['(0)'], {}), '(0)\n', (15648, 15651), False, 'from elemenpy.sm.electriccharge import ElectricCharge\n'), ((15668, 15688), 'elemenpy.sm.spin.SpinQuantumNumber', 'SpinQuantumNumber', (['(0)'], {}), '(0)\n', (15685, 15688), False, 'from elemenpy.sm.spin import SpinQuantumNumber\n'), ((3017, 3113), 'elemenpy.core.prettyprint.print2cols', 'print2cols', (["[('Subfamily', klass.Subfamily.name)]"], {'c1width': '(16)', 'indent': 'indent'}), "([('Subfamily', klass.Subfamily.name)], c1width=16, indent=indent,\n **print_kwargs)\n", (3027, 3113), False, 'from elemenpy.core.prettyprint import print2cols\n'), ((3376, 3408), 'elemenpy.sm.standardmodel.SubatomicParticle.__init__', 'SubatomicParticle.__init__', (['self'], {}), '(self)\n', (3402, 3408), False, 'from elemenpy.sm.standardmodel import StandardModel as sm, SubatomicParticle\n'), ((3893, 3959), 
'elemenpy.sm.standardmodel.SubatomicParticle.print_state', 'SubatomicParticle.print_state', (['self'], {'indent': 'indent'}), '(self, indent=indent, **print_kwargs)\n', (3922, 3959), False, 'from elemenpy.sm.standardmodel import StandardModel as sm, SubatomicParticle\n'), ((12864, 12882), 'elemenpy.sm.colorcharge.ColorCharge', 'ColorCharge', (['color'], {}), '(color)\n', (12875, 12882), False, 'from elemenpy.sm.colorcharge import ColorCharge\n'), ((12912, 12934), 'elemenpy.sm.colorcharge.ColorCharge', 'ColorCharge', (['anticolor'], {}), '(anticolor)\n', (12923, 12934), False, 'from elemenpy.sm.colorcharge import ColorCharge\n'), ((14906, 15152), 'elemenpy.core.prettyprint.print2cols', 'print2cols', (["[('FQ Name', self.fqname), ('Color Charge',\n f'{self.color_charge.symbol} {self.color_charge.name}'), (\n 'Anticolor Charge',\n f'{self.anticolor_charge.symbol} {self.anticolor_charge.name}')]"], {'indent': 'indent'}), "([('FQ Name', self.fqname), ('Color Charge',\n f'{self.color_charge.symbol} {self.color_charge.name}'), (\n 'Anticolor Charge',\n f'{self.anticolor_charge.symbol} {self.anticolor_charge.name}')],\n indent=indent, **print_kwargs)\n", (14916, 15152), False, 'from elemenpy.core.prettyprint import print2cols\n'), ((17510, 17521), 'tests.utboson.utmain', 'ut.utmain', ([], {}), '()\n', (17519, 17521), True, 'import tests.utboson as ut\n'), ((1796, 1820), 'elemenpy.core.common.isderivedclass', 'isderivedclass', (['D', 'klass'], {}), '(D, klass)\n', (1810, 1820), False, 'from elemenpy.core.common import isderivedclass\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: tool_func.py
"""
Created on Thu Apr 23 17:39:40 2020
@author: Neo(<EMAIL>)
Some tool functions. The comment will be added when I am free.
"""
from myprogs.vsh.vsh_fit import rotgli_fit_4_table
from myprogs.catalog.pos_diff import radio_cat_diff_calc
from myprogs.catalog.read_icrf import read_icrf3, read_icrf2
from astropy.stats import sigma_clip, mad_std
from astropy import units as u
from astropy.table import Table
from sklearn.utils import resample
import numpy as np
# np.random.seed(28)
# ----------------------------- FUNCTIONS -----------------------------
def bootstrap_sample(tab, sam_size=500, with_replace=True):
    """Draw a (bootstrap) sample of `sam_size` rows from the table.

    Row indices are selected via sklearn's `resample`, with or without
    replacement, and used to index the table.
    """
    indices = resample(np.arange(len(tab)),
                       replace=with_replace, n_samples=sam_size)
    return tab[indices]
def sample_clean(pos_oft, rho0=10, print_log=False):
    """Outlier elimination for VSH fitting.

    NOTE(review): the thresholds computed below are immediately
    overwritten with huge values, so in the current state no source is
    ever removed and the full sample is returned.  This looks like a
    debugging leftover -- confirm before relying on the cleaning.
    """
    # Remove the outlier (consider the normalized separation)
    N0 = len(pos_oft)
    X0 = np.sqrt(np.log(N0) * 2)
    # These overrides effectively disable the clipping (see NOTE above).
    X0 = 10000000
    rho0 = 1000000
    mask = ((pos_oft["nor_sep"] <= X0)
            & (pos_oft["ang_sep"] < rho0))
    # Table of a clean sample
    pos_oft_cln = pos_oft[mask]
    N1 = len(pos_oft_cln)
    if print_log:
        print("For a sample of %d sources, "
              "the number of the outlier is smaller than 1 when X >= %.2f." % (N0, X0))
        print("After elimination, there are %d sources in the clean sample." % N1)
        print("The outlier rate is %.2f%%.\n" % ((N0 - N1) / N0 * 100))
    return pos_oft_cln
def vsh_fit_for_pos(pos_oft, print_log=False):
    """Estimate the VSH coefficient.

    Parameters:
      pos_oft    table of positional offsets (values assumed in mas --
                 they are scaled to uas below)
      print_log  print a summary of the rotation estimates if True

    Returns:
      pmt, sig   arrays of 8 values each: the 6 first-degree VSH
                 coefficients followed by the total glide and rotation
                 amplitudes, and their formal errors (uas).
    """
    output = rotgli_fit_4_table(pos_oft, verbose=False)
    # Keep rotation only and mas -> uas
    pmt = output["pmt1"] * 1.e3
    sig = output["sig1"] * 1.e3
    # Total rotation and glide amplitudes (also mas -> uas)
    w, w_err = output["R"] * 1.e3, output["R_err"] * 1.e3
    g, g_err = output["G"] * 1.e3, output["G_err"] * 1.e3
    # Concatenate the results
    pmt = np.hstack((pmt, [g, w]))
    sig = np.hstack((sig, [g_err, w_err]))
    # Print results
    if print_log:
        print("Estimates (%6d sources)\n"
              "----------------------------------------------\n"
              " Rotation [uas] \n"
              " x y z \n"
              "----------------------------------------------\n"
              " %+4.0f +/- %3.0f %+4.0f +/- %3.0f %+4.0f +/- %3.0f\n"
              "----------------------------------------------\n" %
              (len(pos_oft),
               pmt[3], sig[3], pmt[4], sig[4], pmt[5], sig[5]))
    return pmt, sig
def calc_orient(pos_oft):
    """Fit orientation angles to positional offsets after outlier removal.

    Returns the total and clean sample sizes together with the fitted
    parameters and their formal errors.
    """
    n_total = len(pos_oft)
    cleaned = sample_clean(pos_oft, rho0=10)
    pmt, sig = vsh_fit_for_pos(cleaned)
    return n_total, len(cleaned), pmt, sig
def calc_orient_new(pos_oft):
    """Fit orientation angles to positional offsets (no outlier removal).

    Returns the sample size together with the fitted parameters and
    their formal errors.
    """
    pmt, sig = vsh_fit_for_pos(pos_oft)
    return len(pos_oft), pmt, sig
def orientation_estimate(pos_oft, opt, clean=False):
    """Append one orientation estimate (parameters + errors) to `opt`.

    If `clean` is True the sample is first passed through
    `sample_clean`.  Returns the stacked array.
    """
    sample = sample_clean(pos_oft, rho0=10) if clean else pos_oft
    pmti, sigi = vsh_fit_for_pos(sample)
    return np.vstack((opt, np.hstack((pmti, sigi))))
def orient_angle_sampling(pos_oft, iter_time=1000, sam_size=2000, with_replace=False):
    """Orientation angle bootstrap sampling.

    For each of `iter_time` iterations, draw a bootstrap sample of
    `sam_size` sources and estimate the orientation/glide parameters
    twice: for all sources and for the outlier-cleaned sample.

    Returns:
      opt, opt1   arrays of shape (iter_time, 16); each row is
                  [g1, g2, g3, r1, r2, r3, g, r] followed by the formal
                  errors.  `opt` uses all sources, `opt1` the clean
                  sample.
    """
    # Start from empty (0, 16) arrays so np.vstack simply appends rows.
    # Fixes two defects of the original:
    #   * np.empty((16,)) seeded the stack with one row of uninitialized
    #     memory (a garbage first row in the returned arrays);
    #   * dtype=np.float raises AttributeError on NumPy >= 1.24, where
    #     the deprecated alias was removed.
    opt = np.empty(dtype=float, shape=(0, 16))   # For all sources
    opt1 = np.empty(dtype=float, shape=(0, 16))  # For a clean sample

    for i in range(iter_time):
        print(">>>>{:4d}th iteration:".format(i+1), end="")

        pos_oft_sub = bootstrap_sample(pos_oft, sam_size, with_replace)
        # Sample size is around 60% of the common sources, 3410 * 0.6 = 2046

        # All sources
        opt = orientation_estimate(pos_oft_sub, opt)

        # Removing Outliers
        opt1 = orientation_estimate(pos_oft_sub, opt1, True)

        print(" done!")

    return opt, opt1
def save_data(data, fname):
    """Write sampled orientation/glide estimates to `fname`.

    The 16 columns are the 8 parameters followed by their formal errors.
    """
    colnames = ["G1", "G2", "G3", "R1", "R2", "R3",
                "G", "R",
                "G1_err", "G2_err", "G3_err",
                "R1_err", "R2_err", "R3_err",
                "G_err", "R_err"]
    Table(data, names=colnames).write(fname, overwrite=True)
def vsh_fit_for_pm(apm_table):
    """Estimate VSH coefficients from apparent proper motion.

    Returns the fitted parameters and errors reordered as rotation
    components, total rotation, glide components, total glide, plus the
    raw fit output.
    """
    output = rotgli_fit_4_table(apm_table, verbose=False)

    # First-degree coefficients and their formal errors.
    pmt = output["pmt1"]
    sig = output["sig1"]

    # Total rotation and glide amplitudes.
    w, w_err = output["R"], output["R_err"]
    g, g_err = output["G"], output["G_err"]

    # Reorder: rotation first, then |R|, then glide, then |G|.
    pmt = np.concatenate((pmt[3:], [w], pmt[:3], [g]))
    sig = np.concatenate((sig[3:], [w_err], sig[:3], [g_err]))

    return pmt, sig, output
def vsh_fit_for_pm2(apm_table):
    """Estimate VSH coefficients from apparent proper motion.

    Rotation-only variant (fit_type="T"): returns the fitted rotation
    parameters with the total rotation appended, their errors, and the
    raw fit output.
    """
    output = rotgli_fit_4_table(apm_table, fit_type="T", verbose=False)

    # First-degree coefficients and their formal errors.
    pmt = output["pmt1"]
    sig = output["sig1"]

    # Append the total rotation amplitude.
    w, w_err = output["R"], output["R_err"]
    pmt = np.append(pmt, w)
    sig = np.append(sig, w_err)

    return pmt, sig, output
def calc_mean_std(y):
    """Robustly estimate the mean and standard deviation.

    One pass of 3-sigma clipping (MAD-based scale) is applied before
    computing the statistics on the surviving values.
    """
    clipped = sigma_clip(y, sigma=3, maxiters=1, stdfunc=mad_std)
    return np.mean(clipped), np.std(clipped)
def random_walk(epoch, t_scale=5, sigma_var=2):
    """Simulate a correlated random-walk position series at `epoch`.

    The RA/Dec offsets start at a random value in [-0.5, 0.5) and then
    evolve with an exponentially decaying memory of time scale
    `t_scale`, plus an innovation drawn uniformly and scaled so its
    amplitude grows with the time step and saturates at `sigma_var`.

    Parameters:
      epoch      1-D array of observation epochs (same unit as t_scale)
      t_scale    correlation time scale
      sigma_var  asymptotic innovation scale

    Returns:
      dra, ddec  1-D arrays of simulated offsets (empty for empty input)
    """
    dra = np.zeros(len(epoch))
    ddec = np.zeros(len(epoch))
    # Guard against empty input (the original indexed dra[0] and crashed).
    if len(epoch) == 0:
        return dra, ddec

    # Time steps; dt[0] = 0 by construction.
    dt = np.concatenate(([0], epoch[1:] - epoch[:-1]))

    # Random starting offsets in [-0.5, 0.5).
    dra[0] = np.random.random() - 0.5
    ddec[0] = np.random.random() - 0.5

    # Start at i=1.  The original looped from i=0, which overwrote the
    # starting offsets with exactly 0 (exp(0)*dra[-1] + 0, since the
    # innovation scale vanishes for dt = 0).
    for i in range(1, len(epoch)):
        # Exponential damping of the previous offset
        exp_fac_i = np.exp(-dt[i] / t_scale)

        # Innovation scale for this time step
        sigma_i = sigma_var * np.sqrt(1 - np.exp(-2 * dt[i] / t_scale))
        g_ra_i = (np.random.random_sample() - 0.5) * sigma_i
        g_dec_i = (np.random.random_sample() - 0.5) * sigma_i

        dra[i] = exp_fac_i * dra[i-1] + g_ra_i
        ddec[i] = exp_fac_i * ddec[i-1] + g_dec_i

    return dra, ddec
# --------------------------------- END --------------------------------
| [
"numpy.mean",
"numpy.random.random_sample",
"astropy.table.Table",
"numpy.hstack",
"astropy.stats.sigma_clip",
"numpy.random.random",
"numpy.log",
"myprogs.vsh.vsh_fit.rotgli_fit_4_table",
"numpy.exp",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.std",
"numpy.arange"
] | [((1757, 1799), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['pos_oft'], {'verbose': '(False)'}), '(pos_oft, verbose=False)\n', (1775, 1799), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((2098, 2122), 'numpy.hstack', 'np.hstack', (['(pmt, [g, w])'], {}), '((pmt, [g, w]))\n', (2107, 2122), True, 'import numpy as np\n'), ((2133, 2165), 'numpy.hstack', 'np.hstack', (['(sig, [g_err, w_err])'], {}), '((sig, [g_err, w_err]))\n', (2142, 2165), True, 'import numpy as np\n'), ((3452, 3475), 'numpy.hstack', 'np.hstack', (['(pmti, sigi)'], {}), '((pmti, sigi))\n', (3461, 3475), True, 'import numpy as np\n'), ((3486, 3508), 'numpy.vstack', 'np.vstack', (['(opt, opti)'], {}), '((opt, opti))\n', (3495, 3508), True, 'import numpy as np\n'), ((3814, 3851), 'numpy.empty', 'np.empty', ([], {'dtype': 'np.float', 'shape': '(16,)'}), '(dtype=np.float, shape=(16,))\n', (3822, 3851), True, 'import numpy as np\n'), ((3882, 3919), 'numpy.empty', 'np.empty', ([], {'dtype': 'np.float', 'shape': '(16,)'}), '(dtype=np.float, shape=(16,))\n', (3890, 3919), True, 'import numpy as np\n'), ((4435, 4582), 'astropy.table.Table', 'Table', (['data'], {'names': "['G1', 'G2', 'G3', 'R1', 'R2', 'R3', 'G', 'R', 'G1_err', 'G2_err', 'G3_err',\n 'R1_err', 'R2_err', 'R3_err', 'G_err', 'R_err']"}), "(data, names=['G1', 'G2', 'G3', 'R1', 'R2', 'R3', 'G', 'R', 'G1_err',\n 'G2_err', 'G3_err', 'R1_err', 'R2_err', 'R3_err', 'G_err', 'R_err'])\n", (4440, 4582), False, 'from astropy.table import Table\n'), ((4849, 4893), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['apm_table'], {'verbose': '(False)'}), '(apm_table, verbose=False)\n', (4867, 4893), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((5135, 5174), 'numpy.hstack', 'np.hstack', (['(pmt[3:], [w], pmt[:3], [g])'], {}), '((pmt[3:], [w], pmt[:3], [g]))\n', (5144, 5174), True, 'import numpy as np\n'), ((5185, 5232), 'numpy.hstack', 'np.hstack', (['(sig[3:], [w_err], sig[:3], 
[g_err])'], {}), '((sig[3:], [w_err], sig[:3], [g_err]))\n', (5194, 5232), True, 'import numpy as np\n'), ((5420, 5478), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['apm_table'], {'fit_type': '"""T"""', 'verbose': '(False)'}), "(apm_table, fit_type='T', verbose=False)\n", (5438, 5478), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((5676, 5697), 'numpy.hstack', 'np.hstack', (['(pmt, [w])'], {}), '((pmt, [w]))\n', (5685, 5697), True, 'import numpy as np\n'), ((5708, 5733), 'numpy.hstack', 'np.hstack', (['(sig, [w_err])'], {}), '((sig, [w_err]))\n', (5717, 5733), True, 'import numpy as np\n'), ((5872, 5923), 'astropy.stats.sigma_clip', 'sigma_clip', (['y'], {'sigma': '(3)', 'maxiters': '(1)', 'stdfunc': 'mad_std'}), '(y, sigma=3, maxiters=1, stdfunc=mad_std)\n', (5882, 5923), False, 'from astropy.stats import sigma_clip, mad_std\n'), ((6120, 6145), 'numpy.concatenate', 'np.concatenate', (['([0], dt)'], {}), '(([0], dt))\n', (6134, 6145), True, 'import numpy as np\n'), ((785, 797), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (794, 797), True, 'import numpy as np\n'), ((5942, 5964), 'numpy.mean', 'np.mean', (['filtered_data'], {}), '(filtered_data)\n', (5949, 5964), True, 'import numpy as np\n'), ((5966, 5987), 'numpy.std', 'np.std', (['filtered_data'], {}), '(filtered_data)\n', (5972, 5987), True, 'import numpy as np\n'), ((6248, 6266), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6264, 6266), True, 'import numpy as np\n'), ((6287, 6305), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6303, 6305), True, 'import numpy as np\n'), ((6394, 6418), 'numpy.exp', 'np.exp', (['(-dt[i] / t_scale)'], {}), '(-dt[i] / t_scale)\n', (6400, 6418), True, 'import numpy as np\n'), ((1093, 1103), 'numpy.log', 'np.log', (['N0'], {}), '(N0)\n', (1099, 1103), True, 'import numpy as np\n'), ((6528, 6553), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6551, 6553), True, 'import numpy 
as np\n'), ((6588, 6613), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6611, 6613), True, 'import numpy as np\n'), ((6484, 6512), 'numpy.exp', 'np.exp', (['(-2 * dt[i] / t_scale)'], {}), '(-2 * dt[i] / t_scale)\n', (6490, 6512), True, 'import numpy as np\n')] |
# | Copyright 2017 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import ast, json, logging, linecache, get_file_list
from python_compat import imap, lchain, lmap
def main():
	"""Collect config API calls and enum definitions from all python files.

	Writes two JSON files: 'docgen_config_calls.json' with every config
	accessor call site found and 'docgen_enums.json' with the discovered
	enums and their use_hash setting.
	"""
	enums = []
	enums_use_hash = {}
	config_calls = []
	for (fn, _) in get_file_list.get_file_list(show_external=False,
			show_aux=False, show_script=False, show_testsuite=False, show_type_list=['py']):
		if 'scriptlets' in fn:
			continue
		_process_calls_in_file(fn, enums, enums_use_hash, config_calls)
	for result in config_calls:
		result['args'] = lmap(_parse_option_spec, result['node'].args)
		result['kwargs'] = {}
		for keyword in result['node'].keywords:
			if not keyword.arg:
				continue
			result['kwargs'][keyword.arg] = _parse_option_spec(keyword.value)
		result['api'] = result['fqfn'].split('.')[-1]
		result['scope'] = result['fqfn'].split('.')[-2]
		result['pargs'] = result['kwargs'].pop('pargs', '<impossible>')
		result['on_change'] = result['kwargs'].pop('on_change', '<impossible>')
		result['on_valid'] = result['kwargs'].pop('on_valid', '<no validation>')
		result['persistent'] = result['kwargs'].pop('persistent', False)
		result.pop('node')  # AST nodes are not JSON serializable
	# use context managers so the handles are closed even if json.dump fails
	with open('docgen_config_calls.json', 'w') as fp:
		json.dump(config_calls, fp, indent=2, sort_keys=True)
	# enum names must be unique - otherwise dict(enums) silently drops entries
	assert len(enums) == len(dict(enums))
	with open('docgen_enums.json', 'w') as fp:
		json.dump({'enums': dict(enums), 'use_hash': enums_use_hash}, fp, indent=2, sort_keys=True)
class ConfigVisitor(ast.NodeVisitor):
	"""AST visitor that records every call site with its surrounding context.

	After visiting, ``calls`` holds (caller_stack, call_node, parent_stack)
	triples: the names of the enclosing classes/functions, the ast.Call node
	itself, and the stack of parent AST nodes at the time of the call.
	"""
	def __init__(self):
		ast.NodeVisitor.__init__(self)
		self._caller_stack = []  # names of enclosing class/function defs
		self._stack = []  # parent AST nodes of the node currently visited
		self.calls = []

	def generic_visit(self, node):
		self._stack.append(node)
		ast.NodeVisitor.generic_visit(self, node)
		self._stack.pop()

	def visit_Call(self, node):  # pylint:disable=invalid-name
		self.calls.append((list(self._caller_stack), node, list(self._stack)))
		self.generic_visit(node)

	def _visit_named_scope(self, node):
		# shared handling for definitions that open a new naming scope
		self._caller_stack.append(node.name)
		self.generic_visit(node)
		self._caller_stack.pop()

	def visit_ClassDef(self, node):  # pylint:disable=invalid-name
		self._visit_named_scope(node)

	def visit_FunctionDef(self, node):  # pylint:disable=invalid-name
		self._visit_named_scope(node)
def _analyse_file(fn):
	"""Parse the python file *fn* and collect all call sites in it.

	Returns a tuple (ast_tree, calls) where calls is the list gathered by
	ConfigVisitor. Logs and re-raises on any parse/read failure.
	"""
	try:
		# use a context manager so the file handle is not leaked
		with open(fn) as fp:
			tree = ast.parse(fp.read())
	except Exception:
		logging.warning(fn)
		raise
	visitor = ConfigVisitor()
	visitor.visit(tree)
	return (tree, visitor.calls)
def _get_func_name(node):
	"""Return a printable name for the callable expression *node*."""
	if isinstance(node, ast.Name):
		return node.id
	if isinstance(node, ast.Attribute):
		return _get_func_name(node.value) + '.' + node.attr
	if isinstance(node, ast.Call):
		return _get_func_name(node.func) + '(...)'
	if isinstance(node, ast.Subscript):
		return _get_func_name(node.value) + '[...]'
	if isinstance(node, (ast.BinOp, ast.BoolOp)):
		return '<operation>'
	if isinstance(node, ast.Str):
		return '<some string>'
	if isinstance(node, ast.Lambda):
		return '<lambda>'
	return '???'
def _join_config_locations(*opt_list):
	"""Expand option-name fragments into the list of full option names.

	Each argument is either a string fragment or a list/tuple of alternative
	fragments; list arguments are expanded into one result entry per
	alternative (cartesian product across all arguments).
	"""
	opt_first = opt_list[0]
	opt_list = opt_list[1:]
	if isinstance(opt_first, (list, tuple)): # first option is a list - expand the first parameter
		if not opt_list: # only first option -> clean and return
			return lmap(str.strip, opt_first)
		# NOTE(review): lchain presumably flattens the per-alternative result
		# lists into one list - confirm against python_compat
		return lchain(imap(lambda opt: _join_config_locations(opt.strip(), *opt_list), opt_first))
	if not opt_list: # only first option -> clean and return
		return [opt_first.strip()]
	def _do_join(opt):
		# prefix each expanded tail with the (stripped) first fragment
		return (opt_first + ' ' + opt).strip()
	return lmap(_do_join, _join_config_locations(*opt_list))
def _parse_option_call(value):
	"""Resolve a call expression appearing in an option specification.

	Known option-joining helpers (_get_handler_option, join_config_locations,
	_get_pproc_opt, _get_part_opt, _get_dproc_opt) are expanded via
	_join_config_locations; any other call is rendered as '<call:...>'.
	"""
	args_list = []
	# normalize parsed arguments: strip whitespace and surrounding quotes
	for parg in imap(_parse_option_spec, value.args):
		if isinstance(parg, (list, tuple)):
			args_list.append(lmap(lambda x: x.strip().strip('"').strip("'"), parg))
		else:
			args_list.append(parg.strip().strip('"').strip("'"))
	if isinstance(value.func, ast.Name):
		if value.func.id == '_get_handler_option':
			return _join_config_locations('<name:logger_name>', ['', '<name:handler_name>'], *args_list)
		elif value.func.id == 'join_config_locations':
			return _join_config_locations(*args_list)
	elif isinstance(value.func, ast.Attribute):
		if value.func.attr == '_get_pproc_opt':
			return _join_config_locations(['', '<name:datasource_name>'], 'partition', *args_list)
		if value.func.attr == '_get_part_opt':
			return _join_config_locations(['', '<name:datasource_name>'], *args_list)
		elif value.func.attr == '_get_dproc_opt':
			return _join_config_locations('<name:datasource_name>', *args_list)
	# fallback: render an opaque description of the call
	arg_str = str.join(', ', imap(str, imap(_parse_option_spec, value.args)))
	return '<call:%s(%s)>' % (_get_func_name(value.func), arg_str)
def _parse_option_name(value):
	"""Map an ast.Name to its python literal (True/False/None) or a '<name:...>' tag."""
	literal_by_id = {'True': True, 'False': False, 'None': None}
	if value.id in literal_by_id:
		return literal_by_id[value.id]
	return '<name:%s>' % value.id
def _parse_option_op(value):
	"""Translate a binary operation inside an option expression into a value.

	Handles Add (string join), Mod ('%'-formatting) and Mult; any other
	operator falls through and implicitly returns None, which the caller
	replaces by '<manual>'.
	"""
	value_left = _parse_option_spec(value.left)
	value_right = _parse_option_spec(value.right)
	if isinstance(value.op, ast.Add):
		# string concatenation: join both halves, dropping the inner quotes
		return '%s %s' % (
			value_left.strip().rstrip("'").strip(),
			value_right.strip().lstrip("'").strip())
	elif isinstance(value.op, ast.Mod):
		# '%'-style formatting - try to apply it, fall back to a literal join
		value_left = value_left.replace('%d', '%s')
		try:
			return value_left % value_right
		except Exception:
			return value_left + '%' + value_right
	elif isinstance(value.op, ast.Mult):
		# both operands stem from parsed source literals, so eval is contained
		return eval('%s * %s' % (value_left, value_right)) # pylint:disable=eval-used
def _parse_option_spec(value):
	"""Turn an AST expression from a config call into a documentation value.

	Strings/numbers become their literal, names map via _parse_option_name,
	lists/tuples recurse element-wise, calls and binary ops are delegated;
	anything unrecognized becomes '<manual>'.
	NOTE(review): ast.Str / ast.Num / ast.NameConstant were removed in
	python 3.12 - this chain assumes an older python. TODO confirm target.
	"""
	if isinstance(value, ast.Str):
		result = repr(value.s)
	elif isinstance(value, ast.Num):
		result = value.n
	elif isinstance(value, ast.Name):
		result = _parse_option_name(value)
	elif isinstance(value, ast.Attribute):
		result = '<attr:%s>' % value.attr.strip('_')
	elif isinstance(value, (ast.List, ast.Tuple)):
		return lmap(_parse_option_spec, value.elts)
	elif isinstance(value, ast.Dict):
		key_value_iter = zip(imap(_parse_option_spec, value.keys), imap(_parse_option_spec, value.values))
		result = '{%s}' % str.join(', ', imap(lambda k_v: '%s: %s' % k_v, key_value_iter))
	elif isinstance(value, ast.Call):
		result = _parse_option_call(value)
	elif isinstance(value, ast.BinOp):
		result = _parse_option_op(value) or '<manual>'
	elif isinstance(value, ast.UnaryOp) and isinstance(value.op, ast.USub):
		result = -_parse_option_spec(value.operand)
	elif hasattr(ast, 'NameConstant') and isinstance(value, getattr(ast, 'NameConstant')):
		result = value.value
	else:
		result = '<manual>'
	return result
def _process_calls_in_file(fn, enums, enums_use_hash, config_calls):
	"""Scan file *fn* and append its enum definitions and config API calls.

	Results are accumulated into the mutable arguments: enums /
	enums_use_hash for make_enum sites and config_calls for config
	accessor call sites.
	"""
	(_, call_infos) = _analyse_file(fn)
	for (caller_stack, node, parents) in call_infos:
		result = _transform_call(fn, caller_stack, node)
		is_pconfig_get = ('self.get' in result['fqfn']) and (result['fn'].endswith('pconfig.py'))
		if 'make_enum' in result['fqfn']:
			if 'make_enum.enum_list' in result['fqfn']:
				continue
			_process_enum(node, parents, result, enums, enums_use_hash)
			continue
		elif '_query_config' in result['fqfn']:
			# unwrap _query_config(fun, ...) - the real accessor is the first arg
			# (note: mutates the AST node in place)
			result['fqfn'] = _get_func_name(node.args[0])
			node.args = node.args[1:]
			config_calls.append(result)
		elif 'config.is_interactive' in result['fqfn']:
			config_calls.append(result)
		elif is_pconfig_get or ('config.get' in result['fqfn']):
			if is_pconfig_get:
				result['fqfn'] = result['fqfn'].replace('self.get', 'pconfig.get')
			assert result['node'].func.attr.startswith('get') # prevent sequential calls with get
			use = True
			for key in ['get_config', 'get_work_path', 'get_state', 'get_option_list']: # internal API
				if key in result['fqfn']:
					use = False
			if use:
				config_calls.append(result)
def _process_enum(node, parents, result, enums, enums_use_hash):
	"""Record one make_enum(...) call into enums / enums_use_hash.

	The enum name is taken from the assignment target (one-arg form) or the
	second argument (two-arg form).
	NOTE(review): enum_name stays unbound for any other argument count and
	would raise below - presumably make_enum is never called differently.
	"""
	if len(node.args) == 1:
		enum_name = parents[-1].targets[0].id
	elif len(node.args) == 2:
		enum_name = node.args[1].id
	def _iter_kw():
		# yield (keyword, value-as-string) pairs of the call
		for entry in result['node'].keywords:
			try:
				value = entry.value.id
			except Exception: # API change
				value = str(entry.value.value)
			yield (entry.arg, value)
	kw_list = list(_iter_kw())
	enums_use_hash[enum_name] = ('use_hash', 'False') not in kw_list
	try:
		enums.append((enum_name, lmap(lambda x: x.s, node.args[0].elts)))
	except Exception:
		# enum values are not a simple list of string literals
		enums.append((enum_name, '<manual>'))
def _transform_call(fn, caller_stack, node):
	"""Build the base description dict for one call site in file *fn*."""
	fqfn = _get_func_name(node.func)
	result = {
		'fn': fn,
		'lineno': node.lineno,
		'line': linecache.getline(fn, node.lineno),
		'fqfn': fqfn,
		'node': node,
		'callers': caller_stack,
	}
	assert '???' not in result['fqfn']
	return result
# run the documentation generator when executed as a script
if __name__ == '__main__':
	main()
| [
"get_file_list.get_file_list",
"ast.NodeVisitor.__init__",
"logging.warning",
"ast.NodeVisitor.generic_visit",
"python_compat.imap",
"python_compat.lmap",
"linecache.getline",
"json.dump"
] | [((803, 935), 'get_file_list.get_file_list', 'get_file_list.get_file_list', ([], {'show_external': '(False)', 'show_aux': '(False)', 'show_script': '(False)', 'show_testsuite': '(False)', 'show_type_list': "['py']"}), "(show_external=False, show_aux=False,\n show_script=False, show_testsuite=False, show_type_list=['py'])\n", (830, 935), False, 'import ast, json, logging, linecache, get_file_list\n'), ((1752, 1805), 'json.dump', 'json.dump', (['config_calls', 'fp'], {'indent': '(2)', 'sort_keys': '(True)'}), '(config_calls, fp, indent=2, sort_keys=True)\n', (1761, 1805), False, 'import ast, json, logging, linecache, get_file_list\n'), ((4176, 4212), 'python_compat.imap', 'imap', (['_parse_option_spec', 'value.args'], {}), '(_parse_option_spec, value.args)\n', (4180, 4212), False, 'from python_compat import imap, lchain, lmap\n'), ((1088, 1133), 'python_compat.lmap', 'lmap', (['_parse_option_spec', "result['node'].args"], {}), "(_parse_option_spec, result['node'].args)\n", (1092, 1133), False, 'from python_compat import imap, lchain, lmap\n'), ((2063, 2093), 'ast.NodeVisitor.__init__', 'ast.NodeVisitor.__init__', (['self'], {}), '(self)\n', (2087, 2093), False, 'import ast, json, logging, linecache, get_file_list\n'), ((2219, 2260), 'ast.NodeVisitor.generic_visit', 'ast.NodeVisitor.generic_visit', (['self', 'node'], {}), '(self, node)\n', (2248, 2260), False, 'import ast, json, logging, linecache, get_file_list\n'), ((8873, 8907), 'linecache.getline', 'linecache.getline', (['fn', 'node.lineno'], {}), '(fn, node.lineno)\n', (8890, 8907), False, 'import ast, json, logging, linecache, get_file_list\n'), ((2849, 2868), 'logging.warning', 'logging.warning', (['fn'], {}), '(fn)\n', (2864, 2868), False, 'import ast, json, logging, linecache, get_file_list\n'), ((3786, 3812), 'python_compat.lmap', 'lmap', (['str.strip', 'opt_first'], {}), '(str.strip, opt_first)\n', (3790, 3812), False, 'from python_compat import imap, lchain, lmap\n'), ((5111, 5147), 
'python_compat.imap', 'imap', (['_parse_option_spec', 'value.args'], {}), '(_parse_option_spec, value.args)\n', (5115, 5147), False, 'from python_compat import imap, lchain, lmap\n'), ((8674, 8712), 'python_compat.lmap', 'lmap', (['(lambda x: x.s)', 'node.args[0].elts'], {}), '(lambda x: x.s, node.args[0].elts)\n', (8678, 8712), False, 'from python_compat import imap, lchain, lmap\n'), ((6328, 6364), 'python_compat.lmap', 'lmap', (['_parse_option_spec', 'value.elts'], {}), '(_parse_option_spec, value.elts)\n', (6332, 6364), False, 'from python_compat import imap, lchain, lmap\n'), ((6423, 6459), 'python_compat.imap', 'imap', (['_parse_option_spec', 'value.keys'], {}), '(_parse_option_spec, value.keys)\n', (6427, 6459), False, 'from python_compat import imap, lchain, lmap\n'), ((6461, 6499), 'python_compat.imap', 'imap', (['_parse_option_spec', 'value.values'], {}), '(_parse_option_spec, value.values)\n', (6465, 6499), False, 'from python_compat import imap, lchain, lmap\n'), ((6536, 6584), 'python_compat.imap', 'imap', (["(lambda k_v: '%s: %s' % k_v)", 'key_value_iter'], {}), "(lambda k_v: '%s: %s' % k_v, key_value_iter)\n", (6540, 6584), False, 'from python_compat import imap, lchain, lmap\n')] |
import numpy as np
from opytimizer.optimizers.swarm import sso
from opytimizer.spaces import search
def test_sso_params():
    """The constructor should propagate the C_w, C_p and C_g hyperparameters."""
    new_sso = sso.SSO(params={'C_w': 0.1, 'C_p': 0.4, 'C_g': 0.9})
    assert new_sso.C_w == 0.1
    assert new_sso.C_p == 0.4
    assert new_sso.C_g == 0.9
def test_sso_params_setter():
    """The setters should reject non-numeric and out-of-range C_w / C_p / C_g values.

    Fix: the bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt - narrowed to ``except Exception``.
    """
    new_sso = sso.SSO()
    # C_w: must be numeric
    try:
        new_sso.C_w = 'a'
    except Exception:
        new_sso.C_w = 0.1
    # C_w: must not be negative
    try:
        new_sso.C_w = -1
    except Exception:
        new_sso.C_w = 0.1
    assert new_sso.C_w == 0.1
    # C_p: must be numeric
    try:
        new_sso.C_p = 'b'
    except Exception:
        new_sso.C_p = 0.4
    # C_p: must not be below C_w
    try:
        new_sso.C_p = 0.05
    except Exception:
        new_sso.C_p = 0.4
    assert new_sso.C_p == 0.4
    # C_g: must be numeric
    try:
        new_sso.C_g = 'c'
    except Exception:
        new_sso.C_g = 0.9
    # C_g: must not be below C_p
    try:
        new_sso.C_g = 0.35
    except Exception:
        new_sso.C_g = 0.9
    assert new_sso.C_g == 0.9
def test_sso_compile():
    """compile() should create local_position and its setter should validate input.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so it no
    longer swallows SystemExit / KeyboardInterrupt.
    """
    search_space = search.SearchSpace(n_agents=10, n_variables=2,
                                     lower_bound=[0, 0], upper_bound=[10, 10])
    new_sso = sso.SSO()
    new_sso.compile(search_space)
    try:
        new_sso.local_position = 1
    except Exception:
        new_sso.local_position = np.array([1])
    assert new_sso.local_position == np.array([1])
def test_sso_evaluate():
    """evaluate() should run the fitness function over the compiled search space."""
    search_space = search.SearchSpace(n_agents=10, n_variables=2,
                                     lower_bound=[0, 0], upper_bound=[10, 10])

    def square(x):
        # simple sphere fitness function
        return np.sum(x ** 2)

    new_sso = sso.SSO()
    new_sso.compile(search_space)
    new_sso.evaluate(search_space, square)
def test_sso_update():
    """update() should advance the compiled agents without raising."""
    space = search.SearchSpace(n_agents=10, n_variables=2,
                               lower_bound=[0, 0], upper_bound=[10, 10])
    optimizer = sso.SSO()
    optimizer.compile(space)
    optimizer.update(space)
| [
"numpy.sum",
"opytimizer.spaces.search.SearchSpace",
"numpy.array",
"opytimizer.optimizers.swarm.sso.SSO"
] | [((221, 243), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {'params': 'params'}), '(params=params)\n', (228, 243), False, 'from opytimizer.optimizers.swarm import sso\n'), ((383, 392), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (390, 392), False, 'from opytimizer.optimizers.swarm import sso\n'), ((976, 1068), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (994, 1068), False, 'from opytimizer.spaces import search\n'), ((1118, 1127), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1125, 1127), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1412, 1504), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (1430, 1504), False, 'from opytimizer.spaces import search\n'), ((1554, 1563), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1561, 1563), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1686, 1778), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (1704, 1778), False, 'from opytimizer.spaces import search\n'), ((1828, 1837), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1835, 1837), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1304, 1317), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1312, 1317), True, 'import numpy as np\n'), ((1379, 1393), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1385, 1393), True, 'import numpy as np\n'), ((1252, 
1265), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1260, 1265), True, 'import numpy as np\n')] |
from barbie.barbie import *
import barbie.susy_interface as susy
import subprocess
import os.path
import sys
from shutil import rmtree
from threading import Timer
constant_DirectoryDuration = 36
# Pode ser removido depois dos testes
import time
def barbiefy(dir, codes, disc, turma, lab):
    """Compile the submitted code, download the open Susy tests and run them.

    Args:
        dir: working directory for temporary build and test artifacts.
        codes: source file(s) to compile.
        disc, turma, lab: identify the Susy discipline, class and assignment.

    Returns:
        A list of test results, or None if compilation failed.
    """
    # Try to compile the source code
    try:
        exec_file, gcc_f, sucess = compile_c(codes, temp=True, dir=dir)
        assert sucess, "Falha na compilação"
    # If there was a compilation problem
    except AssertionError:
        eprint("Falha na compilação!\n")
        # Show the compilation output and end the program
        # NOTE(review): gcc_f is unbound here if compile_c itself raised
        # AssertionError before returning - TODO confirm compile_c contract
        with open(gcc_f, 'r') as gcc:
            eprint(gcc.read())
        return None

    tests_dir_name = os.path.join(dir, 'testes/')
    # Connect to the susy system and discover the test page url
    url = susy.discover_susy_url(disc, turma, lab)
    # List all susy files of open tests
    in_files, res_files = susy.get_susy_files(url)
    # Download all the open tests
    susy.download_tests(url, in_files, res_files, tests_dir_name)

    results = list()
    # If we successfully got all needed files, run all tests and
    # compare our output with the expected one
    if in_files and res_files:
        results = run_and_compare(exec_file, in_files, res_files, tests_dir_name)
    return results
def cleanUp(folder):
    """Delete *folder* recursively, ignoring errors (e.g. already removed)."""
    rmtree(folder, ignore_errors=True)
def timedCleanUp(folder):
    """Schedule cleanUp(folder) to run after constant_DirectoryDuration seconds."""
    tmr = Timer(constant_DirectoryDuration, cleanUp, args=[folder])
    tmr.start()
| [
"barbie.susy_interface.discover_susy_url",
"barbie.susy_interface.get_susy_files",
"threading.Timer",
"barbie.susy_interface.download_tests",
"shutil.rmtree"
] | [((1223, 1263), 'barbie.susy_interface.discover_susy_url', 'susy.discover_susy_url', (['disc', 'turma', 'lab'], {}), '(disc, turma, lab)\n', (1245, 1263), True, 'import barbie.susy_interface as susy\n'), ((1324, 1348), 'barbie.susy_interface.get_susy_files', 'susy.get_susy_files', (['url'], {}), '(url)\n', (1343, 1348), True, 'import barbie.susy_interface as susy\n'), ((1382, 1443), 'barbie.susy_interface.download_tests', 'susy.download_tests', (['url', 'in_files', 'res_files', 'tests_dir_name'], {}), '(url, in_files, res_files, tests_dir_name)\n', (1401, 1443), True, 'import barbie.susy_interface as susy\n'), ((1715, 1749), 'shutil.rmtree', 'rmtree', (['folder'], {'ignore_errors': '(True)'}), '(folder, ignore_errors=True)\n', (1721, 1749), False, 'from shutil import rmtree\n'), ((1784, 1841), 'threading.Timer', 'Timer', (['constant_DirectoryDuration', 'cleanUp'], {'args': '[folder]'}), '(constant_DirectoryDuration, cleanUp, args=[folder])\n', (1789, 1841), False, 'from threading import Timer\n')] |
import numpy as np
from agents import Agent
from connectboard import ConnectBoard
import time
import math
class AlphaBeta(Agent):
    """Agent that implements minimax with alpha-beta pruning to select its next move."""

    def get_move(self, game_board: np.ndarray) -> np.ndarray:
        """Recursively runs minimax to determine the best move to make.

        Recursively runs minimax algorithm with alpha-beta pruning starting at the current game state.
        This player is assumed to be maximizing.

        Args:
            game_board (np.ndarray): current board with a 1 for current player, -1 for
                opponent, and 0 for open space

        Returns:
            An ndarray representing the move, with a 1 in the row,col of the new
            piece, and all other entries zero.
        """
        start = time.time()
        move_val, move = self.alpha_beta(game_board, depth=5)
        end = time.time()
        print(
            "Found optimal move with value: {}, in {}s".format(move_val, (end - start))
        )
        return move

    def alpha_beta(
        self,
        game_board: np.ndarray,
        alpha: float = -np.inf,
        beta: float = np.inf,
        depth: int = np.inf,
        max_player: bool = True,
    ) -> (int, np.ndarray):
        """Perform minimax with alpha-beta pruning to determine best move to take from current game_board.

        Performs minimax starting at the current position and ending after looking depth moves ahead, or when all leaf
        nodes are end_game states.

        TODO: If multiple winning moves, it picks the first one. Change so agent chooses the quickest win.

        Args:
            game_board (np.ndarray): 2D array representing the current pieces as 1 or -1 if they
                are for the maximizing or minimizing player respectively.
            alpha (float, optional): The best score achieved by the maximizing player. Defaults to -np.inf,
                the worst possible value for the maximizing player.
            beta (float, optional): The best score achieved by the minimizing player. Defaults to np.inf.
            depth (int, optional): The number of layers to check using minimax. Defualt is np.inf which will
                check all layers.
            max_player (bool, optional): Indicates whether the turn at the root node belongs to the minimizing or
                maximizing player. Default is True, meaning the maximizing player is next to move.

        Returns:
            move_val (int): The optimal value of this node.
            move (np.ndarray): A 6x7 numpy array with a 1 in the spot of the move to take from the current
                node that will result in the optimal value.
        """
        legal_moves = ConnectBoard.get_legal_moves(game_board)
        if legal_moves.size == 0 or depth == 0:
            # Leaf node, perform static value checking.
            return self.get_static_value(game_board), None

        next_states = (
            game_board + legal_moves if max_player else game_board - legal_moves
        )
        best_move = legal_moves[0]

        # Explore children best-first (for the current player) to maximize pruning.
        while next_states.size > 0:
            best_idx = self.get_most_valuable(next_states, max_player)
            state = next_states[best_idx]
            next_states = np.delete(next_states, best_idx, 0)

            # Only recurse farther if the current state is not an end game state
            if math.isinf(self.get_static_value(state)):
                val = self.get_static_value(state)
            else:
                val, _ = self.alpha_beta(
                    state,
                    alpha=alpha,
                    beta=beta,
                    depth=depth - 1,
                    max_player=not max_player,
                )

            if max_player and val > alpha:
                alpha = val
                best_move = legal_moves[best_idx]
            elif not max_player and val < beta:
                best_move = legal_moves[best_idx]
                beta = val

            legal_moves = np.delete(legal_moves, best_idx, 0)

            if beta < alpha:
                break

        if max_player:
            return alpha, best_move
        else:
            return beta, best_move

    def get_most_valuable(self, states: np.ndarray, max_player: bool) -> int:
        """Return the index of next_states corresponding to the best static value for current player.

        Args:
            states (np.ndarray): Numpy array of 6x7 board states. Maximizing player is 1, minimizing
                player is -1.
            max_player (bool): If max_player is true, return the index with maximum static value,
                if false, return the index that minimizes static value.
        """
        idx = 0
        best_val = self.get_static_value(states[0])
        for i in range(1, states.shape[0]):
            val = self.get_static_value(states[i])
            if max_player and val > best_val:
                idx = i
                best_val = val
            # Bugfix: the branch below used to be a plain `elif val < best_val`,
            # which also fired for the maximizing player and made it track the
            # *worst* state instead of the best one.
            elif not max_player and val < best_val:
                idx = i
                best_val = val
        return idx

    def get_static_value(self, game_board: np.ndarray) -> float:
        """Returns the static value of game_board.

        For each possible way to get four in a row, check if the line contains only 1 or -1.
        If that row contains pieces from only one player, add the sum of their pieces to value.
        If either player has 4 in a row, return +/- inf.

        Args:
            game_board (np.ndarray): The current minimax board with maximing player as 1
                and minimizing player as -1.

        Returns:
            value (float): The static value of the current position.
        """
        windows = game_board.flatten()[ConnectBoard.WINDOW_INDICES].reshape(-1, 4)
        uncontested_windows = windows[windows.min(axis=1) != -windows.max(axis=1)]
        if uncontested_windows.size == 0:
            return 0
        window_sums = uncontested_windows.sum(axis=1)
        if window_sums.max() == 4:
            return np.inf
        elif window_sums.min() == -4:
            return -np.inf
        else:
            # abs(s) * s**2 / s == abs(s) * s: quadratic weighting toward longer runs
            return (abs(window_sums) * window_sums ** 2 / window_sums).sum()

    def handle_invalid_move(self) -> None:
        # Throw exception during development
        # TODO: Add some nice handler later on
        raise Exception
| [
"numpy.delete",
"time.time",
"connectboard.ConnectBoard.get_legal_moves"
] | [((833, 844), 'time.time', 'time.time', ([], {}), '()\n', (842, 844), False, 'import time\n'), ((921, 932), 'time.time', 'time.time', ([], {}), '()\n', (930, 932), False, 'import time\n'), ((2759, 2799), 'connectboard.ConnectBoard.get_legal_moves', 'ConnectBoard.get_legal_moves', (['game_board'], {}), '(game_board)\n', (2787, 2799), False, 'from connectboard import ConnectBoard\n'), ((3291, 3326), 'numpy.delete', 'np.delete', (['next_states', 'best_idx', '(0)'], {}), '(next_states, best_idx, 0)\n', (3300, 3326), True, 'import numpy as np\n'), ((4044, 4079), 'numpy.delete', 'np.delete', (['legal_moves', 'best_idx', '(0)'], {}), '(legal_moves, best_idx, 0)\n', (4053, 4079), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import netomaton as ntm
import numpy as np
if __name__ == '__main__':
    # NKS page 443 - Rule 122R
    network = ntm.topology.cellular_automaton(n=100)

    # carefully chosen initial conditions
    previous_state = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1,
                      0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,
                      1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1,
                      0, 0, 1, 1]
    initial_conditions = [1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
                          1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,
                          1, 1, 1, 0, 1, 1, 1]

    # evolve the reversible rule 122R for 1002 timesteps
    trajectory = ntm.evolve(initial_conditions=initial_conditions, network=network,
                            activity_rule=ntm.ReversibleRule(ntm.rules.nks_ca_rule(122)),
                            past_conditions=[previous_state], timesteps=1002)

    # track the running average node entropy over the evolution
    # (the unused 'bit_string' computation of the original loop was removed)
    timestep = []
    average_node_entropies = []
    activities = ntm.get_activities_over_time_as_list(trajectory)
    for i in range(len(activities)):
        timestep.append(i)
        average_node_entropies.append(ntm.average_node_entropy(activities[:i+1]))
        print("%s, %s" % (i, average_node_entropies[-1]))

    # upper 2/3 of the figure: entropy curve; lower 1/3: the CA evolution
    plt.subplot(3, 1, (1, 2))
    plt.title("Avg. Node (Shannon) Entropy")
    plt.gca().set_xlim(0, 1002)
    plt.gca().axes.xaxis.set_ticks([])
    plt.plot(timestep, average_node_entropies)
    plt.subplot(3, 1, 3)
    plt.gca().axes.yaxis.set_ticks([])
    ntm.plot_grid(np.array(activities).T.tolist())
| [
"netomaton.rules.nks_ca_rule",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"numpy.array",
"netomaton.average_node_entropy",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"netomaton.get_activities_over_time_as_list",
"netomaton.topology.cellular_automaton"
] | [((150, 188), 'netomaton.topology.cellular_automaton', 'ntm.topology.cellular_automaton', ([], {'n': '(100)'}), '(n=100)\n', (181, 188), True, 'import netomaton as ntm\n'), ((1346, 1394), 'netomaton.get_activities_over_time_as_list', 'ntm.get_activities_over_time_as_list', (['trajectory'], {}), '(trajectory)\n', (1382, 1394), True, 'import netomaton as ntm\n'), ((1656, 1681), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1, 2)'], {}), '(3, 1, (1, 2))\n', (1667, 1681), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1726), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg. Node (Shannon) Entropy"""'], {}), "('Avg. Node (Shannon) Entropy')\n", (1695, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1844), 'matplotlib.pyplot.plot', 'plt.plot', (['timestep', 'average_node_entropies'], {}), '(timestep, average_node_entropies)\n', (1810, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1870), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1861, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1593), 'netomaton.average_node_entropy', 'ntm.average_node_entropy', (['activities[:i + 1]'], {}), '(activities[:i + 1])\n', (1573, 1593), True, 'import netomaton as ntm\n'), ((1731, 1740), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1738, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1196), 'netomaton.rules.nks_ca_rule', 'ntm.rules.nks_ca_rule', (['(122)'], {}), '(122)\n', (1191, 1196), True, 'import netomaton as ntm\n'), ((1763, 1772), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1770, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1884), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1882, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1948), 'numpy.array', 'np.array', (['activities'], {}), '(activities)\n', (1936, 1948), True, 'import numpy as np\n')] |
import argparse
import csv
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from modules.metric import mean_reciprocal_rank
def main(csv_path):
    """Compute the average mean reciprocal rank over a prediction csv.

    Each data row has the ground-truth id as '<hum_id>.<ext>' in the first
    column and the ranked predictions in the remaining columns; the first
    row is treated as a header.

    Returns:
        The average MRR over all data rows, or 0.0 if the file contains no
        data rows (previously this raised ZeroDivisionError).
    """
    acc = 0
    num = 0
    with open(csv_path, "r") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            if line_count > 0:
                hum_id = row[0].split(".")[0]
                preds = [str(col) for col in row[1:]]
                # compute the score once instead of twice as before
                score = mean_reciprocal_rank(preds, str(hum_id))
                print(hum_id, score)
                acc += score
                num += 1
            line_count += 1
        print(f'Processed {line_count} lines.')
    if num == 0:
        # header-only or empty file: report 0.0 instead of crashing
        return 0.0
    return acc / num
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--csv_path", type=str, required=True, help="path to predict csv")
args = parser.parse_args()
mrr = main(args.csv_path)
print("-----------------------------")
print(f"MRR: {mrr}") | [
"os.path.dirname",
"csv.reader",
"argparse.ArgumentParser"
] | [((838, 863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (861, 863), False, 'import argparse\n'), ((81, 106), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (96, 106), False, 'import os\n'), ((265, 300), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (275, 300), False, 'import csv\n')] |
from django.urls import path, include
from rest_framework import routers
from . import views
app_name = "invoice_creator"
router = routers.DefaultRouter()
router.register(r"invoice", views.InvoiceViewSet)
urlpatterns = [
path("", include(router.urls), name="api"),
]
| [
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] | [((133, 156), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (154, 156), False, 'from rest_framework import routers\n'), ((237, 257), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (244, 257), False, 'from django.urls import path, include\n')] |
# Generated by Django 4.0.2 on 2022-03-17 10:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import petstagram.main_app.validators
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Pet, PetPhoto and
    # LikesModel tables. Do not hand-edit field definitions.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('profile_app', '0001_initial'),
    ]

    operations = [
        # Pet: owned by a profile; (name, user_profile) must be unique
        migrations.CreateModel(
            name='Pet',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('animal_type', models.CharField(choices=[('Cat', 'Cat'), ('Dog', 'Dog'), ('Bunny', 'Bunny'), ('Fish', 'Fish'), ('Parrot', 'Parrot'), ('Other', 'Other')], max_length=6)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profile_app.profile')),
            ],
            options={
                'unique_together': {('name', 'user_profile')},
            },
        ),
        # PetPhoto: an image (size-validated) that can show several pets
        migrations.CreateModel(
            name='PetPhoto',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='images', validators=[petstagram.main_app.validators.validate_image_size_5])),
                ('description', models.TextField(blank=True, null=True)),
                ('published', models.DateField(auto_now_add=True)),
                ('pets', models.ManyToManyField(to='main_app.Pet')),
            ],
        ),
        # LikesModel: one like by a user on a pet photo
        migrations.CreateModel(
            name='LikesModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('like', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.petphoto')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.BigAutoField",
"django.db.models.ImageField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((285, 342), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (316, 342), False, 'from django.db import migrations, models\n'), ((511, 607), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (530, 607), False, 'from django.db import migrations, models\n'), ((631, 662), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (647, 662), False, 'from django.db import migrations, models\n'), ((697, 858), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Cat', 'Cat'), ('Dog', 'Dog'), ('Bunny', 'Bunny'), ('Fish', 'Fish'), (\n 'Parrot', 'Parrot'), ('Other', 'Other')]", 'max_length': '(6)'}), "(choices=[('Cat', 'Cat'), ('Dog', 'Dog'), ('Bunny', 'Bunny'\n ), ('Fish', 'Fish'), ('Parrot', 'Parrot'), ('Other', 'Other')],\n max_length=6)\n", (713, 858), False, 'from django.db import migrations, models\n'), ((883, 922), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (899, 922), False, 'from django.db import migrations, models\n'), ((958, 1051), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""profile_app.profile"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'profile_app.profile')\n", (975, 1051), False, 'from django.db import migrations, models\n'), ((1280, 1376), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1299, 1376), False, 'from django.db import 
migrations, models\n'), ((1401, 1510), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images"""', 'validators': '[petstagram.main_app.validators.validate_image_size_5]'}), "(upload_to='images', validators=[petstagram.main_app.\n validators.validate_image_size_5])\n", (1418, 1510), False, 'from django.db import migrations, models\n'), ((1540, 1579), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1556, 1579), False, 'from django.db import migrations, models\n'), ((1612, 1647), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1628, 1647), False, 'from django.db import migrations, models\n'), ((1675, 1716), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""main_app.Pet"""'}), "(to='main_app.Pet')\n", (1697, 1716), False, 'from django.db import migrations, models\n'), ((1852, 1948), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1871, 1948), False, 'from django.db import migrations, models\n'), ((1972, 2063), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main_app.petphoto"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main_app.petphoto')\n", (1989, 2063), False, 'from django.db import migrations, models\n'), ((2086, 2182), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2103, 2182), False, 'from django.db import migrations, models\n')] |
import os
import logging
import requests
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
from imageGenerator import generate_random_emoji_cover
# Offset of the most recently seen Telegram update (None until the first poll).
update_id = None


def main():
    """Run the bot: poll Telegram for updates and answer with emoji covers."""
    global update_id
    # Telegram Bot Authorization Token comes from the environment.
    bot = telegram.Bot(os.environ['TELEGRAM_TOKEN'])

    # get the first pending update_id, this is so we can skip over it in case
    # we get an "Unauthorized" exception.
    try:
        update_id = bot.get_updates()[0].update_id
    except IndexError:
        update_id = None

    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    while True:
        try:
            send_emojicover(bot)
        except NetworkError:
            # Transient network problem: back off briefly and retry.
            sleep(1)
        except Unauthorized:
            # The user has removed or blocked the bot; skip past this update.
            # Guard against update_id still being None (no update seen yet),
            # which would raise TypeError on += 1 in the original code.
            if update_id is not None:
                update_id += 1
def send_emojicover(bot):
    """Reply to every pending message with a freshly generated emoji cover.

    Advances the module-level ``update_id`` past each processed update so
    the next poll starts after it.
    """
    global update_id
    # Request updates after the last update_id
    for update in bot.get_updates(offset=update_id, timeout=10):
        update_id = update.update_id + 1

        if update.message:  # your bot can receive updates without messages
            chat_id = update.message["chat"]["id"]
            img_path = generate_random_emoji_cover()
            # Use a context manager so the image file handle is closed after
            # sending (the original open() leaked the handle).
            with open(img_path, 'rb') as photo:
                bot.send_photo(chat_id=chat_id, photo=photo)
if __name__ == '__main__':
main() | [
"logging.basicConfig",
"imageGenerator.generate_random_emoji_cover",
"telegram.Bot",
"time.sleep"
] | [((314, 356), 'telegram.Bot', 'telegram.Bot', (["os.environ['TELEGRAM_TOKEN']"], {}), "(os.environ['TELEGRAM_TOKEN'])\n", (326, 356), False, 'import telegram\n'), ((591, 678), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (610, 678), False, 'import logging\n'), ((1250, 1279), 'imageGenerator.generate_random_emoji_cover', 'generate_random_emoji_cover', ([], {}), '()\n', (1277, 1279), False, 'from imageGenerator import generate_random_emoji_cover\n'), ((778, 786), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (783, 786), False, 'from time import sleep\n')] |
import pytest
from notifications_utils.validate_html import check_if_string_contains_valid_html
@pytest.mark.parametrize(
    "good_content",
    [
        "<div>abc</div>",
        '<div style="display: none;">abc</div>',
        """<div style="margin: 20px auto 30px auto;">
    <img
        src="http://google.com"
        alt="alt text"
        height="10"
        width="10"
    />
</div>""",
        "abc<div>abc</div>xyz",
    ],
)
def test_good_content_is_valid(good_content: str):
    # Well-formed HTML must produce an empty list of error messages.
    errors = check_if_string_contains_valid_html(good_content)
    assert errors == []
@pytest.mark.parametrize(
    "bad_content",
    (
        "<div>abc<div>",
        '<img src="http://google.com">',
        "abc<div>abc<div>xyz",
        '<div style=">abc</div>',
    ),
)
def test_bad_content_is_invalid(bad_content: str):
    # Broken HTML must yield at least one error message.
    result = check_if_string_contains_valid_html(bad_content)
    assert result != []
| [
"pytest.mark.parametrize",
"notifications_utils.validate_html.check_if_string_contains_valid_html"
] | [((100, 384), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""good_content"""', '(\'<div>abc</div>\', \'<div style="display: none;">abc</div>\',\n """<div style="margin: 20px auto 30px auto;">\n <img\n src="http://google.com"\n alt="alt text"\n height="10"\n width="10"\n />\n</div>"""\n , \'abc<div>abc</div>xyz\')'], {}), '(\'good_content\', (\'<div>abc</div>\',\n \'<div style="display: none;">abc</div>\',\n """<div style="margin: 20px auto 30px auto;">\n <img\n src="http://google.com"\n alt="alt text"\n height="10"\n width="10"\n />\n</div>"""\n , \'abc<div>abc</div>xyz\'))\n', (123, 384), False, 'import pytest\n'), ((543, 690), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bad_content"""', '(\'<div>abc<div>\', \'<img src="http://google.com">\', \'abc<div>abc<div>xyz\',\n \'<div style=">abc</div>\')'], {}), '(\'bad_content\', (\'<div>abc<div>\',\n \'<img src="http://google.com">\', \'abc<div>abc<div>xyz\',\n \'<div style=">abc</div>\'))\n', (566, 690), False, 'import pytest\n'), ((484, 533), 'notifications_utils.validate_html.check_if_string_contains_valid_html', 'check_if_string_contains_valid_html', (['good_content'], {}), '(good_content)\n', (519, 533), False, 'from notifications_utils.validate_html import check_if_string_contains_valid_html\n'), ((751, 799), 'notifications_utils.validate_html.check_if_string_contains_valid_html', 'check_if_string_contains_valid_html', (['bad_content'], {}), '(bad_content)\n', (786, 799), False, 'from notifications_utils.validate_html import check_if_string_contains_valid_html\n')] |
"""
This test is applicable while InnoSchedule bot instance is running
In order to run the test, you have to:
1.1 Create your bot in telegram:
Write @BotFather the command "/newbot", follow instructions
1.2 In admin/permanent.py put your token
2.1 Run the bot: Python Innoschedule.py
3.1 Contact you bot, find out your chat id mentioning another bot, @chatid_echo_bot
3.2 Assign your chat id to the chat_id variable in this module
"""
from parameterized import parameterized, parameterized_class
import telebot
import unittest
from modules.core.source import bot, main_markup
from modules.electives_schedule import controller as elective_controller
from modules.electives_schedule import permanent as elective_permanent
# YOU NEED TO KNOW YOUR CHAT_ID
chat_id = 0
@parameterized_class([
    {
        'test_message': '/add_course',
        'elective': 'Advanced Python Programming',
    }
])
class TestReceiveNotification(unittest.TestCase):
    """End-to-end checks against a running InnoSchedule bot instance."""

    def test_add_course(self):
        """Send the elective-course keyboard and verify the prompt round-trips."""
        if self.test_message != '/add_course':
            return
        keyboard = telebot.types.ReplyKeyboardMarkup(True, False)
        # One keyboard row per available elective course.
        for course in elective_controller.get_electives_course():
            keyboard.row(telebot.types.KeyboardButton(course.subject))
        prompt = "What course you want to add?"
        sent = bot.send_message(chat_id, prompt, reply_markup=keyboard)
        self.assertEqual(sent.text, prompt)

    def test_process_electives(self):
        """Register an elective for a test user and verify the confirmation."""
        course_name = self.elective
        self.assertTrue(elective_controller.check_electives_course(course_name))
        elective_controller.set_electives_user('test_user_id', course_name)
        sent = bot.send_message(chat_id, elective_permanent.MESSAGE_SUCCESS, reply_markup=main_markup)
        self.assertEqual(sent.text, 'The course has been successfully added!')
| [
"modules.core.source.bot.send_message",
"telebot.types.KeyboardButton",
"modules.electives_schedule.controller.get_electives_course",
"modules.electives_schedule.controller.check_electives_course",
"parameterized.parameterized_class",
"modules.electives_schedule.controller.set_electives_user",
"telebot.... | [((780, 881), 'parameterized.parameterized_class', 'parameterized_class', (["[{'test_message': '/add_course', 'elective': 'Advanced Python Programming'}]"], {}), "([{'test_message': '/add_course', 'elective':\n 'Advanced Python Programming'}])\n", (799, 881), False, 'from parameterized import parameterized, parameterized_class\n'), ((1660, 1711), 'modules.electives_schedule.controller.check_electives_course', 'elective_controller.check_electives_course', (['message'], {}), '(message)\n', (1702, 1711), True, 'from modules.electives_schedule import controller as elective_controller\n'), ((1751, 1814), 'modules.electives_schedule.controller.set_electives_user', 'elective_controller.set_electives_user', (['"""test_user_id"""', 'message'], {}), "('test_user_id', message)\n", (1789, 1814), True, 'from modules.electives_schedule import controller as elective_controller\n'), ((1833, 1925), 'modules.core.source.bot.send_message', 'bot.send_message', (['chat_id', 'elective_permanent.MESSAGE_SUCCESS'], {'reply_markup': 'main_markup'}), '(chat_id, elective_permanent.MESSAGE_SUCCESS, reply_markup=\n main_markup)\n', (1849, 1925), False, 'from modules.core.source import bot, main_markup\n'), ((1129, 1171), 'modules.electives_schedule.controller.get_electives_course', 'elective_controller.get_electives_course', ([], {}), '()\n', (1169, 1171), True, 'from modules.electives_schedule import controller as elective_controller\n'), ((1194, 1240), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', (['(True)', '(False)'], {}), '(True, False)\n', (1227, 1240), False, 'import telebot\n'), ((1468, 1522), 'modules.core.source.bot.send_message', 'bot.send_message', (['chat_id', 'reply'], {'reply_markup': 'options'}), '(chat_id, reply, reply_markup=options)\n', (1484, 1522), False, 'from modules.core.source import bot, main_markup\n'), ((1305, 1349), 'telebot.types.KeyboardButton', 'telebot.types.KeyboardButton', (['lesson.subject'], {}), 
'(lesson.subject)\n', (1333, 1349), False, 'import telebot\n')] |
# Used functions for data preparation
from __future__ import print_function
import sys
import warnings
import traceback
import numpy as np
import shutil
import os
from os import remove
from os import listdir
import os.path
from os.path import join
from datetime import datetime
from scipy.misc import imread, imresize
from PIL import Image
def create_dir(dir_name, relative_path):
    """
    Create new directory if not exists
    --------------
    Parameters:
        dir_name (string) - name of directory we want to create
        relative_path (string) - absolute path of directory we want to create
    Returns:
        path (string) - full path of directory, with a trailing '/'
    --------------
    """
    path = relative_path + dir_name
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # original os.path.exists + os.mkdir pair, and also creates any
    # missing parent directories.
    os.makedirs(path, exist_ok=True)
    return path + '/'
def list_train_images(directory):
    """
    Get all training images in the specified directory.
    Every immediate subdirectory except 'test' is treated as a class
    folder, and all files inside it are collected.
    --------------
    Parameters:
        directory (string) - absolute path of directory from which we want to take images
    Returns:
        list (all image paths)
    --------------
    """
    # os.path.join keeps this working on any OS; the original hard-coded
    # '\\' separators, which break path handling outside Windows.
    sub_folders = [
        x for x in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, x)) and x != 'test'
    ]
    all_files = []
    for folder in sub_folders:
        folder_path = os.path.join(directory, folder)
        all_files += [os.path.join(folder_path, y) for y in os.listdir(folder_path)]
    return all_files
def list_test_images(directory):
    """
    Get all images in the specified directory.
    --------------
    Parameters:
        directory (string) - absolute path of directory from which we want to take images
    Returns:
        list (all images)
    --------------
    """
    # One full path per directory entry, joined with the directory itself.
    return [join(directory, entry) for entry in listdir(directory)]
def discard_bad_images(dir_path, where_to_save_names, paths):
    """
    Delete unreadable/corrupted images, images that are not 3-channel,
    and images that cannot be resized; append the deleted paths to
    'corrupted_images.txt'.
    --------------
    Parameters:
        dir_path (string) - root directory of the images
                            (NOTE(review): unused in the body - confirm intent)
        where_to_save_names (string) - directory where 'corrupted_images.txt'
                                       is appended with the deleted file names
        paths (list) - image file paths to check
    --------------
    """
    # Promote warnings to exceptions so damaged files are caught below.
    warnings.filterwarnings('error')
    warnings.filterwarnings('ignore', category=DeprecationWarning)

    # paths = list_images(dir_path)
    print('\nOrigin files number: %d\n' % len(paths))
    num_delete = 0
    path_delete = []
    for path in paths:
        is_continue = False
        try:
            # Verify integrity with PIL first, then load pixel data with scipy.
            image = Image.open(path)
            image.verify()
            image = imread(path)
            # image = imread(path, mode='RGB')
        except Warning as warn:
            # Warnings were promoted to errors above: treat as corrupted.
            is_continue = True
            num_delete += 1
            path_delete.append(path)
            remove(path)
            print('>>> Warning happens! Removes image <%s>' % path)
            print('Warning detail:\n%s\n' % str(warn))
        except Exception as exc:
            is_continue = True
            num_delete += 1
            path_delete.append(path)
            remove(path)
            print('>>> Exception happens! Removes image <%s>' % path)
            print('Exception detail:\n%s\n' % str(exc))
        if is_continue:
            continue

        if len(image.shape) != 3 or image.shape[2] != 3:
            # Keep only 3-channel images; everything else is deleted.
            num_delete += 1
            path_delete.append(path)
            remove(path)
            print('>>> Found an image with shape: %s; Now removes it: <%s>\n' % (str(image.shape), path))
        else:
            # Scale the short side to 512 px, keeping the aspect ratio.
            height, width, _ = image.shape
            if height < width:
                new_height = 512
                new_width = int(width * new_height / height)
            else:
                new_width = 512
                new_height = int(height * new_width / width)
            try:
                image = imresize(image, [new_height, new_width], interp='nearest')
            except:
                # Unresizable images are removed as well.
                num_delete += 1
                path_delete.append(path)
                remove(path)
                print('>>> Fails to resize an image! Now removes it: <%s>\n' % path)
                traceback.print_exception(*sys.exc_info())

    print('\n>>>>> delete %d files! Current number of files: %d\n' % (num_delete, len(paths) - num_delete))
    with open(os.path.join(where_to_save_names, 'corrupted_images.txt'), 'a+') as f:
        for item in path_delete:
            f.write("%s\n" % item)
| [
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"os.path.join",
"sys.exc_info",
"scipy.misc.imread",
"os.path.isdir",
"os.mkdir",
"scipy.misc.imresize",
"warnings.filterwarnings",
"os.remove"
] | [((1139, 1160), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1149, 1160), False, 'import os\n'), ((1753, 1771), 'os.listdir', 'listdir', (['directory'], {}), '(directory)\n', (1760, 1771), False, 'from os import listdir\n'), ((1904, 1936), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (1927, 1936), False, 'import warnings\n'), ((1941, 2003), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (1964, 2003), False, 'import warnings\n'), ((760, 780), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (774, 780), False, 'import os\n'), ((790, 804), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (798, 804), False, 'import os\n'), ((1173, 1208), 'os.path.isdir', 'os.path.isdir', (["(directory + '\\\\' + x)"], {}), "(directory + '\\\\' + x)\n", (1186, 1208), False, 'import os\n'), ((1795, 1816), 'os.path.join', 'join', (['directory', 'file'], {}), '(directory, file)\n', (1799, 1816), False, 'from os.path import join\n'), ((2223, 2239), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2233, 2239), False, 'from PIL import Image\n'), ((2287, 2299), 'scipy.misc.imread', 'imread', (['path'], {}), '(path)\n', (2293, 2299), False, 'from scipy.misc import imread, imresize\n'), ((3086, 3098), 'os.remove', 'remove', (['path'], {}), '(path)\n', (3092, 3098), False, 'from os import remove\n'), ((4010, 4067), 'os.path.join', 'os.path.join', (['where_to_save_names', '"""corrupted_images.txt"""'], {}), "(where_to_save_names, 'corrupted_images.txt')\n", (4022, 4067), False, 'import os\n'), ((1398, 1423), 'os.listdir', 'os.listdir', (['(directory + x)'], {}), '(directory + x)\n', (1408, 1423), False, 'import os\n'), ((2487, 2499), 'os.remove', 'remove', (['path'], {}), '(path)\n', (2493, 2499), False, 'from os import remove\n'), ((2765, 2777), 'os.remove', 'remove', (['path'], {}), 
'(path)\n', (2771, 2777), False, 'from os import remove\n'), ((3544, 3602), 'scipy.misc.imresize', 'imresize', (['image', '[new_height, new_width]'], {'interp': '"""nearest"""'}), "(image, [new_height, new_width], interp='nearest')\n", (3552, 3602), False, 'from scipy.misc import imread, imresize\n'), ((3712, 3724), 'os.remove', 'remove', (['path'], {}), '(path)\n', (3718, 3724), False, 'from os import remove\n'), ((3870, 3884), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3882, 3884), False, 'import sys\n')] |
import numpy as np
__all__ = ["plot_spectrum_datasets_off_regions", "plot_contour_line"]
def plot_spectrum_datasets_off_regions(datasets, ax=None):
    """Plot the off regions of spectrum datasets.

    Parameters
    ----------
    datasets : list of `SpectrumDatasetOnOff`
        List of spectrum on-off datasets
    ax : `~matplotlib.axes.Axes`, optional
        Axes to plot on. Defaults to a new WCS axes built from the first
        dataset's off-counts geometry.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # Bug fix: the original `ax = plt.gca(...) or ax` always discarded a
    # caller-supplied `ax`, because the Axes returned by plt.gca() is truthy.
    if ax is None:
        ax = plt.gca(projection=datasets[0].counts_off.geom.wcs)

    color_cycle = plt.rcParams["axes.prop_cycle"]
    colors = color_cycle.by_key()["color"]
    handles = []

    for color, dataset in zip(colors, datasets):
        kwargs = {"edgecolor": color, "facecolor": "none"}
        dataset.counts_off.plot_region(ax=ax, **kwargs)

        # create proxy artist for the custom legend
        handle = mpatches.Patch(label=dataset.name, **kwargs)
        handles.append(handle)

    plt.legend(handles=handles)
def plot_contour_line(ax, x, y, **kwargs):
    """Plot a smooth closed curve through the given contour points.

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        Axes to draw on.
    x, y : array-like
        Contour point coordinates; the contour is closed automatically.
    **kwargs : dict
        Forwarded to ``ax.plot``. ``marker`` (default ``"+"``) styles the
        raw points; ``color`` (default ``"b"``) styles both artists.
    """
    from scipy.interpolate import CubicSpline

    # close contour
    xf = np.append(x, x[0])
    yf = np.append(y, y[0])

    # curve parametrization must be strictly increasing
    # so we use the cumulative distance of each point from the first one
    dist = np.sqrt(np.diff(xf) ** 2.0 + np.diff(yf) ** 2.0)
    dist = [0] + list(dist)
    t = np.cumsum(dist)
    ts = np.linspace(0, t[-1], 50)

    # 1D cubic spline interpolation through the closed polyline
    cs = CubicSpline(t, np.c_[xf, yf], bc_type="periodic")
    out = cs(ts)

    # dict.pop with a default replaces the original if/else ladders
    marker = kwargs.pop("marker", "+")
    color = kwargs.pop("color", "b")

    ax.plot(out[:, 0], out[:, 1], "-", color=color, **kwargs)
    ax.plot(xf, yf, linestyle='', marker=marker, color=color)
| [
"scipy.interpolate.CubicSpline",
"matplotlib.pyplot.gca",
"numpy.diff",
"numpy.append",
"numpy.linspace",
"matplotlib.patches.Patch",
"numpy.cumsum",
"matplotlib.pyplot.legend"
] | [((892, 919), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'handles'}), '(handles=handles)\n', (902, 919), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1108), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (1099, 1108), True, 'import numpy as np\n'), ((1118, 1136), 'numpy.append', 'np.append', (['y', 'y[0]'], {}), '(y, y[0])\n', (1127, 1136), True, 'import numpy as np\n'), ((1363, 1378), 'numpy.cumsum', 'np.cumsum', (['dist'], {}), '(dist)\n', (1372, 1378), True, 'import numpy as np\n'), ((1388, 1413), 'numpy.linspace', 'np.linspace', (['(0)', 't[-1]', '(50)'], {}), '(0, t[-1], 50)\n', (1399, 1413), True, 'import numpy as np\n'), ((1460, 1509), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['t', 'np.c_[xf, yf]'], {'bc_type': '"""periodic"""'}), "(t, np.c_[xf, yf], bc_type='periodic')\n", (1471, 1509), False, 'from scipy.interpolate import CubicSpline\n'), ((407, 458), 'matplotlib.pyplot.gca', 'plt.gca', ([], {'projection': 'datasets[0].counts_off.geom.wcs'}), '(projection=datasets[0].counts_off.geom.wcs)\n', (414, 458), True, 'import matplotlib.pyplot as plt\n'), ((811, 855), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'dataset.name'}), '(label=dataset.name, **kwargs)\n', (825, 855), True, 'import matplotlib.patches as mpatches\n'), ((1286, 1297), 'numpy.diff', 'np.diff', (['xf'], {}), '(xf)\n', (1293, 1297), True, 'import numpy as np\n'), ((1307, 1318), 'numpy.diff', 'np.diff', (['yf'], {}), '(yf)\n', (1314, 1318), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 15:05:10 2019
The Simple Linear Regression model
@author: Dr. Dr. <NAME>
@web : https://dannyvanpoucke.be
"""
import pandas as pd
import numpy as np
from TModelClass import TModelClass
from TModelResults import TModelResults
from Bootstrap import TBootstrap
from TModelQualityData import TModelQualityData
from sklearn.pipeline import Pipeline
class TLinearModel(TModelClass):
    """
    Child class representing the linear regression model.

    Wraps sklearn's LinearRegression behind the TModelClass interface and
    adds ensemble averaging with bootstrap confidence intervals.
    """
    def __init__(self,name,Target, Feature: pd.DataFrame,
                 Target_test, Feature_test: pd.DataFrame,
                 Pipeline: Pipeline
                 ):
        """
        Constructor of the TLinearModel class.
        It requires:
            - name : the name of the object instance
            - Feature : a pandas dataframe containing the features
            - Target : the training target data
            - Target_test: the test target data
            - Feature_test: the untransformed features for testing.
            - Pipeline : a pipeline generated by the PipelineFactory

        It sets the following properties
            - pipeline : a pipeline object containing the preprocessing transformations (excluding the fitter function)
            - model : the fitter function to be used (should be an sklearn function with "fit" method)
            - feature_tf: the transformed features as obtained by the pipeline
        """
        from sklearn.linear_model import LinearRegression

        super().__init__(name,Target, Feature,Target_test, Feature_test)
        self.nameModel='Linear Model'
        self.name=name
        print("Initialising the child class:",self.nameModel)
        #create a pipeline (can be extended to contain more functions, p67)
        self.pipeline=Pipeline
        #self.pipeline = Pipeline([
        #    #('std_scaler', StandardScaler()), #scaling to be centered on 0, with unit variance...since the values are quite different, this will help things
        #    ('std_scaler', StandardScaler(with_mean=False, with_std=False)),
        #])
        self.feature_tf = self.pipeline.fit_transform(Feature) #this is a numpy array...
        self.model = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None) #default values..explicitly set

    #def fit(self):
    #    """ test to check if it is possible to manually set the coef_ and intercept_
    #         --> conclusion: it seems to work, but only for fit & predict,
    #             not for cross_val_score...which just does new fittings
    #    Class-method wrapping the fit-method of the sklearn model.
    #
    #    - Target : a pandas dataframe with the Target data belonging to the
    #                Features provided upon initialisation.
    #    """
    #    import numpy as np
    #    self.model.intercept_ = 0
    #    self.model.coef_= np.array([-0,0,6.1]).reshape((1,-1))
    #    self.setCoefficients()
    #    print("FIT COEFF=",self.model.coef_," INTERCEPT=",self.model.intercept_)
    #    print("did some fitting, Parent-style:",type(self.model).__name__)

    def fitSanityCheck(self)->int:
        """
        Class method which should cover/deal with failures of sklearn.
        For some reason, sklearn LinearRegression randomly fails on small datasets.
        This failure gives rise to huge coefficients. However, just shuffling the
        data seems to resolve the issue.
        This function returns the number of shuffles needed to regain sanity.
        """
        import sys
        #first find out if we have "infinite" coefficients
        cnt=0
        insane=(abs(sum(self.model.coef_)/len(self.model.coef_))>1.0E9) #larger than 1 billion should be a clear sign
        while (insane and (cnt<100)): #try up to 100x ... if non are OK, then it will never be fixed
            cnt+=1
            #then we shuffle the features & targets...
            #1) recombine in 1 pandas dataframe
            combo=pd.concat([self.feature,self.target], axis=1, sort=False, join='outer')
            #2) shuffle: https://stackoverflow.com/questions/29576430/shuffle-dataframe-rows
            combo=combo.sample(frac=1).reset_index(drop=True)
            #3) re-store in target/feature/feature_tf
            self.target=combo[combo.columns[-1]].copy()
            self.feature=combo.drop(combo.columns[-1],axis=1)
            self.feature_tf = self.pipeline.fit_transform(self.feature) #this is a numpy array...
            #4) finally refit
            self.fit()
            # NOTE(review): sanityThresshold is presumably defined on the
            # parent TModelClass - confirm it is always set before this call.
            insane=(abs(sum(abs(self.model.coef_))/len(self.model.coef_))>self.sanityThresshold) #normaly values of 1E14 are reached, but on occasion as low as 1E08 was found
        if (cnt>0):#update the coefficients
            self.setCoefficients()
        if insane:
            print("EPIC FAIL, 100 attempts at sanity failed in the ",self.name,". Terminating this sick job!")
            sys.exit()

        return cnt

    #serial version
    def setAverageCoefficients(self,EnsembleData: TModelResults, setCI: bool):
        """
        Use the ensemble data to create an "average" model, and set the "coefficients"
        in the current model. This should be performed in each model separately.

        Parameters:
            - EnsembleData : TModelResults holding the per-member intercepts
                             and coefficients of the ensemble
            - setCI : if True, also compute BCa bootstrap confidence
                      intervals for the averaged intercept and coefficients
        """
        #import time
        # 1. Calculate the average coefficients
        # 1.1. transform them to arrays
        #start = time.perf_counter_ns()
        #print("3.1) Average Coefficients : AVG")
        intercept=np.zeros(EnsembleData.NData)
        coef=np.zeros((EnsembleData.NData,EnsembleData.modelCoef[0]['coef_'][1].shape[1]))
        for i in range(EnsembleData.NData):
            mcf=EnsembleData.modelCoef[i]
            intercept[i]=np.asarray(mcf['intercept_'][1]).ravel()
            coef[i,:]=np.asarray(mcf['coef_'][1]).ravel()
        mean_intercept=np.mean(intercept,axis=0)#axis is the varying direction, so 0 means we calculate the average of a column by varying the row
        mean_coef=np.mean(coef,axis=0)
        # 2. Set the model coefficients to these averaged values
        self.model.intercept_=mean_intercept
        self.model.coef_=mean_coef
        self.isAverage = True
        self.hasCI=False
        if setCI:
            #end = time.perf_counter_ns()
            #print("3.2.a) Average Coefficients : CI Intercept ",(end-start)/10E9)
            # 3. Calculate Confidence Interval using Bootstrapper tech?
            # & 4. Store the CI data
            ## For the intercept
            boot=TBootstrap(data=intercept,Func=np.mean)
            #end = time.perf_counter_ns()
            #print("3.2.b) NPboot",(end-start)/1E9)
            boot.NPbootstrap(n_iter=2000, Jackknife=True)
            #end = time.perf_counter_ns()
            #print("3.2.c) Con Int",(end-start)/1E9)
            avgm, avgp = boot.ConfidenceInterval(CItype="BCa",alpha=0.05,n_samples=2000)#95%confidence interval
            self.CI["intercept_lo"]=avgm
            self.CI["intercept_hi"]=avgp
            ## For the coefficients
            avgml=list()
            avgpl=list()
            # One bootstrap per coefficient column.
            for col in range(EnsembleData.modelCoef[0]['coef_'][1].shape[1]):
                #end = time.perf_counter_ns()
                #print("3.2) Average Coefficients : CI Coef ",col," ",(end-start)/1E9)
                boot=TBootstrap(data=coef[:,col],Func=np.mean)
                boot.NPbootstrap(n_iter=2000, Jackknife=True)
                avgm, avgp = boot.ConfidenceInterval(CItype="BCa",alpha=0.05)#95%confidence interval
                avgml.append(avgm)
                avgpl.append(avgp)
            self.CI["coef_lo"]=avgml
            self.CI["coef_hi"]=avgpl
            self.hasCI = True

        #store the resulting coefficients in our wrapper tracker...and we are done
        self.setCoefficients()
        self.Quality=TModelQualityData(EData=EnsembleData)

    def printAverageCoefficients(self, File: str=None):
        """
        Print a block of information to a file, containing the averaged coefficients.

        parameters:
            - self:
            - File: string containing a filename, if None standard output is used. Default=None
        """
        if File is None:
            print("======= THE AVERAGED MODEL ==============")
            print(" Model : ",self.name)
            print(self.Quality.QualitiesText())
            if self.hasCI:
                print("Intercept : ",self.model.intercept_," and CI=[",self.CI["intercept_lo"]," ; ",self.CI["intercept_hi"],"]")
                for col in range(len(self.model.coef_)):
                    print("coef ",col," : ",self.model.coef_[col]," and CI=[",self.CI["coef_lo"][col]," ; ",self.CI["coef_hi"][col],"]")
            else:
                print("Intercept : ",self.model.intercept_)
                for col in range(len(self.model.coef_)):
                    print("coef ",col," : ",self.model.coef_[col])
            print("====================================\n\n")
        else:
            # Append mode: repeated calls accumulate blocks in the same file.
            foo=open(File,"a+",)
            foo.write("======= THE AVERAGED MODEL ==============\n")
            line=" Model : "+self.name+"\n"
            foo.write(line)
            foo.write(self.Quality.QualitiesText())
            if self.hasCI:
                line="Intercept : "+str(self.model.intercept_)+" and CI=["+str(self.CI["intercept_lo"])+" ; "+str(self.CI["intercept_hi"])+"] \n"
                foo.write(line)
                for col in range(len(self.model.coef_)):
                    line="coef "+str(col)+" : "+str(self.model.coef_[col])+" and CI=["+str(self.CI["coef_lo"][col])+" ; "+str(self.CI["coef_hi"][col])+"] \n"
                    foo.write(line)
            else:
                line="Intercept : "+str(self.model.intercept_)+"\n"
                foo.write(line)
                for col in range(len(self.model.coef_)):
                    line="coef "+str(col)+" : "+str(self.model.coef_[col])+"\n"
                    foo.write(line)
            foo.write("====================================\n\n")
            foo.close()

    def setCoefficients(self):
        """
        Class-method collecting and storing the fitting coefficients for a
        linear regression in the object
        """
        import numpy as np

        super().setCoefficients()
        # Four entries per call: header + values for coef_ and intercept_,
        # indexed consecutively from the parent's current coefindex.
        self.modelcoef['header_coef']=[self.coefindex,"The coefficients for each target (one per row) are given by:"]
        self.modelcoef['coef_']=[self.coefindex+1,np.array([self.model.coef_])]
        self.modelcoef['header_intercept']=[self.coefindex+2,"The intercepts for each target (one per row) are given by:"]
        self.modelcoef['intercept_']=[self.coefindex+3,np.array([self.model.intercept_])]
        self.coefindex+=4
| [
"numpy.mean",
"Bootstrap.TBootstrap",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"sys.exit",
"pandas.concat",
"sklearn.linear_model.LinearRegression",
"TModelQualityData.TModelQualityData"
] | [((2265, 2344), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)', 'normalize': '(False)', 'copy_X': '(True)', 'n_jobs': 'None'}), '(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)\n', (2281, 2344), False, 'from sklearn.linear_model import LinearRegression\n'), ((5620, 5648), 'numpy.zeros', 'np.zeros', (['EnsembleData.NData'], {}), '(EnsembleData.NData)\n', (5628, 5648), True, 'import numpy as np\n'), ((5662, 5740), 'numpy.zeros', 'np.zeros', (["(EnsembleData.NData, EnsembleData.modelCoef[0]['coef_'][1].shape[1])"], {}), "((EnsembleData.NData, EnsembleData.modelCoef[0]['coef_'][1].shape[1]))\n", (5670, 5740), True, 'import numpy as np\n'), ((5986, 6012), 'numpy.mean', 'np.mean', (['intercept'], {'axis': '(0)'}), '(intercept, axis=0)\n', (5993, 6012), True, 'import numpy as np\n'), ((6128, 6149), 'numpy.mean', 'np.mean', (['coef'], {'axis': '(0)'}), '(coef, axis=0)\n', (6135, 6149), True, 'import numpy as np\n'), ((7995, 8032), 'TModelQualityData.TModelQualityData', 'TModelQualityData', ([], {'EData': 'EnsembleData'}), '(EData=EnsembleData)\n', (8012, 8032), False, 'from TModelQualityData import TModelQualityData\n'), ((4088, 4160), 'pandas.concat', 'pd.concat', (['[self.feature, self.target]'], {'axis': '(1)', 'sort': '(False)', 'join': '"""outer"""'}), "([self.feature, self.target], axis=1, sort=False, join='outer')\n", (4097, 4160), True, 'import pandas as pd\n'), ((5060, 5070), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5068, 5070), False, 'import sys\n'), ((6652, 6692), 'Bootstrap.TBootstrap', 'TBootstrap', ([], {'data': 'intercept', 'Func': 'np.mean'}), '(data=intercept, Func=np.mean)\n', (6662, 6692), False, 'from Bootstrap import TBootstrap\n'), ((10681, 10709), 'numpy.array', 'np.array', (['[self.model.coef_]'], {}), '([self.model.coef_])\n', (10689, 10709), True, 'import numpy as np\n'), ((10889, 10922), 'numpy.array', 'np.array', (['[self.model.intercept_]'], {}), '([self.model.intercept_])\n', 
(10897, 10922), True, 'import numpy as np\n'), ((7451, 7494), 'Bootstrap.TBootstrap', 'TBootstrap', ([], {'data': 'coef[:, col]', 'Func': 'np.mean'}), '(data=coef[:, col], Func=np.mean)\n', (7461, 7494), False, 'from Bootstrap import TBootstrap\n'), ((5851, 5883), 'numpy.asarray', 'np.asarray', (["mcf['intercept_'][1]"], {}), "(mcf['intercept_'][1])\n", (5861, 5883), True, 'import numpy as np\n'), ((5914, 5941), 'numpy.asarray', 'np.asarray', (["mcf['coef_'][1]"], {}), "(mcf['coef_'][1])\n", (5924, 5941), True, 'import numpy as np\n')] |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import ufora.test.PerformanceTestReporter as PerformanceTestReporter
import sys
class StringTestCases(object):
"""Test cases for pyfora strings"""
def test_string_indexing(self):
def f():
a = "abc"
return (a[0], a[1], a[2], a[-1], a[-2])
self.equivalentEvaluationTest(f)
def test_strings_with_weird_characters(self):
x = "\xb0"
def f():
return (x,"\xb0")
self.equivalentEvaluationTest(f)
    def test_large_string_indexing_perf(self):
        """Benchmark per-character indexing of large and small strings.

        Warms up with single passes, then records pyfora timings via
        PerformanceTestReporter for both string sizes.
        """
        def f(ct, passCt):
            # Build a string of ~ct characters (Python 2 integer division),
            # then index every position passCt times.
            x = "asdfasdf" * (ct / 8)
            res = 0
            for _ in xrange(passCt):
                for ix in xrange(len(x)):
                    res = res + len(x[ix])
            return res

        self.evaluateWithExecutor(f, 1000000, 1)
        self.evaluateWithExecutor(f, 10000, 1)

        @PerformanceTestReporter.PerfTest("pyfora.string_indexing.large_string")
        def test1():
            self.evaluateWithExecutor(f, 1000000, 100)

        @PerformanceTestReporter.PerfTest("pyfora.string_indexing.small_string")
        def test2():
            self.evaluateWithExecutor(f, 10000, 10000)

        test1()
        test2()
    def test_large_string_parsing_perf(self):
        """Benchmark int() parsing of slices from a large comma-separated string."""
        def f(ct, passCt):
            x = "1,2,3,4," * ct
            res = 0
            for _ in xrange(passCt):
                ix = 0
                while ix < len(x):
                    # Parse one digit at a time; step 2 skips the commas.
                    res = res + int(x[ix:ix+1]) + 12341234
                    ix = ix + 2
            return res

        self.evaluateWithExecutor(f, 1000000, 1)

        with PerformanceTestReporter.RecordAsPerfTest("pyfora.string_to_int"):
            self.evaluateWithExecutor(f, 1000000, 10)
    def test_string_slicing(self):
        """Benchmark fixed-width string slicing, pyfora vs. native CPython."""
        def f(ct, passCt,chars):
            x = "asdfasdf" * (ct / 8)
            res = 0
            for _ in xrange(passCt):
                for ix in xrange(len(x)):
                    res = res + len(x[ix:ix+chars])
            return res

        self.evaluateWithExecutor(f, 1000000, 1, 2)
        self.evaluateWithExecutor(f, 10000, 1, 2)

        def runTest(func, name):
            # Register `func` as a named perf test and execute it immediately.
            PerformanceTestReporter.PerfTest(name)(func)()

        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2), "pyfora.string_slicing_10mm.2_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 200), "pyfora.string_slicing_10mm.200_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2), "pyfora.string_slicing_10mm.2_char_small_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 200), "pyfora.string_slicing_10mm.200_char_small_string.pyfora")

        # Raise the interpreter check interval so the native timings are not
        # perturbed by thread switches; restore the smaller value afterwards.
        sys.setcheckinterval(100000)
        runTest(lambda: f(1000000, 10, 2), "pyfora.string_slicing_10mm.2_char_large_string.native")
        runTest(lambda: f(1000000, 10, 200), "pyfora.string_slicing_10mm.200_char_large_string.native")
        runTest(lambda: f(10000, 1000, 2), "pyfora.string_slicing_10mm.2_char_small_string.native")
        runTest(lambda: f(10000, 1000, 200), "pyfora.string_slicing_10mm.200_char_small_string.native")
        sys.setcheckinterval(100)
def test_string_slicing_into_vector(self):
def testFunction(ct, passCt,chars):
x = "asdfasdf" * (ct / 8)
res = 0
for _ in xrange(passCt):
v = [x[ix*chars:ix*chars+chars] for ix in xrange(len(x) / chars)]
for e in v:
res = res + len(e)
return res
f = testFunction
self.evaluateWithExecutor(f, 1000000, 1, 2)
self.evaluateWithExecutor(f, 10000, 1, 2)
def runTest(func, name):
PerformanceTestReporter.PerfTest(name)(func)()
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2), "pyfora.string_slicing_into_vector_10mm.2_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 1000000, 1000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_large_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2), "pyfora.string_slicing_into_vector_10mm.2_char_small_string.pyfora")
runTest(lambda: self.evaluateWithExecutor(f, 10000, 100000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_small_string.pyfora")
sys.setcheckinterval(100000)
runTest(lambda: f(1000000, 10, 2), "pyfora.string_slicing_into_vector_10mm.2_char_large_string.native")
runTest(lambda: f(1000000, 1000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_large_string.native")
runTest(lambda: f(10000, 1000, 2), "pyfora.string_slicing_into_vector_10mm.2_char_small_string.native")
runTest(lambda: f(10000, 100000, 200), "pyfora.string_slicing_into_vector_10mm.200_char_small_string.native")
sys.setcheckinterval(100)
def test_string_splitlines(self):
#test a wide variety of strings with combinations of different separators
stringsToTest = []
for char1 in ["","a"]:
stringsToTest.append(char1)
for sep1 in ["\n","\r","\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
stringsToTest.append(char1 + sep1)
for char2 in ["","b"]:
stringsToTest.append(char1 + sep1 + char2)
for sep2 in ["\n","\r","\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
stringsToTest.append(char1 + sep1 + char2 + sep2)
def f():
res = []
for shouldSplit in [True, False]:
for candidate in stringsToTest:
res = res + [(candidate, candidate.splitlines(shouldSplit))]
self.equivalentEvaluationTest(f)
def test_string_split(self):
#test a wide variety of strings with combinations of different separators
stringsToTest = ["", "a", "aa", "ab", "aba", "aaa", "bbb", "abab", "abc"]
sepsToTest = ["a","b"]
def f():
res = []
for s in stringsToTest:
for sep in sepsToTest:
res = res + [(s,sep, s.split(sep))]
self.equivalentEvaluationTest(f)
def test_string_indexing_2(self):
def f(idx):
x = "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf"
return x[idx]
self.equivalentEvaluationTest(f, -1)
self.equivalentEvaluationTest(f, -2)
self.equivalentEvaluationTest(f, 0)
self.equivalentEvaluationTest(f, 1)
def test_string_comparison(self):
def f():
a = "a"
b = "b"
r1 = a < b
r2 = a > b
return (r1, r2)
self.equivalentEvaluationTest(f)
def test_string_duplication(self):
def f():
a = "asdf"
r1 = a * 20
r2 = 20 * a
return (r1, r2)
self.equivalentEvaluationTest(f)
def test_string_equality_methods(self):
def f():
a = "val1"
b = "val1"
r1 = a == b
r2 = a != b
a = "val2"
r3 = a == b
r4 = a != b
r5 = a.__eq__(b)
r6 = a.__ne__(b)
return (r1, r2, r3, r4, r5, r6)
self.equivalentEvaluationTest(f)
def test_large_strings(self):
def f():
a = "val1"
while len(a) < 1000000:
a = a + a
return a
self.equivalentEvaluationTest(f)
def test_define_constant_string(self):
x = "a string"
with self.create_executor() as executor:
define_x = executor.define(x)
fora_x = define_x.result()
self.assertIsNotNone(fora_x)
def test_compute_string(self):
def f():
return "a string"
remote = self.evaluateWithExecutor(f)
self.assertEqual(f(), remote)
self.assertTrue(isinstance(remote, str))
def test_strings_1(self):
def f():
x = "asdf"
return x
self.equivalentEvaluationTest(f)
| [
"ufora.test.PerformanceTestReporter.PerfTest",
"sys.setcheckinterval",
"ufora.test.PerformanceTestReporter.RecordAsPerfTest"
] | [((1471, 1542), 'ufora.test.PerformanceTestReporter.PerfTest', 'PerformanceTestReporter.PerfTest', (['"""pyfora.string_indexing.large_string"""'], {}), "('pyfora.string_indexing.large_string')\n", (1503, 1542), True, 'import ufora.test.PerformanceTestReporter as PerformanceTestReporter\n'), ((1629, 1700), 'ufora.test.PerformanceTestReporter.PerfTest', 'PerformanceTestReporter.PerfTest', (['"""pyfora.string_indexing.small_string"""'], {}), "('pyfora.string_indexing.small_string')\n", (1661, 1700), True, 'import ufora.test.PerformanceTestReporter as PerformanceTestReporter\n'), ((3333, 3361), 'sys.setcheckinterval', 'sys.setcheckinterval', (['(100000)'], {}), '(100000)\n', (3353, 3361), False, 'import sys\n'), ((3780, 3805), 'sys.setcheckinterval', 'sys.setcheckinterval', (['(100)'], {}), '(100)\n', (3800, 3805), False, 'import sys\n'), ((4964, 4992), 'sys.setcheckinterval', 'sys.setcheckinterval', (['(100000)'], {}), '(100000)\n', (4984, 4992), False, 'import sys\n'), ((5463, 5488), 'sys.setcheckinterval', 'sys.setcheckinterval', (['(100)'], {}), '(100)\n', (5483, 5488), False, 'import sys\n'), ((2209, 2273), 'ufora.test.PerformanceTestReporter.RecordAsPerfTest', 'PerformanceTestReporter.RecordAsPerfTest', (['"""pyfora.string_to_int"""'], {}), "('pyfora.string_to_int')\n", (2249, 2273), True, 'import ufora.test.PerformanceTestReporter as PerformanceTestReporter\n'), ((2760, 2798), 'ufora.test.PerformanceTestReporter.PerfTest', 'PerformanceTestReporter.PerfTest', (['name'], {}), '(name)\n', (2792, 2798), True, 'import ufora.test.PerformanceTestReporter as PerformanceTestReporter\n'), ((4339, 4377), 'ufora.test.PerformanceTestReporter.PerfTest', 'PerformanceTestReporter.PerfTest', (['name'], {}), '(name)\n', (4371, 4377), True, 'import ufora.test.PerformanceTestReporter as PerformanceTestReporter\n')] |
import logging
from zentral.core.events.base import BaseEvent, register_event_type
logger = logging.getLogger('zentral.contrib.audit.events')
ALL_EVENTS_SEARCH_DICT = {"tag": "audit"}
class AuditEvent(BaseEvent):
event_type = "audit"
tags = ["audit"]
payload_aggregations = [
("event_id", {"type": "terms", "bucket_number": 10, "label": "Event IDs"}),
]
register_event_type(AuditEvent)
| [
"logging.getLogger",
"zentral.core.events.base.register_event_type"
] | [((93, 142), 'logging.getLogger', 'logging.getLogger', (['"""zentral.contrib.audit.events"""'], {}), "('zentral.contrib.audit.events')\n", (110, 142), False, 'import logging\n'), ((385, 416), 'zentral.core.events.base.register_event_type', 'register_event_type', (['AuditEvent'], {}), '(AuditEvent)\n', (404, 416), False, 'from zentral.core.events.base import BaseEvent, register_event_type\n')] |
import sys
import configparser
import json
import logging
if __name__ == "__main__":
sys.exit()
config = configparser.ConfigParser(allow_no_value=True)
config.read("conf/config.ini")
mimes_file = config['server']['MIMES_FILE']
mime_file = open(mimes_file,"r")
mimes = mime_file.read()
mime_file.close()
MIME_TYPES = json.loads(mimes)
loglevel = config['server']['LOG_LEVEL']
if loglevel == "":
LOG_LEVEL = logging.NOTSET
elif loglevel == "DEBUG":
LOG_LEVEL = logging.DEBUG
elif loglevel == "INFO":
LOG_LEVEL = logging.INFO
elif loglevel == "WARNING":
LOG_LEVEL = logging.WARNING
elif loglevel == "ERROR":
LOG_LEVEL = logging.ERROR
elif loglevel == "CRITICAL":
LOG_LEVEL = logging.CRITICAL
"""HOSTNAME = "localhost"
PORT = 8080
PUBLIC_DIR = "public" # folder to load documents from
ERROR_DOC = { #Location of error documents. Loaded from root folder
"404":"404.html"
}
MIME_TYPES = mimes.MIME_TYPES #types of mimes the server accepts. To change add your items in mimes.py
SERVER_VERSION = "Melvin2204-webserver"
SYS_VERSION = ""
LOG_LEVEL = logging.WARNING
BEHIND_PROXY = True# if the server is behind a proxy.""" | [
"json.loads",
"configparser.ConfigParser",
"sys.exit"
] | [((110, 156), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'allow_no_value': '(True)'}), '(allow_no_value=True)\n', (135, 156), False, 'import configparser\n'), ((322, 339), 'json.loads', 'json.loads', (['mimes'], {}), '(mimes)\n', (332, 339), False, 'import json\n'), ((89, 99), 'sys.exit', 'sys.exit', ([], {}), '()\n', (97, 99), False, 'import sys\n')] |
"""
AUTOR: Juanjo
FECHA DE CREACIÓN: 24/05/2019
"""
from flask import render_template, redirect, url_for
from flask_login import login_required, current_user
from app.models import Post
from . import admin_bp
from .forms import PostForm
@admin_bp.route("/admin/post/", methods=['GET', 'POST'], defaults={'post_id': None})
@admin_bp.route("/admin/post/<int:post_id>/", methods=['GET', 'POST'])
@login_required
def post_form(post_id):
form = PostForm()
if form.validate_on_submit():
title = form.title.data
content = form.content.data
post = Post(user_id=current_user.id, title=title, content=content)
post.save()
return redirect(url_for('public.index'))
return render_template("admin/post_form.html", form=form)
| [
"flask.render_template",
"app.models.Post",
"flask.url_for"
] | [((721, 771), 'flask.render_template', 'render_template', (['"""admin/post_form.html"""'], {'form': 'form'}), "('admin/post_form.html', form=form)\n", (736, 771), False, 'from flask import render_template, redirect, url_for\n'), ((580, 639), 'app.models.Post', 'Post', ([], {'user_id': 'current_user.id', 'title': 'title', 'content': 'content'}), '(user_id=current_user.id, title=title, content=content)\n', (584, 639), False, 'from app.models import Post\n'), ((685, 708), 'flask.url_for', 'url_for', (['"""public.index"""'], {}), "('public.index')\n", (692, 708), False, 'from flask import render_template, redirect, url_for\n')] |
"""
Views which deal with Bookmarks, allowing them to be added, removed,
and viewed according to various criteria.
"""
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import list_detail
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from cab.models import Bookmark, Language, Snippet, Tag
base_generic_dict = {
'allow_empty': True,
'paginate_by': 20,
}
def add_bookmark(request, snippet_id):
"""
Bookmarks a Snippet for a User.
Context::
None, returns a redirect to the Snippet.
Template::
None, returns a redirect to the Snippet.
"""
snippet = get_object_or_404(Snippet, pk=snippet_id)
try:
Bookmark.objects.get(user__pk=request.user.id,
snippet__pk=snippet.id)
except Bookmark.DoesNotExist:
bookmark = Bookmark.objects.create(user=request.user,
snippet=snippet)
return HttpResponseRedirect(snippet.get_absolute_url())
add_bookmark = login_required(add_bookmark)
def bookmarks(request):
"""
List of a User's bookmarks.
Context::
Same as the generic ``list_detail.object_list`` view.
Template::
cab/user_bookmarks.html
"""
return list_detail.object_list(request,
queryset=Bookmark.objects.get_for_user(request.user.username).select_related(),
template_name='cab/user_bookmarks.html',
**base_generic_dict)
bookmarks = login_required(bookmarks)
def bookmark_author_list(request):
"""
Lists the authors of a User's Bookmarks.
Context::
Same as the ``list_detail.object_list`` generic view.
Template::
cab/bookmark_author_list.html
"""
return list_detail.object_list(request,
queryset=Bookmark.objects.distinct_list('author',
request.user.username).select_related(),
template_name='cab/bookmarks_author_list.html',
**base_generic_dict)
bookmark_author_list = login_required(bookmark_author_list)
def bookmarks_by_author(request, author_username):
"""
List of a User's bookmarks written by a particular author.
Context::
Same as the generic ``list_detail.object_list`` view, with one
extra variable:
object
The author
Template::
cab/bookmarks_by_author.html
"""
author = get_object_or_404(User, username__exact=author_username)
return list_detail.object_list(request,
queryset=Bookmark.objects.get_by_author(request.user.username,
author_slug).select_related(),
extra_context={ 'object': author },
template_name='cab/bookmarks_by_author.html',
**base_generic_dict)
bookmarks_by_author = login_required(bookmarks_by_author)
def bookmarks_by_language(request, language_slug):
"""
List of a User's bookmarks which are written in a
particular language.
Context::
Same as the generic ``list_detail.object_list`` view, with
one extra variable:
object
The Language
Template::
cab/bookmarks_by_language.html
"""
language = get_object_or_404(Language, slug__exact=language_slug)
return list_detail.object_list(request,
queryset=Bookmark.objects.get_by_language(request.user.username,
language_slug).select_related(),
extra_context={ 'object': language},
template_name='cab/bookmarks_by_language.html',
**base_generic_dict)
bookmarks_by_language = login_required(bookmarks_by_language)
def bookmarks_by_tag(request, tag_slug):
"""
List of a User's bookmarks which have a particular tag.
Context::
Same as the generic ``list_detail.object_list`` view, with
one extra variable:
object
The Tag
Template::
snipppets/bookmarks_by_tag.html
"""
tag = get_object_or_404(Tag, slug__exact=tag_slug)
return list_detail.object_list(request,
queryset=Bookmark.objects.get_by_tag(request.user.username,
tag_slug).select_related(),
extra_context={ 'object': tag },
template_name='cab/bookmarks_by_tag.html',
**base_generic_dict)
bookmarks_by_tag = login_required(bookmarks_by_tag)
def bookmark_language_list(request):
"""
Lists the Languages a User's Bookmarks are written in.
Context::
Same as the ``list_detail.object_list`` generic view.
Template::
cab/bookmark_language_list.html
"""
return list_detail.object_list(request,
queryset=Bookmark.objects.distinct_list('language',
request.user.username).select_related(),
template_name='cab/bookmarks_language_list.html',
**base_generic_dict)
bookmark_language_list = login_required(bookmark_language_list)
def bookmark_tag_list(request):
"""
Lists the Tags attached to a User's Bookmarks.
Context::
Same as the ``list_detail.object_list`` generic view.
Template::
cab/bookmark_tag_list.html
"""
return list_detail.object_list(request,
queryset=Bookmark.objects.distinct_list('tag',
request.user.username).select_related(),
template_name='cab/bookmarks_tag_list.html',
**base_generic_dict)
bookmark_tag_list = login_required(bookmark_tag_list)
def delete_bookmark(request, bookmark_id):
"""
Removes a User's bookmarked Snippet.
Context::
None, returns a redirect to the HTTP referer.
Template::
None, returns a redirect to the HTTP referer.
"""
bookmark = get_object_or_404(Bookmark, user__pk=request.user.id,
pk=bookmark_id)
bookmark.delete()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
delete_bookmark = login_required(delete_bookmark)
| [
"django.http.HttpResponseRedirect",
"cab.models.Bookmark.objects.distinct_list",
"cab.models.Bookmark.objects.get_by_author",
"cab.models.Bookmark.objects.get_for_user",
"django.shortcuts.get_object_or_404",
"cab.models.Bookmark.objects.get",
"cab.models.Bookmark.objects.create",
"django.contrib.auth.... | [((1186, 1214), 'django.contrib.auth.decorators.login_required', 'login_required', (['add_bookmark'], {}), '(add_bookmark)\n', (1200, 1214), False, 'from django.contrib.auth.decorators import login_required\n'), ((1725, 1750), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmarks'], {}), '(bookmarks)\n', (1739, 1750), False, 'from django.contrib.auth.decorators import login_required\n'), ((2395, 2431), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmark_author_list'], {}), '(bookmark_author_list)\n', (2409, 2431), False, 'from django.contrib.auth.decorators import login_required\n'), ((3322, 3357), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmarks_by_author'], {}), '(bookmarks_by_author)\n', (3336, 3357), False, 'from django.contrib.auth.decorators import login_required\n'), ((4279, 4316), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmarks_by_language'], {}), '(bookmarks_by_language)\n', (4293, 4316), False, 'from django.contrib.auth.decorators import login_required\n'), ((5156, 5188), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmarks_by_tag'], {}), '(bookmarks_by_tag)\n', (5170, 5188), False, 'from django.contrib.auth.decorators import login_required\n'), ((5857, 5895), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmark_language_list'], {}), '(bookmark_language_list)\n', (5871, 5895), False, 'from django.contrib.auth.decorators import login_required\n'), ((6531, 6564), 'django.contrib.auth.decorators.login_required', 'login_required', (['bookmark_tag_list'], {}), '(bookmark_tag_list)\n', (6545, 6564), False, 'from django.contrib.auth.decorators import login_required\n'), ((7034, 7065), 'django.contrib.auth.decorators.login_required', 'login_required', (['delete_bookmark'], {}), '(delete_bookmark)\n', (7048, 7065), False, 'from django.contrib.auth.decorators import 
login_required\n'), ((796, 837), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Snippet'], {'pk': 'snippet_id'}), '(Snippet, pk=snippet_id)\n', (813, 837), False, 'from django.shortcuts import get_object_or_404, render_to_response\n'), ((2787, 2843), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username__exact': 'author_username'}), '(User, username__exact=author_username)\n', (2804, 2843), False, 'from django.shortcuts import get_object_or_404, render_to_response\n'), ((3735, 3789), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Language'], {'slug__exact': 'language_slug'}), '(Language, slug__exact=language_slug)\n', (3752, 3789), False, 'from django.shortcuts import get_object_or_404, render_to_response\n'), ((4656, 4700), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Tag'], {'slug__exact': 'tag_slug'}), '(Tag, slug__exact=tag_slug)\n', (4673, 4700), False, 'from django.shortcuts import get_object_or_404, render_to_response\n'), ((6829, 6898), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Bookmark'], {'user__pk': 'request.user.id', 'pk': 'bookmark_id'}), '(Bookmark, user__pk=request.user.id, pk=bookmark_id)\n', (6846, 6898), False, 'from django.shortcuts import get_object_or_404, render_to_response\n'), ((6965, 7015), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["request.META['HTTP_REFERER']"], {}), "(request.META['HTTP_REFERER'])\n", (6985, 7015), False, 'from django.http import HttpResponseRedirect\n'), ((855, 925), 'cab.models.Bookmark.objects.get', 'Bookmark.objects.get', ([], {'user__pk': 'request.user.id', 'snippet__pk': 'snippet.id'}), '(user__pk=request.user.id, snippet__pk=snippet.id)\n', (875, 925), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((1008, 1067), 'cab.models.Bookmark.objects.create', 'Bookmark.objects.create', ([], {'user': 'request.user', 'snippet': 'snippet'}), '(user=request.user, snippet=snippet)\n', (1031, 
1067), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((1510, 1562), 'cab.models.Bookmark.objects.get_for_user', 'Bookmark.objects.get_for_user', (['request.user.username'], {}), '(request.user.username)\n', (1539, 1562), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((2076, 2139), 'cab.models.Bookmark.objects.distinct_list', 'Bookmark.objects.distinct_list', (['"""author"""', 'request.user.username'], {}), "('author', request.user.username)\n", (2106, 2139), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((2932, 2998), 'cab.models.Bookmark.objects.get_by_author', 'Bookmark.objects.get_by_author', (['request.user.username', 'author_slug'], {}), '(request.user.username, author_slug)\n', (2962, 2998), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((3878, 3948), 'cab.models.Bookmark.objects.get_by_language', 'Bookmark.objects.get_by_language', (['request.user.username', 'language_slug'], {}), '(request.user.username, language_slug)\n', (3910, 3948), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((4789, 4849), 'cab.models.Bookmark.objects.get_by_tag', 'Bookmark.objects.get_by_tag', (['request.user.username', 'tag_slug'], {}), '(request.user.username, tag_slug)\n', (4816, 4849), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((5532, 5597), 'cab.models.Bookmark.objects.distinct_list', 'Bookmark.objects.distinct_list', (['"""language"""', 'request.user.username'], {}), "('language', request.user.username)\n", (5562, 5597), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n'), ((6221, 6281), 'cab.models.Bookmark.objects.distinct_list', 'Bookmark.objects.distinct_list', (['"""tag"""', 'request.user.username'], {}), "('tag', request.user.username)\n", (6251, 6281), False, 'from cab.models import Bookmark, Language, Snippet, Tag\n')] |
import unittest as ut
from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon
class TestLexicon(ut.TestCase):
def test_lexicon_adds_tokens_from_text(self):
lex = Lexicon()
lex.add("Isso é uma frase")
self.assertEqual(lex.total(), 4)
def test_lexicon_counts_frequency_of_word(self):
lex = Lexicon()
lex.add("Essa aqui é uma frase para testar uma classe de Léxico")
self.assertEqual(lex.frequency("uma"), 2)
self.assertEqual(lex.frequency("frase"),1)
def test_lexicon_tokenizes_properly(self):
lex = Lexicon()
lex.add("Uma frase.")
self.assertEqual(lex.frequency("frase"),1)
self.assertEqual(lex.frequency("frase."),0)
def test_gets_frequency_dict(self):
lex = Lexicon()
lex.add("Uma frase.")
fdict = lex.frequency_dict()
self.assertIs(type(fdict), dict)
self.assertEqual(fdict["uma"], 1)
class TestProbabilityLexicon(ut.TestCase):
def test_gets_probability_of_token(self):
plex = ProbabilityLexicon()
plex.add("Duas palavras.")
self.assertEqual(plex.probability("duas"),0.5)
self.assertEqual(plex.probability("palavras"),0.5)
def test_gets_probability_dict(self):
plex = ProbabilityLexicon()
plex.add("Uma frase.")
pdict = plex.probability_dict()
self.assertIs(type(pdict), dict)
self.assertEqual(pdict["uma"], 0.5)
class TestComplexityLexicon(ut.TestCase):
def test_returns_null_when_token_not_found(self):
clex = ComplexityLexicon()
clex.add("Uma frase.")
self.assertTrue(clex.complexity("frase"))
self.assertFalse(clex.complexity("cachorro"))
def test_gets_complexity_dict(self):
clex = ComplexityLexicon()
clex.add("Uma frase.")
cdict = clex.complexity_dict()
self.assertIs(type(cdict), dict)
self.assertEqual(cdict["uma"], 1) | [
"chunker_dm.lexicon.Lexicon",
"chunker_dm.lexicon.ComplexityLexicon",
"chunker_dm.lexicon.ProbabilityLexicon"
] | [((192, 201), 'chunker_dm.lexicon.Lexicon', 'Lexicon', ([], {}), '()\n', (199, 201), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((333, 342), 'chunker_dm.lexicon.Lexicon', 'Lexicon', ([], {}), '()\n', (340, 342), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((566, 575), 'chunker_dm.lexicon.Lexicon', 'Lexicon', ([], {}), '()\n', (573, 575), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((749, 758), 'chunker_dm.lexicon.Lexicon', 'Lexicon', ([], {}), '()\n', (756, 758), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((993, 1013), 'chunker_dm.lexicon.ProbabilityLexicon', 'ProbabilityLexicon', ([], {}), '()\n', (1011, 1013), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((1207, 1227), 'chunker_dm.lexicon.ProbabilityLexicon', 'ProbabilityLexicon', ([], {}), '()\n', (1225, 1227), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((1479, 1498), 'chunker_dm.lexicon.ComplexityLexicon', 'ComplexityLexicon', ([], {}), '()\n', (1496, 1498), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n'), ((1674, 1693), 'chunker_dm.lexicon.ComplexityLexicon', 'ComplexityLexicon', ([], {}), '()\n', (1691, 1693), False, 'from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon\n')] |
"""
TODO docstring
"""
import os
from datetime import timedelta, datetime
from typing import Optional
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi import APIRouter, Depends, HTTPException, status
from tinydb import TinyDB, where
from passlib.hash import bcrypt
from jose import JWTError, jwt
from dotenv import load_dotenv
import requests
load_dotenv()
USERS_DB_PATH = "./data/DB/users.json"
USERS_DB = TinyDB(USERS_DB_PATH)
# ! using HS256 the secret key should be
# ! at least 32 chars but the longer the better (base64url-encode)
SECRET_KEY = os.getenv('SECRET_KEY_LOGIN_TOKEN')
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 1440 # 60x24 = 1day ,30
router = APIRouter(prefix="/api/auth", tags=["auth"])
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
TODO function docstring
"""
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("username")
if username is None:
raise credentials_exception
except JWTError as jwt_error:
raise credentials_exception from jwt_error
users = USERS_DB.search(where('username') == username)
if not users:
raise credentials_exception
return users[0]
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
"""
TODO function docstring
"""
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
def validate_human(recaptcha_value: str):
"""
TODO function docstring
"""
secret_recaptcha_token = os.getenv('SECRET_KEY_GOOGLE_RECAPTCHA')
response = requests.post("https://www.google.com/recaptcha/api/" +
f"siteverify?secret={secret_recaptcha_token}&response={recaptcha_value}")
data = response.json()
return data["success"]
@router.post("/login")
async def generate_token(form_data: OAuth2PasswordRequestForm = Depends()):
"""
TODO function docstring
"""
# * validate ReCaptcha
if not validate_human(form_data.scopes[0]):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="BOT",
headers={"WWW-Authenticate": "Bearer"},
)
users_list = USERS_DB.search(where('username') == form_data.username)
if not users_list:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password!",
headers={"WWW-Authenticate": "Bearer"},
)
user_dict = users_list[0] # should be only one user with this username
if not bcrypt.verify(form_data.password, user_dict['hashed_password']):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password!",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"username": user_dict["username"]}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
| [
"tinydb.TinyDB",
"requests.post",
"fastapi.security.OAuth2PasswordBearer",
"os.getenv",
"fastapi.HTTPException",
"datetime.datetime.utcnow",
"jose.jwt.decode",
"passlib.hash.bcrypt.verify",
"dotenv.load_dotenv",
"jose.jwt.encode",
"fastapi.APIRouter",
"datetime.timedelta",
"tinydb.where",
... | [((403, 416), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (414, 416), False, 'from dotenv import load_dotenv\n'), ((471, 492), 'tinydb.TinyDB', 'TinyDB', (['USERS_DB_PATH'], {}), '(USERS_DB_PATH)\n', (477, 492), False, 'from tinydb import TinyDB, where\n'), ((619, 654), 'os.getenv', 'os.getenv', (['"""SECRET_KEY_LOGIN_TOKEN"""'], {}), "('SECRET_KEY_LOGIN_TOKEN')\n", (628, 654), False, 'import os\n'), ((745, 789), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/auth"""', 'tags': "['auth']"}), "(prefix='/api/auth', tags=['auth'])\n", (754, 789), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((809, 857), 'fastapi.security.OAuth2PasswordBearer', 'OAuth2PasswordBearer', ([], {'tokenUrl': '"""/api/auth/login"""'}), "(tokenUrl='/api/auth/login')\n", (829, 857), False, 'from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm\n'), ((901, 923), 'fastapi.Depends', 'Depends', (['oauth2_scheme'], {}), '(oauth2_scheme)\n', (908, 923), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((1002, 1143), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': '"""Could not validate credentials"""', 'headers': "{'WWW-Authenticate': 'Bearer'}"}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail=\n 'Could not validate credentials', headers={'WWW-Authenticate': 'Bearer'})\n", (1015, 1143), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((1959, 2013), 'jose.jwt.encode', 'jwt.encode', (['to_encode', 'SECRET_KEY'], {'algorithm': 'ALGORITHM'}), '(to_encode, SECRET_KEY, algorithm=ALGORITHM)\n', (1969, 2013), False, 'from jose import JWTError, jwt\n'), ((2162, 2202), 'os.getenv', 'os.getenv', (['"""SECRET_KEY_GOOGLE_RECAPTCHA"""'], {}), "('SECRET_KEY_GOOGLE_RECAPTCHA')\n", (2171, 2202), False, 'import os\n'), ((2219, 2352), 'requests.post', 'requests.post', (["('https://www.google.com/recaptcha/api/' +\n 
f'siteverify?secret={secret_recaptcha_token}&response={recaptcha_value}')"], {}), "('https://www.google.com/recaptcha/api/' +\n f'siteverify?secret={secret_recaptcha_token}&response={recaptcha_value}')\n", (2232, 2352), False, 'import requests\n'), ((2505, 2514), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (2512, 2514), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((3515, 3561), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'ACCESS_TOKEN_EXPIRE_MINUTES'}), '(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n', (3524, 3561), False, 'from datetime import timedelta, datetime\n'), ((1203, 1256), 'jose.jwt.decode', 'jwt.decode', (['token', 'SECRET_KEY'], {'algorithms': '[ALGORITHM]'}), '(token, SECRET_KEY, algorithms=[ALGORITHM])\n', (1213, 1256), False, 'from jose import JWTError, jwt\n'), ((2656, 2769), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': '"""BOT"""', 'headers': "{'WWW-Authenticate': 'Bearer'}"}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail='BOT',\n headers={'WWW-Authenticate': 'Bearer'})\n", (2669, 2769), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((2935, 3077), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': '"""Incorrect username or password!"""', 'headers': "{'WWW-Authenticate': 'Bearer'}"}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail=\n 'Incorrect username or password!', headers={'WWW-Authenticate': 'Bearer'})\n", (2948, 3077), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((3216, 3279), 'passlib.hash.bcrypt.verify', 'bcrypt.verify', (['form_data.password', "user_dict['hashed_password']"], {}), "(form_data.password, user_dict['hashed_password'])\n", (3229, 3279), False, 'from passlib.hash import bcrypt\n'), ((3296, 3438), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': 
'"""Incorrect username or password!"""', 'headers': "{'WWW-Authenticate': 'Bearer'}"}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail=\n 'Incorrect username or password!', headers={'WWW-Authenticate': 'Bearer'})\n", (3309, 3438), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((1488, 1505), 'tinydb.where', 'where', (['"""username"""'], {}), "('username')\n", (1493, 1505), False, 'from tinydb import TinyDB, where\n'), ((1796, 1813), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1811, 1813), False, 'from datetime import timedelta, datetime\n'), ((1859, 1876), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1874, 1876), False, 'from datetime import timedelta, datetime\n'), ((1879, 1900), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (1888, 1900), False, 'from datetime import timedelta, datetime\n'), ((2853, 2870), 'tinydb.where', 'where', (['"""username"""'], {}), "('username')\n", (2858, 2870), False, 'from tinydb import TinyDB, where\n')] |
from __future__ import absolute_import, division, print_function
from abc import abstractmethod, abstractproperty
import numpy as np
from keras import __version__ as __keras_version__
from keras import backend as K
from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed
from keras.models import Model
from keras.regularizers import l2
from energyflow.archs.archbase import NNBase, _get_act_layer
from energyflow.utils import iter_or_rep
__all__ = [
    # public API of this module; the helper constructors below are
    # intentionally kept private (commented out)
    # input constructor functions
    #'construct_efn_input', 'construct_pfn_input',
    # weight mask constructor functions
    #'construct_efn_weight_mask', 'construct_pfn_weight_mask',
    # network constructor functions
    #'construct_distributed_dense', 'construct_latent', 'construct_dense',
    # full model classes
    'EFN', 'PFN'
]
###############################################################################
# Keras 2.2.5 fixes bug in 2.2.4 that affects our usage of the Dot layer
###############################################################################
# parse the Keras version string into an int tuple, e.g. '2.2.4' -> (2, 2, 4)
keras_version_tuple = tuple(map(int, __keras_version__.split('.')))
# Keras <= 2.2.4 has a Dot-layer axis bug (see note above), so the summation
# axis passed to Dot must differ for old versions
DOT_AXIS = 0 if keras_version_tuple <= (2, 2, 4) else 1
###############################################################################
# INPUT FUNCTIONS
###############################################################################
def construct_efn_input(input_dim, zs_name=None, phats_name=None):
    """Build the two EFN input tensors.

    Returns a list ``[zs, phats]`` where ``zs`` holds the per-particle
    weights (shape ``(batch, particles)``) and ``phats`` holds the
    per-particle features (shape ``(batch, particles, input_dim)``).
    """
    z_input = Input(batch_shape=(None, None), name=zs_name)
    p_input = Input(batch_shape=(None, None, input_dim), name=phats_name)
    return [z_input, p_input]
def construct_pfn_input(input_dim, name=None):
    """Build the single PFN input tensor of shape (batch, particles, input_dim).

    Returned as a one-element list for consistency with the EFN constructor.
    """
    ps_input = Input(batch_shape=(None, None, input_dim), name=name)
    return [ps_input]
###############################################################################
# WEIGHT MASK FUNCTIONS
###############################################################################
def construct_efn_weight_mask(input_tensor, mask_val=0., name=None):
    """Return a Lambda layer (and its output tensor) that maps entries equal
    to ``mask_val`` to zero while leaving all other entries unchanged."""
    def efn_mask_func(X, mask_val=mask_val):
        # 1 where X differs from mask_val, 0 where it matches
        keep = K.cast(K.not_equal(X, mask_val), K.dtype(X))
        return X * keep
    masker = Lambda(efn_mask_func, name=name)
    masked_tensor = masker(input_tensor)
    # wrapped in lists for consistency with the other constructors
    return [masker], [masked_tensor]
def construct_pfn_weight_mask(input_tensor, mask_val=0., name=None):
    """Return a Lambda layer (and its output tensor) producing a per-particle
    weight of 1 for particles with any feature differing from ``mask_val``
    and 0 for fully-masked particles."""
    def pfn_mask_func(X, mask_val=mask_val):
        # a particle is "real" if any of its features differs from mask_val
        is_real = K.any(K.not_equal(X, mask_val), axis=-1)
        return K.cast(is_real, K.dtype(X))
    masker = Lambda(pfn_mask_func, name=name)
    masked_tensor = masker(input_tensor)
    # wrapped in lists for consistency with the other constructors
    return [masker], [masked_tensor]
###############################################################################
# NETWORK FUNCTIONS
###############################################################################
def construct_distributed_dense(input_tensor, sizes, acts='relu', k_inits='he_uniform',
                                names=None, l2_regs=0.):
    """Stack TimeDistributed(Dense) + activation layers on ``input_tensor``.

    Scalar options are broadcast across all layers via ``iter_or_rep``.
    Returns ``(layers, tensors)`` where ``tensors[0]`` is the input tensor
    and the remaining entries are the outputs of each successive layer.
    """
    # broadcast scalar options across layers
    acts = iter_or_rep(acts)
    k_inits = iter_or_rep(k_inits)
    names = iter_or_rep(names)
    l2_regs = iter_or_rep(l2_regs)
    layers = []
    tensors = [input_tensor]
    for size, act, k_init, name, l2_reg in zip(sizes, acts, k_inits, names, l2_regs):
        # only attach regularizers when a positive strength is requested
        reg_kwargs = {}
        if l2_reg > 0.:
            reg_kwargs['kernel_regularizer'] = l2(l2_reg)
            reg_kwargs['bias_regularizer'] = l2(l2_reg)
        # the same Dense layer is applied independently at each time step
        tdist = TimeDistributed(Dense(size, kernel_initializer=k_init, **reg_kwargs),
                                name=name)
        activation = _get_act_layer(act)
        layers += [tdist, activation]
        tensors.append(tdist(tensors[-1]))
        tensors.append(activation(tensors[-1]))
    return layers, tensors
def construct_latent(input_tensor, weight_tensor, dropout=0., name=None):
    """Form the latent observables as a weighted sum over particles.

    The Dot layer contracts ``weight_tensor`` against ``input_tensor`` along
    DOT_AXIS; an optional Dropout layer is appended when ``dropout > 0``.
    Returns ``(layers, tensors)``.
    """
    summation = Dot(DOT_AXIS, name=name)
    layers = [summation]
    tensors = [summation([weight_tensor, input_tensor])]
    if dropout > 0.:
        dropout_name = None if name is None else '{}_dropout'.format(name)
        drop_layer = Dropout(dropout, name=dropout_name)
        layers.append(drop_layer)
        tensors.append(drop_layer(tensors[-1]))
    return layers, tensors
def construct_dense(input_tensor, sizes,
                    acts='relu', k_inits='he_uniform',
                    dropouts=0., l2_regs=0.,
                    names=None):
    """Stack Dense + activation (+ optional Dropout) layers on ``input_tensor``.

    Scalar options are broadcast across all layers via ``iter_or_rep``.
    Returns ``(layers, tensors)`` where ``tensors[0]`` is the input tensor
    and the remaining entries are the outputs of each successive layer.
    """
    # broadcast scalar options across layers
    acts = iter_or_rep(acts)
    k_inits = iter_or_rep(k_inits)
    names = iter_or_rep(names)
    dropouts = iter_or_rep(dropouts)
    l2_regs = iter_or_rep(l2_regs)
    layers = []
    tensors = [input_tensor]
    for size, act, k_init, rate, l2_reg, name in zip(sizes, acts, k_inits,
                                                     dropouts, l2_regs, names):
        # only attach regularizers when a positive strength is requested
        reg_kwargs = {}
        if l2_reg > 0.:
            reg_kwargs['kernel_regularizer'] = l2(l2_reg)
            reg_kwargs['bias_regularizer'] = l2(l2_reg)
        dense = Dense(size, kernel_initializer=k_init, name=name, **reg_kwargs)
        activation = _get_act_layer(act)
        layers += [dense, activation]
        tensors.append(dense(tensors[-1]))
        tensors.append(activation(tensors[-1]))
        # optional per-layer dropout
        if rate > 0.:
            dropout_name = None if name is None else '{}_dropout'.format(name)
            drop_layer = Dropout(rate, name=dropout_name)
            layers.append(drop_layer)
            tensors.append(drop_layer(tensors[-1]))
    return layers, tensors
###############################################################################
# SymmetricPerParticleNN - Base class for EFN-like models
###############################################################################
class SymmetricPerParticleNN(NNBase):
    """Base class for networks built from a shared per-particle module $\\Phi$,
    a permutation-invariant (weighted-sum) latent layer, and a dense backend
    $F$. Subclasses (EFN, PFN) supply the inputs and the weight tensor."""
    # EFN(*args, **kwargs)
    def _process_hps(self):
        r"""See [`ArchBase`](#archbase) for how to pass in hyperparameters as
        well as defaults common to all EnergyFlow neural network models.
        **Required EFN Hyperparameters**
        - **input_dim** : _int_
        - The number of features for each particle.
        - **Phi_sizes** (formerly `ppm_sizes`) : {_tuple_, _list_} of _int_
        - The sizes of the dense layers in the per-particle frontend
        module $\Phi$. The last element will be the number of latent
        observables that the model defines.
        - **F_sizes** (formerly `dense_sizes`) : {_tuple_, _list_} of _int_
        - The sizes of the dense layers in the backend module $F$.
        **Default EFN Hyperparameters**
        - **Phi_acts**=`'relu'` (formerly `ppm_acts`) : {_tuple_, _list_} of
        _str_ or Keras activation
        - Activation functions(s) for the dense layers in the
        per-particle frontend module $\Phi$. A single string or activation
        layer will apply the same activation to all layers. Keras advanced
        activation layers are also accepted, either as strings (which use
        the default arguments) or as Keras `Layer` instances. If passing a
        single `Layer` instance, be aware that this layer will be used for
        all activations and may introduce weight sharing (such as with
        `PReLU`); it is recommended in this case to pass as many activations
        as there are layers in the model. See the [Keras activations
        docs](https://keras.io/activations/) for more detail.
        - **F_acts**=`'relu'` (formerly `dense_acts`) : {_tuple_, _list_} of
        _str_ or Keras activation
        - Activation functions(s) for the dense layers in the
        backend module $F$. A single string or activation layer will apply
        the same activation to all layers.
        - **Phi_k_inits**=`'he_uniform'` (formerly `ppm_k_inits`) : {_tuple_,
        _list_} of _str_ or Keras initializer
        - Kernel initializers for the dense layers in the per-particle
        frontend module $\Phi$. A single string will apply the same
        initializer to all layers. See the [Keras initializer docs](https:
        //keras.io/initializers/) for more detail.
        - **F_k_inits**=`'he_uniform'` (formerly `dense_k_inits`) : {_tuple_,
        _list_} of _str_ or Keras initializer
        - Kernel initializers for the dense layers in the backend
        module $F$. A single string will apply the same initializer
        to all layers.
        - **latent_dropout**=`0` : _float_
        - Dropout rates for the summation layer that defines the
        value of the latent observables on the inputs. See the [Keras
        Dropout layer](https://keras.io/layers/core/#dropout) for more
        detail.
        - **F_dropouts**=`0` (formerly `dense_dropouts`) : {_tuple_, _list_}
        of _float_
        - Dropout rates for the dense layers in the backend module $F$.
        A single float will apply the same dropout rate to all dense layers.
        - **Phi_l2_regs**=`0` : {_tuple_, _list_} of _float_
        - $L_2$-regulatization strength for both the weights and biases
        of the layers in the $\Phi$ network. A single float will apply the
        same $L_2$-regulatization to all layers.
        - **F_l2_regs**=`0` : {_tuple_, _list_} of _float_
        - $L_2$-regulatization strength for both the weights and biases
        of the layers in the $F$ network. A single float will apply the
        same $L_2$-regulatization to all layers.
        - **mask_val**=`0` : _float_
        - The value for which particles with all features set equal to
        this value will be ignored. The [Keras Masking layer](https://
        keras.io/layers/core/#masking) appears to have issues masking
        the biases of a network, so this has been implemented in a
        custom (and correct) manner since version `0.12.0`.
        """
        # process generic NN hps
        super(SymmetricPerParticleNN, self)._process_hps()
        # required hyperparameters
        self.input_dim = self._proc_arg('input_dim')
        self.Phi_sizes = self._proc_arg('Phi_sizes', old='ppm_sizes')
        self.F_sizes = self._proc_arg('F_sizes', old='dense_sizes')
        # activations (scalars broadcast across all layers)
        self.Phi_acts = iter_or_rep(self._proc_arg('Phi_acts', default='relu',
                                                   old='ppm_acts'))
        self.F_acts = iter_or_rep(self._proc_arg('F_acts', default='relu',
                                                 old='dense_acts'))
        # initializations
        self.Phi_k_inits = iter_or_rep(self._proc_arg('Phi_k_inits', default='he_uniform',
                                                      old='ppm_k_inits'))
        self.F_k_inits = iter_or_rep(self._proc_arg('F_k_inits', default='he_uniform',
                                                    old='dense_k_inits'))
        # regularizations
        self.latent_dropout = self._proc_arg('latent_dropout', default=0.)
        self.F_dropouts = iter_or_rep(self._proc_arg('F_dropouts', default=0.,
                                                     old='dense_dropouts'))
        self.Phi_l2_regs = iter_or_rep(self._proc_arg('Phi_l2_regs', default=0.))
        self.F_l2_regs = iter_or_rep(self._proc_arg('F_l2_regs', default=0.))
        # masking
        self.mask_val = self._proc_arg('mask_val', default=0.)
        self._verify_empty_hps()
    def _construct_model(self):
        """Assemble the full Keras model: inputs -> Phi -> latent -> F -> output."""
        # initialize dictionaries for holding indices of subnetworks
        self._layer_inds, self._tensor_inds = {}, {}
        # construct earlier parts of the model
        self._construct_inputs()
        self._construct_Phi()
        self._construct_latent()
        self._construct_F()
        # get output layers
        d_layer = Dense(self.output_dim, name=self._proc_name('output'))
        act_layer = _get_act_layer(self.output_act)
        # append output tensors
        self._tensors.append(d_layer(self.tensors[-1]))
        self._tensors.append(act_layer(self.tensors[-1]))
        # construct a new model
        self._model = Model(inputs=self.inputs, outputs=self.output)
        # compile model
        self._compile_model()
    @abstractmethod
    def _construct_inputs(self):
        """Subclasses create self._inputs, self._weights, and seed the
        self._tensors / self._layers lists."""
        pass
    def _construct_Phi(self):
        """Build the per-particle frontend network Phi as time-distributed
        dense layers, recording its layer/tensor index ranges."""
        # get names
        names = [self._proc_name('tdist_{}'.format(i)) for i in range(len(self.Phi_sizes))]
        # determine begin inds
        layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
        # construct Phi
        Phi_layers, Phi_tensors = construct_distributed_dense(self.inputs[-1], self.Phi_sizes,
                                                             acts=self.Phi_acts,
                                                             k_inits=self.Phi_k_inits,
                                                             names=names,
                                                             l2_regs=self.Phi_l2_regs)
        # add layers and tensors to internal lists
        self._layers.extend(Phi_layers)
        self._tensors.extend(Phi_tensors)
        # determine end inds
        layer_inds.append(len(self.layers))
        tensor_inds.append(len(self.tensors))
        # store inds
        self._layer_inds['Phi'] = layer_inds
        self._tensor_inds['Phi'] = tensor_inds
    def _construct_latent(self):
        """Build the weighted-summation latent layer, recording its
        layer/tensor index ranges."""
        # determine begin inds
        layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
        # construct latent tensors
        latent_layers, latent_tensors = construct_latent(self._tensors[-1], self.weights,
                                                         dropout=self.latent_dropout,
                                                         name=self._proc_name('sum'))
        # add layers and tensors to internal lists
        self._layers.extend(latent_layers)
        self._tensors.extend(latent_tensors)
        # determine end inds
        layer_inds.append(len(self.layers))
        tensor_inds.append(len(self.tensors))
        # store inds
        self._layer_inds['latent'] = layer_inds
        self._tensor_inds['latent'] = tensor_inds
    def _construct_F(self):
        """Build the dense backend network F, recording its layer/tensor
        index ranges."""
        # get names
        names = [self._proc_name('dense_{}'.format(i)) for i in range(len(self.F_sizes))]
        # determine begin inds
        layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
        # construct F
        F_layers, F_tensors = construct_dense(self.latent[-1], self.F_sizes,
                                               acts=self.F_acts, k_inits=self.F_k_inits,
                                               dropouts=self.F_dropouts, names=names,
                                               l2_regs=self.F_l2_regs)
        # add layers and tensors to internal lists
        self._layers.extend(F_layers)
        self._tensors.extend(F_tensors)
        # determine end inds
        layer_inds.append(len(self.layers))
        tensor_inds.append(len(self.tensors))
        # store inds
        self._layer_inds['F'] = layer_inds
        self._tensor_inds['F'] = tensor_inds
    @abstractproperty
    def inputs(self):
        """List of input tensors; defined by the subclass."""
        pass
    @abstractproperty
    def weights(self):
        """Per-particle weight tensor; defined by the subclass."""
        pass
    @property
    def Phi(self):
        r"""List of tensors corresponding to the layers in the $\Phi$ network."""
        begin, end = self._tensor_inds['Phi']
        return self._tensors[begin:end]
    @property
    def latent(self):
        """List of tensors corresponding to the summation layer in the
        network, including any dropout layer if present.
        """
        begin, end = self._tensor_inds['latent']
        return self._tensors[begin:end]
    @property
    def F(self):
        """List of tensors corresponding to the layers in the $F$ network."""
        begin, end = self._tensor_inds['F']
        return self._tensors[begin:end]
    @property
    def output(self):
        """Output tensor for the model."""
        return self._tensors[-1]
    @property
    def layers(self):
        """List of all layers in the model."""
        return self._layers
    @property
    def tensors(self):
        """List of all tensors in the model."""
        return self._tensors
###############################################################################
# EFN - Energy flow network class
###############################################################################
class EFN(SymmetricPerParticleNN):
    """Energy Flow Network (EFN) architecture."""
    def _construct_inputs(self):
        """Create the (zs, phats) inputs and the weight-mask tensor."""
        # construct input tensors
        self._inputs = construct_efn_input(self.input_dim,
                                            zs_name=self._proc_name('zs_input'),
                                            phats_name=self._proc_name('phats_input'))
        # construct weight tensor by masking the zs input
        mask_layers, mask_tensors = construct_efn_weight_mask(self.inputs[0],
                                                              mask_val=self.mask_val,
                                                              name=self._proc_name('mask'))
        self._weights = mask_tensors[0]
        # begin list of tensors with the inputs
        self._tensors = [self.inputs, self.weights]
        # begin list of layers with the mask layer
        self._layers = [mask_layers[0]]
    @property
    def inputs(self):
        """List of input tensors to the model. EFNs have two input tensors:
        `inputs[0]` corresponds to the `zs` input and `inputs[1]` corresponds
        to the `phats` input.
        """
        return self._inputs
    @property
    def weights(self):
        """Weight tensor for the model. This is the `zs` input where entries
        equal to `mask_val` have been set to zero.
        """
        return self._weights
    # eval_filters(patch, n=100, prune=True)
    def eval_filters(self, patch, n=100, prune=True):
        """Evaluates the latent space filters of this model on a patch of the
        two-dimensional geometric input space.
        **Arguments**
        - **patch** : {_tuple_, _list_} of _float_
        - Specifies the patch of the geometric input space to be evaluated.
        A list of length 4 is interpretted as `[xmin, ymin, xmax, ymax]`.
        Passing a single float `R` is equivalent to `[-R,-R,R,R]`.
        - **n** : {_tuple_, _list_} of _int_
        - The number of grid points on which to evaluate the filters. A list
        of length 2 is interpretted as `[nx, ny]` where `nx` is the number of
        points along the x (or first) dimension and `ny` is the number of points
        along the y (or second) dimension.
        - **prune** : _bool_
        - Whether to remove filters that are all zero (which happens sometimes
        due to dying ReLUs).
        **Returns**
        - (_numpy.ndarray_, _numpy.ndarray_, _numpy.ndarray_)
        - Returns three arrays, `(X, Y, Z)`, where `X` and `Y` have shape `(nx, ny)`
        and are arrays of the values of the geometric inputs in the specified patch.
        `Z` has shape `(num_filters, nx, ny)` and is the value of the different
        filters at each point.
        """
        # determine patch of xy space to evaluate filters on
        if isinstance(patch, (float, int)):
            if patch > 0:
                xmin, ymin, xmax, ymax = -patch, -patch, patch, patch
            else:
                # bug fix: the ValueError was previously constructed but never
                # raised, leaving the bounds unbound and causing a confusing
                # UnboundLocalError further down
                raise ValueError('patch must be positive when passing as a single number.')
        else:
            xmin, ymin, xmax, ymax = patch
        # determine number of pixels in each dimension
        if isinstance(n, int):
            nx = ny = n
        else:
            nx, ny = n
        # construct grid of inputs
        xs, ys = np.linspace(xmin, xmax, nx), np.linspace(ymin, ymax, ny)
        X, Y = np.meshgrid(xs, ys, indexing='ij')
        XY = np.asarray([X, Y]).reshape((1, 2, nx*ny)).transpose((0, 2, 1))
        # handle weirdness of Keras/tensorflow (old versions want lists)
        old_keras = (keras_version_tuple <= (2, 2, 5))
        s = self.Phi_sizes[-1] if len(self.Phi_sizes) else self.input_dim
        in_t, out_t = self.inputs[1], self._tensors[self._tensor_inds['latent'][0] - 1]
        # construct function mapping the phats input to the last Phi tensor
        kf = K.function([in_t] if old_keras else in_t, [out_t] if old_keras else out_t)
        # evaluate function on the grid
        Z = kf([XY] if old_keras else XY)[0].reshape(nx, ny, s).transpose((2, 0, 1))
        # prune filters that are off
        if prune:
            return X, Y, Z[[not (z == 0).all() for z in Z]]
        return X, Y, Z
###############################################################################
# PFN - Particle flow network class
###############################################################################
class PFN(SymmetricPerParticleNN):
    """Particle Flow Network (PFN) architecture. Accepts the same
    hyperparameters as the [`EFN`](#EFN)."""
    # PFN(*args, **kwargs)
    def _construct_inputs(self):
        """""" # need this for autogen docs
        # construct input tensor
        self._inputs = construct_pfn_input(self.input_dim, name=self._proc_name('input'))
        # construct weight tensor (1 per real particle, 0 per masked particle)
        mask_layers, mask_tensors = construct_pfn_weight_mask(self.inputs[0],
                                                              mask_val=self.mask_val,
                                                              name=self._proc_name('mask'))
        self._weights = mask_tensors[0]
        # begin list of tensors with the inputs
        self._tensors = [self.inputs, self.weights]
        # begin list of layers with the mask layer
        self._layers = [mask_layers[0]]
    @property
    def inputs(self):
        """List of input tensors to the model. PFNs have one input tensor
        corresponding to the `ps` input.
        """
        return self._inputs
    @property
    def weights(self):
        """Weight tensor for the model. A weight of `0` is assigned to any
        particle which has all features equal to `mask_val`, and `1` is
        assigned otherwise.
        """
        return self._weights
| [
"keras.backend.dtype",
"energyflow.archs.archbase._get_act_layer",
"keras.backend.function",
"keras.layers.Lambda",
"keras.__version__.split",
"keras.layers.TimeDistributed",
"keras.layers.Dot",
"keras.backend.not_equal",
"numpy.asarray",
"keras.layers.Input",
"numpy.linspace",
"energyflow.uti... | [((1477, 1522), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None)', 'name': 'zs_name'}), '(batch_shape=(None, None), name=zs_name)\n', (1482, 1522), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((1541, 1600), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, input_dim)', 'name': 'phats_name'}), '(batch_shape=(None, None, input_dim), name=phats_name)\n', (1546, 1600), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((2311, 2343), 'keras.layers.Lambda', 'Lambda', (['efn_mask_func'], {'name': 'name'}), '(efn_mask_func, name=name)\n', (2317, 2343), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((2773, 2805), 'keras.layers.Lambda', 'Lambda', (['pfn_mask_func'], {'name': 'name'}), '(pfn_mask_func, name=name)\n', (2779, 2805), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3408, 3428), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['l2_regs'], {}), '(l2_regs)\n', (3419, 3428), False, 'from energyflow.utils import iter_or_rep\n'), ((1097, 1125), 'keras.__version__.split', '__keras_version__.split', (['"""."""'], {}), "('.')\n", (1120, 1125), True, 'from keras import __version__ as __keras_version__\n'), ((1727, 1780), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, input_dim)', 'name': 'name'}), '(batch_shape=(None, None, input_dim), name=name)\n', (1732, 1780), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3334, 3351), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['acts'], {}), '(acts)\n', (3345, 3351), False, 'from energyflow.utils import iter_or_rep\n'), ((3353, 3373), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['k_inits'], {}), '(k_inits)\n', (3364, 3373), False, 'from energyflow.utils import iter_or_rep\n'), ((3375, 3393), 'energyflow.utils.iter_or_rep', 'iter_or_rep', 
(['names'], {}), '(names)\n', (3386, 3393), False, 'from energyflow.utils import iter_or_rep\n'), ((3860, 3905), 'keras.layers.Dense', 'Dense', (['s'], {'kernel_initializer': 'k_init'}), '(s, kernel_initializer=k_init, **kwargs)\n', (3865, 3905), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3974, 4009), 'keras.layers.TimeDistributed', 'TimeDistributed', (['d_layer'], {'name': 'name'}), '(d_layer, name=name)\n', (3989, 4009), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((4030, 4049), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['act'], {}), '(act)\n', (4044, 4049), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((4404, 4428), 'keras.layers.Dot', 'Dot', (['DOT_AXIS'], {'name': 'name'}), '(DOT_AXIS, name=name)\n', (4407, 4428), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((4999, 5016), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['acts'], {}), '(acts)\n', (5010, 5016), False, 'from energyflow.utils import iter_or_rep\n'), ((5018, 5038), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['k_inits'], {}), '(k_inits)\n', (5029, 5038), False, 'from energyflow.utils import iter_or_rep\n'), ((5040, 5058), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['names'], {}), '(names)\n', (5051, 5058), False, 'from energyflow.utils import iter_or_rep\n'), ((5083, 5104), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['dropouts'], {}), '(dropouts)\n', (5094, 5104), False, 'from energyflow.utils import iter_or_rep\n'), ((5106, 5126), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['l2_regs'], {}), '(l2_regs)\n', (5117, 5126), False, 'from energyflow.utils import iter_or_rep\n'), ((5551, 5607), 'keras.layers.Dense', 'Dense', (['s'], {'kernel_initializer': 'k_init', 'name': 'name'}), '(s, kernel_initializer=k_init, name=name, **kwargs)\n', (5556, 5607), False, 'from keras.layers import Dense, Dot, 
Dropout, Input, Lambda, TimeDistributed\n'), ((5628, 5647), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['act'], {}), '(act)\n', (5642, 5647), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((12561, 12592), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['self.output_act'], {}), '(self.output_act)\n', (12575, 12592), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((12795, 12841), 'keras.models.Model', 'Model', ([], {'inputs': 'self.inputs', 'outputs': 'self.output'}), '(inputs=self.inputs, outputs=self.output)\n', (12800, 12841), False, 'from keras.models import Model\n'), ((20572, 20606), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {'indexing': '"""ij"""'}), "(xs, ys, indexing='ij')\n", (20583, 20606), True, 'import numpy as np\n'), ((20991, 21065), 'keras.backend.function', 'K.function', (['([in_t] if old_keras else in_t)', '([out_t] if old_keras else out_t)'], {}), '([in_t] if old_keras else in_t, [out_t] if old_keras else out_t)\n', (21001, 21065), True, 'from keras import backend as K\n'), ((2743, 2753), 'keras.backend.dtype', 'K.dtype', (['X'], {}), '(X)\n', (2750, 2753), True, 'from keras import backend as K\n'), ((4635, 4665), 'keras.layers.Dropout', 'Dropout', (['dropout'], {'name': 'dr_name'}), '(dropout, name=dr_name)\n', (4642, 4665), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((20500, 20527), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (20511, 20527), True, 'import numpy as np\n'), ((20529, 20556), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ny'], {}), '(ymin, ymax, ny)\n', (20540, 20556), True, 'import numpy as np\n'), ((2255, 2279), 'keras.backend.not_equal', 'K.not_equal', (['X', 'mask_val'], {}), '(X, mask_val)\n', (2266, 2279), True, 'from keras import backend as K\n'), ((2281, 2291), 'keras.backend.dtype', 'K.dtype', (['X'], {}), '(X)\n', (2288, 2291), True, 
'from keras import backend as K\n'), ((2707, 2731), 'keras.backend.not_equal', 'K.not_equal', (['X', 'mask_val'], {}), '(X, mask_val)\n', (2718, 2731), True, 'from keras import backend as K\n'), ((5442, 5452), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (5444, 5452), False, 'from keras.regularizers import l2\n'), ((5474, 5484), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (5476, 5484), False, 'from keras.regularizers import l2\n'), ((6002, 6032), 'keras.layers.Dropout', 'Dropout', (['dropout'], {'name': 'dr_name'}), '(dropout, name=dr_name)\n', (6009, 6032), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3797, 3807), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (3799, 3807), False, 'from keras.regularizers import l2\n'), ((3829, 3839), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (3831, 3839), False, 'from keras.regularizers import l2\n'), ((20620, 20638), 'numpy.asarray', 'np.asarray', (['[X, Y]'], {}), '([X, Y])\n', (20630, 20638), True, 'import numpy as np\n')] |
from setuptools import find_packages, setup
# package metadata for the house-prices Kaggle project
setup(
    name='src',
    packages=find_packages(),  # auto-discover all packages under the project root
    version='0.1.0',
    description='repo for https://www.kaggle.com/c/house-prices-advanced-regression-techniques',
    author='<NAME>',
    license='MIT',
)
| [
"setuptools.find_packages"
] | [((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')] |
import itertools
def find_nb(m):
    """Return the integer n such that 1**3 + 2**3 + ... + n**3 == m.

    Returns -1 if no such n exists (i.e. m is not a sum of consecutive
    cubes starting at 1). Fixes the original's shadowing of the builtin
    ``sum`` with a local name.
    """
    total = 0
    for n in itertools.count(1):
        total += n ** 3
        if total == m:
            return n
        if total > m:
            # overshot m without hitting it exactly
            return -1
"itertools.count"
] | [((59, 77), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (74, 77), False, 'import itertools\n')] |
# Generated by Django 2.2.4 on 2019-12-03 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Orders Scholar query results by award_amount and makes the
    # award_amount column a non-null integer defaulting to 0.
    dependencies = [
        ('scholar', '0003_auto_20191203_0243'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='scholar',
            options={'ordering': ['award_amount']},
        ),
        migrations.AlterField(
            model_name='scholar',
            name='award_amount',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.IntegerField"
] | [((235, 324), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""scholar"""', 'options': "{'ordering': ['award_amount']}"}), "(name='scholar', options={'ordering': [\n 'award_amount']})\n", (263, 324), False, 'from django.db import migrations, models\n'), ((472, 502), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (491, 502), False, 'from django.db import migrations, models\n')] |
# coding: utf-8
from os import path
from . import directives
from . import __version__
def setup(app):
    """Sphinx extension entry point: register the HTML theme and the
    package's custom directives, then report extension metadata."""
    theme_path = path.abspath(path.dirname(__file__))
    app.add_html_theme("sphinx_scality", theme_path)
    directives.setup(app)
    metadata = {
        "version": __version__.VERSION,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
| [
"os.path.dirname"
] | [((161, 183), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'from os import path\n')] |
import tensorflow as tf
import numpy as np
from blackbox_mpc.optimizers.optimizer_base import OptimizerBase
class PSOOptimizer(OptimizerBase):
    def __init__(self, env_action_space, env_observation_space,
                 planning_horizon=50, max_iterations=5, population_size=500,
                 num_agents=5, c1=tf.constant(0.3, dtype=tf.float32),
                 c2=tf.constant(0.5, dtype=tf.float32), w=tf.constant(0.2, dtype=tf.float32),
                 initial_velocity_fraction=tf.constant(0.01, dtype=tf.float32)):
        """
        This class defines the particle swarm optimizer.
        (https://www.cs.tufts.edu/comp/150GA/homeworks/hw3/_reading6%201995%20particle%20swarming.pdf)

        Parameters
        ---------
        env_action_space: gym.ActionSpace
            Defines the action space of the gym environment.
        env_observation_space: gym.ObservationSpace
            Defines the observation space of the gym environment.
        planning_horizon: Int
            Defines the planning horizon for the optimizer (how many steps to lookahead and optimize for).
        max_iterations: tf.int32
            Defines the maximimum iterations for the CMAES optimizer to refine its guess for the optimal solution.
        population_size: tf.int32
            Defines the population size of the particles evaluated at each iteration.
        num_agents: tf.int32
            Defines the number of runner running in parallel
        c1: tf.float32
            Defines the fraction of the local best known position direction.
        c2: tf.float32
            Defines the fraction of the global best known position direction.
        w: tf.float32
            Defines the fraction of the current velocity to use.
        initial_velocity_fraction: tf.float32
            Defines the initial velocity fraction out of the action space.
        """
        # NOTE(review): the tf.constant default arguments are evaluated once at
        # import time; acceptable here since they are immutable constants
        super(PSOOptimizer, self).__init__(name=None,
                                           planning_horizon=planning_horizon,
                                           max_iterations=max_iterations,
                                           num_agents=num_agents,
                                           env_action_space=env_action_space,
                                           env_observation_space=
                                           env_observation_space)
        # each candidate solution is an action sequence per agent:
        # [num_agents, planning_horizon, action_dim]
        self._solution_dim = [self._num_agents, tf.constant(self._planning_horizon, dtype=tf.int32), self._dim_U]
        self._solution_size = tf.reduce_prod(self._solution_dim)
        self._population_size = population_size
        # per-particle state buffers (positions, velocities, personal bests)
        self._particle_positions = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
        self._particle_velocities = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
        self._particle_best_known_position = tf.Variable(tf.zeros([self._population_size, *self._solution_dim],
                                                                 dtype=tf.float32))
        self._particle_best_known_reward = tf.Variable(tf.zeros([self._population_size, self._num_agents],
                                                               dtype=tf.float32))
        # global best position/reward across the whole swarm (per agent)
        self._global_best_known_position = tf.Variable(tf.zeros([*self._solution_dim], dtype=tf.float32))
        self._global_best_known_reward = tf.Variable(tf.zeros([self._num_agents], dtype=tf.float32))
        # variance sized so the action range spans ~4 standard deviations
        solution_variance_values = np.tile(np.square(self._action_lower_bound - self._action_upper_bound) / 16,
                                            [self._planning_horizon * self._num_agents, 1])
        solution_variance_values = solution_variance_values.reshape([self._num_agents, self._planning_horizon, -1])
        self._solution_variance = tf.constant(solution_variance_values, dtype=tf.float32)
        # PSO coefficients: cognitive (c1), social (c2), inertia (w)
        self._c1 = c1
        self._c2 = c2
        self._w = w
        self._initial_velocity_fraction = initial_velocity_fraction
        # first action of the best known plan, one row per agent
        self._solution = tf.Variable(tf.zeros([self._num_agents, self._dim_U], dtype=tf.float32))
@tf.function
def _optimize(self, current_state, time_step):
    """Run one particle-swarm-optimization pass and return the first action.

    Args:
        current_state: environment state handed to the trajectory evaluator.
        time_step: current time step handed to the trajectory evaluator.
    Returns:
        The variable holding the first-timestep actions of the best plan found.
    """
    def continue_condition(t, position):
        # run a fixed number of PSO iterations
        result = tf.less(t, self._max_iterations)
        return result
    def iterate(t, position):
        # Clip every particle into the feasible action box ...
        feasible_particle_positions = tf.clip_by_value(self._particle_positions, self._action_lower_bound_horizon,
                                                       self._action_upper_bound_horizon)
        # ... and penalize the reward by the squared L2 violation per (particle, agent)
        penalty = tf.norm(tf.reshape(self._particle_positions - feasible_particle_positions, [self._population_size, self._num_agents, -1]),
                          axis=2) ** 2
        self._particle_positions.assign(feasible_particle_positions)
        rewards = self._trajectory_evaluator(current_state, self._particle_positions, time_step) - penalty
        # Update each particle's best known position/reward where it improved.
        condition = tf.less(self._particle_best_known_reward, rewards)
        new_particle_best_known_position = tf.where(tf.expand_dims(tf.expand_dims(condition, -1), -1), self._particle_positions,
                                                    self._particle_best_known_position)
        self._particle_best_known_position.assign(new_particle_best_known_position)
        new_particle_best_known_reward = tf.where(condition, rewards,
                                                  self._particle_best_known_reward)
        self._particle_best_known_reward.assign(new_particle_best_known_reward)
        # Global best: argmax over the particle axis, taken independently per agent
        # (rewards are shaped [population, agents]).
        global_best_known_position_index = tf.math.argmax(self._particle_best_known_reward)
        samples = tf.transpose(self._particle_best_known_position, [1, 0, 2, 3])
        # flatten (agent, particle) so one gather picks each agent's best plan
        global_best_known_position_index = tf.cast(global_best_known_position_index, dtype=tf.int32) + tf.range(0, samples.shape[0], dtype=tf.int32) * samples.shape[1]
        samples = tf.reshape(samples, [-1, *samples.shape[2:]])
        self._global_best_known_position.assign(tf.gather(samples, global_best_known_position_index))
        samples = tf.reshape(self._particle_best_known_reward, [-1])
        self._global_best_known_reward.assign(tf.gather(samples, global_best_known_position_index))
        # Standard PSO velocity update: inertia + cognitive + social terms.
        # Each term is scaled by a single scalar normal sample shared by all particles.
        adapted_particle_velocities = (self._particle_velocities * self._w) + \
            (self._particle_best_known_position - self._particle_positions) * self._c1 * tf.random.normal(shape=[], dtype=tf.float32) + \
            (self._global_best_known_position - self._particle_positions) * self._c2 * tf.random.normal(shape=[], dtype=tf.float32)
        self._particle_velocities.assign(adapted_particle_velocities)
        self._particle_positions.assign(self._particle_positions + self._particle_velocities)
        return t + tf.constant(1, dtype=tf.int32), self._global_best_known_position
    _ = tf.while_loop(cond=continue_condition, body=iterate,
                      loop_vars=[tf.constant(0, dtype=tf.int32), self._global_best_known_position])
    # The action actually executed is the first timestep of the best plan.
    self._solution.assign(self._global_best_known_position[:, 0, :])
    # Re-seed the swarm for the next call around the (time-shifted) best plan.
    # Variance is capped so samples stay mostly inside the action bounds.
    lower_bound_dist = self._global_best_known_position - self._action_lower_bound_horizon
    upper_bound_dist = self._action_upper_bound_horizon - self._global_best_known_position
    constrained_variance = tf.minimum(tf.minimum(tf.square(lower_bound_dist / tf.constant(2, dtype=tf.float32)),
                                          tf.square(upper_bound_dist / tf.constant(2, dtype=tf.float32))),
                               self._solution_variance)
    # mean = best plan shifted one step left with the last step duplicated (warm start)
    samples_positions = tf.random.truncated_normal([self._population_size,
                                                   *self._solution_dim],
                                                  tf.concat([self._global_best_known_position[:, 1:],
                                                             tf.expand_dims(self._global_best_known_position[:, -1],
                                                                            1)], 1),
                                                  tf.sqrt(constrained_variance),
                                                  dtype=tf.float32)
    action_space = self._action_upper_bound_horizon - self._action_lower_bound_horizon
    initial_velocity = self._initial_velocity_fraction * action_space
    samples_velocities = tf.random.uniform([self._population_size, *self._solution_dim], -initial_velocity,
                                          initial_velocity, dtype=tf.float32)
    self._particle_positions.assign(samples_positions)
    self._particle_velocities.assign(samples_velocities)
    self._particle_best_known_position.assign(samples_positions)
    # forget the old rewards so the next pass re-evaluates everything
    self._particle_best_known_reward.assign(tf.fill([self._population_size, self._num_agents],
                                                    tf.constant(-np.inf, dtype=tf.float32)))
    self._global_best_known_reward.assign(tf.fill([self._num_agents],
                                                  tf.constant(-np.inf, dtype=tf.float32)))
    resulting_action = self._solution
    return resulting_action
def reset(self):
    """Re-initialize the swarm to a uniform random state at the start of an episode."""
    swarm_shape = [self._population_size, *self._solution_dim]
    # fresh particle positions, uniform over the feasible action box
    fresh_positions = tf.random.uniform(swarm_shape, self._action_lower_bound_horizon,
                                      self._action_upper_bound_horizon, dtype=tf.float32)
    # initial velocities are a configurable fraction of the action range
    velocity_bound = self._initial_velocity_fraction * (
        self._action_upper_bound_horizon - self._action_lower_bound_horizon)
    fresh_velocities = tf.random.uniform(swarm_shape, -velocity_bound,
                                      velocity_bound, dtype=tf.float32)
    self._particle_positions.assign(fresh_positions)
    self._particle_velocities.assign(fresh_velocities)
    self._particle_best_known_position.assign(fresh_positions)
    # erase all remembered rewards so every particle gets re-evaluated
    neg_inf = tf.constant(-np.inf, dtype=tf.float32)
    self._particle_best_known_reward.assign(
        tf.fill([self._population_size, self._num_agents], neg_inf))
    self._global_best_known_reward.assign(tf.fill([self._num_agents], neg_inf))
    return
| [
"tensorflow.random.uniform",
"tensorflow.math.argmax",
"tensorflow.random.normal",
"tensorflow.reduce_prod",
"tensorflow.transpose",
"numpy.square",
"tensorflow.range",
"tensorflow.where",
"tensorflow.sqrt",
"tensorflow.constant",
"tensorflow.clip_by_value",
"tensorflow.gather",
"tensorflow.... | [((320, 354), 'tensorflow.constant', 'tf.constant', (['(0.3)'], {'dtype': 'tf.float32'}), '(0.3, dtype=tf.float32)\n', (331, 354), True, 'import tensorflow as tf\n'), ((376, 410), 'tensorflow.constant', 'tf.constant', (['(0.5)'], {'dtype': 'tf.float32'}), '(0.5, dtype=tf.float32)\n', (387, 410), True, 'import tensorflow as tf\n'), ((414, 448), 'tensorflow.constant', 'tf.constant', (['(0.2)'], {'dtype': 'tf.float32'}), '(0.2, dtype=tf.float32)\n', (425, 448), True, 'import tensorflow as tf\n'), ((493, 528), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {'dtype': 'tf.float32'}), '(0.01, dtype=tf.float32)\n', (504, 528), True, 'import tensorflow as tf\n'), ((2504, 2538), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['self._solution_dim'], {}), '(self._solution_dim)\n', (2518, 2538), True, 'import tensorflow as tf\n'), ((3794, 3849), 'tensorflow.constant', 'tf.constant', (['solution_variance_values'], {'dtype': 'tf.float32'}), '(solution_variance_values, dtype=tf.float32)\n', (3805, 3849), True, 'import tensorflow as tf\n'), ((8720, 8843), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', '(-initial_velocity)', 'initial_velocity'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], -\n initial_velocity, initial_velocity, dtype=tf.float32)\n', (8737, 8843), True, 'import tensorflow as tf\n'), ((9726, 9885), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', 'self._action_lower_bound_horizon', 'self._action_upper_bound_horizon'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], self.\n _action_lower_bound_horizon, self._action_upper_bound_horizon, dtype=tf\n .float32)\n', (9743, 9885), True, 'import tensorflow as tf\n'), ((10116, 10239), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', '(-initial_velocity)', 'initial_velocity'], {'dtype': 'tf.float32'}), 
'([self._population_size, *self._solution_dim], -\n initial_velocity, initial_velocity, dtype=tf.float32)\n', (10133, 10239), True, 'import tensorflow as tf\n'), ((2408, 2459), 'tensorflow.constant', 'tf.constant', (['self._planning_horizon'], {'dtype': 'tf.int32'}), '(self._planning_horizon, dtype=tf.int32)\n', (2419, 2459), True, 'import tensorflow as tf\n'), ((2634, 2706), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2642, 2706), True, 'import tensorflow as tf\n'), ((2756, 2828), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2764, 2828), True, 'import tensorflow as tf\n'), ((2887, 2959), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2895, 2959), True, 'import tensorflow as tf\n'), ((3082, 3151), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, self._num_agents]'], {'dtype': 'tf.float32'}), '([self._population_size, self._num_agents], dtype=tf.float32)\n', (3090, 3151), True, 'import tensorflow as tf\n'), ((3289, 3338), 'tensorflow.zeros', 'tf.zeros', (['[*self._solution_dim]'], {'dtype': 'tf.float32'}), '([*self._solution_dim], dtype=tf.float32)\n', (3297, 3338), True, 'import tensorflow as tf\n'), ((3393, 3439), 'tensorflow.zeros', 'tf.zeros', (['[self._num_agents]'], {'dtype': 'tf.float32'}), '([self._num_agents], dtype=tf.float32)\n', (3401, 3439), True, 'import tensorflow as tf\n'), ((4019, 4078), 'tensorflow.zeros', 'tf.zeros', (['[self._num_agents, self._dim_U]'], {'dtype': 'tf.float32'}), '([self._num_agents, self._dim_U], dtype=tf.float32)\n', (4027, 4078), True, 'import tensorflow as tf\n'), ((4215, 4247), 'tensorflow.less', 'tf.less', (['t', 
'self._max_iterations'], {}), '(t, self._max_iterations)\n', (4222, 4247), True, 'import tensorflow as tf\n'), ((4437, 4551), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self._particle_positions', 'self._action_lower_bound_horizon', 'self._action_upper_bound_horizon'], {}), '(self._particle_positions, self._action_lower_bound_horizon,\n self._action_upper_bound_horizon)\n', (4453, 4551), True, 'import tensorflow as tf\n'), ((5051, 5101), 'tensorflow.less', 'tf.less', (['self._particle_best_known_reward', 'rewards'], {}), '(self._particle_best_known_reward, rewards)\n', (5058, 5101), True, 'import tensorflow as tf\n'), ((5461, 5523), 'tensorflow.where', 'tf.where', (['condition', 'rewards', 'self._particle_best_known_reward'], {}), '(condition, rewards, self._particle_best_known_reward)\n', (5469, 5523), True, 'import tensorflow as tf\n'), ((5747, 5795), 'tensorflow.math.argmax', 'tf.math.argmax', (['self._particle_best_known_reward'], {}), '(self._particle_best_known_reward)\n', (5761, 5795), True, 'import tensorflow as tf\n'), ((5818, 5880), 'tensorflow.transpose', 'tf.transpose', (['self._particle_best_known_position', '[1, 0, 2, 3]'], {}), '(self._particle_best_known_position, [1, 0, 2, 3])\n', (5830, 5880), True, 'import tensorflow as tf\n'), ((6075, 6120), 'tensorflow.reshape', 'tf.reshape', (['samples', '[-1, *samples.shape[2:]]'], {}), '(samples, [-1, *samples.shape[2:]])\n', (6085, 6120), True, 'import tensorflow as tf\n'), ((6249, 6299), 'tensorflow.reshape', 'tf.reshape', (['self._particle_best_known_reward', '[-1]'], {}), '(self._particle_best_known_reward, [-1])\n', (6259, 6299), True, 'import tensorflow as tf\n'), ((8422, 8451), 'tensorflow.sqrt', 'tf.sqrt', (['constrained_variance'], {}), '(constrained_variance)\n', (8429, 8451), True, 'import tensorflow as tf\n'), ((3484, 3546), 'numpy.square', 'np.square', (['(self._action_lower_bound - self._action_upper_bound)'], {}), '(self._action_lower_bound - self._action_upper_bound)\n', (3493, 3546), 
True, 'import numpy as np\n'), ((5928, 5985), 'tensorflow.cast', 'tf.cast', (['global_best_known_position_index'], {'dtype': 'tf.int32'}), '(global_best_known_position_index, dtype=tf.int32)\n', (5935, 5985), True, 'import tensorflow as tf\n'), ((6173, 6225), 'tensorflow.gather', 'tf.gather', (['samples', 'global_best_known_position_index'], {}), '(samples, global_best_known_position_index)\n', (6182, 6225), True, 'import tensorflow as tf\n'), ((6350, 6402), 'tensorflow.gather', 'tf.gather', (['samples', 'global_best_known_position_index'], {}), '(samples, global_best_known_position_index)\n', (6359, 6402), True, 'import tensorflow as tf\n'), ((9230, 9268), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (9241, 9268), True, 'import tensorflow as tf\n'), ((9399, 9437), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (9410, 9437), True, 'import tensorflow as tf\n'), ((10626, 10664), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (10637, 10664), True, 'import tensorflow as tf\n'), ((10795, 10833), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (10806, 10833), True, 'import tensorflow as tf\n'), ((4637, 4755), 'tensorflow.reshape', 'tf.reshape', (['(self._particle_positions - feasible_particle_positions)', '[self._population_size, self._num_agents, -1]'], {}), '(self._particle_positions - feasible_particle_positions, [self.\n _population_size, self._num_agents, -1])\n', (4647, 4755), True, 'import tensorflow as tf\n'), ((5174, 5203), 'tensorflow.expand_dims', 'tf.expand_dims', (['condition', '(-1)'], {}), '(condition, -1)\n', (5188, 5203), True, 'import tensorflow as tf\n'), ((5988, 6033), 'tensorflow.range', 'tf.range', (['(0)', 'samples.shape[0]'], {'dtype': 'tf.int32'}), '(0, samples.shape[0], 
dtype=tf.int32)\n', (5996, 6033), True, 'import tensorflow as tf\n'), ((6815, 6859), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (6831, 6859), True, 'import tensorflow as tf\n'), ((7055, 7085), 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), '(1, dtype=tf.int32)\n', (7066, 7085), True, 'import tensorflow as tf\n'), ((7196, 7226), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (7207, 7226), True, 'import tensorflow as tf\n'), ((8221, 8279), 'tensorflow.expand_dims', 'tf.expand_dims', (['self._global_best_known_position[:, -1]', '(1)'], {}), '(self._global_best_known_position[:, -1], 1)\n', (8235, 8279), True, 'import tensorflow as tf\n'), ((6649, 6693), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (6665, 6693), True, 'import tensorflow as tf\n'), ((7671, 7703), 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.float32'}), '(2, dtype=tf.float32)\n', (7682, 7703), True, 'import tensorflow as tf\n'), ((7788, 7820), 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.float32'}), '(2, dtype=tf.float32)\n', (7799, 7820), True, 'import tensorflow as tf\n')] |
from search import *
import heapq
# Predicates used to describe the state, preconditions and effects
class Predicate:
    """Base class for grid predicates, identified entirely by their printable form."""

    def __str__(self):
        # e.g. Box(3,4) -- the class name plus the first two args
        return "{}({},{})".format(type(self).__name__, self.args[0], self.args[1])

    def __repr__(self):
        return self.__str__()

    def __eq__(self, predicate):
        """Two predicates are equal iff they print identically."""
        return str(self) == str(predicate)

    def __hash__(self):
        """Hash on the printable form so sets/dicts agree with __eq__."""
        return hash(str(self))

    def substitute(self, assign):
        """Ground the predicate: map symbolic args through *assign* and add the offsets."""
        symbolic = self.args
        return type(self)(assign[symbolic[0]] + symbolic[2],
                          assign[symbolic[1]] + symbolic[3])
# Operators
# -- The domain's operators are subclasses of Operator
# -- Concrete actions are instances of those operator classes
class Operator:
    """Base planning operator; subclasses provide args/pc/neg/pos/move/keeper templates."""

    def __init__(self, args, pc, neg, pos, move, keeper=None):
        self.args = args          # concrete coordinates this action was grounded with
        self.pc = pc              # precondition predicate
        self.neg = neg            # negative effect (removed from the state)
        self.pos = pos            # positive effect (added to the state)
        self.move = move          # key sent to the game ('w'/'a'/'s'/'d')
        self.keeper = keeper      # keeper position associated with the action

    def __str__(self):
        return "{}({},{})".format(type(self).__name__, self.args[0], self.args[1])

    def __repr__(self):
        return self.__str__()

    def __eq__(self, operator):
        # actions compare by printable form, mirroring Predicate
        return str(self) == str(operator)

    def __hash__(self):
        return hash(str(self))

    @classmethod
    def instanciate(cls, args):
        """Ground the class templates with concrete coordinates.

        Returns None when the arity does not match the operator's arguments.
        """
        if len(args) != len(cls.args):
            return None
        binding = dict(zip(cls.args, args))
        return cls(args,
                   cls.pc.substitute(binding),
                   cls.neg.substitute(binding),
                   cls.pos.substitute(binding),
                   cls.move,
                   cls.keeper.substitute(binding))
# Concrete predicate kinds. All share args = [x, y, addx, addy] so that
# Predicate.substitute can apply the (addx, addy) offset when grounding.
class Keeper(Predicate):
    # position of the keeper (player)
    def __init__(self, x, y, addx=0, addy=0):
        self.args = [x, y, addx, addy]
class Box(Predicate):
    # position of a pushable box
    def __init__(self, x, y, addx=0, addy=0):
        self.args = [x, y, addx, addy]
class Wall(Predicate):
    # position of an immovable wall cell
    def __init__(self, x, y, addx=0, addy=0):
        self.args = [x, y, addx, addy]
class Floor(Predicate):
    # position of a walkable floor cell
    def __init__(self, x, y, addx=0, addy=0):
        self.args = [x, y, addx, addy]
# Symbolic placeholders bound to concrete coordinates by Operator.instanciate.
X = 'X'
Y = 'Y'
# Keeper-only moves: the keeper leaves (X, Y) and appears one cell away.
# 'move' is the key sent to the game; y grows downwards (hence Up uses -1).
class Up(Operator):
    args = [X, Y]
    pc = Keeper(X, Y)
    neg = Keeper(X, Y)
    pos = Keeper(X, Y, 0, -1)
    keeper = Keeper(X, Y, 0, -1)
    move = 'w'
class Down(Operator):
    args = [X, Y]
    pc = Keeper(X, Y)
    neg = Keeper(X, Y)
    pos = Keeper(X, Y, 0, +1)
    keeper = Keeper(X, Y, 0, +1)
    move = 's'
class Left(Operator):
    args = [X, Y]
    pc = Keeper(X, Y)
    neg = Keeper(X, Y)
    pos = Keeper(X, Y, -1, 0)
    keeper = Keeper(X, Y, -1, 0)
    move = 'a'
class Right(Operator):
    args = [X, Y]
    pc = Keeper(X, Y)
    neg = Keeper(X, Y)
    pos = Keeper(X, Y, +1, 0)
    keeper = Keeper(X, Y, +1, 0)
    move = 'd'
# Box pushes: the box at (X, Y) moves one cell; 'keeper' is the cell the keeper
# must occupy BEFORE the push (the opposite side of the box), which
# STRIPSBox.actions checks for walls/boxes before admitting the action.
class UpBox(Operator):
    args = [X, Y]
    pc = Box(X, Y, 0, 0)
    neg = Box(X, Y, 0, 0)
    pos = Box(X, Y, 0, -1)
    keeper = Keeper(X, Y, 0, +1)
    move = 'w'
class DownBox(Operator):
    args = [X, Y]
    pc = Box(X, Y, 0, 0)
    neg = Box(X, Y, 0, 0)
    pos = Box(X, Y, 0, +1)
    keeper = Keeper(X, Y, 0, -1)
    move = 's'
class LeftBox(Operator):
    args = [X, Y]
    pc = Box(X, Y, 0, 0)
    neg = Box(X, Y, 0, 0)
    pos = Box(X, Y, -1, 0)
    keeper = Keeper(X, Y, +1, 0)
    move = 'a'
class RightBox(Operator):
    args = [X, Y]
    pc = Box(X, Y, 0, 0)
    neg = Box(X, Y, 0, 0)
    pos = Box(X, Y, +1, 0)
    keeper = Keeper(X, Y, -1, 0)
    move = 'd'
# STRIPS-based search domain
class STRIPSBox(SearchDomain):
    """STRIPS-style sokoban search domain over box positions.

    States are sets of Box predicates; actions are grounded push operators.
    """

    def __init__(self):
        pass

    def checkFreezeDeadlock(self, state, boxAfter, walls, goal):
        """Return True if placing *boxAfter* freezes one or more boxes.

        A freeze deadlock occurs when boxes are blocked both horizontally and
        vertically (by walls or by other mutually-blocking boxes) and none of
        the frozen boxes sits on a goal.  Note: *state* is mutated (boxAfter
        is added to it).
        """
        # axis=True  -> the caller already established a horizontal block, so
        #               only the vertical axis still needs checking;
        # axis=False -> only the horizontal axis needs checking.
        # *initial* accumulates already-visited boxes to break recursion cycles.
        def childsFreezeDeadLock(state, boxAfter, walls, goal, axis, initial):
            boxesToCheck = set()
            x, y = boxAfter.args[0], boxAfter.args[1]
            if axis:
                nextToBoxY = set()
                blockedVertical = False
                # collect boxes/walls directly above and below this box
                if Box(x, y + 1, 0, 0) in state:
                    nextToBoxY.add(Box(x, y + 1, 0, 0))
                if Box(x, y - 1, 0, 0) in state:
                    nextToBoxY.add(Box(x, y - 1, 0, 0))
                if Wall(x, y + 1, 0, 0) in walls:
                    nextToBoxY.add(Wall(x, y + 1, 0, 0))
                if Wall(x, y - 1, 0, 0) in walls:
                    nextToBoxY.add(Wall(x, y - 1, 0, 0))
                for cell in nextToBoxY:
                    # a wall means this box cannot move vertically
                    if isinstance(cell, Wall):
                        blockedVertical = True
                    else:
                        # a box already under inspection counts as blocked
                        # (breaks infinite recursion between mutually-blocking boxes)
                        if boxAfter in initial:
                            blockedVertical = True
                        else:
                            boxesToCheck.add(cell)
                            initial.add(boxAfter)
                            blockedVertical, boxesCheck = childsFreezeDeadLock(
                                state, cell, walls, goal, False, initial)
                            # fixed: was boxesToCheck.union(boxesCheck), whose
                            # result was discarded -- merge in place instead
                            boxesToCheck.update(boxesCheck)
                return blockedVertical, boxesToCheck
            else:
                # mirror of the branch above, for the horizontal axis
                nextToBoxX = set()
                blockedHorizontal = False
                if Box(x + 1, y, 0, 0) in state:
                    nextToBoxX.add(Box(x + 1, y, 0, 0))
                if Box(x - 1, y, 0, 0) in state:
                    nextToBoxX.add(Box(x - 1, y, 0, 0))
                if Wall(x + 1, y, 0, 0) in walls:
                    nextToBoxX.add(Wall(x + 1, y, 0, 0))
                if Wall(x - 1, y, 0, 0) in walls:
                    nextToBoxX.add(Wall(x - 1, y, 0, 0))
                for cell in nextToBoxX:
                    if isinstance(cell, Wall):
                        blockedHorizontal = True
                    else:
                        if boxAfter in initial:
                            blockedHorizontal = True
                        else:
                            boxesToCheck.add(cell)
                            initial.add(boxAfter)
                            blockedHorizontal, boxesCheck = childsFreezeDeadLock(
                                state, cell, walls, goal, True, initial)
                            boxesToCheck.update(boxesCheck)  # fixed, see above
                return blockedHorizontal, boxesToCheck

        # ---- initial pass for the box just moved ----
        nextToBoxY = set()
        nextToBoxX = set()
        boxesToCheck = set()
        x, y = boxAfter.args[0], boxAfter.args[1]
        # neighbours directly below / above
        if Box(x, y + 1, 0, 0) in state:
            nextToBoxY.add(Box(x, y + 1, 0, 0))
        if Box(x, y - 1, 0, 0) in state:
            nextToBoxY.add(Box(x, y - 1, 0, 0))
        # neighbours directly right / left
        if Box(x + 1, y, 0, 0) in state:
            nextToBoxX.add(Box(x + 1, y, 0, 0))
        if Box(x - 1, y, 0, 0) in state:
            nextToBoxX.add(Box(x - 1, y, 0, 0))
        if Wall(x + 1, y, 0, 0) in walls:
            nextToBoxX.add(Wall(x + 1, y, 0, 0))
        if Wall(x - 1, y, 0, 0) in walls:
            nextToBoxX.add(Wall(x - 1, y, 0, 0))
        if Wall(x, y + 1, 0, 0) in walls:
            nextToBoxY.add(Wall(x, y + 1, 0, 0))
        if Wall(x, y - 1, 0, 0) in walls:
            nextToBoxY.add(Wall(x, y - 1, 0, 0))
        # add the box at its post-move position (NOTE: mutates the caller's state)
        state.add(boxAfter)
        blockedVertical = False
        blockedHorizontal = False
        # a freeze needs obstructions on BOTH axes
        if nextToBoxX and nextToBoxY:
            for cell in nextToBoxX:
                if not blockedHorizontal:
                    if isinstance(cell, Wall):
                        blockedHorizontal = True
                    else:
                        boxesToCheck.add(cell)
                        blockedHorizontal, boxesCheck = childsFreezeDeadLock(
                            state, cell, walls, goal, True, {boxAfter})
                        boxesToCheck.update(boxesCheck)  # fixed, see above
            # only worth checking vertically once horizontally blocked
            if blockedHorizontal:
                for cell in nextToBoxY:
                    if not blockedVertical:
                        if isinstance(cell, Wall):
                            blockedVertical = True
                        else:
                            boxesToCheck.add(cell)
                            blockedVertical, boxesCheck = childsFreezeDeadLock(
                                state, cell, walls, goal, False, {boxAfter})
                            boxesToCheck.update(boxesCheck)  # fixed, see above
        # deadlock iff blocked on both axes and some frozen box is off-goal
        for box in boxesToCheck:
            if box not in goal and blockedHorizontal and blockedVertical:
                return True
        return False

    def actions(self, state, walls):
        """Ground every push operator applicable in *state*."""
        operators = [UpBox, DownBox, LeftBox, RightBox]
        actions = set()
        for op in operators:
            for box in state:
                action = op.instanciate([box.args[0], box.args[1]])
                # the keeper's required pre-push position must hold neither a
                # wall nor another box, otherwise the push is impossible
                if Wall(action.keeper.args[0], action.keeper.args[1]) not in walls and Box(action.keeper.args[0], action.keeper.args[1]) not in state:
                    action.keeperAfterMove = Keeper(
                        action.args[0], action.args[1])
                    actions.add(action)
        return actions

    def keeperSearch(self, keeper_domain, initialState, goalState, state, possibleMovesSokoban):
        """Delegate to searchKeeper to route the keeper to the push position."""
        return searchKeeper(keeper_domain, initialState, goalState, state, possibleMovesSokoban)

    def result(self, state, action):
        # apply the negative effect, then the positive effect
        newstate = set(state)
        newstate.remove(action.neg)
        newstate.add(action.pos)
        return frozenset(newstate)

    def cost(self, cost):
        # every push costs one step
        return cost + 1

    def heuristic(self, state, goal):
        """Heuristic for the box-push search.

        Rewards boxes already on goals and boxes aligned with a goal's row or
        column; otherwise adds the Manhattan distance to each unoccupied goal.
        """
        heuristic = 0
        for box in state:
            for g in goal:
                if box == g:
                    heuristic -= 1
                elif g not in state:
                    # fixed: second term was abs(box.args[0] - g.args[1]),
                    # mixing the x coordinate with the goal's y coordinate --
                    # proper Manhattan distance uses args[1] vs args[1]
                    heuristic += abs(box.args[0] - g.args[0]) + \
                        abs(box.args[1] - g.args[1])
                    if box.args[0] == g.args[0] or box.args[1] == g.args[1]:
                        heuristic -= 1
        return heuristic

    def satisfies(self, state, goal):
        return state == goal
class STRIPSKeeper(SearchDomain):
    """Search domain for routing the keeper itself (no boxes are moved)."""

    def __init__(self):
        pass

    def actions(self, keeper):
        """Ground the four elementary moves from the keeper's current cell."""
        return [op.instanciate([keeper.args[0], keeper.args[1]])
                for op in (Up, Down, Left, Right)]

    def result(self, state, action):
        # the keeper simply ends up at the action's positive effect
        return action.pos

    def cost(self, cost):
        # every elementary move costs one step
        return cost + 1

    def heuristic(self, state, goal):
        """Manhattan distance between the keeper and the target cell."""
        dx = abs(state.args[0] - goal.args[0])
        dy = abs(state.args[1] - goal.args[1])
        return dx + dy

    def satisfies(self, state, goal):
        return state == goal
def searchKeeper(domain, initial, goal, boxes, possibleMoves):
    """A*-style search for the keeper's path to the cell needed for a push.

    Returns the list of move keys ('w'/'a'/'s'/'d') or None if unreachable.
    Moves are filtered both by *possibleMoves* (the map's legal moves per
    cell) and by the current *boxes*, which the keeper may not walk through.
    """
    visited = set()
    root = SearchNode(initial, None, 0,
                      domain.heuristic(initial, goal), None)
    frontier = [root]
    heapq.heapify(frontier)
    while frontier:
        current = heapq.heappop(frontier)
        if domain.satisfies(current.state, goal):
            return current.path
        children = []
        for action in domain.actions(current.state):
            successor = domain.result(current.state, action)
            if successor in visited:
                continue
            # the move must be legal from this cell AND not land on a box
            if action.move in possibleMoves[(current.state.args[0], current.state.args[1])] and not any(b.args == action.pos.args for b in boxes):
                child = SearchNode(
                    successor, current, current.cost + 1,
                    domain.heuristic(successor, goal), action,
                    path=current.path + [action.move])
                visited.add(successor)
                children.append(child)
        for child in children:
            heapq.heappush(frontier, child)
    return None
| [
"heapq.heappush",
"heapq.heappop",
"heapq.heapify"
] | [((15099, 15124), 'heapq.heapify', 'heapq.heapify', (['open_nodes'], {}), '(open_nodes)\n', (15112, 15124), False, 'import heapq\n'), ((15163, 15188), 'heapq.heappop', 'heapq.heappop', (['open_nodes'], {}), '(open_nodes)\n', (15176, 15188), False, 'import heapq\n'), ((16029, 16064), 'heapq.heappush', 'heapq.heappush', (['open_nodes', 'newnode'], {}), '(open_nodes, newnode)\n', (16043, 16064), False, 'import heapq\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import show_and_tell_model
from inference_utils import inference_wrapper_base
class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase):
    """Model wrapper exposing the Show-and-Tell graph for caption inference."""

    def __init__(self):
        super(InferenceWrapper, self).__init__()

    def build_model(self, model_config):
        """Build the Show-and-Tell model graph in inference mode."""
        caption_model = show_and_tell_model.ShowAndTellModel(model_config, mode="inference")
        caption_model.build()
        return caption_model

    def feed_image_and_word_prob(self, sess, encoded_image, word_prob):
        """Feed the encoded image (plus word probabilities) and get the LSTM's initial state."""
        feed = {"image_feed:0": encoded_image, "word_prob_feed:0": word_prob}
        return sess.run(fetches="lstm/initial_state:0", feed_dict=feed)

    def inference_step(self, sess, input_feed, state_feed):
        """Run one decode step; returns (softmax, new_state, None-metadata)."""
        feed = {"input_feed:0": input_feed, "lstm/state_feed:0": state_feed}
        softmax_output, state_output = sess.run(
            fetches=["softmax:0", "lstm/state:0"], feed_dict=feed)
        return softmax_output, state_output, None
| [
"show_and_tell_model.ShowAndTellModel"
] | [((388, 456), 'show_and_tell_model.ShowAndTellModel', 'show_and_tell_model.ShowAndTellModel', (['model_config'], {'mode': '"""inference"""'}), "(model_config, mode='inference')\n", (424, 456), False, 'import show_and_tell_model\n')] |
from logging import getLogger
from os import getpid
from infi.exceptools import chain
from infi.pyutils.contexts import contextmanager
from infi.pyutils.decorators import wraps
# Sense (key, additional-code) pairs for which retrying the command cannot help;
# check_for_scsi_errors raises immediately when it sees one of these.
CHECK_CONDITIONS_NOT_WORTH_RETRY = [
    ('ILLEGAL_REQUEST', 'LOGICAL UNIT NOT SUPPORTED'),
    ('ILLEGAL_REQUEST', 'INVALID FIELD IN CDB'),
    ('ILLEGAL_REQUEST', 'INVALID COMMAND OPERATION CODE'),
]
SEC = 1000  # presumably milliseconds per second -- TODO confirm the executer's unit
TIMEOUT_IN_SEC = 3
# command timeout handed to the ASI command executer (SEC units, see above)
TIMEOUT = SEC * TIMEOUT_IN_SEC
logger = getLogger(__name__)
class ScsiCommandFailed(Exception):
    """Base error raised when a SCSI command cannot be completed."""


class ScsiReservationConflictError(ScsiCommandFailed):
    """Raised when the target reports a reservation conflict."""


class ScsiCheckConditionError(ScsiCommandFailed):
    """Raised for a CHECK CONDITION response; carries the decoded sense data."""

    def __init__(self, sense_key, code_name):
        super(ScsiCheckConditionError, self).__init__(sense_key, code_name)
        self.sense_key = sense_key      # e.g. 'ILLEGAL_REQUEST'
        self.code_name = code_name      # e.g. 'INVALID FIELD IN CDB'
def func_logger(func):
    """Decorator that debug-logs each call: pid, arguments, and the return
    value -- or the raised exception, which is logged and always re-raised."""
    @wraps(func)
    def decorator(*args, **kwargs):
        format_args = ', '.join(repr(item) for item in args)
        # fixed: was "{}={!r}".format(repr(key), repr(value)) -- the double
        # repr produced garbled output like 'key'="'value'"
        format_kwargs = ', '.join("{}={!r}".format(key, value) for key, value in kwargs.items())
        logger.debug("{} --> {}({}, {})".format(getpid(), func.__name__, format_args, format_kwargs))
        try:
            # fixed: the try previously wrapped only the result *logging*, so a
            # failing call was never logged as raising
            result = func(*args, **kwargs)
        except Exception as err:
            # fixed: format string had four placeholders but three arguments and
            # referenced func.___name__ (typo) -- the handler itself raised,
            # masking the original exception
            logger.exception("{} <-- raise {!r} | {}".format(getpid(), err, func.__name__))
            raise
        logger.debug("{} <-- return {!r} | {}".format(getpid(), result, func.__name__))
        return result
    return decorator
@func_logger
def log_execute(args, timeout_in_seconds=None):
    """Start *args* asynchronously, wait up to *timeout_in_seconds*, and
    return the child's pid.  A timed-out child is killed with SIGKILL."""
    from infi.execute import execute_async, CommandTimeout
    child = execute_async(args)
    try:
        child.wait(timeout_in_seconds)
    except CommandTimeout:
        # the command overran its budget -- kill it hard
        child.kill(9)
    return child.get_pid()
@contextmanager
def asi_context(sg_device):
    """Context manager yielding a SCSI command executer for /dev/<sg_device>;
    the underlying device file is closed on exit."""
    from infi.asi import create_platform_command_executer, create_os_file
    device_file = create_os_file("/dev/{}".format(sg_device))
    command_executer = create_platform_command_executer(device_file, timeout=TIMEOUT)
    try:
        yield command_executer
    finally:
        device_file.close()
def check_for_scsi_errors(func):
    """Decorator for functions of signature (sg_device, cdb) that translates
    low-level ASI errors into the module's Scsi* exceptions.

    CHECK CONDITION responses are retried up to 10 times unless the sense
    data is in CHECK_CONDITIONS_NOT_WORTH_RETRY; OS/SCSI transport errors
    abort immediately; anything that exhausts the loop raises
    ScsiCommandFailed (chained onto the original error).
    """
    from infi.asi.errors import AsiOSError, AsiSCSIError, AsiCheckConditionError, AsiReservationConflictError
    from infi.asi.cdb.report_luns import UnsupportedReportLuns
    @wraps(func)
    def decorator(*args, **kwargs):
        counter = 10
        while counter > 0:
            try:
                sg_device, cdb = args
                msg = "{} attempting to send {} to sg device {}, {} more retries"
                logger.debug(msg.format(getpid(), func.__name__, sg_device, counter))
                response = func(*args, **kwargs)
                return response
            except AsiCheckConditionError as e:
                (key, code) = (e.sense_obj.sense_key, e.sense_obj.additional_sense_code.code_name)
                msg = "{} sg device {} got {} {}".format(getpid(), sg_device, key, code)
                # logger.warning: logger.warn is a deprecated alias
                logger.warning(msg)
                counter -= 1
                # hopeless sense data (or retries exhausted): give up now
                if (key, code) in CHECK_CONDITIONS_NOT_WORTH_RETRY or counter == 0:
                    raise ScsiCheckConditionError(key, code)
            except AsiReservationConflictError as error:
                # fixed: message was copy-pasted from the UnsupportedReportLuns
                # branch ("has unsupported luns report")
                msg = "{} sg device {} got reservation conflict: {}"
                logger.error(msg.format(getpid(), sg_device, error))
                raise ScsiReservationConflictError()
            except (IOError, OSError, AsiOSError, AsiSCSIError) as error:
                msg = "{} sg device {} got unrecoverable error {} during {!r}"
                logger.error(msg.format(getpid(), sg_device, error, cdb))
                # not retryable: fall through to the chained ScsiCommandFailed
                counter = 0
            except UnsupportedReportLuns as error:
                msg = "{} sg device {} has unsupported luns report: {}"
                logger.error(msg.format(getpid(), sg_device, error))
                raise ScsiCommandFailed()
        raise chain(ScsiCommandFailed())
    return decorator
def format_hctl(host, channel, target, lun):
    """Return the SCSI address as a 'host:channel:target:lun' string."""
    return ":".join(str(part) for part in (host, channel, target, lun))
| [
"logging.getLogger",
"infi.asi.create_platform_command_executer",
"infi.pyutils.decorators.wraps",
"infi.execute.execute_async",
"os.getpid"
] | [((452, 471), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (461, 471), False, 'from logging import getLogger\n'), ((855, 866), 'infi.pyutils.decorators.wraps', 'wraps', (['func'], {}), '(func)\n', (860, 866), False, 'from infi.pyutils.decorators import wraps\n'), ((1642, 1661), 'infi.execute.execute_async', 'execute_async', (['args'], {}), '(args)\n', (1655, 1661), False, 'from infi.execute import execute_async, CommandTimeout\n'), ((1971, 2028), 'infi.asi.create_platform_command_executer', 'create_platform_command_executer', (['handle'], {'timeout': 'TIMEOUT'}), '(handle, timeout=TIMEOUT)\n', (2003, 2028), False, 'from infi.asi import create_platform_command_executer, create_os_file\n'), ((2309, 2320), 'infi.pyutils.decorators.wraps', 'wraps', (['func'], {}), '(func)\n', (2314, 2320), False, 'from infi.pyutils.decorators import wraps\n'), ((1125, 1133), 'os.getpid', 'getpid', ([], {}), '()\n', (1131, 1133), False, 'from os import getpid\n'), ((1289, 1297), 'os.getpid', 'getpid', ([], {}), '()\n', (1295, 1297), False, 'from os import getpid\n'), ((1418, 1426), 'os.getpid', 'getpid', ([], {}), '()\n', (1424, 1426), False, 'from os import getpid\n'), ((2582, 2590), 'os.getpid', 'getpid', ([], {}), '()\n', (2588, 2590), False, 'from os import getpid\n'), ((2913, 2921), 'os.getpid', 'getpid', ([], {}), '()\n', (2919, 2921), False, 'from os import getpid\n'), ((3321, 3329), 'os.getpid', 'getpid', ([], {}), '()\n', (3327, 3329), False, 'from os import getpid\n'), ((3596, 3604), 'os.getpid', 'getpid', ([], {}), '()\n', (3602, 3604), False, 'from os import getpid\n'), ((3821, 3829), 'os.getpid', 'getpid', ([], {}), '()\n', (3827, 3829), False, 'from os import getpid\n')] |
from pathlib import Path, PureWindowsPath
from PIL import Image
class dotdict(dict):
    """Dictionary subclass that allows attribute-style access to its keys."""

    def __getattr__(self, name):
        # dict.get semantics: a missing key yields None instead of AttributeError
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        # Raises KeyError when the key is absent, mirroring dict.__delitem__
        del self[name]
def openSingleAndCheck(path, resized_path, resize=False):
    """
    Open an image, normalise it to RGB and optionally shrink it.

    :param path: source image file path
    :param resized_path: destination path for the (possibly resized) JPEG copy
    :param resize: False to keep the original size, otherwise the maximum
        edge length in pixels; the image is only ever shrunk, never enlarged
    :return: PureWindowsPath pointing at the saved JPEG
    """
    image = Image.open(path)
    # JPEG cannot store alpha/palette modes, so force RGB before saving.
    if image.mode != 'RGB':
        image = image.convert('RGB')
    if resize:
        max_size = resize
        original_size = max(image.size[0], image.size[1])
        if original_size >= max_size:
            # Scale the longer edge down to max_size, preserving aspect ratio.
            if (image.size[0] > image.size[1]):
                resized_width = max_size
                resized_height = int(round((max_size/float(image.size[0]))*image.size[1]))
            else:
                resized_height = max_size
                resized_width = int(round((max_size/float(image.size[1]))*image.size[0]))
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            image = image.resize((resized_width, resized_height), Image.LANCZOS)
    image.save(resized_path, 'JPEG')
    output = Path(resized_path).absolute()
    return PureWindowsPath(output)
| [
"PIL.Image.open",
"pathlib.PureWindowsPath",
"pathlib.Path"
] | [((427, 443), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (437, 443), False, 'from PIL import Image\n'), ((1165, 1188), 'pathlib.PureWindowsPath', 'PureWindowsPath', (['output'], {}), '(output)\n', (1180, 1188), False, 'from pathlib import Path, PureWindowsPath\n'), ((1123, 1141), 'pathlib.Path', 'Path', (['resized_path'], {}), '(resized_path)\n', (1127, 1141), False, 'from pathlib import Path, PureWindowsPath\n')] |
from django.db import models
from cms.models import CMSPlugin
from embed_video.fields import EmbedVideoField
class VideoPluginModel(CMSPlugin):
video_url = EmbedVideoField()
max_width = models.IntegerField(blank=True, null=True, help_text="in px")
def __str__(self):
return self.video_url
| [
"embed_video.fields.EmbedVideoField",
"django.db.models.IntegerField"
] | [((162, 179), 'embed_video.fields.EmbedVideoField', 'EmbedVideoField', ([], {}), '()\n', (177, 179), False, 'from embed_video.fields import EmbedVideoField\n'), ((196, 257), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'help_text': '"""in px"""'}), "(blank=True, null=True, help_text='in px')\n", (215, 257), False, 'from django.db import models\n')] |
"""
Generate all synonymous mutants for a input protein
<NAME>
"""
# Ensure Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import argparse
import sys
from signal import signal, SIGPIPE, SIG_DFL
# catch broken pipe errors to allow ex) python pyParse.py foo bar | head
# see: https://stackoverflow.com/a/30091579
signal(SIGPIPE, SIG_DFL)  # restore default SIGPIPE handling so piping to e.g. `head` exits quietly
#===============================================================================
def fasta_reader(fasta):
    """
    Lazily parse an open FASTA handle.

    Parameters
    ----------
    fasta : iterable of str
        An opened FASTA file (or any iterable of its lines).

    Yields
    ------
    (name, read) : (str, str)
        The record name (leading '>' stripped) and its sequence, with any
        forced line breaks joined back together.

    Notes
    -----
    itertools.groupby keys on lines starting with '>', so each header group
    is immediately followed by a group holding every sequence line of that
    record; this makes the parser robust to wrapped sequences.
    Adapted from: https://www.biostars.org/p/710/#1412
    """
    # keep only the grouped lines; the boolean key itself is not needed
    chunks = (group for _, group in itertools.groupby(fasta, lambda line: line[0] == ">"))
    for header_group in chunks:
        name = next(header_group)[1:].strip()
        # the next chunk contains all (possibly wrapped) sequence lines
        sequence = "".join(part.strip() for part in next(chunks))
        yield name, sequence
#-------------------------------------------------------------------------------
def revcomp(seq):
    """Return the reverse complement of a DNA string (A/C/G/T/N only).

    Parameters
    ----------
    seq : str

    Returns
    -------
    str
    """
    pairing = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(pairing[base] for base in reversed(seq))
#-------------------------------------------------------------------------------
def to_prot(dna):
    """
    Translate an in-frame DNA sequence into its protein equivalent.

    Parameters
    ----------
    dna : str
        DNA sequence, assumed already in frame (no start-codon search).

    Returns
    -------
    (aminos, flag) : (str, bool)
        aminos - translated protein; any codon containing an 'N' becomes
            the dummy amino acid 'X'
        flag - True when at least one 'X' was produced

    Notes
    -----
    A trailing partial codon is silently dropped.
    Adapted from: http://stackoverflow.com/a/19522901
    """
    codon_table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
        'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'}
    translated = []
    # iterate only over complete codons (drop any 1-2 nt remainder)
    for start in range(0, len(dna) - len(dna) % 3, 3):
        codon = dna[start:start + 3]
        translated.append('X' if 'N' in codon else codon_table[codon])
    aminos = ''.join(translated)
    # flag downstream filtering when any dummy codon was emitted
    return (aminos, 'X' in aminos)
#-------------------------------------------------------------------------------
def to_dna(dna):
    """Generate every single-codon synonymous variant of an in-frame DNA string.

    Parameters
    -----------
    dna :: str
        In-frame DNA sequence; must not translate with an 'N' dummy codon.

    Returns
    --------
    list of (name, seq) :: [(str, str)]
        name encodes '<1-based codon position>_<amino acid>_<substituted codon>';
        variants identical to the input sequence are filtered out.

    Raises
    -------
    ValueError
        If the translation flagged an 'N' in the input sequence.
    """
    rev_codon_table = {
        'A':['GCT','GCC','GCA','GCG'],
        'R':['CGT','CGC','CGA','CGG','AGA','AGG'],
        'N':['AAT','AAC'],
        'D':['GAT','GAC'],
        'C':['TGT','TGC'],
        'Q':['CAA','CAG'],
        'E':['GAA','GAG'],
        'G':['GGT','GGC','GGA','GGG'],
        'H':['CAT','CAC'],
        'I':['ATT','ATC','ATA'],
        'L':['TTA','TTG','CTT','CTC','CTA','CTG'],
        'K':['AAA','AAG'],
        'M':['ATG'],
        'F':['TTT','TTC'],
        'P':['CCT','CCC','CCA','CCG'],
        'S':['TCT','TCC','TCA','TCG','AGT','AGC'],
        'T':['ACT','ACC','ACA','ACG'],
        'W':['TGG'],
        'Y':['TAT','TAC'],
        'V':['GTT','GTC','GTA','GTG'],
        '*':['TAA','TGA','TAG']}
    # get codons (complete triplets only; a trailing partial codon is dropped)
    old_codons = (dna[x:x+3] for x in range(0, len(dna), 3))
    coding_seq = list(itertools.takewhile(lambda x: len(x) == 3, old_codons))
    prot, flag = to_prot(dna)
    if flag:
        raise ValueError('N found in input sequence')
    # names and raw_seqs are built in lockstep and zipped at the end
    raw_seqs = list()
    names = list()
    for pos, aa in enumerate(prot):
        new_codons = rev_codon_table[aa]
        for codon in new_codons:
            names.append('{}_{}_{}'.format(pos + 1, aa, codon))
            # splice the replacement codon between the unchanged flanks
            raw_seqs.append(''.join(
                itertools.chain.from_iterable([coding_seq[:pos], codon, coding_seq[pos+1:]])))
    return([x for x in zip(names, raw_seqs) if x[1] != dna])
#===============================================================================
if __name__ == '__main__':
    # CLI: read one or more fasta records, optionally reverse-complement each,
    # then emit every single-codon synonymous mutant as its own fasta record.
    parser = argparse.ArgumentParser(
        description='Generate all single synonymous mutations for an input sequence')
    parser.add_argument('infile',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        nargs='?',
                        help='path to a *.fasta file of the reads (or stdin if none)')
    parser.add_argument('-r',
                        '--rev-comp',
                        dest='rc',
                        action='store_true',
                        help='reverse complement the sequence?')
    args = parser.parse_args()
    # drop the fasta header since we don't need it
    for _, seq in fasta_reader(args.infile):
        if args.rc:
            seq = revcomp(seq)
        out = to_dna(seq)
        for header, synon in out:
            print('>{}\n{}'.format(header, synon), file=sys.stdout)
| [
"argparse.FileType",
"signal.signal",
"itertools.groupby",
"argparse.ArgumentParser",
"itertools.chain.from_iterable"
] | [((458, 482), 'signal.signal', 'signal', (['SIGPIPE', 'SIG_DFL'], {}), '(SIGPIPE, SIG_DFL)\n', (464, 482), False, 'from signal import signal, SIGPIPE, SIG_DFL\n'), ((5607, 5713), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate all single synonymous mutations for an input sequence"""'}), "(description=\n 'Generate all single synonymous mutations for an input sequence')\n", (5630, 5713), False, 'import argparse\n'), ((1680, 1733), 'itertools.groupby', 'itertools.groupby', (['fasta', "(lambda line: line[0] == '>')"], {}), "(fasta, lambda line: line[0] == '>')\n", (1697, 1733), False, 'import itertools\n'), ((5781, 5803), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (5798, 5803), False, 'import argparse\n'), ((5341, 5419), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[coding_seq[:pos], codon, coding_seq[pos + 1:]]'], {}), '([coding_seq[:pos], codon, coding_seq[pos + 1:]])\n', (5370, 5419), False, 'import itertools\n')] |
"""Functions to manipulate and validate configurations."""
import jsonschema
import six
def merge_config(a, b):
    """Recursively merge config ``b`` into ``a`` (in place) and return ``a``.

    Values from ``b`` win, except that dict values of the exact same type
    are merged recursively instead of replaced wholesale.
    """
    # dict.items() iterates fine on both Python 2 and 3; six is unnecessary here
    for key, value in b.items():
        if key in a and isinstance(value, dict) and type(a[key]) == type(value):
            merge_config(a[key], value)
        else:
            a[key] = value
    return a
def index_config(config, path, index_structure=True):
    """Walk ``config`` following a '/'-separated path.

    When ``index_structure`` is False, the final path component is returned
    as a key together with its parent container instead of being
    dereferenced; otherwise the addressed value itself is returned.

    Raises ValueError on missing keys, non-integer list indices, or scalar
    values encountered mid-path.
    """
    key = None
    sections = path.split('/')
    if not index_structure:
        sections, key = sections[:-1], sections[-1]
    for section in sections:
        if isinstance(config, dict):
            if section not in config:
                raise ValueError('Invalid path %s in config' % path)
            config = config[section]
        elif isinstance(config, list):
            try:
                position = int(section)
            except ValueError:
                raise ValueError('Expected an array index in path, but got %s instead' % section)
            config = config[position]
        else:
            raise ValueError('Paths in config can only represent object and array structures')
    if index_structure:
        return config
    return config, key
def index_schema(schema, path):
    """Walk a JSON schema of nested objects along a '/'-separated path.

    Raises ValueError when a non-object type is encountered mid-path or a
    path component is not declared among the schema's properties.
    """
    for part in path.split('/'):
        if schema['type'] != 'object':
            raise ValueError('Only object types are supported in the schema structure, '
                             'but saw type %s' % schema['type'])
        declared = schema['properties']
        if part not in declared:
            raise ValueError('Invalid path %s in user options' % path)
        schema = declared[part]
    return schema
def validate_inference_options(inference_options, config):
    """Validate an inference options block, raising ValueError on error.

    Checks that both "json_schema" and "options" are present, that the
    schema itself is a valid Draft 7 schema, and that every option mapping
    resolves against both the schema and the config.
    Returns the validated JSON schema.
    """
    json_schema = inference_options.get('json_schema')
    options = inference_options.get('options')
    if json_schema is None:
        raise ValueError('Missing "json_schema" in "inference_options"')
    jsonschema.Draft7Validator.check_schema(json_schema)
    if options is None:
        raise ValueError('Missing "options" in "inference_options"')
    validate_mapping(json_schema, options, config)
    return json_schema
def validate_mapping(schema, options, config):
    """Validate each option mapping against the schema and the config,
    raising ValueError when a mapping is malformed or points nowhere.
    """
    for position, mapping in enumerate(options):
        config_path = mapping.get('config_path')
        if config_path is None:
            raise ValueError('Missing "config_path" in option mapping %d' % position)
        parent, _ = index_config(config, config_path, index_structure=False)
        if not isinstance(parent, dict):
            raise ValueError('Paths in config can only index object structures')
        option_path = mapping.get('option_path')
        if option_path is None:
            raise ValueError('Missing "option_path" in option mapping %d' % position)
        # index_schema raises if the option path does not exist in the schema
        index_schema(schema, option_path)
def update_config_with_options(config, options):
    """Apply incoming inference options onto the configuration in place.

    Raises ValueError when the model declares no inference options or the
    submitted options do not satisfy the declared JSON schema.
    """
    inference_options = config.get('inference_options')
    if inference_options is None:
        raise ValueError('This model does not expect inference options')
    try:
        jsonschema.validate(options, inference_options['json_schema'])
    except jsonschema.ValidationError as e:
        raise ValueError('Options validation error: %s' % e.message)
    for mapping in inference_options['options']:
        try:
            option_value = index_config(options, mapping['option_path'])
        except ValueError:
            # The client did not submit this particular option.
            continue
        parent, key = index_config(config, mapping['config_path'], index_structure=False)
        parent[key] = option_value
| [
"jsonschema.validate",
"six.iteritems",
"jsonschema.Draft7Validator.check_schema"
] | [((163, 179), 'six.iteritems', 'six.iteritems', (['b'], {}), '(b)\n', (176, 179), False, 'import six\n'), ((2067, 2119), 'jsonschema.Draft7Validator.check_schema', 'jsonschema.Draft7Validator.check_schema', (['json_schema'], {}), '(json_schema)\n', (2106, 2119), False, 'import jsonschema\n'), ((3524, 3586), 'jsonschema.validate', 'jsonschema.validate', (['options', "inference_options['json_schema']"], {}), "(options, inference_options['json_schema'])\n", (3543, 3586), False, 'import jsonschema\n')] |
import os
from factory import create_app
app = create_app()
# Push an application context so module-level database access works at import time.
app.app_context().push()
@app.teardown_request
def teardown_request(*args, **kwargs):
    'Expire and remove the session after each request'
    # Imported lazily so the module can load before the database is configured.
    from database import db
    db.session.expire_all()
    db.session.remove()
# SERVER_SOFTWARE identifies the serving environment; seed demo data only
# when running under a local development server.
if 'Development' in os.environ.get('SERVER_SOFTWARE', ''):
    from tests.conftest import create_dev_data
    from database import db
    create_dev_data(db.session)
if __name__ == "__main__":
    app.run()
| [
"database.db.session.remove",
"tests.conftest.create_dev_data",
"os.environ.get",
"factory.create_app",
"database.db.session.expire_all"
] | [((50, 62), 'factory.create_app', 'create_app', ([], {}), '()\n', (60, 62), False, 'from factory import create_app\n'), ((238, 261), 'database.db.session.expire_all', 'db.session.expire_all', ([], {}), '()\n', (259, 261), False, 'from database import db\n'), ((266, 285), 'database.db.session.remove', 'db.session.remove', ([], {}), '()\n', (283, 285), False, 'from database import db\n'), ((308, 345), 'os.environ.get', 'os.environ.get', (['"""SERVER_SOFTWARE"""', '""""""'], {}), "('SERVER_SOFTWARE', '')\n", (322, 345), False, 'import os\n'), ((426, 453), 'tests.conftest.create_dev_data', 'create_dev_data', (['db.session'], {}), '(db.session)\n', (441, 453), False, 'from tests.conftest import create_dev_data\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from skynet.endpoints.ephttp import simple_post_server
from skynet.seq2seq import Generator
def run_server(model_name, port=8080):
    """Serve text generation over HTTP.

    POSTed text/plain bodies are fed to the seq2seq generator and the
    generated continuation is written back as text/plain.

    :param model_name: name of the seq2seq model to load
    :param port: TCP port to listen on (default 8080)
    """
    generator = Generator(model_name)
    print('Test is: ' + generator.generate(500, 'test string'))
    def handler(server):
        try:
            if 'Content-Length' not in server.headers:
                server.send_error(400, 'Bad Request: Content-Length required')
                return
            if 'Content-Type' not in server.headers or server.headers['Content-Type'] != 'text/plain':
                server.send_error(400, 'Bad Request: Content-Type must be text/plain')
                # bug fix: previously fell through and kept processing after the 400
                return
            input_len = int(server.headers['Content-Length'])
            input_text = server.rfile.read(input_len).decode('utf-8')
            # The generator expects sentence-terminated input.
            if not input_text.endswith(('.','?','!')):
                input_text = input_text + '.'
            output_text = generator.generate(500, input_text)
            payload = output_text.encode('utf-8')
            server.send_header('Content-Type', 'text/plain')
            # Content-Length counts bytes, not characters (matters for non-ASCII).
            server.send_header('Content-Length', str(len(payload)))
            server.wfile.write(payload)
        except Exception as e:
            server.send_error(500, 'Internal Server Error')
            print(e)
    simple_post_server('0.0.0.0', port, handler)
| [
"skynet.endpoints.ephttp.simple_post_server",
"skynet.seq2seq.Generator"
] | [((729, 750), 'skynet.seq2seq.Generator', 'Generator', (['model_name'], {}), '(model_name)\n', (738, 750), False, 'from skynet.seq2seq import Generator\n'), ((1810, 1854), 'skynet.endpoints.ephttp.simple_post_server', 'simple_post_server', (['"""0.0.0.0"""', 'port', 'handler'], {}), "('0.0.0.0', port, handler)\n", (1828, 1854), False, 'from skynet.endpoints.ephttp import simple_post_server\n')] |
#
# Copyright (c) 2018 LabKey Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from labkey.api_wrapper import APIWrapper
from labkey.experiment import Batch, Run
# Connection settings for the target LabKey server.
labkey_server = "localhost:8080"
project_name = "ModulesAssayTest" # Project folder name
context_path = "labkey"
api = APIWrapper(labkey_server, project_name, context_path, use_ssl=False)
# Row id of the assay design to save into; must exist on the server.
assay_id = 3315 # provide one from your server
###################
# Save an Assay batch
###################
# Generate the Run object(s)
run_test = Run()
run_test.name = "python upload"
# Each dict is one result row, keyed by assay result-field name.
run_test.data_rows = [
    {
        # ColumnName: Value
        "SampleId": "<NAME>",
        "TimePoint": "2008/11/02 11:22:33",
        "DoubleData": 4.5,
        "HiddenData": "another data point",
    },
    {
        "SampleId": "<NAME>",
        "TimePoint": "2008/11/02 14:00:01",
        "DoubleData": 3.1,
        "HiddenData": "fozzy bear",
    },
    {
        "SampleId": "<NAME>",
        "TimePoint": "2008/11/02 14:00:01",
        "DoubleData": 1.5,
        "HiddenData": "jimbo",
    },
]
# Run-level property; the field must be defined on the assay design.
run_test.properties["RunFieldName"] = "Run Field Value"
# Generate the Batch object(s)
batch = Batch()
batch.runs = [run_test]
batch.name = "python batch"
batch.properties["PropertyName"] = "Property Value"
# Execute save api
saved_batch = api.experiment.save_batch(assay_id, batch)
###################
# Load an Assay batch
###################
batch_id = saved_batch.row_id # provide one from your server
run_group = api.experiment.load_batch(assay_id, batch_id)
if run_group is not None:
    print("Batch Id: " + str(run_group.id))
    print("Created By: " + run_group.created_by)
| [
"labkey.experiment.Run",
"labkey.experiment.Batch",
"labkey.api_wrapper.APIWrapper"
] | [((794, 862), 'labkey.api_wrapper.APIWrapper', 'APIWrapper', (['labkey_server', 'project_name', 'context_path'], {'use_ssl': '(False)'}), '(labkey_server, project_name, context_path, use_ssl=False)\n', (804, 862), False, 'from labkey.api_wrapper import APIWrapper\n'), ((1016, 1021), 'labkey.experiment.Run', 'Run', ([], {}), '()\n', (1019, 1021), False, 'from labkey.experiment import Batch, Run\n'), ((1656, 1663), 'labkey.experiment.Batch', 'Batch', ([], {}), '()\n', (1661, 1663), False, 'from labkey.experiment import Batch, Run\n')] |
import numpy as np
import porespy as ps
import matplotlib.pyplot as plt
import openpnm as op
np.random.seed(0)  # fixed seed so the region counts asserted below are reproducible
def test_snow_example_script():
    plot = False
    # Two overlapping blob fields define a three-phase image (values 0, 1, 2).
    im1 = ps.generators.blobs(shape=[600, 400], porosity=None, blobiness=1) < 0.4
    im2 = ps.generators.blobs(shape=[600, 400], porosity=None, blobiness=1) < 0.7
    phases = im1 + (im2 * ~im1)*2
    # phases = phases > 0
    snow_n = ps.networks.snow2(phases,
                              phase_alias={1: 'solid', 2: 'void'},
                              boundary_width=5,
                              accuracy='high',
                              parallelization=None)
    assert snow_n.regions.max() == 211
    # Remove all but 1 pixel-width of boundary regions
    temp = ps.tools.extract_subsection(im=snow_n.regions,
                                       shape=np.array(snow_n.regions.shape)-8)
    assert temp.max() == 211
    # Remove complete boundary region
    temp = ps.tools.extract_subsection(im=snow_n.regions,
                                       shape=np.array(snow_n.regions.shape)-10)
    assert temp.max() == 164
    # %% Plot the final extraction overlayed with snow segmentation
    if plot:
        fig, ax = plt.subplots(1, 1)
        ax.imshow(ps.tools.randomize_colors(snow_n.regions.T))
        proj = op.io.from_porespy(snow_n.network)
        op.topotools.plot_connections(network=proj.network, ax=ax)
        op.topotools.plot_coordinates(network=proj.network, ax=ax)
        plt.axis('off')
| [
"openpnm.topotools.plot_coordinates",
"porespy.generators.blobs",
"porespy.networks.snow2",
"openpnm.io.from_porespy",
"numpy.array",
"openpnm.topotools.plot_connections",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"porespy.tools.randomize_colors",
"matplotlib.pyplot.subplots"
] | [((93, 110), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (107, 110), True, 'import numpy as np\n'), ((401, 528), 'porespy.networks.snow2', 'ps.networks.snow2', (['phases'], {'phase_alias': "{(1): 'solid', (2): 'void'}", 'boundary_width': '(5)', 'accuracy': '"""high"""', 'parallelization': 'None'}), "(phases, phase_alias={(1): 'solid', (2): 'void'},\n boundary_width=5, accuracy='high', parallelization=None)\n", (418, 528), True, 'import porespy as ps\n'), ((173, 238), 'porespy.generators.blobs', 'ps.generators.blobs', ([], {'shape': '[600, 400]', 'porosity': 'None', 'blobiness': '(1)'}), '(shape=[600, 400], porosity=None, blobiness=1)\n', (192, 238), True, 'import porespy as ps\n'), ((255, 320), 'porespy.generators.blobs', 'ps.generators.blobs', ([], {'shape': '[600, 400]', 'porosity': 'None', 'blobiness': '(1)'}), '(shape=[600, 400], porosity=None, blobiness=1)\n', (274, 320), True, 'import porespy as ps\n'), ((1193, 1211), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1205, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1291, 1325), 'openpnm.io.from_porespy', 'op.io.from_porespy', (['snow_n.network'], {}), '(snow_n.network)\n', (1309, 1325), True, 'import openpnm as op\n'), ((1334, 1392), 'openpnm.topotools.plot_connections', 'op.topotools.plot_connections', ([], {'network': 'proj.network', 'ax': 'ax'}), '(network=proj.network, ax=ax)\n', (1363, 1392), True, 'import openpnm as op\n'), ((1401, 1459), 'openpnm.topotools.plot_coordinates', 'op.topotools.plot_coordinates', ([], {'network': 'proj.network', 'ax': 'ax'}), '(network=proj.network, ax=ax)\n', (1430, 1459), True, 'import openpnm as op\n'), ((1468, 1483), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1476, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1273), 'porespy.tools.randomize_colors', 'ps.tools.randomize_colors', (['snow_n.regions.T'], {}), '(snow_n.regions.T)\n', (1255, 1273), True, 'import porespy 
as ps\n'), ((828, 858), 'numpy.array', 'np.array', (['snow_n.regions.shape'], {}), '(snow_n.regions.shape)\n', (836, 858), True, 'import numpy as np\n'), ((1029, 1059), 'numpy.array', 'np.array', (['snow_n.regions.shape'], {}), '(snow_n.regions.shape)\n', (1037, 1059), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import appier
class BaseController(appier.Controller):
    """Demo controller: '/' and '/index' redirect to the '/simple' page."""

    @app
ier.route("/", "GET")
    @appier.route("/index", "GET")
    def index(self):
        # Canonicalise both entry points onto the simple page.
        return self.redirect(
            self.url_for("base.simple")
        )

    @appier.route("/simple", "GET")
    def simple(self):
        """Render the simple template, forwarding widget options from the query string."""
        # OAuth credentials default to environment configuration but may be
        # overridden per request through query parameters.
        client_id = appier.conf("OAUTH_ID", None)
        client_secret = appier.conf("OAUTH_SECRET", None)
        return self.template(
            "simple.html.tpl",
            url = self.field("url"),
            brand = self.field("brand"),
            model = self.field("model"),
            variant = self.field("variant"),
            version = self.field("version"),
            country = self.field("country"),
            currency = self.field("currency"),
            client_id = self.field("client_id", client_id),
            client_secret = self.field("client_secret", client_secret),
            guess = self.field("guess", False, cast = bool),
            guess_url = self.field("guess_url", False, cast = bool),
            mode = self.field("mode", "full")
        )
| [
"appier.route",
"appier.conf"
] | [((112, 136), 'appier.route', 'appier.route', (['"""/"""', '"""GET"""'], {}), "('/', 'GET')\n", (124, 136), False, 'import appier\n'), ((143, 172), 'appier.route', 'appier.route', (['"""/index"""', '"""GET"""'], {}), "('/index', 'GET')\n", (155, 172), False, 'import appier\n'), ((286, 316), 'appier.route', 'appier.route', (['"""/simple"""', '"""GET"""'], {}), "('/simple', 'GET')\n", (298, 316), False, 'import appier\n'), ((361, 390), 'appier.conf', 'appier.conf', (['"""OAUTH_ID"""', 'None'], {}), "('OAUTH_ID', None)\n", (372, 390), False, 'import appier\n'), ((416, 449), 'appier.conf', 'appier.conf', (['"""OAUTH_SECRET"""', 'None'], {}), "('OAUTH_SECRET', None)\n", (427, 449), False, 'import appier\n')] |
from itertools import permutations
from string import ascii_lowercase
INPUT_PATH = 'input.txt'
def get_input(path=INPUT_PATH):
    """Read the day-5 puzzle input as one stripped string.

    file.read() is equivalent to joining readlines(), so either form copes
    with input that happens to be split across lines.
    """
    with open(path) as handle:
        return handle.read().strip()
def react_polymer(polymer):
    """Fully react the polymer by brute force.

    Repeatedly deletes every reacting pair (same letter, opposite case)
    until a complete pass leaves the string unchanged, then returns it.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # all two-letter strings like 'aA'/'Aa': same letter, opposite case
    reactive_pairs = [first + second for first, second in permutations(alphabet, 2)
                      if first.casefold() == second.casefold()
                      and first.islower() != second.islower()]
    while True:
        previous = polymer
        for pair in reactive_pairs:
            polymer = polymer.replace(pair, '')
        if polymer == '' or polymer == previous:
            return polymer
def polymer_reductions(polymer):
    """Yield 26 variants of the polymer, each with one unit type (both cases) removed."""
    for letter in ascii_lowercase:
        yield polymer.replace(letter, '').replace(letter.upper(), '')
def shortest_reduced_polymer(polymer):
    """Length of the shortest fully-reacted polymer across all single-type removals."""
    reacted_lengths = (len(react_polymer(candidate))
                       for candidate in polymer_reductions(polymer))
    return min(reacted_lengths)
| [
"itertools.permutations"
] | [((640, 711), 'itertools.permutations', 'permutations', (['"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""', '(2)'], {}), "('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', 2)\n", (652, 711), False, 'from itertools import permutations\n')] |
#!/usr/bin/env python3
"""Module with FourSquare API usage to get data"""
import os
import logging
import requests
import pandas as pd
LIMIT = 50  # max venues per API request
VERSION = "20180323"  # FourSquare API versioning date
# Credentials come from the environment; a missing variable raises KeyError at import.
CLIENT_ID = os.environ['FOURSQ_ID']
CLIENT_SECRET = os.environ['FOURSQ_SECRET']
# Hyde Park, London - default demo coordinates
HYDEPARK_LATITUDE = 51.5052
HYDEPARK_LONGTITUDE = -0.1582
def get_nearby_venues(latitudes, longitudes, radius=500):
    """Search venues near a location and collect each venue's rating.

    :param latitudes: latitude of the search centre
    :param longitudes: longitude of the search centre
    :param radius: search radius in metres (default 500)
    :return: pandas.DataFrame with one row per venue
    """
    venues_list = []
    # create the API request URL
    url_search = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&\
client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
        CLIENT_ID,
        CLIENT_SECRET,
        VERSION,
        latitudes,
        longitudes,
        radius,
        LIMIT)
    # Fetch once and reuse the parsed payload; the previous version issued
    # the same request twice (once for the debug print, once for the data).
    response = requests.get(url_search).json()["response"]
    print(response)
    results = response["venues"]
    for venue in results:
        tmp_dict = {'name': venue['name'],
                    'id': venue['id'],
                    # NOTE: 'latitide' typo kept for backward compatibility
                    # with existing downstream CSV consumers.
                    'latitide': venue['location']['lat'],
                    'longitude': venue['location']['lng'],
                    # 'postal_code': venue['location']['postalCode']
                    }
        # get_venue_rating returns a dict, never a float; the previous
        # isinstance(rating, float) check could never succeed, so ratings
        # were silently dropped.
        rating = get_venue_rating(tmp_dict['id'])
        if rating.get('rating') is not None:
            tmp_dict['rating'] = rating['rating']
        venues_list.append(tmp_dict)
        print(tmp_dict)
    venue_df = pd.DataFrame(venues_list)
    return venue_df
def get_venue_rating(venue_id):
    """Fetch a single venue's rating from the FourSquare details endpoint.

    :param venue_id: FourSquare venue identifier
    :return: dict with keys 'rating' (float or None) and 'error'
        (None on success, otherwise a message or the raw API response)
    """
    res_dict = {'rating': None, 'error': None}
    url_details = 'https://api.foursquare.com/v2/venues/{}?&client_id={}&\
client_secret={}&v={}'.format(
        venue_id,
        CLIENT_ID,
        CLIENT_SECRET,
        VERSION
    )
    results = requests.get(url_details).json()
    if results['meta']['code'] != 200:
        # API-level failure: surface the whole response for debugging
        res_dict['error'] = results
        logging.error('foursquare : get_venue_rating : %s', res_dict)
    elif 'rating' in results['response']['venue']:
        res_dict['rating'] = float(results['response']['venue']['rating'])
    else:
        res_dict['error'] = 'No rating for this venue'
        logging.info('foursquare : get_venue_rating : no rating for venue')
    # print(results['response']['venue'])
    # print('#'*100)
    return res_dict
if __name__ == "__main__":
    # logging.setLevel(logging.DEBUG)
    # res = get_nearby_venues(HYDEPARK_LATITUDE, HYDEPARK_LONGTITUDE)
    # # print(res)
    # for i in res:
    #     print(i)
    #     print('#'*100)
    # res = get_venue_rating('4b4dbf69f964a520b9d626e3')
    # print(res)
    # Fetch venues around Hyde Park and persist them for later analysis.
    venue_df_ou = get_nearby_venues(HYDEPARK_LATITUDE, HYDEPARK_LONGTITUDE)
    venue_df_ou.to_csv('./datasets/foursquare.csv')
| [
"pandas.DataFrame",
"logging.error",
"logging.info",
"requests.get"
] | [((1380, 1405), 'pandas.DataFrame', 'pd.DataFrame', (['venues_list'], {}), '(venues_list)\n', (1392, 1405), True, 'import pandas as pd\n'), ((2149, 2210), 'logging.error', 'logging.error', (['"""foursquare : get_venue_rating : %s"""', 'res_dict'], {}), "('foursquare : get_venue_rating : %s', res_dict)\n", (2162, 2210), False, 'import logging\n'), ((1737, 1762), 'requests.get', 'requests.get', (['url_details'], {}), '(url_details)\n', (1749, 1762), False, 'import requests\n'), ((2027, 2094), 'logging.info', 'logging.info', (['"""foursquare : get_venue_rating : no rating for venue"""'], {}), "('foursquare : get_venue_rating : no rating for venue')\n", (2039, 2094), False, 'import logging\n'), ((739, 763), 'requests.get', 'requests.get', (['url_search'], {}), '(url_search)\n', (751, 763), False, 'import requests\n'), ((798, 822), 'requests.get', 'requests.get', (['url_search'], {}), '(url_search)\n', (810, 822), False, 'import requests\n')] |
# Generated by Django 2.0.3 on 2018-03-25 20:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter MailoutUser.category to a protected FK to MailoutCategory.

    Auto-generated by Django 2.0.3; on_delete=PROTECT prevents deleting a
    category that still has mailout users attached.
    """

    dependencies = [
        ('emails', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mailoutuser',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='emails.MailoutCategory'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((364, 460), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""emails.MailoutCategory"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'emails.MailoutCategory')\n", (381, 460), False, 'from django.db import migrations, models\n')] |
from collections import namedtuple
from typing import Dict
import pytest
from pyocle.service.core import BaseForm
# (value, dict): the form instance under test and its expected dict() output
FormTestFixture = namedtuple(typename='FormTestFixture', field_names='value,dict')
class DummyBaseForm(BaseForm):
    """Minimal BaseForm subclass used to exercise dict() serialisation."""

    def __init__(self, name: str = None, items: Dict[str, str] = None):
        self.name = name
        self.items = items

    def _dict(self):
        # Raw field mapping; the sibling tests show BaseForm.dict()
        # dropping None-valued entries from this result.
        return dict(name=self.name, items=self.items)
@pytest.fixture
def dummy_form() -> FormTestFixture:
    """Fixture pairing a fully populated DummyBaseForm with its expected dict() output."""
    return FormTestFixture(
        value=DummyBaseForm(name='test', items={'test': 'test'}),
        dict={
            'name': 'test',
            'items': {
                'test': 'test'
            }
        }
    )
def test_base_form_correctly_resolves_null_values():
    """A form whose fields are all None should serialise to an empty dict."""
    serialised = DummyBaseForm().dict()
    assert serialised == {}
def test_base_form_correctly_resolves_dict(dummy_form: FormTestFixture):
    """A fully populated form should serialise to its expected mapping."""
    serialised = dummy_form.value.dict()
    assert serialised == dummy_form.dict
| [
"collections.namedtuple"
] | [((135, 199), 'collections.namedtuple', 'namedtuple', ([], {'typename': '"""FormTestFixture"""', 'field_names': '"""value,dict"""'}), "(typename='FormTestFixture', field_names='value,dict')\n", (145, 199), False, 'from collections import namedtuple\n')] |
# -*- coding: utf-8 -*-
import time, random
def qsort(q_list):
    """Return a new sorted list using recursive quicksort (middle-element pivot).

    Unlike the previous version, the input list is no longer mutated: the
    old ``del q_list[mid]`` destructively removed elements from the
    caller's list on every call.

    :param q_list: list of mutually comparable items
    :return: a new list with the items in ascending order
    """
    if len(q_list) < 2:
        return list(q_list)
    mid = len(q_list) // 2
    mid_value = q_list[mid]
    # partition everything except the pivot without touching the input
    rest = q_list[:mid] + q_list[mid + 1:]
    left = [item for item in rest if item <= mid_value]
    right = [item for item in rest if item > mid_value]
    return qsort(left) + [mid_value] + qsort(right)
if __name__ == "__main__":
    # Benchmark: sort 50k random small integers and report elapsed wall time.
    alist = []
    for i in range(50000):
        alist.append(random.randint(1, 100))
    start_time = time.time()
    new_list = qsort(alist)
    end_time = time.time() - start_time
    print("cost time: %ss" % (end_time))
| [
"time.time",
"random.randint"
] | [((517, 528), 'time.time', 'time.time', ([], {}), '()\n', (526, 528), False, 'import time, random\n'), ((572, 583), 'time.time', 'time.time', ([], {}), '()\n', (581, 583), False, 'import time, random\n'), ((476, 498), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (490, 498), False, 'import time, random\n')] |
# Copyright (c) 2022 <NAME>
"""
Build "top" membership matrices (dates x stocks).

Changelog
2022-01-11
- initial version
2022-01-20
- update: exclude ST stocks when generating the plain "top" matrix
"""
import numpy as np
import pandas as pd
import pickle
# 生成某个指数的top
def generate_index_top(dates: np.array, date_position_dic: dict,
code_order_dic: dict, data_path: str, top_type: str = 'ZZ1000') -> np.array:
"""
:param dates:
:param date_position_dic:
:param code_order_dic:
:param data_path:
:param top_type: top类型
:return:
"""
top = np.zeros((len(dates), len(code_order_dic)))
for date in dates:
with open('{}/StockDailyData/{}/index_dic.pkl'.format(data_path, date), 'rb') as f:
index = pickle.load(f)
for stock in index[top_type]:
top[date_position_dic[date], code_order_dic[stock]] = 1
top = top > 0
return top
def generate_top(dates: np.array, date_position_dic: dict, # 默认得到当天没有停牌且不是创业板和科创板的股票
code_order_dic: dict, data_path: str, top_type: str = 'listed') -> np.array:
"""
:param dates:
:param date_position_dic:
:param code_order_dic:
:param data_path:
:param top_type: top类型
:return:
"""
top = np.zeros((len(dates), len(code_order_dic)))
for date in dates:
stock = pd.read_csv('{}/StockDailyData/{}/stock.csv'.format(data_path, date))
all_securities = pd.read_csv('{}/StockDailyData/{}/all_securities.csv'.format(data_path, date))
if top_type == 'listed': # 当天仍在上市的股票
for i in range(len(stock['code'].values)):
if stock['code'][i][:3] in ['300', '688']:
continue
if stock['paused'][i] == 1:
continue
top[date_position_dic[date], code_order_dic[stock['code'][i]]] = 1
for i in range(len(all_securities['code'].values)):
if 'ST' in all_securities['display_name'][i]:
top[date_position_dic[date], code_order_dic[all_securities['code'][i]]] = 0
top = top > 0
return top
def merge_top(top_list: list, method: str = 'cap') -> np.array: # 多个top融合
"""
:param top_list: top列表
:param method: cap为取交集,cup为取并集
:return:
"""
top = top_list[0]
if method == 'cap':
for t in top_list[1:]:
top = top & t
elif method == 'cup':
for t in top_list[1:]:
top = top | t
else:
raise NotImplementedError
return top | [
"pickle.load"
] | [((676, 690), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (687, 690), False, 'import pickle\n')] |
import airflow
from airflow.operators.bash_operator import BashOperator
from airflow.operators.sensors import ExternalTaskSensor
from airflow.models import DAG
from datetime import datetime, timedelta
args = {
'owner': 'airflow',
"depends_on_past": False,
'start_date': airflow.utils.dates.days_ago(1),
'email': ['<EMAIL>'],
'email_on_failure': True,
'retries': 1,
'retry_delay': timedelta(seconds=3)
}
dag = DAG(
dag_id='ll_test_2',
default_args=args,
schedule_interval='28 8 * * *'
)
########################################################## ODS ##########################################################
# 云仓订单
zqp = BashOperator(
task_id = 'adc',
bash_command = 'ls',
retries=2,
dag = dag) | [
"airflow.models.DAG",
"datetime.timedelta",
"airflow.utils.dates.days_ago",
"airflow.operators.bash_operator.BashOperator"
] | [((439, 513), 'airflow.models.DAG', 'DAG', ([], {'dag_id': '"""ll_test_2"""', 'default_args': 'args', 'schedule_interval': '"""28 8 * * *"""'}), "(dag_id='ll_test_2', default_args=args, schedule_interval='28 8 * * *')\n", (442, 513), False, 'from airflow.models import DAG\n'), ((682, 748), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""adc"""', 'bash_command': '"""ls"""', 'retries': '(2)', 'dag': 'dag'}), "(task_id='adc', bash_command='ls', retries=2, dag=dag)\n", (694, 748), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((283, 314), 'airflow.utils.dates.days_ago', 'airflow.utils.dates.days_ago', (['(1)'], {}), '(1)\n', (311, 314), False, 'import airflow\n'), ((409, 429), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3)'}), '(seconds=3)\n', (418, 429), False, 'from datetime import datetime, timedelta\n')] |
"""Test improved pipeline."""
from textwrap import dedent
from unittest.mock import patch
import pandas as pd
import yaml
from pytest import fixture
from nitinat.pipeline2 import pipeline
READ_CONFIG = "nitinat.pipeline2._read_config"
READER_SHORTENED_LEN = 3
def simple_df():
return pd.DataFrame(
{
"red": [0.1, 0.2, 0.3],
"green": [0.4, 0.5, 0.6],
"blue": [0.7, 0.8, 0.9],
"orange": [1.1, 1.2, 1.3],
"yellow": [1.4, 1.5, 1.6],
"purple": [1.7, 1.8, 1.9],
}
)
def first(df):
"""To demonstrate pipeline operation."""
return df.iloc[[0]]
def head(df, num, debug=False):
"""To demonstrate pipeline operation."""
return df.head(1) if debug else df.head(num)
def tail(df, num, debug=False):
"""To demonstrate pipeline operation."""
return df.tail(1) if debug else df.tail(num)
def reader(debug=False):
"""To demonstrate pipeline operation."""
df = simple_df()
return df.head(READER_SHORTENED_LEN) if debug else df
@fixture
def available():
return {f.__name__: f for f in [first, head, tail, reader]}
def test_pipeline2_empty_returns_nothing(available):
with patch(READ_CONFIG, return_value=[]):
result = pipeline("test.yml", available)
assert result is None
def test_pipeline2_single_stage_no_parameters_no_overall(available):
config = [{"function": "reader"}]
with patch(READ_CONFIG, return_value=config):
expected = simple_df()
result = pipeline("test.yml", available)
assert result.equals(expected)
def test_pipeline2_two_stages_with_parameters_no_overall(available):
config = [{"function": "reader"}, {"function": "head", "num": 2}]
with patch(READ_CONFIG, return_value=config):
result = pipeline("test.yml", available)
assert len(result) == 2
assert result.equals(simple_df().iloc[[0, 1]])
def test_pipeline2_single_stage_with_debugging(available):
config = [{"function": "reader", "debug": True}]
with patch(READ_CONFIG, return_value=config):
result = pipeline("test.yml", available)
assert len(result) == READER_SHORTENED_LEN
def test_pipeline2_single_stage_with_overall_debugging(available):
config = [{"overall": {"debug": True}}, {"function": "reader"}]
with patch(READ_CONFIG, return_value=config):
result = pipeline("test.yml", available)
assert len(result) == READER_SHORTENED_LEN
def test_pipeline2_two_stage_with_overall_debugging(available):
config = [
{"overall": {"debug": True}},
{"function": "reader"},
{"function": "head", "num": len(simple_df())},
]
with patch(READ_CONFIG, return_value=config):
result = pipeline("test.yml", available)
assert len(result) == 1
def test_pipeline2_two_stage_with_yaml_text(available):
config = dedent(
"""\
- overall:
debug: true
- function: reader
- function: head
num: 1000
"""
)
config = yaml.safe_load(config)
with patch(READ_CONFIG, return_value=config):
result = pipeline("test.yml", available)
assert len(result) == 1
| [
"textwrap.dedent",
"nitinat.pipeline2.pipeline",
"yaml.safe_load",
"pandas.DataFrame",
"unittest.mock.patch"
] | [((294, 473), 'pandas.DataFrame', 'pd.DataFrame', (["{'red': [0.1, 0.2, 0.3], 'green': [0.4, 0.5, 0.6], 'blue': [0.7, 0.8, 0.9],\n 'orange': [1.1, 1.2, 1.3], 'yellow': [1.4, 1.5, 1.6], 'purple': [1.7, \n 1.8, 1.9]}"], {}), "({'red': [0.1, 0.2, 0.3], 'green': [0.4, 0.5, 0.6], 'blue': [\n 0.7, 0.8, 0.9], 'orange': [1.1, 1.2, 1.3], 'yellow': [1.4, 1.5, 1.6],\n 'purple': [1.7, 1.8, 1.9]})\n", (306, 473), True, 'import pandas as pd\n'), ((2897, 3020), 'textwrap.dedent', 'dedent', (['""" - overall:\n debug: true\n - function: reader\n - function: head\n num: 1000\n """'], {}), '(\n """ - overall:\n debug: true\n - function: reader\n - function: head\n num: 1000\n """\n )\n', (2903, 3020), False, 'from textwrap import dedent\n'), ((3040, 3062), 'yaml.safe_load', 'yaml.safe_load', (['config'], {}), '(config)\n', (3054, 3062), False, 'import yaml\n'), ((1211, 1246), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': '[]'}), '(READ_CONFIG, return_value=[])\n', (1216, 1246), False, 'from unittest.mock import patch\n'), ((1265, 1296), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (1273, 1296), False, 'from nitinat.pipeline2 import pipeline\n'), ((1445, 1484), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (1450, 1484), False, 'from unittest.mock import patch\n'), ((1534, 1565), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (1542, 1565), False, 'from nitinat.pipeline2 import pipeline\n'), ((1755, 1794), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (1760, 1794), False, 'from unittest.mock import patch\n'), ((1813, 1844), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (1821, 1844), False, 'from nitinat.pipeline2 import 
pipeline\n'), ((2055, 2094), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (2060, 2094), False, 'from unittest.mock import patch\n'), ((2113, 2144), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (2121, 2144), False, 'from nitinat.pipeline2 import pipeline\n'), ((2342, 2381), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (2347, 2381), False, 'from unittest.mock import patch\n'), ((2400, 2431), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (2408, 2431), False, 'from nitinat.pipeline2 import pipeline\n'), ((2704, 2743), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (2709, 2743), False, 'from unittest.mock import patch\n'), ((2762, 2793), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (2770, 2793), False, 'from nitinat.pipeline2 import pipeline\n'), ((3072, 3111), 'unittest.mock.patch', 'patch', (['READ_CONFIG'], {'return_value': 'config'}), '(READ_CONFIG, return_value=config)\n', (3077, 3111), False, 'from unittest.mock import patch\n'), ((3130, 3161), 'nitinat.pipeline2.pipeline', 'pipeline', (['"""test.yml"""', 'available'], {}), "('test.yml', available)\n", (3138, 3161), False, 'from nitinat.pipeline2 import pipeline\n')] |
"""
MIT License
Copyright(c) 2021 <NAME>
"""
from flask import abort
from . import testing_bp
from flog.utils import redirect_back
@testing_bp.route("/400")
def trigger_bad_request():
abort(400)
@testing_bp.route("/403")
def trigger_forbidden():
abort(403)
@testing_bp.route("/404")
def trigger_not_found():
abort(404)
@testing_bp.route("/405")
def trigger_method_not_allowed():
abort(405)
@testing_bp.route("/413")
def trigger_payload_too_large():
abort(413)
@testing_bp.route("/429")
def trigger_too_many_requests():
abort(429)
@testing_bp.route("/500")
def trigger_server_error():
abort(500)
@testing_bp.route("/redirect")
def trigger_redirect_back():
return redirect_back("main.main", next="example.com")
| [
"flask.abort",
"flog.utils.redirect_back"
] | [((201, 211), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (206, 211), False, 'from flask import abort\n'), ((274, 284), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (279, 284), False, 'from flask import abort\n'), ((347, 357), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (352, 357), False, 'from flask import abort\n'), ((429, 439), 'flask.abort', 'abort', (['(405)'], {}), '(405)\n', (434, 439), False, 'from flask import abort\n'), ((510, 520), 'flask.abort', 'abort', (['(413)'], {}), '(413)\n', (515, 520), False, 'from flask import abort\n'), ((591, 601), 'flask.abort', 'abort', (['(429)'], {}), '(429)\n', (596, 601), False, 'from flask import abort\n'), ((667, 677), 'flask.abort', 'abort', (['(500)'], {}), '(500)\n', (672, 677), False, 'from flask import abort\n'), ((756, 802), 'flog.utils.redirect_back', 'redirect_back', (['"""main.main"""'], {'next': '"""example.com"""'}), "('main.main', next='example.com')\n", (769, 802), False, 'from flog.utils import redirect_back\n')] |
import tensorflow as tf
import sys
from configs import DEFINES
# 엘에스티엠(LSTM) 단층 네트워크 구성하는 부분
def make_lstm_cell(mode, hiddenSize, index):
cell = tf.nn.rnn_cell.BasicLSTMCell(hiddenSize, name = "lstm"+str(index))
if mode == tf.estimator.ModeKeys.TRAIN:
# 트레이닝 모드에서 드랍아웃 추가
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=DEFINES.dropout_width)
return cell
# 에스티메이터 모델
def model(features, labels, mode, params):
TRAIN = mode == tf.estimator.ModeKeys.TRAIN
EVAL = mode == tf.estimator.ModeKeys.EVAL
PREDICT = mode == tf.estimator.ModeKeys.PREDICT
# 인코딩 부분 (미리 정의된 임베딩 벡터 사용 유무)
if params['embedding'] == True:
# 가중치 행렬에 대한 초기화
initializer = tf.contrib.layers.xavier_initializer()
embedding = tf.get_variable(name = "embedding", # 이름
shape=[params['vocabulary_length'], params['embedding_size']], # 모양
dtype=tf.float32, # 타입
initializer=initializer, # 초기화 값
trainable=True) # 학습 유무
else:
# tf.eye를 통해서 사전의 크기 만큼의 단위행렬 구조 선언
embedding = tf.eye(num_rows = params['vocabulary_length'], dtype = tf.float32)
embedding = tf.get_variable(name = "embedding", # 이름
initializer = embedding, # 초기화 값
trainable = False) # 학습 유무
# 임베딩된 인코딩 배치를 생성
embedding_encoder = tf.nn.embedding_lookup(params = embedding, ids = features['input'])
# 임베딩된 디코딩 배치를 생성
embedding_decoder = tf.nn.embedding_lookup(params = embedding, ids = features['output'])
with tf.variable_scope('encoder_scope', reuse=tf.AUTO_REUSE):
# 값이 True이면 멀티레이어로 모델을 구성하고 False이면
# 단일레이어로 모델 구성
if params['multilayer'] == True:
encoder_cell_list = [make_lstm_cell(mode, params['hidden_size'], i) for i in range(params['layer_size'])]
rnn_cell = tf.contrib.rnn.MultiRNNCell(encoder_cell_list)
else:
rnn_cell = make_lstm_cell(mode, params['hidden_size'], "")
# rnn_cell에 의해 지정된 dynamic_rnn으로 반복적인 신경망 생성
# encoder_states 최종 상태 [batch_size, cell.state_size]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell=rnn_cell, # RNN 셀
inputs=embedding_encoder, # 입력 값
dtype=tf.float32) # 타입
with tf.variable_scope('decoder_scope', reuse=tf.AUTO_REUSE):
if params['multilayer'] == True:
decoder_cell_list = [make_lstm_cell(mode, params['hidden_size'], i) for i in range(params['layer_size'])]
rnn_cell = tf.contrib.rnn.MultiRNNCell(decoder_cell_list)
else:
rnn_cell = make_lstm_cell(mode, params['hidden_size'], "")
decoder_initial_state = encoder_states
decoder_outputs, decoder_states = tf.nn.dynamic_rnn(cell=rnn_cell, # RNN 셀
inputs=embedding_decoder, # 입력 값
initial_state=decoder_initial_state, # 인코딩의 마지막 값으로 초기화
dtype=tf.float32) # 타입
# logits는 마지막 히든레이어를 통과한 결과값
logits = tf.layers.dense(decoder_outputs, params['vocabulary_length'], activation=None)
# argmax를 통해서 최대 값을 가져 옴
predict = tf.argmax(logits, 2)
if PREDICT:
predictions = { # 예측 값들이 여기에 딕셔너리 형태로 저장
'indexs': predict, # 시퀀스 마다 예측한 값
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
#
# logits과 같은 차원을 만들어 마지막 결과 값과 정답 값을 비교하여 에러를 구함
labels_ = tf.one_hot(labels, params['vocabulary_length'])
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels_))
# 라벨과 결과가 일치하는지 빈도 계산을 통해 정확도를 측정
accuracy = tf.metrics.accuracy(labels=labels, predictions=predict,name='accOp')
# accuracy를 전체 값으로 나눠 확률 값 계산
metrics = {'accuracy': accuracy}
tf.summary.scalar('accuracy', accuracy[1])
if EVAL:
# 에러 값(loss)과 정확도 값(eval_metric_ops) 전달
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
# 수행 mode(tf.estimator.ModeKeys.TRAIN)가
# 아닌 경우는 여기 까지 오면 안되도록 하는 방어적 코드
assert TRAIN
optimizer = tf.train.AdamOptimizer(learning_rate=DEFINES.learning_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
# 에러 값(loss)과 그라디언트 반환값 (train_op) 전달
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
| [
"tensorflow.one_hot",
"tensorflow.nn.embedding_lookup",
"tensorflow.train.AdamOptimizer",
"tensorflow.eye",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.get_variable",
"tensorflow.variable_scope",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.estimator.EstimatorSpec",
"te... | [((1510, 1573), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'embedding', 'ids': "features['input']"}), "(params=embedding, ids=features['input'])\n", (1532, 1573), True, 'import tensorflow as tf\n'), ((1625, 1689), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'embedding', 'ids': "features['output']"}), "(params=embedding, ids=features['output'])\n", (1647, 1689), True, 'import tensorflow as tf\n'), ((3275, 3353), 'tensorflow.layers.dense', 'tf.layers.dense', (['decoder_outputs', "params['vocabulary_length']"], {'activation': 'None'}), "(decoder_outputs, params['vocabulary_length'], activation=None)\n", (3290, 3353), True, 'import tensorflow as tf\n'), ((3395, 3415), 'tensorflow.argmax', 'tf.argmax', (['logits', '(2)'], {}), '(logits, 2)\n', (3404, 3415), True, 'import tensorflow as tf\n'), ((3691, 3738), 'tensorflow.one_hot', 'tf.one_hot', (['labels', "params['vocabulary_length']"], {}), "(labels, params['vocabulary_length'])\n", (3701, 3738), True, 'import tensorflow as tf\n'), ((3893, 3962), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 'predictions': 'predict', 'name': '"""accOp"""'}), "(labels=labels, predictions=predict, name='accOp')\n", (3912, 3962), True, 'import tensorflow as tf\n'), ((4038, 4080), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy[1]'], {}), "('accuracy', accuracy[1])\n", (4055, 4080), True, 'import tensorflow as tf\n'), ((4348, 4407), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'DEFINES.learning_rate'}), '(learning_rate=DEFINES.learning_rate)\n', (4370, 4407), True, 'import tensorflow as tf\n'), ((4544, 4606), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'loss': 'loss', 'train_op': 'train_op'}), '(mode, loss=loss, train_op=train_op)\n', (4570, 4606), True, 'import tensorflow as tf\n'), ((305, 380), 
'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['cell'], {'output_keep_prob': 'DEFINES.dropout_width'}), '(cell, output_keep_prob=DEFINES.dropout_width)\n', (334, 380), True, 'import tensorflow as tf\n'), ((717, 755), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (753, 755), True, 'import tensorflow as tf\n'), ((776, 939), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""embedding"""', 'shape': "[params['vocabulary_length'], params['embedding_size']]", 'dtype': 'tf.float32', 'initializer': 'initializer', 'trainable': '(True)'}), "(name='embedding', shape=[params['vocabulary_length'],\n params['embedding_size']], dtype=tf.float32, initializer=initializer,\n trainable=True)\n", (791, 939), True, 'import tensorflow as tf\n'), ((1187, 1249), 'tensorflow.eye', 'tf.eye', ([], {'num_rows': "params['vocabulary_length']", 'dtype': 'tf.float32'}), "(num_rows=params['vocabulary_length'], dtype=tf.float32)\n", (1193, 1249), True, 'import tensorflow as tf\n'), ((1274, 1347), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""embedding"""', 'initializer': 'embedding', 'trainable': '(False)'}), "(name='embedding', initializer=embedding, trainable=False)\n", (1289, 1347), True, 'import tensorflow as tf\n'), ((1704, 1759), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder_scope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('encoder_scope', reuse=tf.AUTO_REUSE)\n", (1721, 1759), True, 'import tensorflow as tf\n'), ((2309, 2385), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'rnn_cell', 'inputs': 'embedding_encoder', 'dtype': 'tf.float32'}), '(cell=rnn_cell, inputs=embedding_encoder, dtype=tf.float32)\n', (2326, 2385), True, 'import tensorflow as tf\n'), ((2544, 2599), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_scope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('decoder_scope', reuse=tf.AUTO_REUSE)\n", (2561, 2599), True, 'import 
tensorflow as tf\n'), ((3005, 3123), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'rnn_cell', 'inputs': 'embedding_decoder', 'initial_state': 'decoder_initial_state', 'dtype': 'tf.float32'}), '(cell=rnn_cell, inputs=embedding_decoder, initial_state=\n decoder_initial_state, dtype=tf.float32)\n', (3022, 3123), True, 'import tensorflow as tf\n'), ((3553, 3610), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'predictions': 'predictions'}), '(mode, predictions=predictions)\n', (3579, 3610), True, 'import tensorflow as tf\n'), ((3765, 3838), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels_'}), '(logits=logits, labels=labels_)\n', (3807, 3838), True, 'import tensorflow as tf\n'), ((4162, 4230), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'loss': 'loss', 'eval_metric_ops': 'metrics'}), '(mode, loss=loss, eval_metric_ops=metrics)\n', (4188, 4230), True, 'import tensorflow as tf\n'), ((2011, 2057), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['encoder_cell_list'], {}), '(encoder_cell_list)\n', (2038, 2057), True, 'import tensorflow as tf\n'), ((2783, 2829), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['decoder_cell_list'], {}), '(decoder_cell_list)\n', (2810, 2829), True, 'import tensorflow as tf\n'), ((4460, 4486), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (4484, 4486), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python3
import os
import random
import string
from nephele2.pipelines.pipebase import PipeBase
import nephele2.pipelines.pipeline_error
class picrust(PipeBase):
"""
PICRUSt (pronounced pie crust) is a bioinformatics software package designed
to predict metagenome functional content from marker gene (e.g., 16S rRNA)
surveys and full genomes.
"""
gg99_db = "/mnt/EFS/dbs/Greengenes_99/99_otus.fasta"
otus_dir = "otus_picrust" # picrust otu output directory
data_dir = "PICRUSt_data" # picrust output directory
l2_param = [
"summarize_taxa:md_identifier\t\"KEGG_Pathways\"",
"summarize_taxa:absolute_abundance\tTrue",
"summarize_taxa:level\t2"]
l3_param = [
"summarize_taxa:md_identifier\t\"KEGG_Pathways\"",
"summarize_taxa:absolute_abundance\tTrue",
"summarize_taxa:level\t3"]
gg_param = [
"pick_otus:enable_rev_strand_match\tTrue",
"pick_otus:otu_picking_method\tsortmerna",
"pick_otus:sortmerna_db\t/mnt/EFS/dbs/Greengenes_99/99_otus",
"pick_otus:refseqs_fp\t/mnt/EFS/dbs/Greengenes_99/99_otus.fasta",
"alpha_diversity:metrics\tobserved_species,chao1,PD_whole_tree,shannon",
"make_distance_boxplots:num_permutations\t0",
"summarize_taxa:level\t2,3,4,5,6,7",
"summarize_taxa:absolute_abundance\tTrue",
"make_otu_table:taxonomy\t/mnt/EFS/dbs/Greengenes_99/99_otu_taxonomy.txt",
"assign_taxonomy:id_to_taxonomy_fp\t/mnt/EFS/dbs/Greengenes_99/99_otu_taxonomy.txt",
"assign_taxonomy:reference_seqs_fp\t/mnt/EFS/dbs/Greengenes_99/99_otus.fasta",
"assign_taxonomy:assignment_method\tsortmerna",
"assign_taxonomy:sortmerna_db\t/mnt/EFS/dbs/Greengenes_99/99_otus"]
def __init__(self, log_info, in_d = "inputs", out_d = "outputs"):
self.log_info = log_info
self.in_dir = in_d
self.out_dir = out_d
self.cmds = []
def param_file(self, p):
pf = os.path.join(self.out_dir, "%s.txt" % "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)))
with open(pf, "w") as f:
f.write("\n".join(p + [""]))
return(pf)
def get_cmds(self):
return(self.cmds)
def get_output(self):
self.output.update(self.scan_dir(os.join.path(self.out_dir, picrust.otus_dir)))
return(self.output)
def run(self, fasta):
"""
pick_closed_reference_otus.py
-i split_lib_out/seqs.fna
--output_dir=otus_picrust
--reference_fp=Greengenes_99/99_otus.fasta
--taxonomy_fp=Greengenes_99/99_otu_taxonomy.txt
--parameter_fp=picrust_params.txt
--force
normalize_by_copy_number.py
-i otus_picrust/otu_table.biom
-o PICRUSt_data/normalized_otus.biom
predict_metagenomes.py
-i PICRUSt_data/normalized_otus.biom
-o PICRUSt_data/metagenome_predictions.biom
categorize_by_function.py
-i PICRUSt_data/metagenome_predictions.biom
-c "KEGG_Pathways"
-l 2
-o PICRUSt_data/predicted_metagenomes.L2.biom
categorize_by_function.py
-i PICRUSt_data/metagenome_predictions.biom
-c "KEGG_Pathways"
-l 3
-o PICRUSt_data/predicted_metagenomes.L3.biom
summarize_taxa_through_plots.py
-i PICRUSt_data/predicted_metagenomes.L2.biom
-o PICRUSt_data/picrust_at_lvl2
-p qiime_params_picrust2.txt
"""
# run closed reference otu picking with greengenes 99 database
cmd = "pick_closed_reference_otus.py -i \"%s\" -o \"%s\" -r \"%s\" -p \"%s\" -f" % (
fasta, os.path.join(self.out_dir, picrust.otus_dir),
picrust.gg99_db, self.param_file(picrust.gg_param))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
# normalize an OTU table by marker gene copy number
cmd = "normalize_by_copy_number.py -i \"%s\" -o \"%s\"" % (
os.path.join(self.out_dir, picrust.otus_dir, "otu_table.biom"),
os.path.join(self.out_dir, picrust.data_dir, "normalized_otus.biom"))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
# produces the actual metagenome functional predictions for a given OTU table
cmd = "predict_metagenomes.py -i \"%s\" -o \"%s\"" % (
os.path.join(self.out_dir, picrust.data_dir, "normalized_otus.biom"),
os.path.join(self.out_dir, picrust.data_dir, "metagenome_predictions.biom"))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
# collapse table data to a specified level in a hierarchy
cmd = "categorize_by_function.py -i \"%s\" -c \"KEGG_Pathways\" -l 2 -o \"%s\"" % (
os.path.join(self.out_dir, picrust.data_dir, "metagenome_predictions.biom"),
os.path.join(self.out_dir, picrust.data_dir, "predicted_metagenomes.L2.biom"))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
cmd = "categorize_by_function.py -i \"%s\" -c \"KEGG_Pathways\" -l 3 -o \"%s\"" % (
os.path.join(self.out_dir, picrust.data_dir, "metagenome_predictions.biom"),
os.path.join(self.out_dir, picrust.data_dir, "predicted_metagenomes.L3.biom"))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
# summarize taxonomy and generate plots
cmd = "summarize_taxa_through_plots.py -i \"%s\" -o \"%s\" -p \"%s\"" % (
os.path.join(self.out_dir, picrust.data_dir, "predicted_metagenomes.L2.biom"),
os.path.join(self.out_dir, picrust.data_dir, "picrust_at_lvl2"),
self.param_file(picrust.l2_param))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
cmd = "summarize_taxa_through_plots.py -i \"%s\" -o \"%s\" -p \"%s\"" % (
os.path.join(self.out_dir, picrust.data_dir, "predicted_metagenomes.L3.biom"),
os.path.join(self.out_dir, picrust.data_dir, "picrust_at_lvl3"),
self.param_file(picrust.l3_param))
self.cmds.append(cmd)
self.log_info(cmd)
self.exec_cmnd(cmd)
return(True)
| [
"os.path.join",
"random.choice",
"os.join.path"
] | [((2339, 2383), 'os.join.path', 'os.join.path', (['self.out_dir', 'picrust.otus_dir'], {}), '(self.out_dir, picrust.otus_dir)\n', (2351, 2383), False, 'import os\n'), ((3787, 3831), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.otus_dir'], {}), '(self.out_dir, picrust.otus_dir)\n', (3799, 3831), False, 'import os\n'), ((4123, 4185), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.otus_dir', '"""otu_table.biom"""'], {}), "(self.out_dir, picrust.otus_dir, 'otu_table.biom')\n", (4135, 4185), False, 'import os\n'), ((4199, 4267), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""normalized_otus.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'normalized_otus.biom')\n", (4211, 4267), False, 'import os\n'), ((4516, 4584), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""normalized_otus.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'normalized_otus.biom')\n", (4528, 4584), False, 'import os\n'), ((4598, 4673), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""metagenome_predictions.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'metagenome_predictions.biom')\n", (4610, 4673), False, 'import os\n'), ((4931, 5006), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""metagenome_predictions.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'metagenome_predictions.biom')\n", (4943, 5006), False, 'import os\n'), ((5020, 5097), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""predicted_metagenomes.L2.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'predicted_metagenomes.L2.biom')\n", (5032, 5097), False, 'import os\n'), ((5289, 5364), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""metagenome_predictions.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'metagenome_predictions.biom')\n", (5301, 5364), False, 'import os\n'), ((5378, 5455), 'os.path.join', 'os.path.join', (['self.out_dir', 
'picrust.data_dir', '"""predicted_metagenomes.L3.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'predicted_metagenomes.L3.biom')\n", (5390, 5455), False, 'import os\n'), ((5685, 5762), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""predicted_metagenomes.L2.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'predicted_metagenomes.L2.biom')\n", (5697, 5762), False, 'import os\n'), ((5776, 5839), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""picrust_at_lvl2"""'], {}), "(self.out_dir, picrust.data_dir, 'picrust_at_lvl2')\n", (5788, 5839), False, 'import os\n'), ((6068, 6145), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""predicted_metagenomes.L3.biom"""'], {}), "(self.out_dir, picrust.data_dir, 'predicted_metagenomes.L3.biom')\n", (6080, 6145), False, 'import os\n'), ((6159, 6222), 'os.path.join', 'os.path.join', (['self.out_dir', 'picrust.data_dir', '"""picrust_at_lvl3"""'], {}), "(self.out_dir, picrust.data_dir, 'picrust_at_lvl3')\n", (6171, 6222), False, 'import os\n'), ((2053, 2106), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (2066, 2106), False, 'import random\n')] |
import towers_of_hanoi as towers
import pytest
def test_tower_exception_type():
"""Test TypeError in tower function."""
with pytest.raises(TypeError):
towers.towers_of_hanoi('n')
def test_tower_return_three():
"""Test tower of hanoi function returns correct last move with three."""
temp = towers.towers_of_hanoi(3)
assert temp == ['move disk from A to C']
def test_tower_return_two():
"""Test tower of hanoi function returns correct last move with two."""
temp = towers.towers_of_hanoi(2)
assert temp == ['move disk from A to C']
def test_tower_with_zero():
temp = towers.towers_of_hanoi(1)
assert temp == ['move disk from A to C']
| [
"pytest.raises",
"towers_of_hanoi.towers_of_hanoi"
] | [((318, 343), 'towers_of_hanoi.towers_of_hanoi', 'towers.towers_of_hanoi', (['(3)'], {}), '(3)\n', (340, 343), True, 'import towers_of_hanoi as towers\n'), ((506, 531), 'towers_of_hanoi.towers_of_hanoi', 'towers.towers_of_hanoi', (['(2)'], {}), '(2)\n', (528, 531), True, 'import towers_of_hanoi as towers\n'), ((618, 643), 'towers_of_hanoi.towers_of_hanoi', 'towers.towers_of_hanoi', (['(1)'], {}), '(1)\n', (640, 643), True, 'import towers_of_hanoi as towers\n'), ((135, 159), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (148, 159), False, 'import pytest\n'), ((169, 196), 'towers_of_hanoi.towers_of_hanoi', 'towers.towers_of_hanoi', (['"""n"""'], {}), "('n')\n", (191, 196), True, 'import towers_of_hanoi as towers\n')] |
from bagua.torch_api.contrib.cached_dataset import CachedDataset
from torch.utils.data.dataset import Dataset
import numpy as np
import logging
import unittest
from tests import skip_if_cuda_available
logging.basicConfig(level=logging.DEBUG)
class MyDataset(Dataset):
def __init__(self, size):
self.size = size
self.dataset = [(np.random.rand(5, 2), np.random.rand(1)) for _ in range(size)]
def __getitem__(self, item):
return self.dataset[item]
def __len__(self):
return self.size
class TestCacheDataset(unittest.TestCase):
    """Exercise CachedDataset against a Redis backend."""

    def check_dataset(self, dataset, cache_dataset):
        """Warm the cache with full passes, then compare cached items to the source."""
        for _ in range(10):
            for _ in cache_dataset:
                pass
        for index in range(len(dataset)):
            source_item = dataset[index]
            cached_item = cache_dataset[index]
            self.assertTrue((source_item[0] == cached_item[0]).all())
            self.assertTrue((source_item[1] == cached_item[1]).all())

    @skip_if_cuda_available()
    def test_redis(self):
        first = MyDataset(102)
        second = MyDataset(102)
        first_cached = CachedDataset(
            first,
            backend="redis",
            dataset_name="d1",
        )
        second_cached = CachedDataset(
            second,
            backend="redis",
            dataset_name="d2",
        )
        first_cached.cache_loader.store.clear()
        self.check_dataset(first, first_cached)
        self.assertEqual(first_cached.cache_loader.num_keys(), len(first))
        self.check_dataset(second, second_cached)
        self.assertEqual(
            second_cached.cache_loader.num_keys(), len(first) + len(second)
        )
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    unittest.main()
| [
"logging.basicConfig",
"numpy.random.rand",
"bagua.torch_api.contrib.cached_dataset.CachedDataset",
"tests.skip_if_cuda_available",
"unittest.main"
] | [((202, 242), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (221, 242), False, 'import logging\n'), ((921, 945), 'tests.skip_if_cuda_available', 'skip_if_cuda_available', ([], {}), '()\n', (943, 945), False, 'from tests import skip_if_cuda_available\n'), ((1694, 1709), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1707, 1709), False, 'import unittest\n'), ((1065, 1124), 'bagua.torch_api.contrib.cached_dataset.CachedDataset', 'CachedDataset', (['dataset1'], {'backend': '"""redis"""', 'dataset_name': '"""d1"""'}), "(dataset1, backend='redis', dataset_name='d1')\n", (1078, 1124), False, 'from bagua.torch_api.contrib.cached_dataset import CachedDataset\n'), ((1197, 1256), 'bagua.torch_api.contrib.cached_dataset.CachedDataset', 'CachedDataset', (['dataset2'], {'backend': '"""redis"""', 'dataset_name': '"""d2"""'}), "(dataset2, backend='redis', dataset_name='d2')\n", (1210, 1256), False, 'from bagua.torch_api.contrib.cached_dataset import CachedDataset\n'), ((351, 371), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (365, 371), True, 'import numpy as np\n'), ((373, 390), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (387, 390), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import formats
from django.contrib.auth.models import User
from jsonfield import JSONField
from mbase.models import MetaBaseModel, MetaBaseStatusModel
from mcat.models import Product
from mcat_order.conf import ORDER_STATUSES, CIVILITIES
class Customer(MetaBaseModel, MetaBaseStatusModel):
    """A customer with contact details, linked one-to-one to a Django user."""
    first_name = models.CharField(max_length=120, verbose_name=_(u'First name'))
    last_name = models.CharField(max_length=120, verbose_name=_(u'Last name'))
    # Restricted to CIVILITIES; defaults to the first choice.
    civility = models.CharField(max_length=60, verbose_name=_(u'Title'), choices=CIVILITIES, default=CIVILITIES[0][0])
    telephone = models.PositiveIntegerField(verbose_name=_(u'Phone number'))
    company_name = models.CharField(max_length=120, blank=True, verbose_name=_(u'Company name'))
    email = models.EmailField(verbose_name=_(u'Email'))
    address = models.TextField(verbose_name=_(u'Address'))
    user = models.OneToOneField(User, verbose_name=_(u'User'))
    extra = JSONField(blank=True, verbose_name=_(u'Extra infos'))

    class Meta:
        verbose_name=_(u'Customer')
        verbose_name_plural = _(u'Customers')
        ordering = ('last_name',)
        unique_together = ('first_name', 'last_name')

    def __unicode__(self):
        return unicode(self.first_name+' '+self.last_name)

    @property
    def telephone_formated(self):
        # BUG FIX: ``telephone`` is a PositiveIntegerField, so slicing
        # ``self.telephone`` directly raised TypeError; format through a
        # string representation instead.
        phone = str(self.telephone)
        return '%s %s %s %s' % (phone[0:2], phone[2:4], phone[4:6], phone[6:8])

    def get_civility(self):
        # Map the stored civility code to its display label; fall back to the
        # raw stored value if it is not a known choice.
        for civ in CIVILITIES:
            if civ[0] == self.civility:
                return civ[1]
        return self.civility
class Order(MetaBaseModel):
    # The customer who placed the order; reverse accessor is ``customer.orders``.
    customer = models.ForeignKey(Customer, related_name='orders', verbose_name=_(u'Customer'))
    # Workflow state, restricted to ORDER_STATUSES; defaults to the first choice.
    status = models.CharField(max_length=120, verbose_name=_(u'Status'), choices=ORDER_STATUSES, default=ORDER_STATUSES[0][0])
    # Order total; optional (null/blank allowed).
    total = models.FloatField(null=True, blank=True, verbose_name=_(u'Total'))
    class Meta:
        verbose_name=_(u'Order')
        verbose_name_plural = _(u'Orders')
        # Newest orders first.
        ordering = ('-created',)
    def __unicode__(self):
        # ``created`` is presumably provided by MetaBaseModel — confirm.
        date = formats.date_format(self.created, "SHORT_DATETIME_FORMAT")
        return unicode(date+' - '+str(self.total)+' - '+self.status)
class OrderedProduct(MetaBaseModel):
    # Product for this line item; reverse accessor is ``product.ordered``.
    product = models.ForeignKey(Product, related_name='ordered', verbose_name=_(u'Product'))
    # Order the line belongs to; no reverse accessor (related_name='+').
    order = models.ForeignKey(Order, related_name='+', verbose_name=_(u'Order'))
    quantity = models.PositiveIntegerField(verbose_name=_(u'Quantity'))
    # Unit price captured on the line itself.
    price_per_unit = models.FloatField(verbose_name=_(u'Price per unit'))
    class Meta:
        verbose_name=_(u'Ordered product')
        verbose_name_plural = _(u'Ordered products')
        ordering = ('-created', 'order')
    def __unicode__(self):
        # ``created`` is presumably provided by MetaBaseModel — confirm.
        date = formats.date_format(self.created, "SHORT_DATETIME_FORMAT")
        return unicode(date)
| [
"django.utils.translation.ugettext_lazy",
"django.utils.formats.date_format"
] | [((1160, 1174), 'django.utils.translation.ugettext_lazy', '_', (['u"""Customer"""'], {}), "(u'Customer')\n", (1161, 1174), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1205, 1220), 'django.utils.translation.ugettext_lazy', '_', (['u"""Customers"""'], {}), "(u'Customers')\n", (1206, 1220), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2093, 2104), 'django.utils.translation.ugettext_lazy', '_', (['u"""Order"""'], {}), "(u'Order')\n", (2094, 2104), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2135, 2147), 'django.utils.translation.ugettext_lazy', '_', (['u"""Orders"""'], {}), "(u'Orders')\n", (2136, 2147), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2224, 2282), 'django.utils.formats.date_format', 'formats.date_format', (['self.created', '"""SHORT_DATETIME_FORMAT"""'], {}), "(self.created, 'SHORT_DATETIME_FORMAT')\n", (2243, 2282), False, 'from django.utils import formats\n'), ((2757, 2778), 'django.utils.translation.ugettext_lazy', '_', (['u"""Ordered product"""'], {}), "(u'Ordered product')\n", (2758, 2778), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2809, 2831), 'django.utils.translation.ugettext_lazy', '_', (['u"""Ordered products"""'], {}), "(u'Ordered products')\n", (2810, 2831), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2916, 2974), 'django.utils.formats.date_format', 'formats.date_format', (['self.created', '"""SHORT_DATETIME_FORMAT"""'], {}), "(self.created, 'SHORT_DATETIME_FORMAT')\n", (2935, 2974), False, 'from django.utils import formats\n'), ((483, 499), 'django.utils.translation.ugettext_lazy', '_', (['u"""First name"""'], {}), "(u'First name')\n", (484, 499), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((563, 578), 'django.utils.translation.ugettext_lazy', '_', (['u"""Last name"""'], {}), "(u'Last name')\n", (564, 578), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((640, 651), 'django.utils.translation.ugettext_lazy', '_', (['u"""Title"""'], {}), "(u'Title')\n", (641, 651), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((756, 774), 'django.utils.translation.ugettext_lazy', '_', (['u"""Phone number"""'], {}), "(u'Phone number')\n", (757, 774), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((853, 871), 'django.utils.translation.ugettext_lazy', '_', (['u"""Company name"""'], {}), "(u'Company name')\n", (854, 871), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((916, 927), 'django.utils.translation.ugettext_lazy', '_', (['u"""Email"""'], {}), "(u'Email')\n", (917, 927), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((973, 986), 'django.utils.translation.ugettext_lazy', '_', (['u"""Address"""'], {}), "(u'Address')\n", (974, 986), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1039, 1049), 'django.utils.translation.ugettext_lazy', '_', (['u"""User"""'], {}), "(u'User')\n", (1040, 1049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1099, 1116), 'django.utils.translation.ugettext_lazy', '_', (['u"""Extra infos"""'], {}), "(u'Extra infos')\n", (1100, 1116), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1833, 1847), 'django.utils.translation.ugettext_lazy', '_', (['u"""Customer"""'], {}), "(u'Customer')\n", (1834, 1847), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1908, 1920), 'django.utils.translation.ugettext_lazy', '_', (['u"""Status"""'], {}), "(u'Status')\n", (1909, 1920), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2042, 2053), 'django.utils.translation.ugettext_lazy', '_', (['u"""Total"""'], {}), "(u'Total')\n", (2043, 2053), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2473, 2486), 'django.utils.translation.ugettext_lazy', '_', (['u"""Product"""'], {}), "(u'Product')\n", 
(2474, 2486), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2556, 2567), 'django.utils.translation.ugettext_lazy', '_', (['u"""Order"""'], {}), "(u'Order')\n", (2557, 2567), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2625, 2639), 'django.utils.translation.ugettext_lazy', '_', (['u"""Quantity"""'], {}), "(u'Quantity')\n", (2626, 2639), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2693, 2713), 'django.utils.translation.ugettext_lazy', '_', (['u"""Price per unit"""'], {}), "(u'Price per unit')\n", (2694, 2713), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import argparse
import math
import sys
import timeit
def load_file(fname: str):
    """Return the entire text content of the file at *fname*."""
    with open(fname, 'r') as handle:
        return handle.read()
if __name__ == '__main__':
    # Command-line interface: choose N and how many timing repetitions to run.
    parser = argparse.ArgumentParser(description='time fibonacci implementations')
    parser.add_argument('-n', '--nth', type=int, help='the N-th term of the fibonacci sequence')
    parser.add_argument('-r', '--runs', type= int, help='the number of times fib(N) is calculated')
    args = parser.parse_args()
    # Fall back to defaults when the options were not given.
    N = 100000 if (args.nth is None) else args.nth
    nruns = 1000 if (args.runs is None) else args.runs
    if(nruns <= 0 or N < 0):
        print('invalid parameter for N or number of runs')
        sys.exit()
    if (N > 10000000):
        # For very large N, set the recursion limit to roughly log2(N) + 100.
        sys.setrecursionlimit(math.floor(math.log(N, 2)) + 100)
        print(f"adjusting recursion limit to: {sys.getrecursionlimit()}")
    print(f"Calculating speed for fib({N}) - {nruns} times")
    # Each statement calls one implementation; the full fibonacci.py source is
    # executed as the timeit setup code so those names are in scope.
    func_calls = ['fib_iter({})', 'fib_fd_iter({})', 'fib_fd_tr({})']
    setup = load_file('fibonacci.py')
    for fc in func_calls:
        temp_stmt = fc.format(N)
        t = timeit.Timer(stmt=temp_stmt ,setup=setup)
        print(f"{temp_stmt} => {t.timeit(nruns)}")
| [
"timeit.Timer",
"argparse.ArgumentParser",
"math.log",
"sys.getrecursionlimit",
"sys.exit"
] | [((200, 269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""time fibonacci implementations"""'}), "(description='time fibonacci implementations')\n", (223, 269), False, 'import argparse\n'), ((701, 711), 'sys.exit', 'sys.exit', ([], {}), '()\n', (709, 711), False, 'import sys\n'), ((1115, 1156), 'timeit.Timer', 'timeit.Timer', ([], {'stmt': 'temp_stmt', 'setup': 'setup'}), '(stmt=temp_stmt, setup=setup)\n', (1127, 1156), False, 'import timeit\n'), ((777, 791), 'math.log', 'math.log', (['N', '(2)'], {}), '(N, 2)\n', (785, 791), False, 'import math\n'), ((847, 870), 'sys.getrecursionlimit', 'sys.getrecursionlimit', ([], {}), '()\n', (868, 870), False, 'import sys\n')] |
# Copyright (c) 2020, <NAME>.
# All rights reserved. Distributed under the BSD License.
import csv
import gzip
import json
import logging
import os.path
import time
from enum import Enum
from typing import Optional, Generator, Dict, Callable, Tuple, Set, Any
import requests
#: Number of bytes per mebibyte (2 ** 20), used to report download sizes.
_MEGABYTE = 1048576
#: Logger for all output of the pimdb module.
log = logging.getLogger("pimdb")
class PimdbError(Exception):
    """Error representing that something went wrong during an pimdb operation."""
class PimdbTsvError(Exception):
    """Error in a TSV dataset file, annotated with its path and row number."""

    def __init__(self, path: str, row_number: int, base_message: str):
        self.path = path
        self.row_number = row_number
        # BUG FIX: the message previously had an unbalanced "(" around the
        # row number, and it was never passed to Exception.__init__, so
        # str(error) came back empty.
        self.message = f"{os.path.basename(self.path)} ({row_number}): {base_message}"
        super().__init__(self.message)
class ImdbDataset(Enum):
    """Names of all IMDb datasets available."""

    NAME_BASICS = "name.basics"
    TITLE_AKAS = "title.akas"
    TITLE_BASICS = "title.basics"
    TITLE_CREW = "title.crew"
    TITLE_EPISODE = "title.episode"
    TITLE_PRINCIPALS = "title.principals"
    TITLE_RATINGS = "title.ratings"

    @property
    def filename(self):
        """
        Name of the gzip-compressed file as published for download, for example:

        >>> ImdbDataset("name.basics").filename
        'name.basics.tsv.gz'
        """
        return f"{self.tsv_filename}.gz"

    @property
    def tsv_filename(self):
        """
        Name of the uncompressed TSV file, mostly used for testing, for example:

        >>> ImdbDataset("name.basics").tsv_filename
        'name.basics.tsv'
        """
        return self.value + ".tsv"

    @property
    def table_name(self):
        """
        Name for use in SQL tables, for example:

        >>> ImdbDataset("name.basics").table_name
        'NameBasics'
        """
        return camelized_dot_name(self.value)
class NormalizedTableKey(Enum):
    """Keys identifying pimdb's normalized tables."""
    CHARACTER = "character"
    TEMP_CHARACTERS_TO_CHARACTER = "temp_characters_to_character"
    EPISODE = "episode"
    GENRE = "genre"
    NAME = "name"
    NAME_TO_KNOWN_FOR_TITLE = "name_to_known_for_title"
    PARTICIPATION = "participation"
    PARTICIPATION_TO_CHARACTER = "participation_to_character"
    PROFESSION = "profession"  # Extracted from title.principal.category
    TITLE = "title"
    TITLE_ALIAS = "title_alias"
    TITLE_ALIAS_TO_TITLE_ALIAS_TYPE = "title_alias_to_title_alias_type"
    TITLE_ALIAS_TYPE = "title_alias_type"
    TITLE_TO_GENRE = "title_to_genre"
    TITLE_TYPE = "title_type"
#: Names of all available IMDb datasets.
IMDB_DATASET_NAMES = [dataset.value for dataset in ImdbDataset]

#: Names of datasets required to build normalized tables.
#: BUG FIX: the exclusion previously used ``ImdbDataset.TITLE_CREW.name``
#: ("TITLE_CREW"), which can never match the dotted dataset names, so
#: "title.crew" was silently kept; use ``.value`` ("title.crew") instead.
IMDB_DATASET_NAMES_FOR_NORMALIZED_TABLES = list(set(IMDB_DATASET_NAMES).difference([ImdbDataset.TITLE_CREW.value]))

#: Key columns that uniquely identify a row in each dataset.
IMDB_DATASET_TO_KEY_COLUMNS_MAP = {
    ImdbDataset.NAME_BASICS: ["nconst"],
    ImdbDataset.TITLE_AKAS: ["titleId", "ordering"],
    ImdbDataset.TITLE_BASICS: ["tconst"],
    ImdbDataset.TITLE_EPISODE: ["tconst"],
    ImdbDataset.TITLE_CREW: ["tconst"],
    ImdbDataset.TITLE_PRINCIPALS: ["nconst", "tconst"],
    ImdbDataset.TITLE_RATINGS: ["tconst"],
}
# Every dataset must have its key columns defined.
assert len(IMDB_DATASET_NAMES) == len(IMDB_DATASET_TO_KEY_COLUMNS_MAP)

#: Chunk size in bytes for streaming downloads.
_DOWNLOAD_BUFFER_SIZE = 8192
class Settings:
    """Locations of pimdb's working files, rooted at a data folder."""

    def __init__(self, data_folder: Optional[str] = None):
        # Default to the hidden ".pimdb" folder when none is given.
        self._data_folder = ".pimdb" if data_folder is None else data_folder

    def pimdb_path(self, relative_path: str) -> str:
        """Path to a file or folder inside the pimdb data folder."""
        return os.path.join(self._data_folder, relative_path)
class LastModifiedMap:
    """Persistent JSON map of URL -> HTTP "last-modified" header value.

    Used to decide whether a previously downloaded URL has changed since
    the value was last recorded.
    """

    def __init__(self, last_modified_map_path: str):
        self._last_modified_map_path = last_modified_map_path
        self._url_to_last_modified_map = {}
        try:
            log.debug('reading "last modified" map from "%s"', self._last_modified_map_path)
            with open(self._last_modified_map_path, encoding="utf-8") as last_modified_file:
                self._url_to_last_modified_map = json.load(last_modified_file)
        except FileNotFoundError:
            # If we never cached anything before, just move on.
            log.debug('cannot find last modified map "%s", enforcing downloads', self._last_modified_map_path)
            pass
        except Exception as error:
            # Any other problem (e.g. broken JSON) is non-fatal: downloads are
            # simply enforced because the map starts out empty.
            log.warning(
                'cannot process last modified map "%s", enforcing downloads: %s', self._last_modified_map_path, error
            )

    def is_modified(self, url: str, current_last_modified: str) -> bool:
        """True if *current_last_modified* differs from the value stored for *url*."""
        previous_last_modified = self._url_to_last_modified_map.get(url)
        log.debug(
            'checking last modified: previous=%r, current=%r, url="%s"',
            previous_last_modified,
            current_last_modified,
            url,
        )
        return current_last_modified != previous_last_modified

    def update(self, url: str, last_modified: str) -> None:
        """Record *last_modified* for *url* in memory; call write() to persist."""
        self._url_to_last_modified_map[url] = last_modified

    def write(self) -> None:
        """Persist the in-memory map as JSON to the map path."""
        with open(self._last_modified_map_path, "w", encoding="utf-8") as last_modified_file:
            json.dump(self._url_to_last_modified_map, last_modified_file)
def download_imdb_dataset(imdb_dataset: ImdbDataset, target_path: str, only_if_newer: bool = True) -> None:
    """Download *imdb_dataset* from datasets.imdbws.com to *target_path*.

    With ``only_if_newer``, the HTTP "last-modified" header is compared against
    the value remembered in ".pimdb_last_modified.json" next to *target_path*,
    and the download is skipped when it has not changed.
    """
    source_url = f"https://datasets.imdbws.com/{imdb_dataset.filename}"
    last_modified_storage_path = os.path.join(os.path.dirname(target_path), ".pimdb_last_modified.json")
    last_modified_map = LastModifiedMap(last_modified_storage_path) if only_if_newer else None
    # Stream the response so large files are never fully held in memory.
    with requests.get(source_url, stream=True) as response:
        response.raise_for_status()
        if only_if_newer:
            current_last_modified = response.headers.get("last-modified")
            has_to_be_downloaded = last_modified_map.is_modified(source_url, current_last_modified)
        else:
            has_to_be_downloaded = True
        if has_to_be_downloaded:
            # "content-length" may be absent; then no size is reported.
            megabyte_to_download = int(response.headers.get("content-length", "0")) / _MEGABYTE
            length_text = f"{megabyte_to_download:.1f} MB " if megabyte_to_download > 0 else ""
            log.info('downloading %sfrom "%s" to "%s"', length_text, source_url, target_path)
            with open(target_path, "wb") as target_file:
                for chunk in response.iter_content(chunk_size=_DOWNLOAD_BUFFER_SIZE):
                    if chunk:  # filter out keep-alive new chunks
                        target_file.write(chunk)
            # Only remember the new "last-modified" after a successful download.
            if only_if_newer:
                last_modified_map.update(source_url, current_last_modified)
                last_modified_map.write()
        else:
            log.info('dataset "%s" is up to date, skipping download of "%s"', imdb_dataset.value, source_url)
class GzippedTsvReader:
    """Reader for a gzipped TSV dataset file that skips duplicate rows.

    Rows are identified by the values of *key_columns*; only the first row
    seen for each key is yielded.
    """

    def __init__(
        self,
        gzipped_tsv_path: str,
        key_columns: Tuple[str, ...],
        indicate_progress: Optional[Callable[[int, int], None]] = None,
        seconds_between_progress_update: float = 3.0,
        filtered_name_to_values_map: Optional[Dict[str, Set[str]]] = None,
    ):
        self._gzipped_tsv_path = gzipped_tsv_path
        # Set while column_names_to_value_maps() runs; None before the first read.
        self._row_number = None
        self._key_columns = key_columns
        self._duplicate_count = None
        # Optional callback(row_number, duplicate_count) for progress reporting.
        self._indicate_progress = indicate_progress
        self._seconds_between_progress_update = seconds_between_progress_update
        # Optional {column_name: allowed_values}; rows failing any filter are dropped.
        self._filtered_name_to_values_map = filtered_name_to_values_map

    @property
    def gzipped_tsv_path(self) -> str:
        return self._gzipped_tsv_path

    @property
    def row_number(self) -> int:
        assert self._row_number is not None
        return self._row_number

    @property
    def location(self) -> str:
        # NOTE(review): this ``is not None`` check can never be False because the
        # ``row_number`` property already asserts the value is not None — confirm
        # whether ``self._row_number`` was meant here instead.
        row_number_text = f" ({self.row_number})" if self.row_number is not None else ""
        return f"{os.path.basename(self.gzipped_tsv_path)}{row_number_text}"

    @property
    def duplicate_count(self) -> int:
        return self._duplicate_count

    def column_names_to_value_maps(self) -> Generator[Dict[str, str], None, None]:
        """Yield one {column_name: value} map per unique, filter-matching row."""
        log.info(' reading IMDb dataset file "%s"', self.gzipped_tsv_path)
        with gzip.open(self.gzipped_tsv_path, "rt", encoding="utf-8", newline="") as tsv_file:
            last_progress_time = time.time()
            last_progress_row_number = None
            existing_keys = set()
            self._duplicate_count = 0
            self._row_number = 0
            tsv_reader = csv.DictReader(tsv_file, delimiter="\t", quoting=csv.QUOTE_NONE, strict=True)
            try:
                for result in tsv_reader:
                    self._row_number += 1
                    try:
                        key = tuple(result[key_column] for key_column in self._key_columns)
                    except KeyError as error:
                        raise PimdbTsvError(
                            self.gzipped_tsv_path,
                            self.row_number,
                            f'cannot find key "{error}" for key columns {self._key_columns}: row_map={result}',
                        ) from error
                    if key not in existing_keys:
                        existing_keys.add(key)
                        try:
                            # A row matches if every filtered column holds an allowed value.
                            is_filter_match = self._filtered_name_to_values_map is None or all(
                                result[name_to_filter] in values_to_filter
                                for name_to_filter, values_to_filter in self._filtered_name_to_values_map.items()
                            )
                        except KeyError as error:
                            raise PimdbTsvError(
                                self.gzipped_tsv_path,
                                self.row_number,
                                f"cannot evaluate filter: key_columns={self._key_columns}, "
                                f"filtered_name_to_values_map={self._filtered_name_to_values_map}",
                            ) from error
                        if is_filter_match:
                            yield result
                    else:
                        log.debug("%s: ignoring duplicate %s=%s", self.location, self._key_columns, key)
                        self._duplicate_count += 1
                    # Throttle progress callbacks to once per update interval.
                    if self._indicate_progress is not None:
                        current_time = time.time()
                        if current_time - last_progress_time > self._seconds_between_progress_update:
                            self._indicate_progress(self.row_number, self.duplicate_count)
                            last_progress_time = current_time
                # NOTE(review): ``last_progress_row_number`` is never reassigned and
                # stays None, so this condition is effectively only the callback
                # check — confirm the intended final-progress semantics.
                if self._duplicate_count != last_progress_row_number and self._indicate_progress is not None:
                    self._indicate_progress(self.row_number, self.duplicate_count)
            except csv.Error as error:
                raise PimdbTsvError(self.gzipped_tsv_path, self.row_number, str(error)) from error
class TsvDictWriter:
    """Write {column_name: value} maps as TSV rows, emitting a heading first.

    Column names and order are fixed by the first map written.
    """

    def __init__(self, target_file):
        self._target_file = target_file
        self._line_number = None
        self._column_names = None

    @property
    def line_number(self) -> int:
        assert self._line_number is not None
        return self._line_number

    def write(self, name_to_value_map: Dict[str, Any]):
        """Write one row; on the first call, also write the heading line."""
        if self._column_names is None:
            self._column_names = list(name_to_value_map.keys())
            self._line_number = 1
            heading = "\t".join(self._column_names) + "\n"
            self._target_file.write(heading)
        self._line_number += 1
        try:
            self._target_file.write(
                "\t".join(name_to_value_map[column_name] for column_name in self._column_names) + "\n"
            )
        except Exception as error:
            # BUG FIX: PimdbTsvError expects a path string but was handed the
            # file object, which made os.path.basename() fail while building
            # the error message; pass the underlying file name instead.
            target_name = getattr(self._target_file, "name", str(self._target_file))
            raise PimdbTsvError(
                target_name,
                self.line_number,
                f"cannot write TSV row: {error}; name_to_value_map={name_to_value_map}",
            ) from error
def camelized_dot_name(name: str) -> str:
    """Convert a lowercase dotted name to CamelCase, e.g. "name.basics" -> "NameBasics"."""
    assert name == name.lower()
    return "".join(part.capitalize() for part in name.split("."))
| [
"logging.getLogger",
"csv.DictReader",
"gzip.open",
"requests.get",
"json.load",
"time.time",
"json.dump"
] | [((350, 376), 'logging.getLogger', 'logging.getLogger', (['"""pimdb"""'], {}), "('pimdb')\n", (367, 376), False, 'import logging\n'), ((5517, 5554), 'requests.get', 'requests.get', (['source_url'], {'stream': '(True)'}), '(source_url, stream=True)\n', (5529, 5554), False, 'import requests\n'), ((5063, 5124), 'json.dump', 'json.dump', (['self._url_to_last_modified_map', 'last_modified_file'], {}), '(self._url_to_last_modified_map, last_modified_file)\n', (5072, 5124), False, 'import json\n'), ((8092, 8160), 'gzip.open', 'gzip.open', (['self.gzipped_tsv_path', '"""rt"""'], {'encoding': '"""utf-8"""', 'newline': '""""""'}), "(self.gzipped_tsv_path, 'rt', encoding='utf-8', newline='')\n", (8101, 8160), False, 'import gzip\n'), ((8207, 8218), 'time.time', 'time.time', ([], {}), '()\n', (8216, 8218), False, 'import time\n'), ((8393, 8470), 'csv.DictReader', 'csv.DictReader', (['tsv_file'], {'delimiter': '"""\t"""', 'quoting': 'csv.QUOTE_NONE', 'strict': '(True)'}), "(tsv_file, delimiter='\\t', quoting=csv.QUOTE_NONE, strict=True)\n", (8407, 8470), False, 'import csv\n'), ((3958, 3987), 'json.load', 'json.load', (['last_modified_file'], {}), '(last_modified_file)\n', (3967, 3987), False, 'import json\n'), ((10268, 10279), 'time.time', 'time.time', ([], {}), '()\n', (10277, 10279), False, 'import time\n')] |
import os # path
import sys # path
import yaml # safe_load, YAMLError
import glob # glob
import importlib # import_module
import pytest # skip
import warnings # warn
# For type hints only:
from typing import Union
from types import ModuleType
from _pytest.config import Config
def getSingleFileFromName(name: str, rootdir: str) -> str:
    """Return the single file called *name* found anywhere under *rootdir*."""
    # Search recursively from rootdir for every file with this name:
    search_pattern = os.path.abspath(os.path.join(rootdir, "**", name))
    matches = glob.glob(search_pattern, recursive=True)
    # Exactly one match is required, otherwise the project layout is ambiguous:
    assert len(matches) == 1, f"WRONG NUMBER OF FILES: Must have exactly one '{name}' file inside project. Found {len(matches)} instead.\nBase path used to find files: {search_pattern}."
    return matches[0]
## Open yml/yaml File:
# Opens it and returns contents, or None if problems happen
# (Or throw if problems happen, if required == True)
def loadYamlFile(path: str, required: bool=False) -> Union[list,dict,None]:
    """Open a yml/yaml file and return its contents, or None on problems.

    When ``required`` is True, problems raise instead of warning.
    """
    def _fail(error_msg):
        # Throw if this is required to work, or warn otherwise
        assert not required, error_msg
        warnings.warn(UserWarning(error_msg))

    path = os.path.abspath(path)
    if not os.path.isfile(path):
        _fail(f"YAML ERROR: File not found: '{path}'.")
        return None
    with open(path, "r") as yaml_file:
        try:
            contents = yaml.safe_load(yaml_file)
        except yaml.YAMLError as e:
            _fail(f"YAML ERROR: Couldn't read file: '{path}'. Error '{e}'.")
            return None
    if contents is None:
        _fail(f"YAML ERROR: File is empty: '{path}'.")
    return contents
## Given "key1: {key2: val}", returns -> {key2: val, 'title': key1} to keep everything top-level
# Usefull with "title: {test dict}" senarios
# file and dict_desc for error reporting if something's not formated correctly
def seperateKeyVal(to_seperate: dict, file: str, dict_desc: str) -> dict:
    """Flatten {title: {test dict}} into {test dict..., 'title': title}.

    *file* and *dict_desc* are only used for error reporting when the input
    is not formatted correctly.
    """
    dict_desc = dict_desc.upper()
    key_count = len(to_seperate)
    assert key_count == 1, f"MISFORMATTED {dict_desc}: {key_count} keys found in a {dict_desc.lower()}. Only have 1, the title of the test. File: '{file}'."
    # Split the single entry into its title and contents:
    title, test_info = next(iter(to_seperate.items()))
    # The contents must be a dict, not a list or anything else:
    assert isinstance(test_info, dict), f"MISFORMATED {dict_desc}: Contents of {dict_desc.lower()} '{title}' is not a dict, can't collaps test correctly. File: '{file}'."
    # "title" is reserved for internal use, so it must not be present already:
    assert "title" not in test_info, f"MISFORMATTED {dict_desc}: 'title' key found in {dict_desc.lower()} '{title}'. This key is reserved for internal use only. File: '{file}'."
    # Store the title alongside the rest so everything lives at the top level:
    test_info["title"] = title
    return test_info
def getPytestManagerModule(pytest_managers_path: str) -> ModuleType:
    """Import and return the 'pytest-managers' module located at *pytest_managers_path*."""
    # Add the path to PYTHONPATH, so you can import pytest-managers:
    managers_dir = os.path.dirname(pytest_managers_path)
    sys.path.append(managers_dir)
    try:
        # Actually import pytest-managers now:
        pytest_managers_module = importlib.import_module("pytest-managers")
    except ImportError as e:
        assert False, f"IMPORT ERROR: Problem importing '{pytest_managers_path}'. Error '{e}'."
    finally:
        # BUG FIX: the sys.path cleanup used to run only on success, leaving
        # sys.path polluted when the import failed; always remove the entry.
        sys.path.remove(managers_dir)
    return pytest_managers_module
def skipTestsOnlyRunFilter(config: Config, option: str, check_against: str, option_description: str):
    """Skip the current test unless *check_against* matches one of the *option* filters."""
    filters = config.getoption(option)
    # Option not given on the command line => nothing to filter on:
    if filters is None:
        return
    # Case-insensitive substring match against ANY of the passed values:
    matched = any(one_filter.lower() in check_against.lower() for one_filter in filters)
    if not matched:
        pytest.skip(f"{option_description} did not contain {option} param (case insensitive)")
def skipTestsDontRunFilter(config: Config, option: str, check_against: str, option_description: str):
    """Skip the current test if *check_against* matches one of the *option* filters."""
    filters = config.getoption(option)
    # Option not given on the command line => nothing to filter on:
    if filters is None:
        return
    # Skip on the first case-insensitive substring match:
    if any(one_filter.lower() in check_against.lower() for one_filter in filters):
        pytest.skip(f"{option_description} contained {option} param (case insensitive)")
def skipTestsIfNecessary(config: Config, test_name: str, file_name: str, test_type: str) -> None:
    """Apply the --skip-all and --only-run-*/--dont-run-* cli filters to the current test."""
    # If they want to skip EVERYTHING:
    if config.getoption("--skip-all"):
        pytest.skip("Skipping ALL tests. (--skip-all cli arg was found).")
    # Apply the only-run/dont-run pair for each filter dimension, in order:
    filter_dimensions = (
        ("name", test_name, "Title of test"),
        ("file", file_name, "Name of file"),
        ("type", test_type, "Test type"),
    )
    for suffix, value, description in filter_dimensions:
        skipTestsOnlyRunFilter(config, f"--only-run-{suffix}", value, description)
        skipTestsDontRunFilter(config, f"--dont-run-{suffix}", value, description)
## Validates both pytest-managers.py and pytest-config.py, then loads their methods
# and stores pointers in a dict, for tests to run from.
def loadTestTypes(pytest_managers_path: str, pytest_config_path: str) -> dict:
    """Validate pytest-config.yml, import pytest-managers.py, and attach method pointers.

    Returns the parsed config dict; each "test_types" entry is flattened (its
    title moved to the top level) and extended with a callable "method_pointer".
    """
    ## Load those methods, import what's needed. Save as global (to load in each YamlItem)
    pytest_config_info = loadYamlFile(pytest_config_path, required=True)
    assert "test_types" in pytest_config_info, "CONFIG ERROR: Required key 'test_types' not found in 'pytest-config.yml'."
    assert isinstance(pytest_config_info["test_types"], type([])), f"CONFIG ERROR: 'test_types' must be a list inside 'pytest-config.yml'. (Currently type: {type(pytest_config_info['test_types'])})."
    list_of_test_types = pytest_config_info["test_types"]
    pytest_managers_module = getPytestManagerModule(pytest_managers_path)
    # Time to load the tests inside the config:
    for ii, test_type_config in enumerate(list_of_test_types):
        test_info = seperateKeyVal(test_type_config, pytest_config_path, "test type")
        # If "required_keys" or "required_files" field contain one item, turn into list of that one item:
        if "required_keys" in test_info and not isinstance(test_info["required_keys"], type([])):
            test_info["required_keys"] = [test_info["required_keys"]]
        if "required_files" in test_info and not isinstance(test_info["required_files"], type([])):
            test_info["required_files"] = [test_info["required_files"]]
        # If neither are used, AND you have tests after this one, warn that those tests can't be reached:
        if "required_keys" not in test_info and "required_files" not in test_info and ii < (len(list_of_test_types)-1):
            warnings.warn(UserWarning(f"Test type found without required_keys AND required_files used, but there are test types after this one. Tests can't pass '{test_info['title']}' and run on those."))
        # Make sure test_info has required keys:
        assert "method" in test_info, f"CONFIG ERROR: Required key 'method' not found in test '{test_info['title']}'. File: '{pytest_config_path}'."
        # Import the method inside the module:
        try:
            # This makes it so you can write test_info["method_pointer"](args) to actually call the method:
            test_info["method_pointer"] = getattr(pytest_managers_module, test_info["method"])
        except AttributeError:
            assert False, f"IMPORT ERROR: '{test_info['method']}' not found in 'pytest-managers.py'. Tried loading from: '{pytest_managers_module.__file__}'."
        # Just a guarantee this field is declared, to pass into functions:
        if "variables" not in test_info:
            test_info["variables"] = None
        # Save it:
        list_of_test_types[ii] = test_info
    return pytest_config_info
"importlib.import_module",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"yaml.safe_load",
"os.path.abspath",
"pytest.skip",
"glob.glob"
] | [((544, 585), 'glob.glob', 'glob.glob', (['recursive_path'], {'recursive': '(True)'}), '(recursive_path, recursive=True)\n', (553, 585), False, 'import glob\n'), ((1093, 1114), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (1108, 1114), False, 'import os\n'), ((488, 521), 'os.path.join', 'os.path.join', (['rootdir', '"""**"""', 'name'], {}), "(rootdir, '**', name)\n", (500, 521), False, 'import os\n'), ((1126, 1146), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1140, 1146), False, 'import os\n'), ((3553, 3590), 'os.path.dirname', 'os.path.dirname', (['pytest_managers_path'], {}), '(pytest_managers_path)\n', (3568, 3590), False, 'import os\n'), ((3681, 3723), 'importlib.import_module', 'importlib.import_module', (['"""pytest-managers"""'], {}), "('pytest-managers')\n", (3704, 3723), False, 'import importlib\n'), ((3916, 3953), 'os.path.dirname', 'os.path.dirname', (['pytest_managers_path'], {}), '(pytest_managers_path)\n', (3931, 3953), False, 'import os\n'), ((5312, 5378), 'pytest.skip', 'pytest.skip', (['"""Skipping ALL tests. (--skip-all cli arg was found)."""'], {}), "('Skipping ALL tests. (--skip-all cli arg was found).')\n", (5323, 5378), False, 'import pytest\n'), ((1453, 1478), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], {}), '(yaml_file)\n', (1467, 1478), False, 'import yaml\n'), ((4578, 4669), 'pytest.skip', 'pytest.skip', (['f"""{option_description} did not contain {option} param (case insensitive)"""'], {}), "(\n f'{option_description} did not contain {option} param (case insensitive)')\n", (4589, 4669), False, 'import pytest\n'), ((5046, 5131), 'pytest.skip', 'pytest.skip', (['f"""{option_description} contained {option} param (case insensitive)"""'], {}), "(f'{option_description} contained {option} param (case insensitive)'\n )\n", (5057, 5131), False, 'import pytest\n')] |
# Generated by Django 3.1.7 on 2021-04-01 06:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
import nautobot.extras.models.statuses
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("tenancy", "0001_initial"),
("extras", "0001_initial_part_1"),
("dcim", "0002_initial_part_2"),
("ipam", "0001_initial_part_1"),
]
operations = [
migrations.AddField(
model_name="rackreservation",
name="user",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name="rackgroup",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="dcim.rackgroup",
),
),
migrations.AddField(
model_name="rackgroup",
name="site",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="rack_groups", to="dcim.site"
),
),
migrations.AddField(
model_name="rack",
name="group",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="racks",
to="dcim.rackgroup",
),
),
migrations.AddField(
model_name="rack",
name="role",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="racks",
to="dcim.rackrole",
),
),
migrations.AddField(
model_name="rack",
name="site",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name="racks", to="dcim.site"),
),
migrations.AddField(
model_name="rack",
name="status",
field=nautobot.extras.models.statuses.StatusField(
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="dcim_rack_related",
to="extras.status",
),
),
migrations.AddField(
model_name="rack",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="rack",
name="tenant",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="racks",
to="tenancy.tenant",
),
),
migrations.AddField(
model_name="powerporttemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="powerporttemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="powerport",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="powerport",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="powerport",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="powerport",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="powerports", to="dcim.device"
),
),
migrations.AddField(
model_name="powerport",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="powerpanel",
name="rack_group",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to="dcim.rackgroup"
),
),
migrations.AddField(
model_name="powerpanel",
name="site",
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to="dcim.site"),
),
migrations.AddField(
model_name="powerpanel",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="poweroutlettemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="poweroutlettemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="poweroutlettemplate",
name="power_port",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="poweroutlet_templates",
to="dcim.powerporttemplate",
),
),
migrations.AddField(
model_name="poweroutlet",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="poweroutlet",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="poweroutlet",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="poweroutlet",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="poweroutlets", to="dcim.device"
),
),
migrations.AddField(
model_name="poweroutlet",
name="power_port",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="poweroutlets",
to="dcim.powerport",
),
),
migrations.AddField(
model_name="poweroutlet",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="powerfeed",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="powerfeed",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="powerfeed",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="powerfeed",
name="power_panel",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, related_name="powerfeeds", to="dcim.powerpanel"
),
),
migrations.AddField(
model_name="powerfeed",
name="rack",
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to="dcim.rack"),
),
migrations.AddField(
model_name="powerfeed",
name="status",
field=nautobot.extras.models.statuses.StatusField(
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="dcim_powerfeed_related",
to="extras.status",
),
),
migrations.AddField(
model_name="powerfeed",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="platform",
name="manufacturer",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="platforms",
to="dcim.manufacturer",
),
),
migrations.AddField(
model_name="inventoryitem",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="inventoryitems", to="dcim.device"
),
),
migrations.AddField(
model_name="inventoryitem",
name="manufacturer",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="inventory_items",
to="dcim.manufacturer",
),
),
migrations.AddField(
model_name="inventoryitem",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="child_items",
to="dcim.inventoryitem",
),
),
migrations.AddField(
model_name="inventoryitem",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="interfacetemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="interfacetemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="interface",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="interface",
name="_path",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="dcim.cablepath"
),
),
migrations.AddField(
model_name="interface",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="interface",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="interfaces", to="dcim.device"
),
),
migrations.AddField(
model_name="interface",
name="lag",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="member_interfaces",
to="dcim.interface",
),
),
migrations.AddField(
model_name="interface",
name="tagged_vlans",
field=models.ManyToManyField(blank=True, related_name="interfaces_as_tagged", to="ipam.VLAN"),
),
migrations.AddField(
model_name="interface",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="interface",
name="untagged_vlan",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="interfaces_as_untagged",
to="ipam.vlan",
),
),
migrations.AddField(
model_name="frontporttemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontporttemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="frontporttemplate",
name="rear_port",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="frontport_templates",
to="dcim.rearporttemplate",
),
),
migrations.AddField(
model_name="frontport",
name="_cable_peer_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="contenttypes.contenttype",
),
),
migrations.AddField(
model_name="frontport",
name="cable",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="+", to="dcim.cable"
),
),
migrations.AddField(
model_name="frontport",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontports", to="dcim.device"
),
),
migrations.AddField(
model_name="frontport",
name="rear_port",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="frontports", to="dcim.rearport"
),
),
migrations.AddField(
model_name="frontport",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="devicetype",
name="manufacturer",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, related_name="device_types", to="dcim.manufacturer"
),
),
migrations.AddField(
model_name="devicetype",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
migrations.AddField(
model_name="devicebaytemplate",
name="device_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="devicebaytemplates", to="dcim.devicetype"
),
),
migrations.AddField(
model_name="devicebay",
name="device",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="devicebays", to="dcim.device"
),
),
migrations.AddField(
model_name="devicebay",
name="installed_device",
field=models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="parent_bay",
to="dcim.device",
),
),
migrations.AddField(
model_name="devicebay",
name="tags",
field=taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag"),
),
]
| [
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey"
] | [((387, 444), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (418, 444), False, 'from django.db import migrations, models\n'), ((748, 844), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.PROTECT, to=settings.\n AUTH_USER_MODEL)\n', (765, 844), False, 'from django.db import migrations, models\n'), ((1318, 1429), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""rack_groups"""', 'to': '"""dcim.site"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='rack_groups', to='dcim.site')\n", (1335, 1429), False, 'from django.db import migrations, models\n'), ((1571, 1705), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""racks"""', 'to': '"""dcim.rackgroup"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='racks', to='dcim.rackgroup')\n", (1588, 1705), False, 'from django.db import migrations, models\n'), ((1911, 2043), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""racks"""', 'to': '"""dcim.rackrole"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, related_name='racks', to='dcim.rackrole')\n", (1928, 2043), False, 'from django.db import migrations, models\n'), ((2249, 2354), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""racks"""', 'to': '"""dcim.site"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='racks', to='dcim.site')\n", (2266, 2354), 
False, 'from django.db import migrations, models\n'), ((3010, 3143), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""racks"""', 'to': '"""tenancy.tenant"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, related_name='racks', to='tenancy.tenant')\n", (3027, 3143), False, 'from django.db import migrations, models\n'), ((3369, 3493), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""powerporttemplates"""', 'to': '"""dcim.devicetype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='powerporttemplates', to='dcim.devicetype')\n", (3386, 3493), False, 'from django.db import migrations, models\n'), ((3651, 3791), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='contenttypes.contenttype')\n", (3668, 3791), False, 'from django.db import migrations, models\n'), ((4003, 4115), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""dcim.cablepath"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='dcim.cablepath')\n", (4020, 4115), False, 'from django.db import migrations, models\n'), ((4262, 4388), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""dcim.cable"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='dcim.cable')\n", (4279, 4388), False, 'from django.db 
import migrations, models\n'), ((4536, 4648), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""powerports"""', 'to': '"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='powerports', to='dcim.device')\n", (4553, 4648), False, 'from django.db import migrations, models\n'), ((4999, 5110), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""dcim.rackgroup"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, to='dcim.rackgroup')\n", (5016, 5110), False, 'from django.db import migrations, models\n'), ((5257, 5335), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""dcim.site"""'}), "(on_delete=django.db.models.deletion.PROTECT, to='dcim.site')\n", (5274, 5335), False, 'from django.db import migrations, models\n'), ((5672, 5798), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""poweroutlettemplates"""', 'to': '"""dcim.devicetype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='poweroutlettemplates', to='dcim.devicetype')\n", (5689, 5798), False, 'from django.db import migrations, models\n'), ((5960, 6123), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""poweroutlet_templates"""', 'to': '"""dcim.powerporttemplate"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='poweroutlet_templates', to=\n 'dcim.powerporttemplate')\n", (5977, 6123), False, 'from django.db import migrations, models\n'), ((6343, 6483), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 
'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='contenttypes.contenttype')\n", (6360, 6483), False, 'from django.db import migrations, models\n'), ((6697, 6809), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""dcim.cablepath"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='dcim.cablepath')\n", (6714, 6809), False, 'from django.db import migrations, models\n'), ((6958, 7084), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""dcim.cable"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='dcim.cable')\n", (6975, 7084), False, 'from django.db import migrations, models\n'), ((7234, 7348), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""poweroutlets"""', 'to': '"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='poweroutlets', to='dcim.device')\n", (7251, 7348), False, 'from django.db import migrations, models\n'), ((7502, 7643), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""poweroutlets"""', 'to': '"""dcim.powerport"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='poweroutlets', to='dcim.powerport')\n", (7519, 7643), False, 'from django.db import migrations, models\n'), ((8066, 8206), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 
'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='contenttypes.contenttype')\n", (8083, 8206), False, 'from django.db import migrations, models\n'), ((8418, 8530), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""dcim.cablepath"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='dcim.cablepath')\n", (8435, 8530), False, 'from django.db import migrations, models\n'), ((8677, 8803), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""dcim.cable"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='dcim.cable')\n", (8694, 8803), False, 'from django.db import migrations, models\n'), ((8956, 9072), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""powerfeeds"""', 'to': '"""dcim.powerpanel"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='powerfeeds', to='dcim.powerpanel')\n", (8973, 9072), False, 'from django.db import migrations, models\n'), ((9218, 9324), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""dcim.rack"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, to='dcim.rack')\n", (9235, 9324), False, 'from django.db import migrations, models\n'), ((10005, 10145), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""platforms"""', 'to': 
'"""dcim.manufacturer"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, related_name='platforms', to='dcim.manufacturer')\n", (10022, 10145), False, 'from django.db import migrations, models\n'), ((10362, 10478), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""inventoryitems"""', 'to': '"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='inventoryitems', to='dcim.device')\n", (10379, 10478), False, 'from django.db import migrations, models\n'), ((10636, 10782), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""inventory_items"""', 'to': '"""dcim.manufacturer"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.PROTECT, related_name='inventory_items', to='dcim.manufacturer')\n", (10653, 10782), False, 'from django.db import migrations, models\n'), ((11579, 11703), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""interfacetemplates"""', 'to': '"""dcim.devicetype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='interfacetemplates', to='dcim.devicetype')\n", (11596, 11703), False, 'from django.db import migrations, models\n'), ((11861, 12001), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='contenttypes.contenttype')\n", (11878, 12001), False, 'from django.db import migrations, models\n'), ((12213, 12325), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 
'django.db.models.deletion.SET_NULL', 'to': '"""dcim.cablepath"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='dcim.cablepath')\n", (12230, 12325), False, 'from django.db import migrations, models\n'), ((12472, 12598), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""dcim.cable"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='dcim.cable')\n", (12489, 12598), False, 'from django.db import migrations, models\n'), ((12746, 12858), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""interfaces"""', 'to': '"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='interfaces', to='dcim.device')\n", (12763, 12858), False, 'from django.db import migrations, models\n'), ((13003, 13149), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""member_interfaces"""', 'to': '"""dcim.interface"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='member_interfaces', to='dcim.interface')\n", (13020, 13149), False, 'from django.db import migrations, models\n'), ((13368, 13460), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""interfaces_as_tagged"""', 'to': '"""ipam.VLAN"""'}), "(blank=True, related_name='interfaces_as_tagged', to=\n 'ipam.VLAN')\n", (13390, 13460), False, 'from django.db import migrations, models\n'), ((13783, 13929), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""interfaces_as_untagged"""', 'to': '"""ipam.vlan"""'}), 
"(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='interfaces_as_untagged', to='ipam.vlan')\n", (13800, 13929), False, 'from django.db import migrations, models\n'), ((14155, 14279), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""frontporttemplates"""', 'to': '"""dcim.devicetype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='frontporttemplates', to='dcim.devicetype')\n", (14172, 14279), False, 'from django.db import migrations, models\n'), ((14438, 14569), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""frontport_templates"""', 'to': '"""dcim.rearporttemplate"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='frontport_templates', to='dcim.rearporttemplate')\n", (14455, 14569), False, 'from django.db import migrations, models\n'), ((14760, 14900), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""contenttypes.contenttype"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='contenttypes.contenttype')\n", (14777, 14900), False, 'from django.db import migrations, models\n'), ((15112, 15238), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""dcim.cable"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='dcim.cable')\n", (15129, 15238), False, 'from django.db import migrations, models\n'), ((15386, 15498), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""frontports"""', 'to': 
'"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='frontports', to='dcim.device')\n", (15403, 15498), False, 'from django.db import migrations, models\n'), ((15649, 15763), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""frontports"""', 'to': '"""dcim.rearport"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='frontports', to='dcim.rearport')\n", (15666, 15763), False, 'from django.db import migrations, models\n'), ((16116, 16236), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""device_types"""', 'to': '"""dcim.manufacturer"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='device_types', to='dcim.manufacturer')\n", (16133, 16236), False, 'from django.db import migrations, models\n'), ((16596, 16720), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""devicebaytemplates"""', 'to': '"""dcim.devicetype"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='devicebaytemplates', to='dcim.devicetype')\n", (16613, 16720), False, 'from django.db import migrations, models\n'), ((16868, 16980), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""devicebays"""', 'to': '"""dcim.device"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='devicebays', to='dcim.device')\n", (16885, 16980), False, 'from django.db import migrations, models\n'), ((17138, 17277), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""parent_bay"""', 'to': '"""dcim.device"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, 
related_name='parent_bay', to='dcim.device')\n", (17158, 17277), False, 'from django.db import migrations, models\n')] |
from glyphNameFormatter.data.scriptPrefixes import scriptPrefixes
def process(self):
if self.has("LATIN"):
self.scriptTag = scriptPrefixes['latin']
if self.has("ARMENIAN"):
# self.scriptTag = scriptPrefixes['armenian']
self.processAs("Armenian")
elif self.has("HEBREW"):
# self.scriptTag = scriptPrefixes['hebrew']
self.processAs("Hebrew")
self.edit("LATIN SMALL LIGATURE FFI", "f_f_i")
self.edit("LATIN SMALL LIGATURE FFL", "f_f_l")
self.edit("LATIN SMALL LIGATURE FF", "f_f")
self.edit("LATIN SMALL LIGATURE FI", "fi")
self.edit("LATIN SMALL LIGATURE FL", "fl")
self.edit("LATIN SMALL LIGATURE LONG S T", "longs_t")
self.edit("LATIN SMALL LIGATURE ST", "s_t")
self.compress()
if __name__ == "__main__":
from glyphNameFormatter.exporters import printRange
printRange("Alphabetic Presentation Forms")
| [
"glyphNameFormatter.exporters.printRange"
] | [((854, 897), 'glyphNameFormatter.exporters.printRange', 'printRange', (['"""Alphabetic Presentation Forms"""'], {}), "('Alphabetic Presentation Forms')\n", (864, 897), False, 'from glyphNameFormatter.exporters import printRange\n')] |