| id | content |
|---|---|
11566898
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils
class SkipLastGNN(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, args):
super(SkipLastGNN, self).__init__()
self.args = args
self.dropout = args.dropout
self.num_layers = args.num_layers
self.pre_mp = nn.Sequential(nn.Linear(input_dim, hidden_dim))
conv_model = self.build_conv_model(args.model)
self.convs = nn.ModuleList()
if args.skip == 'learnable':
self.learnable_skip = nn.Parameter(torch.ones(self.num_layers, self.num_layers))
for l in range(args.num_layers):
if args.skip == 'all' or args.skip == 'learnable':
hidden_input_dim = hidden_dim * (l + 1)
elif args.skip == 'last':
hidden_input_dim = hidden_dim
else:
raise ValueError(f'Unknown skip option {args.skip}')
self.convs.append(conv_model(hidden_input_dim, hidden_dim))
post_input_dim = hidden_dim * (args.num_layers + 1)
self.post_mp = nn.Sequential(
nn.Linear(post_input_dim, hidden_dim), nn.Dropout(args.dropout), nn.LeakyReLU(0.1),
nn.Linear(hidden_dim, output_dim))
def build_conv_model(self, model_type):
if model_type == 'GCN':
return pyg_nn.GCNConv
elif model_type == 'GAT':
return pyg_nn.GATConv
elif model_type == "GraphSage":
return pyg_nn.SAGEConv
elif model_type == "Graph":
return pyg_nn.GraphConv
elif model_type == "Simple":
if self.args.skip == 'all':
raise ValueError("SimpleConv does not have parameter and does not support full skip connections")
return SimpleConv
else:
raise ValueError("Model_type {} unavailable, please add it to GNN.build_conv_model.".format(model_type))
def forward(self, data):
x, edge_index, batch = data.node_feature, data.edge_index, data.batch
x = self.pre_mp(x)
num_nodes = x.size(0)
# [num nodes x current num layer x hidden_dim]
all_emb = x.unsqueeze(1)
# [num nodes x (curr num layer * hidden_dim)]
emb = x
for i in range(len(self.convs)):
if self.args.skip == 'learnable':
skip_vals = self.learnable_skip[i, :i+1].unsqueeze(0).unsqueeze(-1)
curr_emb = all_emb * torch.sigmoid(skip_vals)
curr_emb = curr_emb.view(num_nodes, -1)
x = self.convs[i](curr_emb, edge_index)
elif self.args.skip == 'all':
x = self.convs[i](emb, edge_index)
else:
x = self.convs[i](x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
emb = torch.cat((emb, x), 1)
if self.args.skip == 'learnable':
all_emb = torch.cat((all_emb, x.unsqueeze(1)), 1)
# x = pyg_nn.global_mean_pool(x, batch)
emb = pyg_nn.global_add_pool(emb, batch)
emb = self.post_mp(emb)
out = F.log_softmax(emb, dim=1)
return out
def loss(self, pred, label):
return F.nll_loss(pred, label)
class SimpleConv(pyg_nn.MessagePassing):
def __init__(self, in_channels, out_channels, cache=False):
super(SimpleConv, self).__init__(aggr='add')
self.cache = cache
self.reset_cache()
def reset_cache(self):
self.cached_norm = None
def forward(self, x, edge_index, add_self=False):
edge_index, _ = pyg_utils.remove_self_loops(edge_index)
emb = self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
if add_self:
emb = emb + x
return emb
def message(self, x_i, x_j, edge_index, size):
if self.cached_norm is None:
row, col = edge_index
deg = pyg_utils.degree(row, size[0], dtype=x_j.dtype)
deg_inv_sqrt = deg.pow(-0.5)
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
if self.cache:
self.cached_norm = norm
else:
norm = self.cached_norm
return norm.view(-1, 1) * x_j
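# Minimal usage sketch: the `args` namespace below is hypothetical and only
# provides the attributes the model reads (model, skip, num_layers, dropout).
if __name__ == "__main__":
    from types import SimpleNamespace
    from torch_geometric.data import Data

    args = SimpleNamespace(model='GCN', skip='learnable', num_layers=2, dropout=0.1)
    model = SkipLastGNN(input_dim=8, hidden_dim=16, output_dim=3, args=args)
    toy = Data(node_feature=torch.randn(5, 8),
               edge_index=torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]]),
               batch=torch.zeros(5, dtype=torch.long))
    print(model(toy).shape)  # expected: torch.Size([1, 3])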
|
11566915
|
import logging
import pathlib
from dataclasses import dataclass
import pytorch_lightning as pl
from omegaconf import DictConfig
from nuplan.planning.script.builders.model_builder import build_torch_module_wrapper
from nuplan.planning.script.builders.training_builder import (
build_lightning_datamodule,
build_lightning_module,
build_trainer,
)
from nuplan.planning.training.callbacks.profile_callback import ProfileCallback
from nuplan.planning.utils.multithreading.worker_pool import WorkerPool
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class TrainingEngine:
"""Lightning training engine dataclass wrapping the lightning trainer, model and datamodule."""
trainer: pl.Trainer # Trainer for models
model: pl.LightningModule # Module describing NN model, loss, metrics, visualization
datamodule: pl.LightningDataModule # Loading data
def __repr__(self) -> str:
"""
:return: String representation of class without expanding the fields.
"""
return f"<{type(self).__module__}.{type(self).__qualname__} object at {hex(id(self))}>"
def build_training_engine(cfg: DictConfig, worker: WorkerPool) -> TrainingEngine:
"""
Build the three core lightning modules: LightningDataModule, LightningModule and Trainer
:param cfg: omegaconf dictionary
:param worker: Worker to submit tasks which can be executed in parallel
:return: TrainingEngine
"""
logger.info('Building training engine...')
# Construct profiler
profiler = ProfileCallback(pathlib.Path(cfg.output_dir)) if cfg.enable_profiling else None
# Start profiler if enabled
if profiler:
profiler.start_profiler("build_training_engine")
# Create model
torch_module_wrapper = build_torch_module_wrapper(cfg.model)
# Build the datamodule
datamodule = build_lightning_datamodule(cfg, worker, torch_module_wrapper)
# Build lightning module
model = build_lightning_module(cfg, torch_module_wrapper)
# Build trainer
trainer = build_trainer(cfg)
engine = TrainingEngine(trainer=trainer, datamodule=datamodule, model=model)
# Save profiler output
if profiler:
profiler.save_profiler("build_training_engine")
return engine
|
11566929
|
from typing import List, Iterable
from requests import Response
import json
from TM1py.Exceptions.Exceptions import TM1pyRestException
from TM1py.Services.ObjectService import ObjectService
from TM1py.Services.RestService import RestService
from TM1py.Utils import format_url
from TM1py.Objects.Sandbox import Sandbox
class SandboxService(ObjectService):
""" Service to handle sandboxes in TM1
"""
def __init__(self, rest: RestService):
super().__init__(rest)
def get(self, sandbox_name: str, **kwargs) -> Sandbox:
""" get a sandbox from TM1 Server
:param sandbox_name: str
:return: instance of TM1py.Sandbox
"""
url = format_url("/api/v1/Sandboxes('{}')", sandbox_name)
response = self._rest.GET(url=url, **kwargs)
sandbox = Sandbox.from_json(response.text)
return sandbox
def get_all(self, **kwargs) -> List[Sandbox]:
""" get all sandboxes from TM1 Server
:return: List of TM1py.Sandbox instances
"""
url = "/api/v1/Sandboxes?$select=Name,IncludeInSandboxDimension"
response = self._rest.GET(url, **kwargs)
sandboxes = [
Sandbox.from_dict(sandbox_as_dict=sandbox)
for sandbox in response.json()["value"]
]
return sandboxes
def get_all_names(self, **kwargs) -> List[str]:
""" get all sandbox names
:param kwargs:
:return:
"""
url = "/api/v1/Sandboxes?$select=Name"
response = self._rest.GET(url, **kwargs)
return [entry["Name"] for entry in response.json()["value"]]
def create(self, sandbox: Sandbox, **kwargs) -> Response:
""" create a new sandbox in TM1 Server
:param sandbox: Sandbox
:return: response
"""
url = "/api/v1/Sandboxes"
return self._rest.POST(url=url, data=sandbox.body, **kwargs)
def update(self, sandbox: Sandbox, **kwargs) -> Response:
""" update a sandbox in TM1
:param sandbox:
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')", sandbox.name)
return self._rest.PATCH(url=url, data=sandbox.body, **kwargs)
def delete(self, sandbox_name: str, **kwargs) -> Response:
""" delete a sandbox in TM1
:param sandbox_name:
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')", sandbox_name)
return self._rest.DELETE(url, **kwargs)
def publish(self, sandbox_name: str, **kwargs) -> Response:
""" publish existing sandbox to base
:param sandbox_name: str
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')/tm1.Publish", sandbox_name)
return self._rest.POST(url=url, **kwargs)
def reset(self, sandbox_name: str, **kwargs) -> Response:
""" reset all changes in specified sandbox
:param sandbox_name: str
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')/tm1.DiscardChanges", sandbox_name)
return self._rest.POST(url=url, **kwargs)
def merge(
self,
source_sandbox_name: str,
target_sandbox_name: str,
clean_after: bool = False,
**kwargs
) -> Response:
""" merge one sandbox into another
:param source_sandbox_name: str
:param target_sandbox_name: str
:param clean_after: bool: Reset source sandbox after merging
:return: response
"""
url = format_url("/api/v1/Sandboxes('{}')/tm1.Merge", source_sandbox_name)
payload = dict()
payload["<EMAIL>"] = format_url(
"Sandboxes('{}')", target_sandbox_name
)
payload["CleanAfter"] = clean_after
return self._rest.POST(url=url, data=json.dumps(payload), **kwargs)
def exists(self, sandbox_name: str, **kwargs) -> bool:
""" check if the sandbox exists in TM1
:param sandbox_name: String
:return: bool
"""
url = format_url("/api/v1/Sandboxes('{}')", sandbox_name)
return self._exists(url, **kwargs)
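# Usage sketch: `rest` stands for an already-authenticated TM1py RestService
# (hypothetical here); only methods defined above are used.
#   sandboxes = SandboxService(rest)
#   for name in sandboxes.get_all_names():
#       if name != "default":
#           sandboxes.publish(name)
#           sandboxes.reset(name)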
|
11566947
|
from __future__ import absolute_import
from collections import OrderedDict
from ..utils import to_torch
from torch.autograd import Variable
def extract_cnn_feature(model, inputs, modules=None):
model.eval()
inputs = Variable(to_torch(inputs))
if modules is None:
outputs = model(inputs)[0]
outputs = outputs.data.cpu()
return outputs # pool5
# Register forward hook for each module
outputs = OrderedDict()
handles = []
for m in modules:
outputs[id(m)] = None
def func(m, i, o): outputs[id(m)] = o.data.cpu()
handles.append(m.register_forward_hook(func))
model(inputs)
for h in handles:
h.remove()
return list(outputs.values())
|
11566966
|
import array
import struct
import mmh2
def write_deps(f, alldeps):
signature = '# ninjadeps\n'
version = 4
paths = []
for out, mtime, deps in alldeps:
paths.append(out)
paths.extend(deps)
paths = set(paths)
pathids = {path: _id for _id, path in enumerate(paths)}
f.write(signature.encode('utf-8'))
f.write(struct.pack('i', version))
for _id, path in enumerate(paths):
data = path.encode('utf-8')
if len(data) % 4:
data += b'\x00' * (4 - len(data) % 4)
f.write(struct.pack('I', len(data) + 4))
f.write(data)
f.write(struct.pack('i', ~_id))
for out, mtime, deps in alldeps:
size = (1 + 2 + len(deps)) * 4
f.write(struct.pack('I', size | (1 << 31)))
f.write(struct.pack('iII', pathids[out], mtime & 0xffffffff, (mtime >> 32) & 0xffffffff))
f.write(array.array('I', [pathids[d] for d in deps]).tobytes())
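# File layout produced above: the "# ninjadeps" signature and an int32 version,
# followed by path records (uint32 record size, NUL-padded path bytes, int32 ~id
# checksum) and dependency records (uint32 size with the high bit set, int32 output
# path id, the mtime split into two uint32 words, then the uint32 dependency ids).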
def write_log(f, cmds):
seed = 0xDECAFBADDECAFBAD
f.write('# ninja log v5\n')
for obj, mtime, cmd in cmds:
hsh = mmh2.hash64(cmd.encode('utf-8'), seed)
f.write('%d\t%d\t%lu\t%s\t%x\n' % (0, 0, mtime, obj, hsh))
|
11566985
|
try:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
from ..widgets import LocationField
class MaterialOptionsWidget(QWidget):
def __init__(self):
super(MaterialOptionsWidget, self).__init__()
layout = QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(4)
self.path_label = QLabel('Path')
layout.addWidget(self.path_label, 0, 0)
self.path_field = LocationField()
layout.addWidget(self.path_field, 0, 1)
self.name_label = QLabel('Name')
layout.addWidget(self.name_label, 1, 0)
self.name_field = QLineEdit()
layout.addWidget(self.name_field, 1, 1)
self.comment_label = QLabel('Comment')
layout.addWidget(self.comment_label, 2, 0, Qt.AlignLeft | Qt.AlignTop)
self.comment_field = QTextEdit()
self.comment_field.setAutoFillBackground(False)
layout.addWidget(self.comment_field, 2, 1)
self.favorite_toggle = QCheckBox('Mark as favorite')
layout.addWidget(self.favorite_toggle, 3, 0, 1, -1)
def options(self):
return {
'path': self.path_field.path() or None,
'name': self.name_field.text() or None,
'comment': self.comment_field.toPlainText() or None,
'favorite': self.favorite_toggle.isChecked()
}
def setOptions(self, data):
self.path_field.setText(data.get('path', ''))
self.name_field.setText(data.get('name', ''))
self.comment_field.setPlainText(data.get('comment', ''))
self.favorite_toggle.setChecked(data.get('favorite', False))
|
11566998
|
import os
import pytest
import enaml
from jinja2 import Template
TEMPLATE_DIR = os.path.dirname(__file__)
with enaml.imports():
from pages import HelloWorld, Simple
@pytest.fixture
def app():
from web.core.app import WebApplication
app = WebApplication.instance() or WebApplication()
yield app
def test_hello_world_jinja(app, benchmark):
with open('{}/templates/hello_world.html'.format(
TEMPLATE_DIR)) as f:
tmpl = f.read()
@benchmark
def render():
Template(tmpl).render()
def test_hello_world(app, benchmark):
@benchmark
def render():
HelloWorld().render()
NAVIGATION = [
{'href': 'http://python.org',
'caption': 'Python'},
{'href': 'http://jinja.pocoo.org/docs/2.10/templates/',
'caption': 'Template Designer Documentation'},
{'href': 'https://github.com/channelcat/sanic',
'caption': 'Sanic'},
] * 3
def test_simple_jinja(app, benchmark):
with open('{}/templates/simple.html'.format(
TEMPLATE_DIR)) as f:
template = Template(f.read())
@benchmark
def render():
template.render(
navigation=NAVIGATION,
content="This is the content"
)
def test_simple(app, benchmark):
view = Simple()
@benchmark
def render():
view.render(navigation=NAVIGATION,
content="This is the content")
|
11567067
|
from tempfile import mkdtemp
from shutil import rmtree
import urllib
from helpers import DATA_FILE, DATA_URL
from archivekit import Collection, open_archive
from archivekit.store.file import FileStore
from archivekit.types.source import Source
from archivekit.util import checksum
def test_basic_package():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
assert len(list(coll)) == 0, list(coll)
pkg = coll.create()
assert pkg.id is not None, pkg
assert pkg.exists(), pkg
pkg = coll.get(None)
assert not pkg.exists(), pkg
rmtree(path)
def test_basic_manifest():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
pkg.manifest['foo'] = 'bar'
pkg.save()
npkg = coll.get(pkg.id)
assert npkg.id == pkg.id, npkg
assert npkg.manifest['foo'] == 'bar', npkg.meta.items()
rmtree(path)
def test_archive():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
coll.ingest(DATA_FILE)
archive = open_archive('file', path=path)
assert archive.get('test') == coll, archive.get('test')
colls = list(archive)
assert len(colls) == 1, colls
rmtree(path)
def test_collection_ingest():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
coll.ingest(DATA_FILE)
pkgs = list(coll)
assert len(pkgs) == 1, pkgs
pkg0 = pkgs[0]
assert pkg0.id == checksum(DATA_FILE), pkg0.id
sources = list(pkg0.all(Source))
assert len(sources) == 1, sources
assert sources[0].name == 'test.csv', sources[0].name
rmtree(path)
def test_package_ingest_file():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
source = pkg.ingest(DATA_FILE)
assert source.meta.get('name') == 'test.csv', source.meta
assert source.meta.get('extension') == 'csv', source.meta
assert source.meta.get('slug') == 'test', source.meta
rmtree(path)
def test_package_get_resource():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
source = pkg.ingest(DATA_FILE)
other = pkg.get_resource(source.path)
assert isinstance(other, Source), other.__class__
assert other.path == source.path, other
rmtree(path)
def test_resource_local():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
source = pkg.ingest(DATA_FILE)
with source.local() as file_name:
assert file_name.endswith(source.name), file_name
rmtree(path)
def test_package_source():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
assert pkg.source is None, pkg.source
source = pkg.ingest(DATA_FILE)
other = pkg.source
assert isinstance(other, Source), other.__class__
assert other.path == source.path, other
rmtree(path)
def test_package_ingest_url():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
source = pkg.ingest(DATA_URL)
assert source.name == 'barnet-2009.csv', source.name
assert source.meta['source_url'] == DATA_URL, source.meta
source = pkg.ingest(urllib.urlopen(DATA_URL))
assert source.name == 'barnet-2009.csv', source.name
assert source.meta['source_url'] == DATA_URL, source.meta
rmtree(path)
def test_package_ingest_fileobj():
path = mkdtemp()
store = FileStore(path=path)
coll = Collection('test', store)
pkg = coll.create()
with open(DATA_FILE, 'rb') as fh:
source = pkg.ingest(fh)
assert source.name == 'source.raw', source.name
rmtree(path)
|
11567075
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from .layers import dense_layer, highway_layer
import time
mnist = input_data.read_data_sets("../data/MNIST_data", one_hot=True) # load mnist data for training and testing
input_shape = 784 # 28x28x1, number of pixels for a MNIST image
output_shape = 10 # number of classes for MNIST dataset
hidden_layer_size = 50 # number of neurons for hidden layer
number_of_layers = 18 # number of layers in the network
carry_bias = -20.0 # carry bias used at the transform gate inside the highway layer
learning_rate = 0.01 # learning rate for training
batch_size = 64 # mini-batch size
epochs = 40 # train dataset 40 times
inputs = tf.placeholder(tf.float32, [None, input_shape], name="input") # define inputs for tensorflow graph
targets = tf.placeholder(tf.float32, [None, output_shape], name="output") # define outputs for tensorflow graph
# define a highway networks
prev_layer = None
output_layer = None
for layer_index in range(number_of_layers):
if layer_index == 0:
prev_layer = dense_layer(inputs, input_shape, hidden_layer_size)
elif layer_index == number_of_layers - 1:
output_layer = dense_layer(prev_layer, hidden_layer_size, output_shape, activation=None)
else:
prev_layer = highway_layer(prev_layer, hidden_layer_size, carry_bias=carry_bias)
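# A highway layer computes y = H(x) * T(x) + x * (1 - T(x)), where T is the
# transform gate; the strongly negative carry_bias initialises T(x) near zero so
# each layer initially carries its input through, which keeps deep stacks trainable.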
# define cost
with tf.name_scope('loss'):
cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=output_layer, labels=targets))
# define optimizer
with tf.name_scope('sgd'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# compute accuracy
with tf.name_scope('accuracy'):
y_pred = tf.argmax(tf.nn.softmax(output_layer), 1)
y_true = tf.argmax(targets, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y_pred, y_true), tf.float32))
with tf.Session() as sess:
# initialize all parameters
sess.run(tf.global_variables_initializer())
# training
for epoch in range(epochs):
train_cost = []
train_time = time.time()
for batch_index in range(mnist.train.num_examples // batch_size):
batch_imgs, batch_labels = mnist.train.next_batch(batch_size)
c, _ = sess.run([cost, optimizer], feed_dict={inputs: batch_imgs, targets: batch_labels})
train_cost.append(c)
print("Epoch: {}/{}".format(epoch + 1, epochs), " | Current loss: {:9.6f}".format(np.mean(train_cost)),
" | Epoch time: {:5.2f}s".format(time.time() - train_time))
print("Test accuracy %g" % sess.run(accuracy, feed_dict={inputs: mnist.test.images,
targets: mnist.test.labels}))
# Testing
print("Test Accuracy: ", sess.run(accuracy, feed_dict={inputs: mnist.test.images, targets: mnist.test.labels}))
# Validation
print("Validation Accuracy: ", sess.run(accuracy, feed_dict={inputs: mnist.validation.images,
targets: mnist.validation.labels}))
|
11567124
|
import yaml
from dotmap import DotMap
def extend_dict(extend_me, extend_by):
if isinstance(extend_me, dict):
for k, v in extend_by.items():
if k in extend_me:
extend_dict(extend_me[k], v)
else:
extend_me[k] = v
else:
if isinstance(extend_me, list):
extend_list(extend_me, extend_by)
else:
extend_me += extend_by
def extend_list(extend_me, extend_by):
missing = []
for item1 in extend_me:
if not isinstance(item1, dict):
continue
for item2 in extend_by:
if not isinstance(item2, dict) or item2 in missing:
continue
extend_dict(item1, item2)
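# Example sketch (hypothetical values): merging a base config into a specific one
# keeps existing leaf values and only fills in missing keys, e.g.
#   cfg = {"model": {"lr": 1e-4}}
#   extend_dict(cfg, {"model": {"lr": 1e-3, "dropout": 0.1}, "seed": 42})
#   # cfg -> {"model": {"lr": 1e-4, "dropout": 0.1}, "seed": 42}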
def get_config(path):
with open(path, 'r') as file:
configuration = yaml.load(file, Loader=yaml.FullLoader)
with open('config/base.yml', 'r') as file:
base_configuration = yaml.load(file, Loader=yaml.FullLoader)
configuration = DotMap(configuration)
base_configuration = DotMap(base_configuration)
extend_dict(configuration, base_configuration)
return configuration
if __name__ == '__main__':
config = get_config('config/roberta-base.yml')
|
11567125
|
import json
import uuid
from pathlib import Path
from urllib.parse import urlparse
from datetime import datetime
import html2text
import requests
from bs4 import BeautifulSoup
source_url = 'http://tomaugspurger.github.io/modern-5-tidy.html'
IP_URL = 'http://www.instapaper.com/text?u={url}'
QVR_NOTEBOOK = '/Users/kristof/Dropbox/Applications/Quiver/Quiver.qvlibrary/F54CCC03-A5EC-48E7-8DCD-A264ABCC4277.qvnotebook'
# Download the images and generate UUIDs
def localize_images(resource_path, img_tags):
for img_tag in img_tags:
url = img_tag['src']
r = requests.get(url)
# Define the extension and the new filename
img_ext = Path(urlparse(url).path).suffix
img_name = '{}{}'.format(uuid.uuid4().hex.upper(),
img_ext)
img_filename = Path(resource_path, img_name)
with open(str(img_filename), 'wb') as f:
f.write(r.content)
# Convert the original URL to a Quiver URL
img_tag['src'] = 'quiver-image-url/{}'.format(img_name)
# Write content.json
def write_content(note_path, note_title, note_text):
qvr_content = {}
qvr_content['title'] = note_title
qvr_content['cells'] = []
cell = {'type': 'markdown',
'data': note_text}
qvr_content['cells'].append(cell)
with open(str(Path(note_path, 'content.json')), 'w') as f:
f.write(json.dumps(qvr_content))
# Write meta.json
def write_meta(note_path, note_title, note_uuid):
timestamp = int(datetime.timestamp(datetime.now()))
qvr_meta = {}
qvr_meta['title'] = note_title
qvr_meta['uuid'] = note_uuid
qvr_meta['created_at'] = timestamp
qvr_meta['updated_at'] = timestamp
with open(str(Path(note_path, 'meta.json')), 'w') as f:
f.write(json.dumps(qvr_meta))
# Download the IP version of the URL
r = requests.get(IP_URL.format(url=source_url))
r.raise_for_status()
bs = BeautifulSoup(r.content, 'lxml')
qvr_note_uuid = str(uuid.uuid4()).upper()
# Create the folders
paths = {}
paths['notebook'] = QVR_NOTEBOOK
paths['note'] = Path(paths['notebook'], '{}.qvnote'.format(qvr_note_uuid))
paths['resources'] = Path(paths['note'], 'resources')
paths['resources'].mkdir(parents=True, exist_ok=True)
# Replace the original links by the quiver links
localize_images(paths['resources'], bs.find_all('img'))
# Remove title
_ = bs.select('body main > div.titlebar')[0].extract()
# Convert to Markdown
parser = html2text.HTML2Text()
parser.protect_links = True
parser.wrap_links = False
parser.body_width = 0
note_text = parser.handle(str(bs.find('main')))
write_content(paths['note'],
bs.head.title.string,
note_text)
write_meta(paths['note'],
bs.head.title.string,
qvr_note_uuid)
|
11567191
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
try:
dataFileName = sys.argv[1]
except IndexError:
print("USAGE: python plotEnergies.py 'filename'")
sys.exit(0)
HFEnergy3 = 3.161921401722216
HFEnergy6 = 20.71924844033019
numParticles = \
int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])
hfenergyFound = False
if (numParticles == 2):
HFEnergy = 3.161921401722216
hfenergyFound = True
elif (numParticles == 6):
HFEnergy = 20.71924844033019
hfenergyFound = True
else:
hfenergyFound = False
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:,1] = np.sqrt(data[:,1])
n = len(data[:,0])
x = np.arange(0,n)
fig = plt.figure()
if (hfenergyFound):
yline = np.zeros(n)
yline.fill(HFEnergy)
plt.plot(x, yline, 'r--', label="HF Energy")
msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:,0], yerr=data[:,1], fmt='bo', markersize=msize, label="VMC Energy")
plt.fill_between(x, data[:,0]-data[:,1], data[:,0]+data[:,1])
plt.xlim(0,n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub,0],
yerr=data[minSub:maxSub,1], fmt='bo', markersize=msize, label="VMC "
"Energy")
plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
|
11567195
|
import numpy
from amuse.test import amusetest
from amuse.units import nbody_system
from amuse.units import units
from amuse.ic import limepy
class TestLimepy(amusetest.TestCase):
def test1(self):
cluster=limepy.Limepy(5, 1, N=100).result
self.assertAlmostEqual(cluster.total_mass().number, 1.0)
self.assertEqual(len(cluster),100)
def test2(self):
cluster = limepy.new_limepy_model(7, 2, N=10000)
self.assertAlmostEqual(cluster.total_mass().number, 1.0)
self.assertEqual(len(cluster),10000)
def test3(self):
c = nbody_system.nbody_to_si(200 | units.MSun, 2 | units.parsec)
cluster = limepy.new_limepy_model(7, 2, N=100, converter=c)
self.assertAlmostEqual(cluster.total_mass(), 200. | units.MSun)
self.assertEqual(len(cluster),100)
def setUp(self):
if not limepy.scipy_imported:
self.skip("scipy not installed")
|
11567225
|
import angr
import logging
l = logging.getLogger(name=__name__)
class CallReturn(angr.SimProcedure):
NO_RET = True
def run(self):
l.info("A factory.call_state-created path returned!")
return
|
11567233
|
import re
import string
import typing as t
from pathlib import Path
def raw(data: t.ByteString) -> str:
"""Returns string of printable characters. Replacing non-printable characters
with '.', or CHR(46)
``"""
return "".join(
[chr(byte) if byte >= 0x20 and byte < 0x7F else chr(46) for byte in data],
)
def print(data: t.ByteString) -> filter:
"""Returns string of printable characters. Works similar to the Linux
`string` function.
"""
cleansed = "".join(
[chr(byte) if byte >= 0x20 and byte < 0x7F else chr(0) for byte in data],
)
return filter(lambda string: len(string) >= 4, cleansed.split(chr(0)))
def split_camel_case(value: str) -> list[str]:
return re.sub("([A-Z][a-z]+)", r" \1", re.sub("([A-Z]+)", r" \1", value)).split()
def wraptext(
source_text: str,
separator_chars: str,
width: int = 70,
keep_separators: bool = True,
):
current_length = 0
latest_separator = -1
current_chunk_start = 0
output = ""
char_index = 0
while char_index < len(source_text):
if source_text[char_index] in separator_chars:
latest_separator = char_index
output += source_text[char_index]
current_length += 1
if current_length == width:
if latest_separator >= current_chunk_start:
# Valid earlier separator, cut there
cutting_length = char_index - latest_separator
if not keep_separators:
cutting_length += 1
if cutting_length:
output = output[:-cutting_length]
output += "\n"
current_chunk_start = latest_separator + 1
char_index = current_chunk_start
else:
# No separator found, hard cut
output += "\n"
current_chunk_start = char_index + 1
latest_separator = current_chunk_start - 1
char_index += 1
current_length = 0
else:
char_index += 1
return output
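# Example sketch: wrapping on spaces cuts at the last separator seen before the
# width limit, keeping trailing separators since keep_separators defaults to True:
#   wraptext("alpha beta gamma", " ", width=10) -> "alpha \nbeta \ngamma"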
def filter_strings_in_file(filename: Path, min_chars=4):
with open(filename, errors="ignore") as file:
result = ""
printable = set(string.printable)
for char in file.read():
if char in printable:
result += char
continue
if len(result) >= min_chars:
yield result
result = ""
if len(result) >= min_chars:  # catch result at EOF
yield result
|
11567245
|
import os
import cv2
import json
import torch
import logging
import detectron2
import numpy as np
from detectron2.structures import ImageList
from detectron2.modeling.poolers import ROIPooler
from sklearn.metrics.pairwise import cosine_similarity
from defrcn.dataloader import build_detection_test_loader
from defrcn.evaluation.archs import resnet101
logger = logging.getLogger(__name__)
class PrototypicalCalibrationBlock:
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.device = torch.device(cfg.MODEL.DEVICE)
self.alpha = self.cfg.TEST.PCB_ALPHA
self.imagenet_model = self.build_model()
self.dataloader = build_detection_test_loader(self.cfg, self.cfg.DATASETS.TRAIN[0])
self.roi_pooler = ROIPooler(output_size=(1, 1), scales=(1 / 32,), sampling_ratio=(0), pooler_type="ROIAlignV2")
self.prototypes = self.build_prototypes()
self.exclude_cls = self.clsid_filter()
def build_model(self):
logger.info("Loading ImageNet Pre-train Model from {}".format(self.cfg.TEST.PCB_MODELPATH))
if self.cfg.TEST.PCB_MODELTYPE == 'resnet':
imagenet_model = resnet101()
else:
raise NotImplementedError
state_dict = torch.load(self.cfg.TEST.PCB_MODELPATH)
imagenet_model.load_state_dict(state_dict)
imagenet_model = imagenet_model.to(self.device)
imagenet_model.eval()
return imagenet_model
def build_prototypes(self):
all_features, all_labels = [], []
for index in range(len(self.dataloader.dataset)):
inputs = [self.dataloader.dataset[index]]
assert len(inputs) == 1
# load support images and gt-boxes
img = cv2.imread(inputs[0]['file_name']) # BGR
img_h, img_w = img.shape[0], img.shape[1]
ratio = img_h / inputs[0]['instances'].image_size[0]
inputs[0]['instances'].gt_boxes.tensor = inputs[0]['instances'].gt_boxes.tensor * ratio
boxes = [x["instances"].gt_boxes.to(self.device) for x in inputs]
# extract roi features
features = self.extract_roi_features(img, boxes)
all_features.append(features.cpu().data)
gt_classes = [x['instances'].gt_classes for x in inputs]
all_labels.append(gt_classes[0].cpu().data)
# concat
all_features = torch.cat(all_features, dim=0)
all_labels = torch.cat(all_labels, dim=0)
assert all_features.shape[0] == all_labels.shape[0]
# calculate prototype
features_dict = {}
for i, label in enumerate(all_labels):
label = int(label)
if label not in features_dict:
features_dict[label] = []
features_dict[label].append(all_features[i].unsqueeze(0))
prototypes_dict = {}
for label in features_dict:
features = torch.cat(features_dict[label], dim=0)
prototypes_dict[label] = torch.mean(features, dim=0, keepdim=True)
return prototypes_dict
def extract_roi_features(self, img, boxes):
"""
:param img:
:param boxes:
:return:
"""
mean = torch.tensor([0.406, 0.456, 0.485]).reshape((3, 1, 1)).to(self.device)
std = torch.tensor([[0.225, 0.224, 0.229]]).reshape((3, 1, 1)).to(self.device)
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img).to(self.device)
images = [(img / 255. - mean) / std]
images = ImageList.from_tensors(images, 0)
conv_feature = self.imagenet_model(images.tensor[:, [2, 1, 0]])[1] # size: BxCxHxW
box_features = self.roi_pooler([conv_feature], boxes).squeeze(2).squeeze(2)
activation_vectors = self.imagenet_model.fc(box_features)
return activation_vectors
def execute_calibration(self, inputs, dts):
img = cv2.imread(inputs[0]['file_name'])
ileft = (dts[0]['instances'].scores > self.cfg.TEST.PCB_UPPER).sum()
iright = (dts[0]['instances'].scores > self.cfg.TEST.PCB_LOWER).sum()
assert ileft <= iright
boxes = [dts[0]['instances'].pred_boxes[ileft:iright]]
features = self.extract_roi_features(img, boxes)
for i in range(ileft, iright):
tmp_class = int(dts[0]['instances'].pred_classes[i])
if tmp_class in self.exclude_cls:
continue
tmp_cos = cosine_similarity(features[i - ileft].cpu().data.numpy().reshape((1, -1)),
self.prototypes[tmp_class].cpu().data.numpy())[0][0]
dts[0]['instances'].scores[i] = dts[0]['instances'].scores[i] * self.alpha + tmp_cos * (1 - self.alpha)
return dts
def clsid_filter(self):
dsname = self.cfg.DATASETS.TEST[0]
exclude_ids = []
if 'test_all' in dsname:
if 'coco' in dsname:
exclude_ids = [7, 9, 10, 11, 12, 13, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 59, 61, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79]
elif 'voc' in dsname:
exclude_ids = list(range(0, 15))
else:
raise NotImplementedError
return exclude_ids
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
|
11567257
|
from typing import Any, Union
from typing import Dict, Hashable
import numpy as np
from cumm import tensorview as tv
import json
from collections import abc
from functools import reduce
JSON_INDEX_KEY = "__cumm_io_json_index"
NPDTYPE_TO_JSONARRAY_MAP = {
np.dtype(np.uint64): tv.uint64,
np.dtype(np.uint32): tv.uint32,
np.dtype(np.uint16): tv.uint16,
np.dtype(np.uint8): tv.uint8,
np.dtype(np.int64): tv.int64,
np.dtype(np.int32): tv.int32,
np.dtype(np.int16): tv.int16,
np.dtype(np.int8): tv.int8,
np.dtype(np.float64): tv.float64,
np.dtype(np.float32): tv.float32,
np.dtype(np.float16): tv.float16,
np.dtype(np.bool_): tv.bool_,
}
def _inv_map(dict_map: Dict[Hashable, Hashable]) -> Dict[Hashable, Hashable]:
return {v: k for k, v in dict_map.items()}
INV_NPDTYPE_TO_JSONARRAY_MAP = _inv_map(NPDTYPE_TO_JSONARRAY_MAP)
class Placeholder(object):
def __init__(self, index: int, nbytes: int):
self.index = index
self.nbytes = nbytes
def __add__(self, other):
assert self.index == other.index
return Placeholder(self.index, self.nbytes + other.nbytes)
def __repr__(self):
return "Placeholder[{},{}]".format(self.index, self.nbytes)
def __eq__(self, other):
return self.index == other.index and self.nbytes == other.nbytes
def is_json_index(data):
return isinstance(data, dict) and JSON_INDEX_KEY in data
def byte_size(obj: Union[np.ndarray, tv.Tensor]) -> int:
if isinstance(obj, np.ndarray):
return obj.nbytes
if isinstance(obj, tv.Tensor):
return obj.size * obj.itemsize
else:
raise NotImplementedError
def _extract_arrays_from_data(arrays,
data,
object_classes=(np.ndarray,),
json_index=False):
# can't use abc.Sequence because string is sequence too.
if isinstance(data, (list, tuple)):
data_skeleton = [None] * len(data)
for i in range(len(data)):
e = data[i]
if isinstance(e, object_classes):
data_skeleton[i] = {JSON_INDEX_KEY: len(arrays)}
arrays.append(e)
else:
data_skeleton[i] = _extract_arrays_from_data(
arrays, e, object_classes, json_index)
if isinstance(data, tuple):
data_skeleton = tuple(data_skeleton)
return data_skeleton
elif isinstance(data, abc.Mapping):
data_skeleton = {}
for k, v in data.items():
if isinstance(v, object_classes):
data_skeleton[k] = {JSON_INDEX_KEY: len(arrays)}
arrays.append(v)
else:
data_skeleton[k] = _extract_arrays_from_data(
arrays, v, object_classes, json_index)
return data_skeleton
else:
data_skeleton = None
if isinstance(data, object_classes):
data_skeleton = {JSON_INDEX_KEY: len(arrays)}
arrays.append(data)
else:
data_skeleton = data
return data_skeleton
def extract_arrays_from_data(data,
object_classes=(np.ndarray,),
json_index=False):
arrays = []
data_skeleton = _extract_arrays_from_data(arrays,
data,
object_classes=object_classes,
json_index=json_index)
return arrays, data_skeleton
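# Example sketch: extract_arrays_from_data({"a": np.zeros(2), "b": [1, np.ones(3)]})
# returns ([<a-array>, <b-array>], {"a": {JSON_INDEX_KEY: 0}, "b": [1, {JSON_INDEX_KEY: 1}]}),
# i.e. every ndarray is moved into the flat `arrays` list and replaced in the
# skeleton by a small index dict keyed by JSON_INDEX_KEY.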
def align_offset(offset, n):
"""given a byte offset, align it and return an aligned offset
"""
if n <= 0:
return offset
return n * ((offset + n - 1) // n)
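# Examples: align_offset(17, 32) == 32, align_offset(64, 32) == 64, and a
# non-positive alignment leaves the offset unchanged: align_offset(5, 0) == 5.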
def put_arrays_to_data(arrays, data_skeleton, json_index=False) -> Any:
if not arrays:
return data_skeleton
return _put_arrays_to_data(arrays, data_skeleton, json_index)
def _put_arrays_to_data(arrays, data_skeleton, json_index=False):
if isinstance(data_skeleton, (list, tuple)):
length = len(data_skeleton)
data = [None] * length
for i in range(length):
e = data_skeleton[i]
if is_json_index(e):
data[i] = arrays[e[JSON_INDEX_KEY]]
else:
data[i] = _put_arrays_to_data(arrays, e, json_index)
if isinstance(data_skeleton, tuple):
data = tuple(data)
return data
elif isinstance(data_skeleton, abc.Mapping):
data = {}
for k, v in data_skeleton.items():
if is_json_index(v):
data[k] = arrays[v[JSON_INDEX_KEY]]
else:
data[k] = _put_arrays_to_data(arrays, v, json_index)
return data
else:
if is_json_index(data_skeleton):
data = arrays[data_skeleton[JSON_INDEX_KEY]]
else:
data = data_skeleton
return data
def dumps_jsonarray(obj, multi_thread=False, buffer=None, use_bytearray=False, align_size: int = 32):
"""
layout:
+--------------+------------+---------------------------------+--------------+
|meta_start_pos|meta_end_pos| array/bytes content | meta |
+--------------+------------+---------------------------------+--------------+
data without array/bytes will be saved as bytes in content.
meta format:
{
"array": [
{
"shape": [...]
"dtype": ...
"offset": ...
}
]
"data": skeleton
}
"""
arrays, data_skeleton = extract_arrays_from_data(obj, (np.ndarray, tv.Tensor), True)
array_meta = []
start = 16
for i in range(len(arrays)):
arr = arrays[i]
start_aligned = align_offset(start, align_size)
if isinstance(arr, tv.Tensor):
assert arr.device == -1
arr_np = arr.numpy_view()
else:
arr_np = arr
# ascontiguous will convert scalar to 1-D array. be careful.
arrays[i] = np.ascontiguousarray(arr_np)
array_meta.append({
"shape": arrays[i].shape,
"dtype": NPDTYPE_TO_JSONARRAY_MAP[arrays[i].dtype],
"offset": start_aligned,
"is_np": isinstance(arr, np.ndarray),
})
start = start_aligned + arrays[i].nbytes
meta = {
"array": array_meta,
"data": data_skeleton,
}
meta_json = json.dumps(meta).encode("utf8")
meta_length = len(meta_json)
array_buffers = []
for i in range(len(arrays)):
array_buffers.append((arrays[i].view(np.uint8),
array_meta[i]["offset"], arrays[i].nbytes))
total_length = start + meta_length
if buffer is None:
if not use_bytearray:
buffer = np.empty(total_length, dtype=np.uint8)
else:
buffer = bytearray(total_length)
else:
assert len(buffer) >= total_length
buffer_view = memoryview(buffer)
content_end_offset = start
meta_end_offset = content_end_offset + meta_length
buffer_view[:8] = np.array(content_end_offset, dtype=np.int64).tobytes()
buffer_view[8:16] = np.array(meta_end_offset, dtype=np.int64).tobytes()
buffer_view[16:24] = np.array(align_size, dtype=np.int64).tobytes()
shared_mem = np.frombuffer(buffer_view, dtype=np.uint8)
for a_buf, offset, size in array_buffers:
shared_mem_view = memoryview(shared_mem[offset:offset + size])
if not isinstance(a_buf, bytes):
buf_mem_view = memoryview(a_buf.reshape(-1))
if multi_thread: # slow when multi_thread copy in worker
shared_mem[offset:offset + size] = a_buf.reshape(-1)
else:
shared_mem_view[:] = buf_mem_view
else:
shared_mem_view[:] = a_buf
shared_mem[content_end_offset:content_end_offset +
meta_length] = np.frombuffer(meta_json, dtype=np.uint8)
return buffer
def loads_jsonarray(binary, copy=False):
buffer_view = memoryview(binary)
content_end_offset = np.frombuffer(buffer_view[:8], dtype=np.int64).item()
meta_end_offset = np.frombuffer(buffer_view[8:16], dtype=np.int64).item()
pb_bytes = buffer_view[content_end_offset:meta_end_offset]
meta = json.loads(bytearray(pb_bytes))
array_metas = meta["array"]
data_skeleton = meta["data"]
shared_mem = buffer_view
results_array = []
for array_meta in array_metas:
shape = array_meta["shape"]
dtype = INV_NPDTYPE_TO_JSONARRAY_MAP[array_meta["dtype"]]
offset = array_meta["offset"]
is_np = array_meta["is_np"]
length = reduce(lambda x, y: x * y, shape) * np.dtype(dtype).itemsize
arr = np.frombuffer(memoryview(shared_mem[offset:offset + length]),
dtype=dtype).reshape(shape)
if is_np:
if copy:
arr = arr.copy()
else:
arr = tv.from_numpy(arr)
if copy:
arr = arr.clone()
results_array.append(arr)
results = put_arrays_to_data(results_array, data_skeleton, json_index=True)
return results
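# Round-trip sketch with a hypothetical payload: serialise a nested structure
# containing ndarrays into the layout described in the dumps_jsonarray docstring,
# then recover an equal structure.
if __name__ == "__main__":
    payload = {"boxes": np.arange(12, dtype=np.float32).reshape(3, 4),
               "meta": {"name": "demo"}}
    buf = dumps_jsonarray(payload)
    restored = loads_jsonarray(buf, copy=True)
    assert np.array_equal(restored["boxes"], payload["boxes"])
    assert restored["meta"] == {"name": "demo"}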
|
11567283
|
from keras.layers import Highway as KerasHighway
class Highway(KerasHighway):
"""
Keras' `Highway` layer does not support masking, but it easily could, just by returning the
mask. This `Layer` makes this possible.
"""
def __init__(self, **kwargs):
super(Highway, self).__init__(**kwargs)
self.supports_masking = True
|
11567287
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class BrokerZerodhaTxn(models.Model):
bzt_id = models.AutoField(primary_key=True)
bzt_user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
bzt_tdate = models.DateField(blank=True, null=True)
bzt_tsymbol = models.TextField(blank=True, null=True)
bzt_exchange = models.TextField(blank=True, null=True)
bzt_segment = models.TextField(blank=True, null=True)
bzt_trade_type = models.TextField(blank=True, null=True)
bzt_quantity = models.FloatField(blank=True, null=True)
bzt_price = models.FloatField(blank=True, null=True)
bzt_order_id = models.IntegerField(blank=True, null=True)
bzt_trade_id = models.IntegerField(blank=True, null=True)
bzt_order_exec_time = models.TextField(blank=True, null=True)
class Meta:
db_table = 'broker_zerodha_txn'
unique_together = (('bzt_tsymbol', 'bzt_tdate'),)
|
11567321
|
from xv_leak_tools.test_components.ip_tool.ip_tool_builder import IPToolBuilder
def register(factory):
factory.register(IPToolBuilder())
|
11567379
|
import json
import os
import sys
from collections import OrderedDict
def build_entry(file_name, industries, regions, malwares):
title = file_name.split('.')[0].upper()
# entry = {
# "pb_file": file_name,
# "title": title,
# "industries": industries,
# "regions": regions,
# "malwares": malwares
# }
entry = OrderedDict()
entry['pb_file'] = file_name
entry['title'] = title
entry['industries'] = industries
entry['regions'] = regions
entry['malwares'] = malwares
entry_json = json.dumps(entry)
print(entry_json)
def process_playbook(pb, bundle):
objects = bundle.get('objects', [])
identity_objects = [o for o in objects if o['type'] == 'identity']
malware_objects = [o for o in objects if o['type'] == 'malware']
industries = []
regions = []
malwares = []
for obj in identity_objects:
for sector in obj['sectors']:
if sector not in industries:
industries.append(sector)
for country in obj['x_cta_country']:
if country not in regions:
regions.append(country)
for obj in malware_objects:
if obj['name'] not in malwares:
malwares.append(obj['name'])
industries.sort()
regions.sort()
malwares.sort()
build_entry(pb, industries, regions, malwares)
def process_file(fp):
with open(fp, 'r') as raw_json:
try:
pb = os.path.basename(fp)
bundle = json.load(raw_json)
if bundle.get('type', '') == 'bundle' and bundle.get('spec_version', '') == '2.0':
process_playbook(pb, bundle)
else:
print('{} - no valid stix 2.0 bundle found'.format(pb))
except Exception as e:
print('{} - could not parse json from file\n{}'.format(pb, e))
def main():
if len(sys.argv) == 2:
process_file(sys.argv[1])
else:
print('error {} args given - use 1 arg (path to Playbook)'.format(len(sys.argv)))
if __name__ == '__main__':
main()
|
11567386
|
class CONTENT_STATUS:
IN_PROGRESS = 0
COMPLETE = 1
class EXIT_STATUS:
SYSETM_EXIT = 0
ERROR_EXIT = 1
USER_QUIT = 2
class POC_RESULT_STATUS:
FAIL = 0
SUCCESS = 1
RETRAY = 2
class TARGET_MODE_STATUS:
FILE = 9
SINGLE = 8
IPMASK = 7
RANGE = 6
API = 5
class PROXY_TYPE: # keep same with SocksiPy(import socks)
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3
PROXY_TYPE_HTTP_NO_TUNNEL = 4
|
11567406
|
from setuptools import setup
setup(name='aenet',
version='0.1',
author='<NAME>, <NAME>',
packages=['aenet'],
install_requires=['numpy', 'moviepy', 'theano', 'lasagne'],
zip_safe=False)
|
11567444
|
NAME_PREFIX_SEPARATOR = "_"
ENDPOINTS_SEPARATOR = ", "
CSI_CONTROLLER_SERVER_WORKERS = 10
# array types
ARRAY_TYPE_XIV = 'A9000'
ARRAY_TYPE_SVC = 'SVC'
ARRAY_TYPE_DS8K = 'DS8K'
ALL_ARRAY_TYPES = [ARRAY_TYPE_XIV, ARRAY_TYPE_SVC, ARRAY_TYPE_DS8K]
|
11567445
|
description = 'Length devices for polarisation analysis'
group = 'lowlevel'
devices = dict(
lsd1 = device('nicos.devices.generic.ManualMove',
description = 'distance sample-deflector 1',
default = 613,
unit = 'mm',
fmtstr = '%.0f',
abslimits = (0, 1000),
),
lsd2 = device('nicos.devices.generic.ManualMove',
description = 'distance sample-deflector 2',
default = 663,
unit = 'mm',
fmtstr = '%.0f',
abslimits = (0, 1000),
),
lpsd = device('nicos.devices.generic.ManualMove',
description = 'distance sample PSD',
default = 2316,
unit = 'mm',
fmtstr = '%.0f',
abslimits = (1800, 2800),
),
)
|
11567451
|
from typing import List, cast
import fannypack
import numpy as np
import torch.utils.data
import torchfilter
# These need to externally set before training
buddy: fannypack.utils.Buddy
filter_model: torchfilter.base.Filter
trajectories: List[torchfilter.types.TrajectoryNumpy]
num_workers: int
def configure(
*,
buddy: fannypack.utils.Buddy,
trajectories: List[torchfilter.types.TrajectoryNumpy],
num_workers: int = 8,
):
"""Configure global settings for training helpers."""
assert isinstance(buddy.model, torchfilter.base.Filter)
globals()["buddy"] = buddy
globals()["filter_model"] = cast(torchfilter.base.Filter, buddy.model)
globals()["trajectories"] = trajectories
globals()["num_workers"] = num_workers
# Training helpers
def train_pf_dynamics_single_step(*, epochs, batch_size=32, model=None):
if model is None:
model = filter_model
assert isinstance(model, torchfilter.base.Filter)
# Put model in train mode
model.train()
dataloader = torch.utils.data.DataLoader(
torchfilter.data.SingleStepDataset(trajectories=trajectories),
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
)
for _ in range(epochs):
torchfilter.train.train_dynamics_single_step(
buddy, model.dynamics_model, dataloader, loss_function="mse"
)
def train_pf_dynamics_recurrent(
*, subsequence_length, epochs, batch_size=32, model=None
):
assert isinstance(filter_model, torchfilter.base.Filter)
if model is None:
model = filter_model
assert isinstance(model, torchfilter.base.Filter)
# Put model in train mode
model.train()
dataloader = torch.utils.data.DataLoader(
torchfilter.data.SubsequenceDataset(
trajectories=trajectories, subsequence_length=subsequence_length
),
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
)
for _ in range(epochs):
torchfilter.train.train_dynamics_recurrent(
buddy, model.dynamics_model, dataloader, loss_function="mse"
)
def train_pf_measurement(*, epochs, batch_size, cov_scale=0.1):
assert isinstance(filter_model, torchfilter.filters.ParticleFilter)
# Put model in train mode
filter_model.train()
dataloader = torch.utils.data.DataLoader(
torchfilter.data.ParticleFilterMeasurementDataset(
trajectories=trajectories,
covariance=np.identity(filter_model.state_dim) * cov_scale,
samples_per_pair=10,
),
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
)
for _ in range(epochs):
torchfilter.train.train_particle_filter_measurement(
buddy, filter_model.measurement_model, dataloader
)
def train_virtual_sensor(
*, epochs, batch_size=32, model=None, optimizer_name="train_virtual_sensor"
):
if model is None:
model = filter_model
assert isinstance(model, torchfilter.filters.VirtualSensorExtendedKalmanFilter)
# Put model in train mode
model.train()
dataloader = torch.utils.data.DataLoader(
torchfilter.data.SingleStepDataset(trajectories=trajectories),
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
)
for _ in range(epochs):
torchfilter.train.train_virtual_sensor(
buddy,
model.virtual_sensor_model,
dataloader,
optimizer_name=optimizer_name,
)
def train_e2e(
*,
subsequence_length,
epochs,
batch_size=32,
initial_cov_scale=0.1,
measurement_initialize=False,
model=None,
optimizer_name="train_filter_recurrent",
):
if model is None:
model = filter_model
assert isinstance(model, torchfilter.base.Filter)
# Put model in train mode
model.train()
dataloader = torch.utils.data.DataLoader(
torchfilter.data.SubsequenceDataset(
trajectories=trajectories, subsequence_length=subsequence_length
),
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
drop_last=True,
)
initial_covariance = (
torch.eye(model.state_dim, device=buddy.device) * initial_cov_scale
)
for _ in range(epochs):
torchfilter.train.train_filter(
buddy,
model,
dataloader,
initial_covariance=initial_covariance,
measurement_initialize=measurement_initialize,
optimizer_name=optimizer_name,
)
|
11567453
|
import pandas as pd
class FeatureExtractor():
def __init__(self):
pass
def fit(self, X_df, y):
pass
def transform(self, X_df):
X_df.index = range(len(X_df))
X_df_new = pd.concat(
[X_df.get(['instant_t', 'windspeed', 'latitude', 'longitude',
'hemisphere', 'Jday_predictor', 'initial_max_wind',
'max_wind_change_12h', 'dist2land']),
pd.get_dummies(X_df.nature, prefix='nature', drop_first=True)],
# 'basin' is not used here, but it could be!
axis=1)
# get data from the past of the same storm (if it exists)
past_winds = []
for i in range(len(X_df)):
if i - 1 < 0:
past_winds.append(X_df['windspeed'][i])
elif X_df['stormid'][i] == X_df['stormid'][i - 1]:
past_winds.append(X_df['windspeed'][i - 1])
else:
past_winds.append(X_df['windspeed'][i])
X_df_new = X_df_new.assign(
past_windspeed=pd.Series(past_winds))
X_df_new = X_df_new.fillna(-1)
XX = X_df_new.values
return XX
|
11567477
|
import bpy
import os
import time
class Init(object):
def __init__(self):
self.StartTime = 0.0
self.VertCount = 0
# Addon Folder Name
self.FolderName = os.path.basename(os.path.dirname(__file__))
# Solidify Name (uses Modifier and Material)
self.SolidfyName = "Solidify_Edge"
# WeightType vertex group name
self.WeightTypeName = "*WeightType"
# Left/Right Naming Conventions
self.TextLR = ('_L', '.L', '_R', '.R')
self.GetLR_JP = {
'_L': '\u5de6',
'.L': '\u5de6',
'_R': '\u53f3',
'.R': '\u53f3',
}
# Bone Name
self.TextAnkle = ('ankle', '\u8DB3\u9996')
self.GetAnkle_JP = {
'ankle': '\u3064\u307E\u5148',
'\u8DB3\u9996': '\u3064\u307E\u5148',
}
self.GetAnkle_EN = {
'ankle': 'toe',
'\u8DB3\u9996': 'toe',
}
self.Tip_JP = '\u5148'
self.Tip_EN = ' tip'
# Shape Name
self.ShapeTwist1 = "b2pmxe_shape_twist1"
self.ShapeTwist2 = "b2pmxe_shape_twist2"
self.ShapeAuto = "b2pmxe_shape_auto"
self.ShapeMaster = "b2pmxe_shape_master"
self.ShapeEyes = "b2pmxe_shape_eyes"
def SetStartTime(self):
self.StartTime = time.time()
def SetVertCount(self, vert_count):
self.VertCount = vert_count
def PrintTime(self, filepath, type):
take_time = time.time() - self.StartTime
text_type = {
'import': "Importing",
'export': "Exporting",
}
print(
"Finished %s: %r in %.3f sec. %d verts." % (
text_type.get(type, ""),
bpy.path.basename(filepath),
take_time,
self.VertCount
))
bpy.ops.wm.memory_statistics()
|
11567487
|
from twindb_backup.status.binlog_status import BinlogStatus
def test_eq(raw_binlog_status):
status_1 = BinlogStatus(raw_binlog_status)
status_2 = BinlogStatus(raw_binlog_status)
assert status_1 == status_2
def test_ne(raw_binlog_status):
status_1 = BinlogStatus(raw_binlog_status)
status_2 = BinlogStatus()
assert status_1 != status_2
|
11567538
|
import os
import sys
import jinja2
from rdflib import ConjunctiveGraph, URIRef
from rdflib.namespace import DCTERMS, RDFS, FOAF
from rdflib.namespace import Namespace
FM = Namespace('https://purl.org/fair-metrics/terms/')
fairGraph = ConjunctiveGraph()
fairGraph.parse('http://purl.org/fair-ontology#', format='trig')
fairTermGraph = ConjunctiveGraph()
fairTermGraph.parse('terms', format='n3')
class FairMetricData():
def __init__(self, id):
self.base = 'https://purl.org/fair-metrics/'
self.id = URIRef(id)
self.assertion = URIRef(id+'#assertion')
# id = id.replace(self.base, '') # HACK -- remove this line before merging commit
self.g = ConjunctiveGraph()
self.g.parse(id, format='trig')
def getID(self):
return self.id
def getShortID(self):
return self.id.replace(self.base, '')
def getAuthors(self):
authors = [o.toPython() for o in self.g.objects(subject=self.assertion, predicate=DCTERMS.author)]
authors.sort()
return ' \\\\ '.join(authors)
def getTitle(self):
return ', '.join([o.toPython() for o in self.g.objects(subject=self.assertion, predicate=RDFS.comment)])
def getShortTitle(self):
return ', '.join([o.toPython() for o in self.g.objects(subject=self.assertion, predicate=DCTERMS.title)])
def getTopicDescription(self):
descs = []
for o in self.g.objects(subject=self.id, predicate=FOAF.primaryTopic):
# o should be fair:A1.1
for o2 in fairGraph.objects(subject=o, predicate=DCTERMS.description):
descs.append(o2.toPython())
return ' '.join(descs)
def getTopicTitle(self):
descs = []
for o in self.g.objects(subject=self.id, predicate=FOAF.primaryTopic):
# o should be fair:A1.1
for o2 in fairGraph.objects(subject=o, predicate=DCTERMS.title):
descs.append(o2.toPython())
return ' '.join(descs)
def getMeasuring(self):
# return fm:measuring
return self.getFMPropertyValue(FM.measuring)
def getRationale(self):
# return fm:rationale
return self.getFMPropertyValue(FM.rationale)
def getRequirements(self):
# return fm:requirements
return self.getFMPropertyValue(FM.requirements)
def getProcedure(self):
# return fm:procedure
return self.getFMPropertyValue(FM.procedure)
def getValidation(self):
# return fm:validation
return self.getFMPropertyValue(FM.validation)
def getRelevance(self):
# return fm:relevance
return self.getFMPropertyValue(FM.relevance)
def getExamples(self):
# return fm:examples
return self.getFMPropertyValue(FM.examples)
def getComments(self):
# return fm:comments
return self.getFMPropertyValue(FM.comments)
def getFMPropertyLabel(self, property):
return ', '.join([ o.toPython() for o in fairTermGraph.objects(subject=FM[property], predicate=RDFS['label'])])
def getFMPropertyValue(self, property):
return ', '.join([o.toPython() for o in self.g.objects(subject=self.assertion, predicate=property)])
if __name__=='__main__':
args = sys.argv
if len(args)!=2:
raise Exception('Expected metric IRI as input')
# The idea is that we could fill the table http://fairmetrics.org/fairmetricform.html
# from a given metric IRI
# id = 'https://purl.org/fair-metrics/FM_A1.1'
metricFile = args[1]
id = 'https://purl.org/fair-metrics/' + metricFile
fm = FairMetricData(id)
latex_jinja_env = jinja2.Environment(
variable_start_string = '\VAR{',
variable_end_string = '}',
trim_blocks = True,
autoescape = False,
loader = jinja2.FileSystemLoader(os.path.abspath('.'))
)
template = latex_jinja_env.get_template('template.tex')
title=fm.getTitle()
authors=fm.getAuthors()
metricId=fm.getShortID().replace('_','-') # Avoid _ in latex template
metricIdVerb=fm.getID()
shortTitle=fm.getShortTitle()
topicTitle=fm.getTopicTitle()
topicDesription=fm.getTopicDescription().replace('\n','\\newline\n')
measuring=fm.getMeasuring().replace('\n','\\newline\n')
rationale=fm.getRationale().replace('\n','\\newline\n')
requirements=fm.getRequirements().replace('\n','\\newline\n')
procedure=fm.getProcedure().replace('\n','\\newline\n')
validation=fm.getValidation().replace('\n','\\newline\n')
relevance=fm.getRelevance().replace('\n','\\newline\n')
examples=fm.getExamples().replace('\n','\\newline\n')
comments=fm.getComments().replace('\n','\\newline\n')
measuringLabel=fm.getFMPropertyLabel('measuring')
rationaleLabel=fm.getFMPropertyLabel('rationale')
requirementsLabel=fm.getFMPropertyLabel('requirements')
procedureLabel=fm.getFMPropertyLabel('procedure')
validationLabel=fm.getFMPropertyLabel('validation')
relevanceLabel=fm.getFMPropertyLabel('relevance')
examplesLabel=fm.getFMPropertyLabel('examples')
commentsLabel=fm.getFMPropertyLabel('comments')
print(template.render(
title=title,
authors=authors,
metricId=metricId,
metricIdVerb=metricIdVerb,
shortTitle=shortTitle,
topicTitle=topicTitle,
topicDesription=topicDesription,
measuring=measuring,
rationale=rationale,
requirements=requirements,
procedure=procedure,
validation=validation,
relevance=relevance,
examples=examples,
comments=comments,
measuringLabel=measuringLabel,
rationaleLabel=rationaleLabel,
requirementsLabel=requirementsLabel,
procedureLabel=procedureLabel,
validationLabel=validationLabel,
relevanceLabel=relevanceLabel,
examplesLabel=examplesLabel,
commentsLabel=commentsLabel,
))
|
11567562
|
import abc
from dataclasses import dataclass
import math
import torch
from .basedist import ExponentialFamily
from .basedist import ConjugateLikelihood
__all__ = ['NormalFullCovariance', 'NormalFullCovarianceStdParams']
@dataclass(init=False, eq=False, unsafe_hash=True)
class NormalFullCovarianceStdParams(torch.nn.Module):
'''Standard parameterization of the Normal pdf with full
covariance matrix.
'''
mean: torch.Tensor
cov: torch.Tensor
def __init__(self, mean, cov):
super().__init__()
self.register_buffer('mean', mean)
self.register_buffer('cov', cov)
@classmethod
def from_natural_parameters(cls, natural_params):
npsize = natural_params.shape
if len(npsize) == 1:
natural_params = natural_params.view(1, -1)
# First we recover the dimension of the mean parameters (D).
# Since the dimension of the natural parameters of the
# Normal pdf (full covariance) is:
# l = natural_params.shape[-1]
# D^2 + D = l
# we can find D by looking for the positive root of the above
# polynomial which is given by:
# D = .5 * (-1 + sqrt(1 + 4 * l))
l = natural_params.shape[-1]
dim = int(.5 * (-1 + math.sqrt(1 + 4 * l)))
np1 = natural_params[:, :dim]
        np2 = natural_params[:, dim: dim * (dim + 1)].reshape(-1, dim, dim)
        cov = np2.inverse()
        mean = torch.matmul(cov, np1.unsqueeze(-1)).squeeze(-1)
if len(npsize) == 1:
return cls(mean.view(-1), cov.view(dim, dim))
return cls(mean.view(-1, dim), cov.view(-1, dim, dim))
class NormalFullCovariance(ExponentialFamily):
_std_params_def = {
'mean': 'Mean parameter.',
'cov': 'Covariance matrix.',
}
_std_params_cls = NormalFullCovarianceStdParams
def __len__(self):
paramshape = self.params.mean.shape
return 1 if len(paramshape) <= 1 else paramshape[0]
@property
def dim(self):
return self.params.mean.shape[-1]
def conjugate(self):
raise NotImplementedError
def forward(self, stats, pdfwise=False):
nparams = self.natural_parameters()
mean = self.params.mean
cov = self.params.cov
size = mean.shape
dim = self.dim
if len(size) <= 1:
mean = mean.view(1, -1)
nparams = nparams.view(1, -1)
        # Get the precision matrix from the natural parameters to avoid
        # inverting the covariance matrix one more time.
        prec = nparams[:, dim: dim * (dim + 1)].reshape(-1, dim, dim)
        # Log-determinant of all the covariance matrices.
L = torch.cholesky(cov, upper=False)
logdet = 2 * torch.log(L[:, range(dim), range(dim)]).sum(dim=-1)
# Quadratic term of the log-normalizer: -.5 * mu^T S mu
Sm = nparams[:, :dim]
mSm = (Sm * mean).sum(dim=-1)
lnorm = .5 * (logdet + mSm)
log_basemeasure = -.5 * (dim * math.log(2 * math.pi))
if pdfwise:
return torch.sum(nparams * stats, dim=-1) - lnorm \
+ log_basemeasure
retval = nparams @ stats.t() - lnorm[:, None] + log_basemeasure
if len(size) <= 1:
return retval.reshape(-1)
return retval
def sufficient_statistics(self, data):
data_quad = (data[:, :, None] * data[:, None, :])
data_quad = data_quad.reshape(len(data), -1)
return torch.cat([data, -.5 * data_quad], dim=-1)
def expected_sufficient_statistics(self):
'''Expected sufficient statistics given the current
parameterization.
        For the random variable x (vector), the sufficient statistics of
        the Normal with full covariance matrix are given by:
        stats = (
            x,
            -.5 * vec(x x^T),
        )
        For the standard parameters (m=mean, S=covariance matrix) the
        expectation of the sufficient statistics is given by:
        E[stats] = (
            m,
            -.5 * vec(S + m m^T)
        )
'''
mean, cov = self.params.mean, self.params.cov
size = mean.shape
dim = self.dim
if len(size) <= 1:
mean = mean.view(1, -1)
cov = cov.view(1, dim, dim)
mean_quad = mean[:, :, None] * mean[:, None, :]
retval = torch.cat([
mean,
-.5 * (cov + mean_quad).reshape(len(mean), -1)
], dim=-1)
if len(size) <= 1:
return retval.view(-1)
return retval
def expected_value(self):
return self.params.mean
def log_norm(self):
mean, cov = self.params.mean, self.params.cov
size = mean.shape
dim = self.dim
if len(size) <= 1:
mean = mean.view(1, -1)
cov = cov.view(1, dim, dim)
L = torch.cholesky(cov, upper=False)
logdet = 2 * torch.log(L[:, range(dim), range(dim)]).sum(dim=-1)
prec = cov.inverse()
Sm = torch.matmul(prec, mean[:, :, None]).view(-1, dim)
mSm = (Sm * mean).sum(dim=-1)
log_base_measure = .5 * dim * math.log(2 * math.pi)
return .5 * (logdet + mSm) + log_base_measure
def sample(self, nsamples):
mean, cov = self.params.mean, self.params.cov
size = mean.shape
dim = self.dim
if len(size) <= 1:
mean = mean.view(1, -1)
cov = cov.view(1, dim, dim)
L = torch.cholesky(cov, upper=False)
noise = torch.randn(mean.shape[0], nsamples, mean.shape[1],
dtype=mean.dtype, device=mean.device)
retval = mean[:, None, :] + torch.matmul(noise, L.permute(0, 2, 1))
if len(size) <= 1:
return retval.reshape(nsamples, -1)
return retval
def natural_parameters(self):
'''Natural form of the current parameterization. For the
        standard parameters (m=mean, S=covariance matrix) the
        natural parameterization is given by:
        nparams = (
            S^-1 * m,
            vec(S^-1)
        )
        Returns:
            ``torch.Tensor[D + D^2]``
'''
mean, cov = self.params.mean, self.params.cov
size = len(mean.shape) if len(mean.shape) > 0 else 1
dim = mean.shape[-1]
if size == 1:
mean = mean.view(1, -1)
cov = cov.view(1, dim, dim)
prec = cov.inverse()
Sm = torch.matmul(prec, mean[:, :, None]).view(-1, dim)
retval = torch.cat([Sm, prec.reshape(len(mean), -1)], dim=-1)
if size == 1:
return retval.view(-1)
return retval
def update_from_natural_parameters(self, natural_params):
self.params = self.params.from_natural_parameters(natural_params)
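# Round-trip sketch (illustrative values only): the natural parameterization
# documented above is nparams = [S^-1 m, vec(S^-1)], so for example
#     mean = torch.tensor([1.0, -2.0])
#     cov = torch.tensor([[2.0, 0.3], [0.3, 1.0]])
#     prec = cov.inverse()
#     nparams = torch.cat([prec @ mean, prec.reshape(-1)])
#     NormalFullCovarianceStdParams.from_natural_parameters(nparams)
# recovers (up to numerical precision) the original mean and cov.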
|
11567574
|
import sys
def align_the_unaligned(unalnd_nodes,words,is_alnd_words):
ret_alns = {}
for (node, prev) in unalnd_nodes:
aln = -1
bestscr = 10000
for i in range(len(words)):
if not is_alnd_words[i]:
scr = abs(prev-i)
if scr < bestscr:
bestscr = scr
aln = i
if aln != -1:
ret_alns[node] = str(aln)+"-"+str(aln+1)
is_alnd_words[aln] = True
#print(node+"\t"+words[aln])
return ret_alns
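# Illustrative example (hypothetical inputs): each unaligned node is attached to
# the closest word index (relative to its ``prev`` position) that is not yet
# aligned. With words ['a', 'b', 'c', 'd'] where only index 3 is still free:
#     align_the_unaligned([('n1', 1)], ['a', 'b', 'c', 'd'], [True, True, True, False])
# returns {'n1': '3-4'} and marks index 3 as aligned.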
fg = open(sys.argv[1])
unalnd_nodes = []
isalnd_words = []
words = []
graph_lines = []
prev = 0
for line in fg:
line = line.rstrip()
if line == "":
alns = align_the_unaligned(unalnd_nodes,words,isalnd_words)
for (i,line) in enumerate(graph_lines):
if line in alns:
graph_lines[i] = line+"\t"+alns[line]
print("\n".join(graph_lines)+"\n")
unalnd_nodes = []
isalnd_words = []
words = []
graph_lines = []
prev = 0
continue
if "::tok" in line:
words = line.split()[2:]
isalnd_words = [False]*len(words)
if "::node" in line:
if len(line.split("\t")) == 3:
unalnd_nodes.append((line,prev))
else:
(a,b) = line.split("\t")[-1].split('-')
if not a.isdigit() or not b.isdigit():
nline = "\t".join(line.split("\t")[:-1])+" "+line.split("\t")[-1]
line = nline
continue
for i in range(int(a),int(b)):
if i < len(isalnd_words):
isalnd_words[i] = True
prev = int(b)-1
graph_lines.append(line)
|
11567579
|
import json
import sys
import os
import argparse
from model.pfn import *
import torch
from transformers import AlbertTokenizer, AutoTokenizer
import re
def map_origin_word_to_bert(words, tokenizer):
bep_dict = {}
current_idx = 1
for word_idx, word in enumerate(words):
bert_word = tokenizer.tokenize(word)
word_len = len(bert_word)
bep_dict[word_idx] = [current_idx, current_idx + word_len - 1]
current_idx = current_idx + word_len
return bep_dict
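# Illustrative example (hypothetical tokenizer output): for words ["I", "playing"]
# where the tokenizer splits "playing" into ["play", "##ing"], the mapping is
# {0: [1, 1], 1: [2, 3]} -- subword index 0 is left for the [CLS] token.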
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--sent", type=str, required=True,
help="input sentence")
parser.add_argument("--model_file", type=str, required=True,
help="loading pre-trained model files")
parser.add_argument("--embed_mode", type=str,
help="loading pre-trained model files")
parser.add_argument("--hidden_size", type=int, default=300,
help="hidden size of the model")
parser.add_argument("--dropconnect", type=float, default=0.,
help="dropconnect on encoder")
parser.add_argument("--dropout", type=float, default=0.,
help="dropout on word embedding and task units")
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data = None
if "web" in args.model_file:
data = "WEBNLG"
elif "nyt" in args.model_file:
data = "NYT"
elif "ade" in args.model_file:
data = "ADE"
elif "ace" in args.model_file:
data = "ACE2005"
elif "sci" in args.model_file:
data = "SCIERC"
if "albert" in args.model_file:
input_size = 4096
args.embed_mode = "albert"
elif "sci" in args.model_file:
input_size = 768
args.embed_mode = "scibert"
elif "bert" in args.model_file:
input_size = 768
args.embed_mode = "bert_cased"
with open("data/" + data + "/ner2idx.json", "r") as f:
ner2idx = json.load(f)
with open("data/" + data + "/rel2idx.json", "r") as f:
rel2idx = json.load(f)
idx2ner = {v: k for k, v in ner2idx.items()}
idx2rel = {v: k for k, v in rel2idx.items()}
model = PFN(args, input_size, ner2idx, rel2idx)
model.load_state_dict(torch.load(args.model_file))
model.to(device)
model.eval()
if args.embed_mode == "albert":
tokenizer = AlbertTokenizer.from_pretrained("albert-xxlarge-v1")
elif args.embed_mode == "bert_cased":
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
else:
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
target_sent = re.findall(r"\w+|[^\w\s]", args.sent)
sent_bert_ids = tokenizer(target_sent, return_tensors="pt", is_split_into_words=True)["input_ids"].tolist()
sent_bert_ids = sent_bert_ids[0]
sent_bert_str = []
for i in sent_bert_ids:
sent_bert_str.append(tokenizer.convert_ids_to_tokens(i))
bert_len = len(sent_bert_str)
mask = torch.ones(bert_len, 1).to(device)
ner_score, re_score = model(target_sent, mask)
ner_score = torch.where(ner_score>=0.5, torch.ones_like(ner_score), torch.zeros_like(ner_score))
re_score = torch.where(re_score>=0.5, torch.ones_like(re_score), torch.zeros_like(re_score))
entity = (ner_score == 1).nonzero(as_tuple=False).tolist()
relation = (re_score == 1).nonzero(as_tuple=False).tolist()
word_to_bep = map_origin_word_to_bert(target_sent, tokenizer)
bep_to_word = {word_to_bep[i][0]:i for i in word_to_bep.keys()}
entity_names = {}
for en in entity:
type = idx2ner[en[3]]
start = None
end = None
if en[0] in bep_to_word.keys():
start = bep_to_word[en[0]]
if en[1] in bep_to_word.keys():
end = bep_to_word[en[1]]
if start == None or end == None:
continue
entity_str = " ".join(target_sent[start:end+1])
entity_names[entity_str] = start
print("entity_name: {}, entity type: {}".format(entity_str, type))
for re in relation:
type = idx2rel[re[3]]
e1 = None
e2 = None
if re[0] in bep_to_word.keys():
e1 = bep_to_word[re[0]]
if re[1] in bep_to_word.keys():
e2 = bep_to_word[re[1]]
if e1 == None or e2 == None:
continue
subj = None
obj = None
for en, start_index in entity_names.items():
if en.startswith(target_sent[e1]) and start_index == e1:
subj = en
if en.startswith(target_sent[e2]) and start_index == e2:
obj = en
if subj == None or obj == None:
continue
print("triple: {}, {}, {}".format(subj, type, obj))
|
11567583
|
import os
from collections import OrderedDict
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear, ReLU
from . import constants
"""Config schemas
This file loads config files and computes some extra stuff.
For example, if a NodeEncoder schema uses CNNs for the RGB/Depth
pathways but uses a LinearEncoder for the Mask pathway, it will
automatically calculate what the fusion module input dimension
should be.
SplitNet, DeleteNet, and SGS-Net each have separate configs.
The configs are largely similar. They each (potentially) have:
- node_encoder_config:
- {rgb,depth,mask}_encoder_config
- These describe the CNN and/or linear encoders applied to
each input. Note that in the paper, no linear encoders
are used. They are typically all CNN encoders.
- fusion_module_config (SGS-Net only)
- decoder_config (SplitNet only)
- bg_fusion_module_config (DeleteNet only)
- gn_layer_config (SGS-Net only)
Note that the CNN encoders for the NodeEncoder are shared amongst
SplitNet/DeleteNet/SGS-Net.
"""
def dictify(dict_, dict_type=dict):
"""Turn nested dicts into nested dict_type dicts.
E.g. turn OrderedDicts into dicts, or dicts into OrderedDicts.
"""
if not isinstance(dict_, dict):
return dict_
new_dict = dict_type()
for key in dict_:
if isinstance(dict_[key], dict):
new_dict[key] = dictify(dict_[key], dict_type=dict_type)
elif isinstance(dict_[key], list):
new_dict[key] = [dictify(x, dict_type=dict_type) for x in dict_[key]]
else:
new_dict[key] = dict_[key]
return new_dict
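# Example (illustrative): dictify({'a': {'b': 1}}, dict_type=OrderedDict) returns
# OrderedDict([('a', OrderedDict([('b', 1)]))]), while dictify(OrderedDict([('a', 1)]))
# with the default dict_type returns a plain {'a': 1}.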
def process_node_encoder_config(node_encoder_cfg,
img_size=(64, 64),
CNN_reduction_factor=None):
"""Update node encoder config dictionary in place."""
# For the CNN encoders, compute linear encoder (on top of CNN) input if specified
for key in node_encoder_cfg.keys():
if 'encoder_config' not in key:
continue
if (node_encoder_cfg[key]['type'] == 'cnn' and
'linear_encoder_config' in node_encoder_cfg[key]):
reduced_img_size = (np.array(img_size) / CNN_reduction_factor / node_encoder_cfg[key]['avg_pool_kernel_size'])
input_dim = int(np.prod(reduced_img_size) * node_encoder_cfg[key]['output_channels'])
node_encoder_cfg[key]['linear_encoder_config']['input_dim'] = input_dim
# If fusion module is specified, compute input dimension (fusing the features from each encoder)
if 'fusion_module_config' in node_encoder_cfg:
fm_inc = 0
for key in node_encoder_cfg:
if 'encoder_config' not in key:
continue
if node_encoder_cfg[key]['type'] == 'linear':
fm_inc += node_encoder_cfg[key]['output_dim']
elif node_encoder_cfg[key]['type'] == 'cnn': # Note, linear fusion module assumes CNNs have additional linear module on top
fm_inc += node_encoder_cfg[key]['linear_encoder_config']['output_dim']
node_encoder_cfg['fusion_module_config']['input_dim'] = fm_inc
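# Worked example of the fusion input computation above (hypothetical values): with
# a 'linear' rgb_encoder_config of output_dim 64 and a 'cnn' depth_encoder_config
# whose linear_encoder_config has output_dim 64, the fused feature size is
# 64 + 64 = 128, so fusion_module_config['input_dim'] is set to 128.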
def get_splitnet_config(cfg_filename):
"""Get SplitNet config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
cfg = yaml.load(f)
# Process node encoder config
node_encoder_cfg = cfg['node_encoder_config']
process_node_encoder_config(node_encoder_cfg,
img_size=cfg['img_size'])
# Compute encoder output channels/dims for decoder input
encoder_output_channels = dict()
encoder_output_dims = dict()
for k in node_encoder_cfg.keys():
if 'encoder_config' in k:
if node_encoder_cfg[k]['type'] == 'cnn':
encoder_output_channels[k] = node_encoder_cfg[k]['output_channels']
if node_encoder_cfg[k]['type'] == 'linear':
encoder_output_dims[k] = node_encoder_cfg[k]['output_dim']
cfg['decoder_config'].update({
'encoder_output_channels' : encoder_output_channels,
'encoder_output_dims' : encoder_output_dims,
'img_size' : cfg['img_size'],
})
return dictify(cfg, dict_type=OrderedDict)
def get_splitnet_train_config(cfg_filename):
"""Get SplitNet training config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
train_cfg = yaml.load(f)
# Extra things to compute
train_cfg['tb_directory'] = os.path.join(constants.BASE_TENSORBOARD_DIR, train_cfg['tb_directory'])
iter_num = train_cfg['iter_num']
train_cfg['opt_filename'] = os.path.join(train_cfg['tb_directory'],
f'SplitNetTrainer_SplitNetWrapper_iter{iter_num}_checkpoint.pth')
train_cfg['model_filename'] = os.path.join(train_cfg['tb_directory'],
f'SplitNetWrapper_iter{iter_num}_checkpoint.pth')
train_cfg['rn50_fpn_filename'] = os.path.join(train_cfg['tb_directory'],
f'BackboneWithFPN_iter{iter_num}_checkpoint.pth')
return train_cfg
def get_deletenet_config(cfg_filename):
"""Get DeleteNet config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
cfg = yaml.load(f)
# Process node encoder config
node_encoder_cfg = cfg['node_encoder_config']
process_node_encoder_config(node_encoder_cfg,
img_size=cfg['img_size'],
CNN_reduction_factor=cfg['CNN_reduction_factor'])
bg_fm_inc = 0
for key in node_encoder_cfg:
if 'encoder_config' not in key:
continue
if node_encoder_cfg[key]['type'] == 'linear':
bg_fm_inc += node_encoder_cfg[key]['output_dim']
elif node_encoder_cfg[key]['type'] == 'cnn': # Note, linear fusion module assumes CNNs have additional linear module on top
bg_fm_inc += node_encoder_cfg[key]['linear_encoder_config']['output_dim']
cfg['bg_fusion_module_config']['input_dim'] = bg_fm_inc
return dictify(cfg, dict_type=OrderedDict)
def get_deletenet_train_config(cfg_filename):
"""Get DeleteNet training config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
train_cfg = yaml.load(f)
# Extra things to compute
train_cfg['tb_directory'] = os.path.join(constants.BASE_TENSORBOARD_DIR, train_cfg['tb_directory'])
iter_num = train_cfg['iter_num']
train_cfg['opt_filename'] = os.path.join(train_cfg['tb_directory'],
f'DeleteNetTrainer_DeleteNetWrapper_iter{iter_num}_checkpoint.pth')
train_cfg['model_filename'] = os.path.join(train_cfg['tb_directory'],
f'DeleteNetWrapper_iter{iter_num}_checkpoint.pth')
return train_cfg
def get_sgsnet_config(cfg_filename):
"""Get SGSNet config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
cfg = yaml.load(f)
# Process node encoder config
node_encoder_cfg = cfg['node_encoder_config']
process_node_encoder_config(node_encoder_cfg,
img_size=cfg['img_size'],
CNN_reduction_factor=cfg['CNN_reduction_factor'])
# GraphNet layers
cfg['layer_config'] = []
for i in range(cfg['num_gn_layers']):
gn_layer_config = cfg['gn_layer_config'].copy()
if i == 0:
gn_layer_config['node_input_channels'] = node_encoder_cfg['fusion_module_config']['output_dim']
gn_layer_config['edge_input_channels'] = node_encoder_cfg['fusion_module_config']['output_dim']
else:
gn_layer_config['node_input_channels'] = cfg['layer_config'][-1]['node_output_channels']
gn_layer_config['edge_input_channels'] = cfg['layer_config'][-1]['edge_output_channels']
cfg['layer_config'].append(gn_layer_config)
# GraphNet Output layer
cfg['gn_output_layer']['input_dim'] = (cfg['layer_config'][-1]['node_output_channels'] +
cfg['layer_config'][-1]['edge_output_channels'])
return dictify(cfg, dict_type=OrderedDict)
def get_sgsnet_train_config(cfg_filename):
"""Get SGSNet training config from file, perform additional computation."""
# First, load the YAML file
with open(cfg_filename, 'r') as f:
train_cfg = yaml.load(f)
# Extra things to compute
train_cfg['tb_directory'] = os.path.join(constants.BASE_TENSORBOARD_DIR, train_cfg['tb_directory'])
# Get filenames for pretrained ResNet50+FPN and SplitNet (to get encoders)
train_cfg['rn50_fpn_filename'] = os.path.join(
constants.BASE_TENSORBOARD_DIR,
train_cfg['pretrained_tb_path'],
f'BackboneWithFPN_iter{train_cfg["pretrained_iter_num"]}_checkpoint.pth')
train_cfg['splitnet_filename'] = os.path.join(
constants.BASE_TENSORBOARD_DIR,
train_cfg['pretrained_tb_path'],
f'SplitNetWrapper_iter{train_cfg["pretrained_iter_num"]}_checkpoint.pth')
iter_num = train_cfg['iter_num']
train_cfg['opt_filename'] = os.path.join(train_cfg['tb_directory'],
f'SGSNetTrainer_SGSNetWrapper_iter{iter_num}_checkpoint.pth')
train_cfg['model_filename'] = os.path.join(train_cfg['tb_directory'],
f'SGSNetWrapper_iter{iter_num}_checkpoint.pth')
return train_cfg
|
11567585
|
token = "<PASSWORD> token"
bot_invite = "bot's invite"
support_server = "https://discord.gg/aBM5xz6"
dev = 548163406537162782 #you can change this to your own ID, but I would really appreciate it if you keep it the same
extensions = ['cogs.events', 'cogs.setup', 'cogs.dev',
'cogs.messages', 'cogs.misc', 'cogs.help']
host_logs = "webhook url where you wanna send some not very useful logs"
dev_logo = "https://i.ibb.co/1v7PGN1/deadshoticon.gif" #same as dev: you can change this to your own, but I would appreciate it if you keep it as it is.
#sql-conf
SQL_INFO = {
"user": "",
"password": "",
"host": "",
"database": "",
}
|
11567599
|
import argparse
import os
cmd_opt = argparse.ArgumentParser(description='DS')
cmd_opt.add_argument('-seed', type=int, default=0, help='seed')
cmd_opt.add_argument('-phase', type=str, default='train', help='training or testing phase')
# hyperparameters for training
cmd_opt.add_argument('-loss_type', type=str, default='mle', help='type of loss function')
cmd_opt.add_argument('-loss_weight', type=float, default=0.5)
cmd_opt.add_argument('-iters_per_eval', type=int, default=100, help='iterations per evaluation')
cmd_opt.add_argument('-iters_per_epoch', type=int, default=10, help='iterations per epoch')
cmd_opt.add_argument('-loss_temp', type=float, default=1, help='temperature for softmin in loss')
cmd_opt.add_argument('-min_temp', type=float, default=0, help='minimal temperature for softmin in loss')
cmd_opt.add_argument('-max_temp', type=float, default=0, help='maximal temperature for softmin in loss')
# hyperparameters for policy
cmd_opt.add_argument('-share', type=eval, default=True, help='pi_t for different t, share parameters or not')
cmd_opt.add_argument('-post_dim', type=int, default=8, help='position embedding dimension')
cmd_opt.add_argument('-val_policy_dump', type=str, default=None, help='best validation policy dump')
cmd_opt.add_argument('-stochastic', type=eval, default=True, help='stopping rule is stochastic or not')
cmd_opt.add_argument('-kl_type', type=str, default='forward', help='forward kl or backward kl')
cmd_opt.add_argument('-policy_type', type=str, default='sequential', help='choose from sequential and multiclass')
cmd_opt.add_argument("--outf", type=str, default="logs", help='path of log files')
cmd_opt.add_argument("--num_of_layers", type=int, default=20, help="Number of total layers")
cmd_opt.add_argument('-batch_size', type=int, default=16, help='batch size')
cmd_opt.add_argument('-learning_rate', type=float, default=1e-4, help='learning rate')
cmd_opt.add_argument('-weight_decay', type=float, default=1e-5)
cmd_opt.add_argument('-num_epochs', type=int, default=100, help='num epochs')
cmd_opt.add_argument('-temp', type=float, default=5, help='temperature for soft sign')
# hyperparameters for LISTA model
cmd_opt.add_argument('-T_max', type=int, default=50, help='max number of layers')
cmd_opt.add_argument('-num_output', type=int, default=10, help='num outputs')
cmd_opt.add_argument('-untied', type=eval, default=False, help='share parameters over layers or not')
cmd_opt.add_argument("--data_folder", type=str, default='./', help='the data folder')
cmd_opt.add_argument("--num_workers", type=int, default=12, help='the number of workers for io')
cmd_opt.add_argument("--restart", type=bool, default=False, help='load a snapshot and continue')
cmd_opt.add_argument("--val_ratio", type=float, default=0.2, help='the ratio for val data')
cmd_args = cmd_opt.parse_args()
print(cmd_args)
|
11567630
|
from dataclasses import dataclass, field
from typing import Any, Dict, List, Type
from .destinations import Destination, DestinationFactory
from .transformers import Transformer
@dataclass
class Pipeline(Destination):
transformers: List[Type[Transformer]]
destinations: List[Destination]
@classmethod
def from_configuration(cls, transformers: List[Type[Transformer]] = [], destinations: List[Dict[str, Any]] = []):
destination_objs = [DestinationFactory.create(configuration) for configuration in destinations]
return Pipeline(transformers=transformers, destinations=destination_objs)
def _transform(self, input_normalized_json):
for transformer in self.transformers:
if transformer.match(input_normalized_json, transformer._original_schema()):
return transformer.transform(input_normalized_json)
return []
def publish(self, data):
return [destination.push(data) for destination in self.destinations]
def _push(self, input_dict):
transformed_dict = self._transform(input_dict)
# output_normalized_json = transformed_dict
self.publish(transformed_dict)
|
11567664
|
filename = 'python_first_run.txt'
with open(filename, mode = 'w') as write_file:
write_file.write('Congratulations! You ran your first python script!')
|
11567679
|
from pygbif.gbifutils import (
check_data,
stop,
gbif_baseurl,
gbif_GET,
get_meta,
parse_results,
len2,
)
def nodes(
data="all",
uuid=None,
q=None,
identifier=None,
identifierType=None,
limit=100,
offset=None,
isocode=None,
**kwargs
):
"""
Nodes metadata.
:param data: [str] The type of data to get. Default: ``all``
:param uuid: [str] UUID of the data node provider. This must be specified if data
is anything other than ``all``.
:param q: [str] Query nodes. Only used when ``data = 'all'``
:param identifier: [fixnum] The value for this parameter can be a simple string or integer,
e.g. identifier=120
:param identifierType: [str] Used in combination with the identifier parameter to filter
identifiers by identifier type: ``DOI``, ``FTP``, ``GBIF_NODE``, ``GBIF_PARTICIPANT``,
``GBIF_PORTAL``, ``HANDLER``, ``LSID``, ``UNKNOWN``, ``URI``, ``URL``, ``UUID``
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:param isocode: [str] A 2 letter country code. Only used if ``data = 'country'``.
:return: A dictionary
References http://www.gbif.org/developer/registry#nodes
Usage::
from pygbif import registry
registry.nodes(limit=5)
registry.nodes(identifier=120)
registry.nodes(uuid="1193638d-32d1-43f0-a855-8727c94299d8")
registry.nodes(data='identifier', uuid="03e816b3-8f58-49ae-bc12-4e18b358d6d9")
registry.nodes(data=['identifier','organization','comment'], uuid="03e816b3-8f58-49ae-bc12-4e18b358d6d9")
uuids = ["8cb55387-7802-40e8-86d6-d357a583c596","02c40d2a-1cba-4633-90b7-e36e5e97aba8",
"7a17efec-0a6a-424c-b743-f715852c3c1f","b797ce0f-47e6-4231-b048-6b62ca3b0f55",
"1193638d-32d1-43f0-a855-8727c94299d8","d3499f89-5bc0-4454-8cdb-60bead228a6d",
"cdc9736d-5ff7-4ece-9959-3c744360cdb3","a8b16421-d80b-4ef3-8f22-098b01a89255",
"8df8d012-8e64-4c8a-886e-521a3bdfa623","b35cf8f1-748d-467a-adca-4f9170f20a4e",
"03e816b3-8f58-49ae-bc12-4e18b358d6d9","073d1223-70b1-4433-bb21-dd70afe3053b",
"07dfe2f9-5116-4922-9a8a-3e0912276a72","086f5148-c0a8-469b-84cc-cce5342f9242",
"0909d601-bda2-42df-9e63-a6d51847ebce","0e0181bf-9c78-4676-bdc3-54765e661bb8",
"109aea14-c252-4a85-96e2-f5f4d5d088f4","169eb292-376b-4cc6-8e31-9c2c432de0ad",
"1e789bc9-79fc-4e60-a49e-89dfc45a7188","1f94b3ca-9345-4d65-afe2-4bace93aa0fe"]
[ registry.nodes(data='identifier', uuid=x) for x in uuids ]
"""
args = {
"q": q,
"limit": limit,
"offset": offset,
"identifier": identifier,
"identifierType": identifierType,
}
data_choices = [
"all",
"organization",
"endpoint",
"identifier",
"tag",
"machineTag",
"comment",
"pendingEndorsement",
"country",
"dataset",
"installation",
]
check_data(data, data_choices)
def getdata(x, uuid, args, **kwargs):
if x != "all" and uuid is None:
stop('You must specify a uuid if data does not equal "all"')
if uuid is None:
if x == "all":
url = gbif_baseurl + "node"
else:
if isocode is not None and x == "country":
url = gbif_baseurl + "node/country/" + isocode
else:
url = gbif_baseurl + "node/" + x
else:
if x == "all":
url = gbif_baseurl + "node/" + uuid
else:
url = gbif_baseurl + "node/" + uuid + "/" + x
res = gbif_GET(url, args, **kwargs)
return {"meta": get_meta(res), "data": parse_results(res, uuid)}
# Get data
if len2(data) == 1:
return getdata(data, uuid, args, **kwargs)
else:
return [getdata(x, uuid, args, **kwargs) for x in data]
|
11567681
|
from mu.harness.project import StoreUpdate
from mu.harness.sub_sim import SubSim
from mu.protogen import stores_pb2
from mu.protogen import mcp3427_pb2
MCP3427_KEY = (stores_pb2.MuStoreType.MCP3427, 0)
NUM_MCP3427_CHANNELS = 2
class Mcp3427(SubSim):
# val1 writes channel1 of mcp3427, val2 channel2
def update_mcp3427(self, val1, val2, fault_flag):
mcp3427_msg = mcp3427_pb2.MuMcp3427Store()
mcp3427_msg.readings.extend([0] * NUM_MCP3427_CHANNELS)
mcp3427_msg.fault_flag = fault_flag
mcp3427_msg.readings[0] = val1
mcp3427_msg.readings[1] = val2
mcp3427_mask = mcp3427_pb2.MuMcp3427Store()
mcp3427_mask.readings.extend([0] * NUM_MCP3427_CHANNELS)
mcp3427_mask.fault_flag = 1
mcp3427_mask.readings[0] = 1
mcp3427_mask.readings[1] = 1
mcp3427_update = StoreUpdate(mcp3427_msg, mcp3427_mask, MCP3427_KEY)
self.parent.proj.write_store(mcp3427_update)
|
11567747
|
from django.test import TestCase
from django.shortcuts import reverse
from events.models import Event, User, Category
from comments.models import Comment
class CommentDeleteViewTest(TestCase):
def setUp(self):
        self.user = User.objects.create_user(username='iyanu', password='<PASSWORD>', email='<EMAIL>')
        self.attendee = User.objects.create_user(username='tobi', password='<PASSWORD>')
self.attendee.save()
self.user.save()
self.category = Category.objects.create(name='Technology', description='This is the future')
self.event = Event.objects.create(name='Party Outside', details='This party is gonna be banging again',
venue='Mapo Hall',
date='2018-05-18', time='12:25:00', category=self.category, creator=self.user)
self.comment = Comment.objects.create(comment='Hey yo', event=self.event, created_by=self.user)
def test_delete_comment_url_exists_at_desired_location(self):
comment = Comment.objects.create(comment='Hey yo', event=self.event, created_by=self.user)
        self.client.login(username='iyanu', password='<PASSWORD>')
response = self.client.get(reverse('comments:comment-delete', kwargs={'pk': self.comment.pk}))
# Check the user is logged in
# self.assertEqual(str(response.context['user']), 'iyanu')
# Check that we got a response 'success'
self.assertEqual(response.status_code, 200)
def test_view_url_accessible_by_name(self):
self.client.login(username='iyanu', password='<PASSWORD>')
response = self.client.get(reverse('comments:comment-delete', kwargs={'pk': self.comment.pk}))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
self.client.login(username='iyanu', password='<PASSWORD>')
response = self.client.get(reverse('comments:comment-delete', kwargs={'pk': self.comment.pk}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comments/delete.html')
def test_delete_view_redirect_to_event_detail_view(self):
self.client.login(username='iyanu', password='<PASSWORD>')
response = self.client.get(reverse('comments:comment-delete', kwargs={'pk': self.comment.pk}))
self.assertEqual(response.status_code, 200)
self.assertRedirects(response, self.event.get_absolute_url())
|
11567748
|
import torch
from .Container import Container
class Parallel(Container):
def __init__(self, inputDimension, outputDimension):
super(Parallel, self).__init__()
self.inputDimension = inputDimension
self.outputDimension = outputDimension
self.totalOutputSize = None
def updateOutput(self, input):
nModule = input.size(self.inputDimension)
outputs = []
for i in range(nModule):
currentInput = input.select(self.inputDimension, i)
currentOutput = self.modules[i].updateOutput(currentInput)
outputs.append(currentOutput)
outputSize = currentOutput.size(self.outputDimension)
if i == 0:
totalOutputSize = list(currentOutput.size())
else:
totalOutputSize[self.outputDimension] += outputSize
self.totalOutputSize = torch.Size(totalOutputSize)
self.output.resize_(self.totalOutputSize)
offset = 0
for i in range(nModule):
currentOutput = outputs[i]
outputSize = currentOutput.size(self.outputDimension)
self.output.narrow(self.outputDimension, offset, outputSize).copy_(currentOutput)
offset = offset + currentOutput.size(self.outputDimension)
return self.output
def updateGradInput(self, input, gradOutput):
nModule = input.size(self.inputDimension)
self.gradInput.resize_as_(input)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentInput = input.select(self.inputDimension, i)
currentOutput = module.output
outputSize = currentOutput.size(self.outputDimension)
currentGradOutput = gradOutput.narrow(self.outputDimension, offset, outputSize)
currentGradInput = module.updateGradInput(currentInput, currentGradOutput)
self.gradInput.select(self.inputDimension, i).copy_(currentGradInput)
offset = offset + outputSize
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
nModule = input.size(self.inputDimension)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentOutput = module.output
outputSize = currentOutput.size(self.outputDimension)
module.accGradParameters(
input.select(self.inputDimension, i),
gradOutput.narrow(self.outputDimension, offset, outputSize),
scale)
offset += outputSize
def accUpdateGradParameters(self, input, gradOutput, lr):
nModule = input.size(self.inputDimension)
offset = 0
for i in range(nModule):
module = self.modules[i]
currentOutput = module.output
            module.accUpdateGradParameters(
input.select(self.inputDimension, i),
gradOutput.narrow(self.outputDimension, offset, currentOutput.size(self.outputDimension)),
lr)
offset = offset + currentOutput.size(self.outputDimension)
def __repr__(self):
tab = ' '
line = '\n'
next = ' |`-> '
ext = ' | '
extlast = ' '
last = ' ... -> '
res = torch.typename(self)
res += ' {' + line + tab + 'input'
for i in range(len(self.modules)):
if i == len(self.modules) - 1:
res += line + tab + next + '(' + str(i) + '): ' + \
str(self.modules[i]).replace(line, line + tab + extlast)
else:
res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
res += line + tab + last + 'output'
res += line + '}'
return res
|
11567755
|
import os
import pytest
import torch
from kale.embed.gripnet import TypicalGripNetEncoder
from kale.utils.download import download_file_by_url
pose_url = "https://github.com/pykale/data/raw/main/graphs/pose.pt"
@pytest.fixture(scope="module")
def pose_data(download_path):
download_file_by_url(pose_url, download_path, "pose.pt", "pt")
return torch.load(os.path.join(download_path, "pose.pt"))
def test_gripnet_encoder(pose_data):
gg_layers = [32, 16, 16]
gd_layers = [16, 32]
dd_layers = [sum(gd_layers), 16]
gripnet = TypicalGripNetEncoder(
gg_layers, gd_layers, dd_layers, pose_data.n_d_node, pose_data.n_g_node, pose_data.n_dd_edge_type
)
gripnet(
pose_data.g_feat,
pose_data.gg_edge_index,
pose_data.edge_weight,
pose_data.gd_edge_index,
pose_data.train_idx,
pose_data.train_et,
pose_data.train_range,
)
assert gripnet.source_graph.conv_list[0].__repr__() == "GCNEncoderLayer(32, 16)"
assert gripnet.source_graph.conv_list[1].__repr__() == "GCNEncoderLayer(16, 16)"
assert gripnet.s2t_graph.conv.__repr__() == "GCNEncoderLayer(64, 16)"
assert gripnet.target_graph.conv_list[0].__repr__() == "RGCNEncoderLayer(48, 16, num_relations=854)"
|
11567785
|
from jivago.lang.registry import ParametrizedAnnotation
@ParametrizedAnnotation
def MyAnnotation(param1: str, param2: str):
return lambda x: x
@MyAnnotation(param1="foo", param2="baz")
class MyAnnotatedClass(object):
pass
|
11567788
|
import os
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import os
from PIL import Image
def create_list_file_in_directory(datadir):
i = 0
    if os.path.exists(os.path.join(datadir, 'data.txt')):
        os.remove(os.path.join(datadir, 'data.txt'))
for single_label in os.listdir(datadir):
single_full_list = os.listdir(os.path.join(datadir , single_label))
for file_name in single_full_list:
file = open( os.path.join(datadir,'data.txt') ,'a')
file.write( '{} {} \n'.format(os.path.join( single_label , file_name) , str(i) ) )
file.close()
i += 1
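# The generated data.txt contains one "<label_dir>/<file_name> <class_index>" line
# per image, e.g. (hypothetical directory layout):
#     cats/img_001.jpg 0
#     dogs/img_042.jpg 1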
class DriveData(Dataset):
    def __init__(self, folder_dataset, transform=None, resize_tuple=(224, 224)):
        # Per-instance containers (class-level lists would be shared across instances).
        self.__xs = []
        self.__ys = []
        self.transform = transform
        self.resize_tuple = resize_tuple
# Open and load text file including the whole training data
with open( os.path.join(folder_dataset , "data.txt" ) ) as f:
for line in f:
# Image path
self.__xs.append( os.path.join( folder_dataset ,line.split()[0]) )
# Steering wheel label
self.__ys.append(np.float32(line.split()[1]))
# Override to give PyTorch access to any image on the dataset
def __getitem__(self, index):
img = Image.open(self.__xs[index])
img = img.convert('RGB')
#img= img.resize( self.resize_tuple )
if self.transform is not None:
img = self.transform(img)
# Convert image and label to torch tensors
img = torch.from_numpy(np.asarray(img))
label = torch.from_numpy(np.asarray(self.__ys[index]).reshape([1,1]))
return img, label
# Override to give PyTorch size of dataset
def __len__(self):
return len(self.__xs)
def build_image_dataset(datadir , batch_size=10, shuffle=True, num_workers=1 , transform=None , resize_tuple = (224, 224) ):
create_list_file_in_directory(datadir)
dset_created = DriveData(datadir,transform , resize_tuple)
#img , label = dset_train.__getitem__(0)
data_loader = DataLoader(dset_created, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return data_loader
|
11567794
|
from .base import API
from .routing import Router
from .. import urls
class UserMgmtAPI(API):
__router, route = Router.new()
@route('users.get')
def _perform(self, user_id, **kwargs):
"""
https://visibility.amp.cisco.com/iroh/user-mgmt/index.html#/User/get_iroh_user_mgmt_users__user_id_
"""
return self._get(
urls.join('/iroh/user-mgmt/users', user_id),
**kwargs
)
@route('users.post')
def _perform(self,
user_id,
payload,
**kwargs):
"""
https://visibility.amp.cisco.com/iroh/user-mgmt/index.html#/User/post_iroh_user_mgmt_users__user_id_
"""
return self._post(
urls.join('/iroh/user-mgmt/users', user_id),
json=payload,
**kwargs
)
@route('batch.users')
def _perform(self, user_ids, **kwargs):
"""
https://visibility.amp.cisco.com/iroh/user-mgmt/index.html#/User/get_iroh_user_mgmt_batch_users
"""
return self._get(
'/iroh/user-mgmt/batch/users',
params={'id': user_ids},
**kwargs
)
@route('search.users')
def _perform(self,
payload,
sort_by=None,
sort_order=None,
offset=None,
search_after=None,
limit=None,
**kwargs):
"""
https://visibility.amp.cisco.com/iroh/user-mgmt/index.html#/User/post_iroh_user_mgmt_search_users
"""
query = {
'sort_by': sort_by,
'sort_order': sort_order,
'offset': offset,
'search_after': search_after,
'limit': limit
}
return self._post(
'/iroh/user-mgmt/search/users',
json=payload,
params=query,
**kwargs
)
|
11567811
|
class ParseMetaException(Exception):
def __init__(self):
super(ParseMetaException, self).__init__('Error parsing Meta data')
|
11567838
|
from typing import Optional, Union
from pyuploadcare.transformations.base import BaseTransformation, StrEnum
class VideoFormat(StrEnum):
webm = "webm"
ogg = "ogg"
mp4 = "mp4"
class ResizeMode(StrEnum):
preserve_ratio = "preserve_ratio"
change_ratio = "change_ratio"
scale_crop = "scale_crop"
add_padding = "add_padding"
class Quality(StrEnum):
normal = "normal"
better = "better"
best = "best"
lighter = "lighter"
lightest = "lightest"
class VideoTransformation(BaseTransformation):
def format(
self, file_format: Union[VideoFormat, str]
) -> "VideoTransformation":
self.set("format", [file_format])
return self
def size(
self,
width: Optional[int] = None,
height: Optional[int] = None,
resize_mode: Optional[Union[str, ResizeMode]] = None,
) -> "VideoTransformation":
parameters = [f'{width or ""}x{height or ""}']
if resize_mode:
parameters.append(resize_mode)
self.set("size", parameters)
return self
def quality(self, file_quality: Quality) -> "VideoTransformation":
self.set("quality", [file_quality])
return self
def cut(self, start_time: str, length: str) -> "VideoTransformation":
self.set("cut", [start_time, length])
return self
def thumbs(self, amount: int) -> "VideoTransformation":
self.set("thumbs", [str(amount)])
return self
def _prefix(self, file_id: str) -> str:
return f"{file_id}/video/"
def path(self, file_id: str) -> str:
path_ = super().path(file_id)
path_ = path_.replace("thumbs/", "thumbs~")
return path_
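# Usage sketch (illustrative; the exact URL layout is produced by the
# BaseTransformation helpers, which are not shown here):
#     transformation = (
#         VideoTransformation()
#         .format(VideoFormat.mp4)
#         .size(width=640, resize_mode=ResizeMode.preserve_ratio)
#         .thumbs(2)
#     )
#     path = transformation.path("<file-id>")  # assumed to start with "<file-id>/video/"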
|
11567864
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connections
from rental.models import SubRegion, House, HouseEtc, RegionTS, HouseTS
from crawlerrequest.models import RequestTS
import json
class Command(BaseCommand):
    help = 'All migration tasks from peewee to Django that cannot be performed using built-in facilities'
requires_migrations_checks = True
def add_arguments(self, parser):
parser.add_argument(
'--db',
dest='db_name',
help='Specify db to migrate'
)
def handle(self, *args, **options):
db_name = 'default'
if options['db_name']:
db_name = options['db_name']
if db_name not in settings.DATABASES:
raise CommandError('DB {} is not defined in settings'.format(db_name))
if 'postgresql' not in settings.DATABASES[db_name]['ENGINE']:
raise CommandError('Only PostgreSQL is required for manual migration :)')
target_models = [House, HouseEtc, RegionTS, HouseTS, RequestTS]
for model in target_models:
db_table = model._meta.db_table
sql = 'select column_name, data_type from INFORMATION_SCHEMA.COLUMNS where ' \
"table_name = %s and data_type = 'timestamp without time zone'"
tz_fields = []
with connections[db_name].cursor() as cursor:
cursor.execute(sql, [db_table])
for row in cursor.fetchall():
tz_fields.append(row[0])
if len(tz_fields) > 0:
self.stdout.write('{} | migrating timestamp to timestamptz'.format(db_table))
for field in tz_fields:
self.stdout.write('{}::{} | alter type to timestamptz'.format(db_table, field))
cursor.execute('alter table {} alter {} type timestamptz'.format(db_table, field))
self.stdout.write('Migration done~~')
|
11567866
|
class Error:
"""
Base Error Class
"""
def __init__(self):
self.message = ""
def __repr__(self):
return str({
"message": self.message
})
|
11567897
|
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_irl_algorithm import TorchIRLAlgorithm
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.core.train_util import linear_schedule
def concat_trajs(trajs):
new_dict = {}
for k in trajs[0].keys():
if isinstance(trajs[0][k], dict):
new_dict[k] = concat_trajs([t[k] for t in trajs])
else:
new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0)
return new_dict
class AdvBC(TorchIRLAlgorithm):
'''
BC trained using an adversarial setup
Either forward or reverse KL
'''
def __init__(
self,
env,
policy,
discriminator,
expert_replay_buffer,
KL_mode='forward',
disc_num_trajs_per_batch=128,
disc_samples_per_traj=8,
disc_optim_batch_size=1024,
policy_optim_batch_size=1024,
num_update_loops_per_train_call=1000,
num_disc_updates_per_loop_iter=1,
num_policy_updates_per_loop_iter=1,
disc_lr=1e-3,
disc_momentum=0.0,
disc_optimizer_class=optim.Adam,
policy_lr=1e-3,
policy_momentum=0.0,
policy_optimizer_class=optim.Adam,
use_grad_pen=True,
grad_pen_weight=10,
disc_ce_grad_clip=0.5,
disc_gp_grad_clip=10.0,
use_target_disc=False,
target_disc=None,
soft_target_disc_tau=0.005,
# rew_clip_min=None,
# rew_clip_max=None,
plotter=None,
render_eval_paths=False,
eval_deterministic=True,
use_disc_input_noise=False,
disc_input_noise_scale_start=0.1,
disc_input_noise_scale_end=0.0,
epochs_till_end_scale=50.0,
**kwargs
):
assert disc_lr != 1e-3, 'Just checking that this is being taken from the spec file'
assert KL_mode in ['forward', 'reverse']
if eval_deterministic:
eval_policy = MakeDeterministic(policy)
else:
eval_policy = policy
super().__init__(
env=env,
exploration_policy=policy,
eval_policy=eval_policy,
expert_replay_buffer=expert_replay_buffer,
**kwargs
)
assert not self.wrap_absorbing
self.disc_num_trajs_per_batch = disc_num_trajs_per_batch
self.disc_samples_per_traj = disc_samples_per_traj
self.discriminator = discriminator
self.rewardf_eval_statistics = None
self.disc_optimizer = disc_optimizer_class(
self.discriminator.parameters(),
lr=disc_lr,
betas=(disc_momentum, 0.999)
)
print('\n\nDISC MOMENTUM: %f\n\n' % disc_momentum)
self.policy_optimizer = policy_optimizer_class(
self.exploration_policy.parameters(),
lr=policy_lr,
betas=(policy_momentum, 0.999)
)
self.disc_optim_batch_size = disc_optim_batch_size
self.policy_optim_batch_size = policy_optim_batch_size
self.bce = nn.BCEWithLogitsLoss()
target_batch_size = self.disc_optim_batch_size
self.bce_targets = torch.cat(
[
torch.ones(target_batch_size, 1),
torch.zeros(target_batch_size, 1)
],
dim=0
)
self.bce_targets = Variable(self.bce_targets)
if ptu.gpu_enabled():
self.bce.cuda()
self.bce_targets = self.bce_targets.cuda()
self.use_grad_pen = use_grad_pen
self.grad_pen_weight = grad_pen_weight
self.disc_ce_grad_clip = disc_ce_grad_clip
self.disc_gp_grad_clip = disc_gp_grad_clip
self.disc_grad_buffer = {}
self.disc_grad_buffer_is_empty = True
self.use_target_disc = use_target_disc
self.soft_target_disc_tau = soft_target_disc_tau
if use_target_disc:
if target_disc is None:
print('\n\nMAKING TARGET DISC\n\n')
self.target_disc = deepcopy(self.discriminator)
else:
print('\n\nUSING GIVEN TARGET DISC\n\n')
self.target_disc = target_disc
self.disc_ce_grad_norm = 0.0
self.disc_ce_grad_norm_counter = 0.0
self.max_disc_ce_grad = 0.0
self.disc_gp_grad_norm = 0.0
self.disc_gp_grad_norm_counter = 0.0
self.max_disc_gp_grad = 0.0
self.use_disc_input_noise = use_disc_input_noise
self.disc_input_noise_scale_start = disc_input_noise_scale_start
self.disc_input_noise_scale_end = disc_input_noise_scale_end
self.epochs_till_end_scale = epochs_till_end_scale
self.num_update_loops_per_train_call = num_update_loops_per_train_call
self.num_disc_updates_per_loop_iter = num_disc_updates_per_loop_iter
self.num_policy_updates_per_loop_iter = num_policy_updates_per_loop_iter
if KL_mode == 'forward':
self.use_forward_KL = True
else:
self.use_forward_KL = False
# self.rew_clip_min = rew_clip_min
# self.rew_clip_max = rew_clip_max
# self.clip_min_rews = rew_clip_min is not None
# self.clip_max_rews = rew_clip_max is not None
def get_batch(self, batch_size, from_expert):
if from_expert:
buffer = self.expert_replay_buffer
else:
buffer = self.replay_buffer
batch = buffer.random_batch(batch_size)
batch = np_to_pytorch_batch(batch)
return batch
def _do_training(self, epoch):
for t in range(self.num_update_loops_per_train_call):
for _ in range(self.num_disc_updates_per_loop_iter):
self._do_reward_training(epoch)
for _ in range(self.num_policy_updates_per_loop_iter):
self._do_policy_training(epoch)
def _do_reward_training(self, epoch):
'''
Train the discriminator
'''
self.disc_optimizer.zero_grad()
expert_batch = self.get_batch(self.disc_optim_batch_size, True)
policy_batch = {}
pol_mode = self.exploration_policy.training
self.exploration_policy.train()
policy_batch['observations'] = expert_batch['observations']
policy_batch['actions'] = self.exploration_policy(expert_batch['observations'])[0].detach()
self.exploration_policy.train(pol_mode)
expert_obs = expert_batch['observations']
policy_obs = policy_batch['observations']
expert_actions = expert_batch['actions']
policy_actions = policy_batch['actions']
# print('----------------------------')
# print(torch.mean(expert_obs, dim=0))
# print(torch.std(expert_obs, dim=0))
# print(torch.mean(expert_actions, dim=0))
# print(torch.std(expert_actions, dim=0))
if self.use_disc_input_noise:
noise_scale = linear_schedule(
epoch,
self.disc_input_noise_scale_start,
self.disc_input_noise_scale_end,
self.epochs_till_end_scale
)
if noise_scale > 0.0:
expert_obs = expert_obs + noise_scale * Variable(torch.randn(expert_obs.size()))
expert_actions = expert_actions + noise_scale * Variable(torch.randn(expert_actions.size()))
policy_obs = policy_obs + noise_scale * Variable(torch.randn(policy_obs.size()))
policy_actions = policy_actions + noise_scale * Variable(torch.randn(policy_actions.size()))
obs = torch.cat([expert_obs, policy_obs], dim=0)
actions = torch.cat([expert_actions, policy_actions], dim=0)
disc_logits = self.discriminator(obs, actions)
disc_preds = (disc_logits > 0).type(disc_logits.data.type())
disc_ce_loss = self.bce(disc_logits, self.bce_targets)
accuracy = (disc_preds == self.bce_targets).type(torch.FloatTensor).mean()
disc_ce_loss.backward()
ce_grad_norm = 0.0
for name, param in self.discriminator.named_parameters():
if param.grad is not None:
if self.disc_grad_buffer_is_empty:
self.disc_grad_buffer[name] = param.grad.data.clone()
else:
self.disc_grad_buffer[name].copy_(param.grad.data)
param_norm = param.grad.data.norm(2)
ce_grad_norm += param_norm ** 2
ce_grad_norm = ce_grad_norm ** 0.5
self.disc_grad_buffer_is_empty = False
ce_clip_coef = self.disc_ce_grad_clip / (ce_grad_norm + 1e-6)
if ce_clip_coef < 1.:
for name, grad in self.disc_grad_buffer.items():
grad.mul_(ce_clip_coef)
if ce_clip_coef < 1.0: ce_grad_norm *= ce_clip_coef
self.max_disc_ce_grad = max(ce_grad_norm, self.max_disc_ce_grad)
self.disc_ce_grad_norm += ce_grad_norm
self.disc_ce_grad_norm_counter += 1
self.disc_optimizer.zero_grad()
if self.use_grad_pen:
eps = Variable(torch.rand(expert_obs.size(0), 1))
if ptu.gpu_enabled(): eps = eps.cuda()
interp_obs = eps*expert_obs + (1-eps)*policy_obs
interp_obs = interp_obs.detach()
interp_obs.requires_grad = True
interp_actions = eps*expert_actions + (1-eps)*policy_actions
interp_actions = interp_actions.detach()
interp_actions.requires_grad = True
gradients = autograd.grad(
outputs=self.discriminator(interp_obs, interp_actions).sum(),
# inputs=[interp_obs, interp_actions],
inputs=[interp_actions],
# grad_outputs=torch.ones(exp_specs['batch_size'], 1).cuda(),
create_graph=True, retain_graph=True, only_inputs=True
)
# total_grad = torch.cat([gradients[0], gradients[1]], dim=1)
total_grad = gradients[0]
# GP from Gulrajani et al.
gradient_penalty = ((total_grad.norm(2, dim=1) - 1) ** 2).mean()
disc_grad_pen_loss = gradient_penalty * self.grad_pen_weight
# # GP from Mescheder et al.
# gradient_penalty = (total_grad.norm(2, dim=1) ** 2).mean()
# disc_grad_pen_loss = gradient_penalty * 0.5 * self.grad_pen_weight
disc_grad_pen_loss.backward()
gp_grad_norm = 0.0
for p in list(filter(lambda p: p.grad is not None, self.discriminator.parameters())):
param_norm = p.grad.data.norm(2)
gp_grad_norm += param_norm ** 2
gp_grad_norm = gp_grad_norm ** 0.5
gp_clip_coef = self.disc_gp_grad_clip / (gp_grad_norm + 1e-6)
if gp_clip_coef < 1.:
for p in self.discriminator.parameters():
p.grad.data.mul_(gp_clip_coef)
if gp_clip_coef < 1.: gp_grad_norm *= gp_clip_coef
self.max_disc_gp_grad = max(gp_grad_norm, self.max_disc_gp_grad)
self.disc_gp_grad_norm += gp_grad_norm
self.disc_gp_grad_norm_counter += 1
# now add back the gradients from the CE loss
for name, param in self.discriminator.named_parameters():
param.grad.data.add_(self.disc_grad_buffer[name])
self.disc_optimizer.step()
if self.use_target_disc:
ptu.soft_update_from_to(self.discriminator, self.target_disc, self.soft_target_disc_tau)
"""
Save some statistics for eval
"""
if self.rewardf_eval_statistics is None:
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
self.rewardf_eval_statistics = OrderedDict()
if self.use_target_disc:
target_disc_logits = self.target_disc(obs, actions)
target_disc_preds = (target_disc_logits > 0).type(target_disc_logits.data.type())
target_disc_ce_loss = self.bce(target_disc_logits, self.bce_targets)
target_accuracy = (target_disc_preds == self.bce_targets).type(torch.FloatTensor).mean()
if self.use_grad_pen:
eps = Variable(torch.rand(expert_obs.size(0), 1))
if ptu.gpu_enabled(): eps = eps.cuda()
interp_obs = eps*expert_obs + (1-eps)*policy_obs
interp_obs = interp_obs.detach()
interp_obs.requires_grad = True
interp_actions = eps*expert_actions + (1-eps)*policy_actions
interp_actions = interp_actions.detach()
interp_actions.requires_grad = True
target_gradients = autograd.grad(
outputs=self.target_disc(interp_obs, interp_actions).sum(),
# inputs=[interp_obs, interp_actions],
inputs=[interp_actions],
# grad_outputs=torch.ones(exp_specs['batch_size'], 1).cuda(),
create_graph=True, retain_graph=True, only_inputs=True
)
# total_grad = torch.cat([gradients[0], gradients[1]], dim=1)
total_target_grad = target_gradients[0]
# GP from Gulrajani et al.
target_gradient_penalty = ((total_target_grad.norm(2, dim=1) - 1) ** 2).mean()
# # GP from Mescheder et al.
# target_gradient_penalty = (total_target_grad.norm(2, dim=1) ** 2).mean()
self.rewardf_eval_statistics['Target Disc CE Loss'] = np.mean(ptu.get_numpy(target_disc_ce_loss))
self.rewardf_eval_statistics['Target Disc Acc'] = np.mean(ptu.get_numpy(target_accuracy))
self.rewardf_eval_statistics['Target Grad Pen'] = np.mean(ptu.get_numpy(target_gradient_penalty))
self.rewardf_eval_statistics['Target Grad Pen W'] = np.mean(self.grad_pen_weight)
self.rewardf_eval_statistics['Disc CE Loss'] = np.mean(ptu.get_numpy(disc_ce_loss))
self.rewardf_eval_statistics['Disc Acc'] = np.mean(ptu.get_numpy(accuracy))
self.rewardf_eval_statistics['Grad Pen'] = np.mean(ptu.get_numpy(gradient_penalty))
self.rewardf_eval_statistics['Grad Pen W'] = np.mean(self.grad_pen_weight)
self.rewardf_eval_statistics['Disc Avg CE Grad Norm this epoch'] = np.mean(self.disc_ce_grad_norm / self.disc_ce_grad_norm_counter)
self.rewardf_eval_statistics['Disc Max CE Grad Norm this epoch'] = np.mean(self.max_disc_ce_grad)
self.rewardf_eval_statistics['Disc Avg GP Grad Norm this epoch'] = np.mean(self.disc_gp_grad_norm / self.disc_gp_grad_norm_counter)
self.rewardf_eval_statistics['Disc Max GP Grad Norm this epoch'] = np.mean(self.max_disc_gp_grad)
if self.use_disc_input_noise:
self.rewardf_eval_statistics['Disc Input Noise Scale'] = noise_scale
self.max_disc_ce_grad = 0.0
self.disc_ce_grad_norm = 0.0
self.disc_ce_grad_norm_counter = 0.0
self.max_disc_gp_grad = 0.0
self.disc_gp_grad_norm = 0.0
self.disc_gp_grad_norm_counter = 0.0
def _do_policy_training(self, epoch):
self.exploration_policy.train()
self.policy_optimizer.zero_grad()
batch = self.get_batch(self.policy_optim_batch_size, True)
obs = batch['observations']
acts = self.exploration_policy(obs)[0]
if self.use_target_disc:
self.target_disc.eval()
# If you compute log(D) - log(1-D) then you just get the logits
disc_logits = self.target_disc(obs, acts)
self.target_disc.train()
else:
self.discriminator.eval()
# If you compute log(D) - log(1-D) then you just get the logits
disc_logits = self.discriminator(obs, acts)
self.discriminator.train()
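        # Why the logits are enough: with D = sigmoid(logit),
        # log(D) - log(1 - D) = log(D / (1 - D)) = logit, so using the raw
        # discriminator output is equivalent to the log-ratio above.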
if self.use_forward_KL:
loss = (torch.exp(disc_logits)*(disc_logits)).mean()
else:
loss = -1.0 * disc_logits.mean()
loss.backward()
self.policy_optimizer.step()
self.rewardf_eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(loss))
@property
def networks(self):
return [self.exploration_policy, self.discriminator]
def get_epoch_snapshot(self, epoch):
snapshot = super().get_epoch_snapshot(epoch)
snapshot.update(exploration_policy=self.exploration_policy)
snapshot.update(disc=self.discriminator)
return snapshot
def _elem_or_tuple_to_variable(elem_or_tuple):
if isinstance(elem_or_tuple, tuple):
return tuple(
_elem_or_tuple_to_variable(e) for e in elem_or_tuple
)
return Variable(ptu.from_numpy(elem_or_tuple).float(), requires_grad=False)
def _filter_batch(np_batch):
for k, v in np_batch.items():
if v.dtype == np.bool:
yield k, v.astype(int)
else:
yield k, v
def np_to_pytorch_batch(np_batch):
return {
k: _elem_or_tuple_to_variable(x)
for k, x in _filter_batch(np_batch)
if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)
}
|
11567899
|
import abc
class Callback(abc.ABC):
def on_epoch_begin(self, trainer):
pass
def on_epoch_end(self, trainer):
pass
def on_training_step_begin(self, trainer):
pass
def on_training_step_end(self, trainer):
pass
|
11567942
|
import demo_hic_et_nunc.models as models
from demo_hic_et_nunc.types.hen_minter.parameter.mint_objkt import MintOBJKTParameter
from demo_hic_et_nunc.types.hen_minter.storage import HenMinterStorage
from demo_hic_et_nunc.types.hen_objkts.parameter.mint import MintParameter
from demo_hic_et_nunc.types.hen_objkts.storage import HenObjktsStorage
from dipdup.context import HandlerContext
from dipdup.models import Transaction
async def on_mint(
ctx: HandlerContext,
mint_objkt: Transaction[MintOBJKTParameter, HenMinterStorage],
mint: Transaction[MintParameter, HenObjktsStorage],
) -> None:
holder, _ = await models.Holder.get_or_create(address=mint.parameter.address)
token = models.Token(
id=mint.parameter.token_id,
creator=holder,
supply=mint.parameter.amount,
level=mint.data.level,
timestamp=mint.data.timestamp,
)
await token.save()
|
11567944
|
from typing import Union
from Util.geometry import Position, Pose, Line, intersection_between_segments, intersection_between_line_and_segment, \
closest_point_on_segment
class Area:
def __init__(self, a, b):
neg_x, pos_x = min(a.x, b.x), max(a.x, b.x)
neg_y, pos_y = min(a.y, b.y), max(a.y, b.y)
self.upper_left = Position(neg_x, pos_y)
self.upper_right = Position(pos_x, pos_y)
self.lower_right = Position(pos_x, neg_y)
self.lower_left = Position(neg_x, neg_y)
def point_inside(self, p: Position) -> bool:
return self.left <= p.x <= self.right and \
self.bottom <= p.y <= self.top
def __str__(self):
return f'Area(top={self.top}, bottom={self.bottom}, right={self.right}, left={self.left})'
def __contains__(self, item: Union[Pose, Position]):
if type(item) is Pose:
return self.point_inside(item.position)
elif type(item) is Position:
return self.point_inside(item)
else:
raise ValueError('You can only test if a position or a pose is contained inside the area.')
def intersect(self, seg: Line):
assert isinstance(seg, Line)
if self.point_inside(seg.p1) and self.point_inside(seg.p2):
return []
inters = []
for segment in self.segments:
inter = intersection_between_segments(segment.p1, segment.p2, seg.p1, seg.p2)
if inter is not None:
inters.append(inter)
return inters
def intersect_with_line(self, line: Line):
assert isinstance(line, Line)
inters = []
for segment in self.segments:
inter = intersection_between_line_and_segment(segment.p1, segment.p2, line.p1, line.p2)
if inter is not None:
inters.append(inter)
return inters
def closest_border_point(self, p: Position):
closest_on_borders = None
for segment in self.segments:
closest_on_segment = closest_point_on_segment(p, segment.p1, segment.p2)
if closest_on_borders is None or (closest_on_segment - p).norm < (closest_on_borders - p).norm:
closest_on_borders = closest_on_segment
return closest_on_borders
@property
def segments(self):
return [Line(self.upper_left, self.upper_right),
Line(self.upper_right, self.lower_right),
Line(self.lower_right, self.lower_left),
Line(self.lower_left, self.upper_left)]
@property
def center(self):
return self.lower_left + Position(self.width / 2, self.height / 2)
@property
def width(self):
return self.right - self.left
@property
def height(self):
return self.top - self.bottom
@property
def top(self):
return self.upper_left.y
@property
def bottom(self):
return self.lower_right.y
@property
def left(self):
return self.upper_left.x
@property
def right(self):
return self.lower_right.x
@classmethod
def pad(cls, area, padding=0):
return cls.from_limits(area.top + padding, area.bottom - padding,
area.right + padding, area.left - padding)
@classmethod
def from_limits(cls, top, bottom, right, left):
return cls(Position(left, top), Position(right, bottom))
@classmethod
def flip_x(cls, area):
return cls.from_limits(area.top, area.bottom, -area.left, -area.right)
@classmethod
def flip_y(cls, area):
return cls.from_limits(-area.top, -area.bottom, area.left, area.right)
@classmethod
def from_4_point(cls, p1, p2, p3, p4):
top = max(p1.y, p2.y, p3.y, p4.y)
bot = min(p1.y, p2.y, p3.y, p4.y)
right = max(p1.x, p2.x, p3.x, p4.x)
left = min(p1.x, p2.x, p3.x, p4.x)
return cls.from_limits(top, bot, right, left)
class ForbiddenZone(Area):
def __init__(self, a, b, inside_forbidden=True):
super(ForbiddenZone, self).__init__(a, b)
self.inside_forbidden = inside_forbidden
def __contains__(self, item: Union[Pose, Position]):
if type(item) is Pose:
return self.point_inside(item.position)
elif type(item) is Position:
return self.point_inside(item)
else:
raise ValueError('You can only test if a position or a pose is contained inside the area.')
def point_inside(self, p: Position) -> bool:
is_inside = self.left <= p.x <= self.right and \
self.bottom <= p.y <= self.top
# If inside_forbidden is false, the outside is the forbidden zone
return self.inside_forbidden == is_inside
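# Minimal usage sketch (relies on Position from Util.geometry as imported above):
if __name__ == '__main__':
    area = Area(Position(0, 0), Position(10, 5))
    assert Position(3, 2) in area        # inside the rectangle
    assert Position(11, 2) not in area   # right of the right border
    padded = Area.pad(area, padding=1)   # every border grows by 1
    assert padded.width == 12 and padded.height == 7
    keep_out = ForbiddenZone(Position(0, 0), Position(10, 5), inside_forbidden=False)
    assert Position(11, 2) in keep_out   # outside the box is the forbidden region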
|
11567999
|
import io
import os
import json
import random
import numpy as np
import tensorflow as tf
from pathlib import Path
from common.kb import load_kb
from collections import defaultdict
from nltk import wordpunct_tokenize
from configs import configs as _config
from nltk.tokenize import RegexpTokenizer
def preprocess_sentence(start_sign, end_sign, w):
"""
用于给句子首尾添加start和end
:param w:
:return: 合成之后的句子
"""
w = start_sign + ' ' + w + ' ' + end_sign
return w
def create_dataset(path, num_examples, start_sign, end_sign):
"""
用于将分词文本读入内存,并整理成问答对
:param path:
:param num_examples:
:return: 整理好的问答对
"""
is_exist = Path(path)
if not is_exist.exists():
file = open(path, 'w', encoding='utf-8')
file.write('吃饭 了 吗' + '\t' + '吃 了')
file.close()
size = os.path.getsize(path)
lines = io.open(path, encoding='utf-8').read().strip().split('\n')
if num_examples == 0:
word_pairs = [[preprocess_sentence(start_sign, end_sign, w) for w in l.split('\t')] for l in lines]
else:
word_pairs = [[preprocess_sentence(start_sign, end_sign, w) for w in l.split('\t')] for l in
lines[:num_examples]]
return zip(*word_pairs)
def max_length(tensor):
"""
:param tensor:
:return: 列表中最大的长度
"""
return max(len(t) for t in tensor)
def read_data(path, num_examples, start_sign, end_sign):
"""
读取数据,将input和target进行分词后返回
:param path: Tokenizer文本路径
:param num_examples: 最大序列长度
:return: input_tensor, target_tensor, lang_tokenizer
"""
input_lang, target_lang = create_dataset(path, num_examples, start_sign, end_sign)
input_tensor, target_tensor, lang_tokenizer = tokenize(input_lang, target_lang)
return input_tensor, target_tensor, lang_tokenizer
def tokenize(input_lang, target_lang):
"""
分词方法,使用Keras API中的Tokenizer进行分词操作
:param input_lang: 输入
:param target_lang: 目标
:return: input_tensor, target_tensor, lang_tokenizer
"""
lang = np.hstack((input_lang, target_lang))
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token=3)
lang_tokenizer.fit_on_texts(lang)
input_tensor = lang_tokenizer.texts_to_sequences(input_lang)
target_tensor = lang_tokenizer.texts_to_sequences(target_lang)
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor, maxlen=_config.max_length,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor, maxlen=_config.max_length,
padding='post')
return input_tensor, target_tensor, lang_tokenizer
def load_dataset(dict_fn, data_fn, start_sign, end_sign, max_train_data_size=0):
"""
数据加载方法,含四个元素的元组,包括如下:
:return:input_tensor, input_token, target_tensor, target_token
"""
input_tensor, target_tensor, lang_tokenizer = read_data(data_fn, max_train_data_size, start_sign, end_sign)
with open(dict_fn, 'w', encoding='utf-8') as file:
file.write(json.dumps(lang_tokenizer.word_index, indent=4, ensure_ascii=False))
return input_tensor, target_tensor, lang_tokenizer
def load_token_dict(dict_fn):
"""
加载字典方法
:return:input_token, target_token
"""
with open(dict_fn, 'r', encoding='utf-8') as file:
token = json.load(file)
return token
def pad_sequence(seqs, max_len):
"""
填充序列,0
:param seqs: 序列
:return: 返回填充好的序列
"""
padded = [seq + [0] * (max_len - len(seq)) for seq in seqs]
return padded
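# Example (illustrative):
#   pad_sequence([[5, 2], [7]], max_len=4)  ->  [[5, 2, 0, 0], [7, 0, 0, 0]]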
def sequences_to_texts(sequences, token_dict):
"""
将序列转换成text
"""
inv = {}
for key, value in token_dict.items():
inv[value] = key
result = []
for text in sequences:
temp = ''
for token in text:
temp = temp + ' ' + inv[token]
result.append(temp)
return result
def tokenize_en(sent, tokenizer):
"""
用来针对英文句子的分词
:param sent: 句子
:param tokenizer: 正则表达式分词器
:return: 分好的句子
"""
tokens = tokenizer.tokenize(sent)
ret = []
for t in tokens:
        # Note: slot placeholders such as <v.pricerange> must be kept as a single token
if '<' not in t:
ret.extend(wordpunct_tokenize(t))
else:
ret.append(t)
return ret
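# Example (illustrative): with tokenizer = RegexpTokenizer(r'<[a-z][.\w]+>|[^<]+'),
#   tokenize_en('i want <v.pricerange> food', tokenizer)
#   ->  ['i', 'want', '<v.pricerange>', 'food']
# i.e. slot placeholders survive as single tokens while plain text is word-tokenized.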
def load_dialogs(diag_fn, kb, groups_fn=None):
"""
加载数据集中的对话,按照格式整理好并返回
:param diag_fn: 数据集文件路径
:param kb: knowledge base的词表
:param groups_fn: 语句槽位集合文件路径
:return: 整理好的数据
"""
with open(diag_fn) as file:
dialogues = json.load(file)
data = []
for dialogue in dialogues:
usr_utterances = []
sys_utterances = []
states = []
kb_found = []
sys_utterance_groups = []
for turn in dialogue['dialogue']:
usr_utterances.append('<sos> ' + turn['transcript'] + '<eos>')
sys_utterances.append('<sos> ' + turn['system_transcript'] + '<eos>')
slots = []
search_keys = []
for state in turn['belief_state']:
if state['act'] == 'inform':
slots.append(state['slots'][0])
state['slots'][0][0] = state['slots'][0][0].replace(' ', '').replace('center', 'centre')
search_keys.append(state['slots'][0])
elif state['act'] == 'request':
slots.append((state['slots'][0][1].replace(' ', '') + '_req', 'care'))
else:
raise RuntimeError('illegal state : %s' % (state,))
states.append(slots)
ret = kb.search_multi(search_keys)
kb_found.append(len(ret))
        # Skip the first system utterance, which is usually empty
sys_utterances = sys_utterances[1:]
usr_utterances = usr_utterances[:-1]
kb_found = kb_found[:-1]
states = states[:-1]
data.append({
'usr_utterances': usr_utterances,
'sys_utterances': sys_utterances,
'sys_utterance_groups': sys_utterance_groups,
'states': states,
'kb_found': kb_found,
})
return data
def load_ontology(fn):
"""
加载对话数据集中的本体
:param fn:本体数据集的文件路径
:return:返回整理好的本体和本体索引
"""
with open(fn) as file:
data = json.load(file)
onto = {}
onto_idx = defaultdict(dict)
    # Informable slots: information the user can give to the system
inform_data = data['informable']
for key, values in inform_data.items():
onto[key] = values + ['dontcare']
onto_idx[key]['dontcare'] = 0
for value in values:
onto_idx[key][value] = len(onto_idx[key])
key = key + '_req'
onto[key] = values + ['dontcare']
onto_idx[key] = {
'dontcare': 0,
'care': 1,
}
req_data = data['requestable']
for key in req_data:
key = key + '_req'
onto[key] = ['dontcare']
onto_idx[key] = {
'dontcare': 0,
'care': 1,
}
return onto, onto_idx
class DataLoader:
"""
对话数据加载工具类
"""
    def __init__(self, dialogues, max_length, tokenizer, onto, onto_idx, max_train_data_size, kb_found_len=5,
                 mode='train'):
self.dialogues = dialogues
self.tokenizer = tokenizer
self.max_length = max_length
self.cur = 0
self.onto = onto
self.onto_idx = onto_idx
        self.kb_found_len = kb_found_len
self.max_train_data_size = max_train_data_size
self.mode = mode
def get_vocabs(self):
"""
获取对话数据集中的token集合,分为user和system两个token集合
:return: user和system两个token集合
"""
vocabs = []
sys_vocabs = []
for dialogue in self.dialogues:
for s in dialogue['usr_utterances']:
vocabs.extend(self._sent_normalize(s))
for s in dialogue['sys_utterances']:
sys_vocabs.extend(self._sent_normalize(s))
return set(vocabs), set(sys_vocabs)
def __len__(self):
sum = 0
if self.max_train_data_size == 0:
for dialogue in self.dialogues:
sum += len(dialogue['usr_utterances'])
else:
for i in range(self.max_train_data_size):
sum += len(self.dialogues[i]['usr_utterances'])
return sum
def _sent_normalize(self, sent):
"""
分词器
:param sent: 语句
:return: 语句序列
"""
tokenizer = RegexpTokenizer(r'<[a-z][.\w]+>|[^<]+')
return tokenize_en(sent=sent.lower(), tokenizer=tokenizer)
def _get(self, i):
"""
获取整理对话数据集中的第i个对话的相关数据,整理
至对应格式,并统一将数据类型转成tf.int64
:param i: 第i个对话数据
:return: 整理好的对话数据
"""
dialogue = self.dialogues[i]
usr_utterances = [self._gen_utterance_seq(self.tokenizer, s) for s in dialogue['usr_utterances']]
usr_utterances = tf.convert_to_tensor(pad_sequence(seqs=usr_utterances, max_len=self.max_length),
dtype=tf.int64)
states = self._gen_state_vectors(dialogue['states'])
kb_indicator = [[0] if x == 0 else [1] for x in dialogue['kb_found']]
sys_utterances = [self._gen_utterance_seq(self.tokenizer, s) for s in dialogue['sys_utterances']]
sys_utterances = [tf.reshape(tf.convert_to_tensor(utt, dtype=tf.int64), [1, -1]) for utt in sys_utterances]
sys_utterance_groups = tf.convert_to_tensor(dialogue['sys_utterance_groups'], dtype=tf.int64)
return dialogue['usr_utterances'], dialogue['sys_utterances'], \
dialogue['kb_found'], usr_utterances, sys_utterances, \
states, kb_indicator, sys_utterance_groups
def _gen_utterance_seq(self, tokenizer, utterance):
"""
将语句转成token索引向量
:param tokenizer: 索引字典
:param utterance: 语句
:return: 返回转换好的向量
"""
utterance = self._sent_normalize(utterance)
utterance = [tokenizer.get(x, 0) for x in utterance]
return utterance
def _gen_state_vectors(self, states):
"""
将状态序列中槽位值转成Tensor序列
:param states: 状态列表
:return: 整理好的状态张量
"""
state_vectors = {slot: tf.cast(tf.zeros(len(states)), dtype=tf.float32).numpy() for slot in self.onto}
for t, states_at_time_t in enumerate(states):
for s, v in states_at_time_t:
if v == 'center':
v = 'centre'
state_vectors[s][t] = self.onto_idx[s][v]
return state_vectors
def __iter__(self):
return self
def reset(self):
self.cur = 0
def next(self):
"""
移动到下一个对话,如果运行到test数据集,直接停止
:return: 返回对应对话的数据
"""
ret = self._get(self.cur)
self.cur += 1
        # When max_train_data_size is set, wrap around even before a full epoch is finished
if self.cur > self.max_train_data_size and not self.max_train_data_size == 0:
self.cur = 0
if self.cur == len(self.dialogues):
if self.mode == 'test':
raise StopIteration()
random.shuffle(self.dialogues)
self.cur = 0
return ret
def load_data(dialogues_train, max_length, kb_fn, ontology_fn, tokenizer, max_train_data_size, kb_indicator_len):
"""
加载对原始数据、本体数据、database数据处理好的数据集
:param dialogues_train: 原始对话数据路径
:param kb_fn: database数据路径
:param ontology_fn: 本体数据路径
:param tokenizer: token
:param kb_indicator_len: kb指针长度
"""
kb = load_kb(kb_fn, 'name')
dialogue_data = load_dialogs(dialogues_train, kb)
onto, onto_idx = load_ontology(ontology_fn)
kb_found_len = kb_indicator_len - 2
    return DataLoader(dialogues=dialogue_data, max_length=max_length, tokenizer=tokenizer, onto=onto, onto_idx=onto_idx,
                      max_train_data_size=max_train_data_size, kb_found_len=kb_found_len)
|
11568006
|
class Solution:
def reverseWords(self, s: str) -> str:
lis = s.split()
return " ".join(reversed([ss[: : -1] for ss in lis]))
|
11568038
|
import pytest
from src.graphs import BLACK
from src.graphs import clone_graph
from src.graphs import fill_surrounded_regions
from src.graphs import flip_color
from src.graphs import GraphNode
from src.graphs import is_minimally_connected
from src.graphs import search_maze
from src.graphs import WHITE
class TestSearchMaze(object):
"""
Question 19.1
"""
maze = [
[BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, BLACK, WHITE, WHITE],
[WHITE, WHITE, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE],
[BLACK, WHITE, BLACK, WHITE, WHITE, BLACK, BLACK, WHITE, BLACK, BLACK],
[WHITE, WHITE, WHITE, BLACK, BLACK, BLACK, WHITE, WHITE, BLACK, WHITE],
[WHITE, BLACK, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE],
[WHITE, BLACK, BLACK, WHITE, WHITE, BLACK, WHITE, BLACK, BLACK, WHITE],
[WHITE, WHITE, WHITE, WHITE, BLACK, WHITE, WHITE, WHITE, WHITE, WHITE],
[BLACK, WHITE, BLACK, WHITE, BLACK, WHITE, BLACK, WHITE, WHITE, WHITE],
[BLACK, WHITE, BLACK, BLACK, WHITE, WHITE, WHITE, BLACK, BLACK, BLACK],
[WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, WHITE, BLACK, BLACK, WHITE],
]
def test_book_example(self):
start_coord = (9, 0)
end_coord = (0, 9)
path = search_maze(self.maze, start_coord, end_coord)
assert path[0] == start_coord
assert path[-1] == end_coord
class TestFlipColor(object):
"""
Question 19.2
"""
matrix_a = [
[1, 0, 1, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 1, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 1, 1, 0, 1, 0],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
]
matrix_b = [
[1, 0, 1, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 1, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 1, 1, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
]
matrix_c = [
[1, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def test_book_example(self):
start = (5, 4)
output = flip_color(self.matrix_a, start)
assert self.matrix_b == output
def test_second_iteration(self):
start = (3, 6)
output = flip_color(self.matrix_b, start)
assert self.matrix_c == output
class TestFillSurroundedRegions(object):
"""
Question 19.3
"""
matrix_a = [
['B', 'B', 'B', 'B'],
['W', 'B', 'W', 'B'],
['B', 'W', 'W', 'B'],
['B', 'B', 'B', 'B'],
]
matrix_b = [
['B', 'B', 'B', 'B'],
['W', 'B', 'B', 'B'],
['B', 'B', 'B', 'B'],
['B', 'B', 'B', 'B'],
]
def test_book_example(self):
assert self.matrix_b == fill_surrounded_regions(self.matrix_a)
@pytest.fixture(scope='module')
def build_straight_line_graph():
root = GraphNode()
one = GraphNode()
two = GraphNode()
three = GraphNode()
four = GraphNode()
root.add_neighbor(one)
one.add_neighbor(root)
one.add_neighbor(two)
two.add_neighbor(one)
two.add_neighbor(three)
three.add_neighbor(two)
three.add_neighbor(four)
four.add_neighbor(three)
return root
@pytest.fixture(scope='module')
def build_branching_graph():
root = GraphNode()
one = GraphNode()
two = GraphNode()
three = GraphNode()
four = GraphNode()
root.add_neighbor(one)
one.add_neighbor(root)
root.add_neighbor(two)
two.add_neighbor(root)
root.add_neighbor(three)
three.add_neighbor(root)
root.add_neighbor(four)
four.add_neighbor(root)
return root
@pytest.fixture(scope='module')
def build_cycle_graph():
one = GraphNode(val=1)
two = GraphNode(val=2)
three = GraphNode(val=3)
four = GraphNode(val=4)
one.add_neighbor(two)
two.add_neighbor(one)
two.add_neighbor(three)
three.add_neighbor(two)
three.add_neighbor(four)
four.add_neighbor(three)
four.add_neighbor(one)
one.add_neighbor(four)
one.add_neighbor(three)
three.add_neighbor(one)
return one
class TestIsMinimallyConnected(object):
"""
Question 19.4
"""
def test_straight_line_graph(self, build_straight_line_graph):
assert is_minimally_connected(build_straight_line_graph)
def test_none_case(self):
assert is_minimally_connected(None)
def test_star_case(self, build_branching_graph):
assert is_minimally_connected(build_branching_graph)
def test_cycle_case(self, build_cycle_graph):
assert not is_minimally_connected(build_cycle_graph)
class TestCloneGraph(object):
"""
Question 19.5
"""
def test_graph_cloning_example(self, build_cycle_graph):
cloned = clone_graph(build_cycle_graph)
# building edge list for original graph
original_edges = {}
orig_queue = [build_cycle_graph]
while orig_queue:
node = orig_queue.pop()
if node.val not in original_edges:
original_edges[node.val] = [x.val for x in node.neighbors]
node.color = BLACK
for x in node.neighbors:
if x.val not in original_edges:
orig_queue.append(x)
# check cloned graph against original_edges
cloned_queue = [cloned]
while cloned_queue:
node = cloned_queue.pop()
assert node.val in original_edges
neighbors = [x.val for x in node.neighbors]
assert sorted(original_edges[node.val]) == sorted(neighbors)
node.color = BLACK
for x in node.neighbors:
if x.color != BLACK:
cloned_queue.append(x)
|
11568067
|
import unittest
import pulse as p
class TestBPM(unittest.TestCase):
"""This test Class is for variances()"""
def test_range_of_bpm(self):
"""
This function assures that variances()
returns a one dimensional matrix.
"""
testing_uid = "1kzd0DmeunLGEeB0nWLFFaIfuFZn"
pulse = p.Pulse()
pulse.pulsebox_to_frames(testing_uid)
hr = pulse.bpm()
self.assertTrue(int(hr) > 0 and int(hr) <= 220)
if __name__ == '__main__':
    unittest.main()
|
11568070
|
from django.urls import path, re_path
from . import views
app_name = "twitter"
urlpatterns = [
path("", view=views.HomeView.as_view(), name="home"),
path("likes/", view=views.FavoriteListView.as_view(), name="favorite_list"),
re_path(
r"^(?P<screen_name>\w+)/$",
view=views.UserDetailView.as_view(),
name="user_detail",
),
re_path(
r"^(?P<screen_name>\w+)/likes/$",
view=views.AccountFavoriteListView.as_view(),
name="account_favorite_list",
),
re_path(
r"^(?P<screen_name>\w+)/(?P<twitter_id>\d+)/$",
view=views.TweetDetailView.as_view(),
name="tweet_detail",
),
]
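# Illustrative reverse() lookups (assuming this app's urlpatterns are included at the
# project root under the "twitter" namespace):
#   reverse("twitter:home")                                          -> "/"
#   reverse("twitter:user_detail", kwargs={"screen_name": "alice"})  -> "/alice/"
#   reverse("twitter:tweet_detail",
#           kwargs={"screen_name": "alice", "twitter_id": 1})        -> "/alice/1/"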
|
11568148
|
from collections import namedtuple
import os
import shutil
import sys
import tempfile
import yaml
from passpie.importers import find_importer, BaseImporter, get_instances
from passpie.importers.default_importer import DefaultImporter
from passpie.importers.pysswords_importer import PysswordsImporter
def mock_open():
try:
from mock import mock_open as mopen
    except ImportError:
from unittest.mock import mock_open as mopen
return mopen()
def test_find_importers_through_entry_points(mocker):
from passpie import importers
temp_dir = tempfile.mkdtemp()
sys.path.insert(0, temp_dir)
with open(os.path.join(temp_dir, 'fake_module.py'), 'w') as f:
f.write("""\
from passpie.importers import BaseImporter
class FakeKeepassImporterClass(BaseImporter):
pass
""")
import pkg_resources
fake_ep = pkg_resources.EntryPoint(
'fake_keepass', 'fake_module',
attrs=('FakeKeepassImporterClass', ))
mock_iter_entry_points = mocker.patch(
'pkg_resources.iter_entry_points',
return_value=iter([fake_ep, ]))
try:
target_klass = None
for klass in importers._get_importers_from_entry_points():
if klass.__name__ == 'FakeKeepassImporterClass':
target_klass = klass
break
mock_iter_entry_points.assert_called_once_with('passpie_importers')
assert target_klass.__name__ == 'FakeKeepassImporterClass'
assert target_klass.__module__ == 'fake_module'
finally:
sys.path.remove(temp_dir)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_get_all_yields_importers_from_entry_points(mocker):
from passpie import importers
class FakeImporter(importers.BaseImporter):
pass
fake_importers = {FakeImporter, }
mocker.patch.object(importers, '__all__', new=[])
mock_ep_finder = mocker.patch.object(
importers, '_get_importers_from_entry_points',
return_value=iter(fake_importers))
found_importers = set(importers.get_all())
assert found_importers == fake_importers
|
11568154
|
import numpy as np
from scipy.misc import imsave
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.nn.functional as F
import torchvision
from torchvision import models
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.transforms as Transforms
from dataloader import TrainDataset, DevDataset, TestDataset
from networks.baseblocks import ConvBlock, ResidualBlock, Upsample2xBlock
from networks.unet import UNet, unet_weight_init
from networks.hed import HED, HED_1L, hed_weight_init
from networks.resnet import ResnetGenerator, Upscale4xResnetGenerator, Upscale2xResnetGenerator
from networks.discriminators import NLayerDiscriminator
from networks.vggfeature import VGGFeatureMap
from utils.visualizer import Visualizer
from utils.loss import BCE2d
from utils.normalize import norm, denorm, weights_init_normal
from utils.target import PSNR, SSIM, batch_compare_filter, batch_SSIM
USE_GPU = torch.cuda.is_available()
NORM = 'batch'
def save_img(img, save_fn=''):
if not os.path.exists(os.path.split(save_fn)[0]):
os.makedirs(os.path.split(save_fn)[0])
if list(img.shape)[0] == 3:
# save_image = img * 125.0
save_image = img
save_image = save_image.clamp(0, 1).numpy().transpose(1, 2, 0)
else:
save_image = img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)
imsave(save_fn, save_image)
class SRCNN(nn.Module):
def __init__(self):
super(SRCNN, self).__init__()
self.conv1 = ConvBlock(3, 64, 9, 1, 4, norm=None, activation='relu')
self.conv2 = ConvBlock(64, 32, 1, 1, 0, norm=None, activation='relu')
self.conv3 = ConvBlock(32, 3, 5, 1, 2, norm=None, activation=None)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
return F.sigmoid(out)
class SRResnet(nn.Module):
def __init__(self, num_channels=3, base_filters=64, num_residuals=16):
super(SRResnet, self).__init__()
self.conv_ipt = ConvBlock(num_channels, base_filters, 9, 1, 4, activation='prelu', norm=None)
res_blocks = []
for _ in range(num_residuals):
res_blocks.append(ResidualBlock(base_filters, activation='prelu', norm=NORM))
self.residual_blocks = nn.Sequential(* res_blocks)
self.conv_mid = ConvBlock(base_filters, base_filters, 3, 1, 1, activation=None, norm=NORM)
self.upscale4x = nn.Sequential(
Upsample2xBlock(base_filters, base_filters, norm=NORM),
Upsample2xBlock(base_filters, base_filters, norm=NORM)
)
self.conv_opt = ConvBlock(base_filters, num_channels, 9, 1, 4, activation=None, norm=None)
def forward(self, x):
out = self.conv_ipt(x)
residual = out
out = self.residual_blocks(out)
out = self.conv_mid(out)
out += residual
out = self.upscale4x(out)
out = self.conv_opt(out)
return F.sigmoid(out)
class Model(object):
def __init__(self, cfg):
# parameter init
self.env = cfg.env
self.train_dataset = cfg.train_dataset
self.valid_dataset = cfg.valid_dataset
self.test_dataset = cfg.test_dataset
self.data_dir = cfg.data_dir
self.save_dir = cfg.save_dir
self.num_threads = int(cfg.num_threads)
self.num_epochs = int(cfg.num_epochs)
self.save_epochs = int(cfg.save_epochs)
self.pretrain_epochs = int(cfg.pretrain_epochs)
self.batch_size = int(cfg.batch_size)
self.valid_batch_size = int(cfg.valid_batch_size)
self.test_batch_size = int(cfg.test_batch_size)
self.plot_iter = int(cfg.plot_iter)
self.crop_size = int(cfg.crop_size)
self.scale_factor = int(cfg.scale_factor)
self.lr = float(cfg.lr)
def load_dataset(self, mode='train', random_scale=True, rotate=True, fliplr=True, fliptb=True):
if mode == 'train':
train_set = TrainDataset(os.path.join(self.data_dir, self.train_dataset),
crop_size=self.crop_size, scale_factor=self.scale_factor,
random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)
return DataLoader(dataset=train_set, num_workers=self.num_threads,
batch_size=self.batch_size, shuffle=True)
elif mode == 'valid':
valid_set = DevDataset(os.path.join(
self.data_dir, self.valid_dataset))
return DataLoader(dataset=valid_set, num_workers=self.num_threads,
batch_size=self.valid_batch_size, shuffle=True)
elif mode == 'test':
test_set = TestDataset(os.path.join(
self.data_dir, self.test_dataset))
return DataLoader(dataset=test_set, num_workers=self.num_threads,
batch_size=self.test_batch_size, shuffle=False)
def train(self, edgenetpath=None, sr2x1_path=None, sr2x2_path=None, srcnn_path=None, srresnet_path=None,
is_fine_tune=False, random_scale=True, rotate=True, fliplr=True, fliptb=True):
vis = Visualizer(self.env)
print('================ Loading datasets =================')
# load training dataset
print('## Current Mode: Train')
# train_data_loader = self.load_dataset(mode='valid')
train_data_loader = self.load_dataset(
mode='train', random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)
t_save_dir = 'results/train_result/'+self.train_dataset+"_{}"
if not os.path.exists(t_save_dir.format("origin")):
os.makedirs(t_save_dir.format("origin"))
if not os.path.exists(t_save_dir.format("lr4x")):
os.makedirs(t_save_dir.format("lr4x"))
if not os.path.exists(t_save_dir.format("srunit_2x")):
os.makedirs(t_save_dir.format("srunit_2x"))
if not os.path.exists(t_save_dir.format("bicubic")):
os.makedirs(t_save_dir.format("bicubic"))
if not os.path.exists(t_save_dir.format("bicubic2x")):
os.makedirs(t_save_dir.format("bicubic2x"))
if not os.path.exists(t_save_dir.format("srunit_common")):
os.makedirs(t_save_dir.format("srunit_common"))
if not os.path.exists(t_save_dir.format("srunit_2xbicubic")):
os.makedirs(t_save_dir.format("srunit_2xbicubic"))
if not os.path.exists(t_save_dir.format("srunit_4xbicubic")):
os.makedirs(t_save_dir.format("srunit_4xbicubic"))
if not os.path.exists(t_save_dir.format("srresnet")):
os.makedirs(t_save_dir.format("srresnet"))
if not os.path.exists(t_save_dir.format("srcnn")):
os.makedirs(t_save_dir.format("srcnn"))
##########################################################
##################### build network ######################
##########################################################
print('Building Networks and initialize parameters\' weights....')
# init sr resnet
srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu',learn_residual=True)
srresnet2x1.apply(weights_init_normal)
srresnet2x2.apply(weights_init_normal)
# init srresnet
srresnet = SRResnet()
srresnet.apply(weights_init_normal)
# init srcnn
srcnn = SRCNN()
srcnn.apply(weights_init_normal)
# init discriminator
discnet = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=5)
# init edgenet
edgenet = HED_1L()
if edgenetpath is None or not os.path.exists(edgenetpath):
raise Exception('Invalid edgenet model')
else:
pretrained_dict = torch.load(edgenetpath)
model_dict = edgenet.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
edgenet.load_state_dict(model_dict)
# init vgg feature
featuremapping = VGGFeatureMap(models.vgg19(pretrained=True))
# load pretrained srresnet or just initialize
if sr2x1_path is None or not os.path.exists(sr2x1_path):
print('===> initialize the srresnet2x1')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
# deblurnet.load_state_dict(torch.load(sr2x1_path))
pretrained_dict = torch.load(sr2x1_path)
model_dict = srresnet2x1.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srresnet2x1.load_state_dict(model_dict)
if sr2x2_path is None or not os.path.exists(sr2x2_path):
print('===> initialize the srresnet2x2')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
# deblurnet.load_state_dict(torch.load(sr2x2_path))
pretrained_dict = torch.load(sr2x2_path)
model_dict = srresnet2x2.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srresnet2x2.load_state_dict(model_dict)
if srresnet_path is None or not os.path.exists(srresnet_path):
            print('===> initialize the srresnet')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
pretrained_dict = torch.load(srresnet_path)
model_dict = srresnet.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srresnet.load_state_dict(model_dict)
if srcnn_path is None or not os.path.exists(srcnn_path):
print('===> initialize the srcnn')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
pretrained_dict = torch.load(srcnn_path)
model_dict = srcnn.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srcnn.load_state_dict(model_dict)
# optimizer init
# different learning rate
lr = self.lr
srresnet2x1_optimizer = optim.Adam(
srresnet2x1.parameters(), lr=lr, betas=(0.9, 0.999))
srresnet2x2_optimizer = optim.Adam(
srresnet2x2.parameters(), lr=lr, betas=(0.9, 0.999))
srresnet_optimizer = optim.Adam(
srresnet.parameters(), lr=lr, betas=(0.9, 0.999))
srcnn_optimizer = optim.Adam(
srcnn.parameters(), lr=lr, betas=(0.9, 0.999))
disc_optimizer = optim.Adam(
discnet.parameters(), lr=lr/10, betas=(0.9, 0.999))
# loss function init
MSE_loss = nn.MSELoss()
BCE_loss = nn.BCELoss()
# cuda accelerate
if USE_GPU:
edgenet.cuda()
srresnet2x1.cuda()
srresnet2x2.cuda()
srresnet.cuda()
srcnn.cuda()
discnet.cuda()
featuremapping.cuda()
MSE_loss.cuda()
BCE_loss.cuda()
print('\tCUDA acceleration is available.')
##########################################################
##################### train network ######################
##########################################################
import torchnet as tnt
from tqdm import tqdm
from PIL import Image
batchnorm = nn.BatchNorm2d(1).cuda()
upsample = nn.Upsample(scale_factor=2, mode='bilinear')
edge_avg_loss = tnt.meter.AverageValueMeter()
total_avg_loss = tnt.meter.AverageValueMeter()
disc_avg_loss = tnt.meter.AverageValueMeter()
psnr_2x_avg = tnt.meter.AverageValueMeter()
ssim_2x_avg = tnt.meter.AverageValueMeter()
psnr_4x_avg = tnt.meter.AverageValueMeter()
ssim_4x_avg = tnt.meter.AverageValueMeter()
psnr_bicubic_avg = tnt.meter.AverageValueMeter()
ssim_bicubic_avg = tnt.meter.AverageValueMeter()
psnr_2xcubic_avg = tnt.meter.AverageValueMeter()
ssim_2xcubic_avg = tnt.meter.AverageValueMeter()
psnr_4xcubic_avg = tnt.meter.AverageValueMeter()
ssim_4xcubic_avg = tnt.meter.AverageValueMeter()
psnr_srresnet_avg = tnt.meter.AverageValueMeter()
ssim_srresnet_avg = tnt.meter.AverageValueMeter()
psnr_srcnn_avg = tnt.meter.AverageValueMeter()
ssim_srcnn_avg = tnt.meter.AverageValueMeter()
srresnet2x1.train()
srresnet2x2.train()
srresnet.train()
srcnn.train()
discnet.train()
itcnt = 0
for epoch in range(self.num_epochs):
psnr_2x_avg.reset()
ssim_2x_avg.reset()
psnr_4x_avg.reset()
ssim_4x_avg.reset()
psnr_bicubic_avg.reset()
ssim_bicubic_avg.reset()
psnr_2xcubic_avg.reset()
ssim_2xcubic_avg.reset()
psnr_4xcubic_avg.reset()
ssim_4xcubic_avg.reset()
psnr_srresnet_avg.reset()
ssim_srresnet_avg.reset()
psnr_srcnn_avg.reset()
ssim_srcnn_avg.reset()
# learning rate is decayed by a factor every 20 epoch
            if (epoch + 1) % 20 == 0:
for param_group in srresnet2x1_optimizer.param_groups:
param_group["lr"] /= 10.0
print("Learning rate decay for srresnet2x1: lr={}".format(
srresnet2x1_optimizer.param_groups[0]["lr"]))
for param_group in srresnet2x2_optimizer.param_groups:
param_group["lr"] /= 10.0
print("Learning rate decay for srresnet2x2: lr={}".format(
srresnet2x2_optimizer.param_groups[0]["lr"]))
for param_group in srresnet_optimizer.param_groups:
param_group["lr"] /= 10.0
print("Learning rate decay for srresnet: lr={}".format(
srresnet_optimizer.param_groups[0]["lr"]))
for param_group in srcnn_optimizer.param_groups:
param_group["lr"] /= 10.0
print("Learning rate decay for srcnn: lr={}".format(
srcnn_optimizer.param_groups[0]["lr"]))
for param_group in disc_optimizer.param_groups:
param_group["lr"] /= 10.0
print("Learning rate decay for discnet: lr={}".format(
disc_optimizer.param_groups[0]["lr"]))
itbar = tqdm(enumerate(train_data_loader))
for ii, (hr, lr2x, lr4x, bc2x, bc4x) in itbar:
mini_batch = hr.size()[0]
hr_ = Variable(hr)
lr2x_ = Variable(lr2x)
lr4x_ = Variable(lr4x)
bc2x_ = Variable(bc2x)
bc4x_ = Variable(bc4x)
real_label = Variable(torch.ones(mini_batch))
fake_label = Variable(torch.zeros(mini_batch))
# cuda mode setting
if USE_GPU:
hr_ = hr_.cuda()
lr2x_ = lr2x_.cuda()
lr4x_ = lr4x_.cuda()
bc2x_ = bc2x_.cuda()
bc4x_ = bc4x_.cuda()
real_label = real_label.cuda()
fake_label = fake_label.cuda()
# =============================================================== #
# ================ Edge-based srresnet training ================= #
# =============================================================== #
sr2x_ = srresnet2x1(lr4x_)
sr4x_ = srresnet2x2(lr2x_)
bc2x_sr4x_ = srresnet2x2(bc2x_)
sr2x_bc4x_ = upsample(sr2x_)
'''===================== Train Discriminator ====================='''
if epoch + 1 > self.pretrain_epochs:
disc_optimizer.zero_grad()
#===== 2x disc loss =====#
real_decision_2x = discnet(lr2x_)
real_loss_2x = BCE_loss(
real_decision_2x, real_label.detach())
fake_decision_2x = discnet(sr2x_.detach())
fake_loss_2x = BCE_loss(
fake_decision_2x, fake_label.detach())
disc_loss_2x = real_loss_2x + fake_loss_2x
disc_loss_2x.backward()
disc_optimizer.step()
#===== 4x disc loss =====#
real_decision_4x = discnet(hr_)
real_loss_4x = BCE_loss(
real_decision_4x, real_label.detach())
fake_decision_4x = discnet(sr4x_.detach())
fake_loss_4x = BCE_loss(
fake_decision_4x, fake_label.detach())
disc_loss_4x = real_loss_4x + fake_loss_4x
disc_loss_4x.backward()
disc_optimizer.step()
disc_avg_loss.add(
(disc_loss_2x + disc_loss_4x).data.item())
'''=================== Train srresnet Generator ==================='''
edge_trade_off = [0.7, 0.2, 0.1, 0.05, 0.01, 0.3]
if epoch + 1 > self.pretrain_epochs:
a1, a2, a3 = 0.55, 0.1, 0.75
else:
a1, a2, a3 = 0.65, 0.0, 0.95
#============ calculate 2x loss ==============#
srresnet2x1_optimizer.zero_grad()
#### Edgenet Loss ####
pred = edgenet(sr2x_)
real = edgenet(lr2x_)
edge_loss_2x = BCE_loss(pred.detach(), real.detach())
# for i in range(6):
# edge_loss_2x += edge_trade_off[i] * \
# BCE_loss(pred[i].detach(), real[i].detach())
# edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])
#### Content Loss ####
content_loss_2x = MSE_loss(sr2x_, lr2x_) #+ 0.1*BCE_loss(1-sr2x_, 1-lr2x_)
#### Perceptual Loss ####
real_feature = featuremapping(lr2x_)
fake_feature = featuremapping(sr2x_)
vgg_loss_2x = MSE_loss(fake_feature, real_feature.detach())
#### Adversarial Loss ####
advs_loss_2x = BCE_loss(discnet(sr2x_), real_label) if epoch + 1 > self.pretrain_epochs else 0
#============ calculate scores ==============#
psnr_2x_score_process = batch_compare_filter(
sr2x_.cpu().data, lr2x, PSNR)
psnr_2x_avg.add(psnr_2x_score_process)
ssim_2x_score_process = batch_compare_filter(
sr2x_.cpu().data, lr2x, SSIM)
ssim_2x_avg.add(ssim_2x_score_process)
#============== loss backward ===============#
total_loss_2x = a1 * edge_loss_2x + a2 * advs_loss_2x + \
a3 * content_loss_2x + (1.0 - a3) * vgg_loss_2x
total_loss_2x.backward()
srresnet2x1_optimizer.step()
#============ calculate 4x loss ==============#
if is_fine_tune:
sr2x_ = srresnet2x1(lr4x_)
sr4x_ = srresnet2x2(sr2x_)
srresnet2x2_optimizer.zero_grad()
#### Edgenet Loss ####
pred = edgenet(sr4x_)
real = edgenet(hr_)
# edge_loss_4x = 0
edge_loss_4x = BCE_loss(pred.detach(), real.detach())
# for i in range(6):
# edge_loss_4x += edge_trade_off[i] * \
# BCE_loss(pred[i].detach(), real[i].detach())
# edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])
#### Content Loss ####
content_loss_4x = MSE_loss(sr4x_, hr_) #+ 0.1*BCE_loss(1-sr4x_, 1-hr_)
#### Perceptual Loss ####
real_feature = featuremapping(hr_)
fake_feature = featuremapping(sr4x_)
vgg_loss_4x = MSE_loss(fake_feature, real_feature.detach())
#### Adversarial Loss ####
advs_loss_4x = BCE_loss(discnet(sr4x_), real_label) if epoch + 1 > self.pretrain_epochs else 0
#============ calculate scores ==============#
psnr_4x_score_process = batch_compare_filter(
sr4x_.cpu().data, hr, PSNR)
psnr_4x_avg.add(psnr_4x_score_process)
ssim_4x_score_process = batch_compare_filter(
sr4x_.cpu().data, hr, SSIM)
ssim_4x_avg.add(ssim_4x_score_process)
psnr_bicubic_score = batch_compare_filter(
bc4x_.cpu().data, hr, PSNR)
psnr_bicubic_avg.add(psnr_bicubic_score)
ssim_bicubic_score = batch_compare_filter(
bc4x_.cpu().data, hr, SSIM)
ssim_bicubic_avg.add(ssim_bicubic_score)
psnr_2xcubic_score = batch_compare_filter(
bc2x_sr4x_.cpu().data, hr, PSNR)
psnr_2xcubic_avg.add(psnr_2xcubic_score)
ssim_2xcubic_score = batch_compare_filter(
bc2x_sr4x_.cpu().data, hr, SSIM)
ssim_2xcubic_avg.add(ssim_2xcubic_score)
psnr_4xcubic_score = batch_compare_filter(
sr2x_bc4x_.cpu().data, hr, PSNR)
psnr_4xcubic_avg.add(psnr_4xcubic_score)
ssim_4xcubic_score = batch_compare_filter(
sr2x_bc4x_.cpu().data, hr, SSIM)
ssim_4xcubic_avg.add(ssim_4xcubic_score)
#============== loss backward ===============#
total_loss_4x = a1 * edge_loss_4x + a2 * advs_loss_4x + \
a3 * content_loss_4x + (1.0 - a3) * vgg_loss_4x
total_loss_4x.backward()
srresnet2x2_optimizer.step()
total_avg_loss.add((total_loss_2x+total_loss_4x).data.item())
edge_avg_loss.add((edge_loss_2x+edge_loss_4x).data.item())
if epoch + 1 > self.pretrain_epochs:
disc_avg_loss.add((advs_loss_2x+advs_loss_4x).data.item())
if (ii+1) % self.plot_iter == self.plot_iter-1:
res = {'edge loss': edge_avg_loss.value()[0],
'generate loss': total_avg_loss.value()[0],
'discriminate loss': disc_avg_loss.value()[0]}
vis.plot_many(res, 'Deblur net Loss')
psnr_2x_score_origin = batch_compare_filter(
bc2x, lr2x, PSNR)
psnr_4x_score_origin = batch_compare_filter(bc4x, hr, PSNR)
res_psnr = {'2x_origin_psnr': psnr_2x_score_origin,
'2x_sr_psnr': psnr_2x_score_process,
'4x_origin_psnr': psnr_4x_score_origin,
'4x_sr_psnr': psnr_4x_score_process}
vis.plot_many(res_psnr, 'PSNR Score')
ssim_2x_score_origin = batch_compare_filter(
bc2x, lr2x, SSIM)
ssim_4x_score_origin = batch_compare_filter(bc4x, hr, SSIM)
res_ssim = {'2x_origin_ssim': ssim_2x_score_origin,
'2x_sr_ssim': ssim_2x_score_process,
'4x_origin_ssim': ssim_4x_score_origin,
'4x_sr_ssim': ssim_4x_score_process}
vis.plot_many(res_ssim, 'SSIM Score')
save_img(hr[0], os.path.join(t_save_dir.format("origin"), "{}.jpg".format(ii)))
save_img(lr4x[0], os.path.join(t_save_dir.format("lr4x"), "{}.jpg".format(ii)))
save_img(bc4x[0], os.path.join(t_save_dir.format("bicubic"), "{}.jpg".format(ii)))
save_img(bc2x[0], os.path.join(t_save_dir.format("bicubic2x"), "{}.jpg".format(ii)))
save_img(sr2x_.cpu().data[0], os.path.join(t_save_dir.format("srunit_2x"), "{}.jpg".format(ii)))
save_img(sr4x_.cpu().data[0], os.path.join(t_save_dir.format("srunit_common"), "{}.jpg".format(ii)))
save_img(bc2x_sr4x_.cpu().data[0], os.path.join(t_save_dir.format("srunit_2xbicubic"), "{}.jpg".format(ii)))
save_img(sr2x_bc4x_.cpu().data[0], os.path.join(t_save_dir.format("srunit_4xbicubic"), "{}.jpg".format(ii)))
# =============================================================== #
# ====================== srresnet training ====================== #
# =============================================================== #
sr4x_ = srresnet(lr4x_)
#============ calculate 4x loss ==============#
srresnet_optimizer.zero_grad()
#### Content Loss ####
content_loss_4x = MSE_loss(sr4x_, hr_)
#### Perceptual Loss ####
real_feature = featuremapping(hr_)
fake_feature = featuremapping(sr4x_)
vgg_loss_4x = MSE_loss(fake_feature, real_feature.detach())
#============ calculate scores ==============#
psnr_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr, PSNR)
psnr_srresnet_avg.add(psnr_4x_score)
ssim_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr, SSIM)
ssim_srresnet_avg.add(ssim_4x_score)
#============== loss backward ===============#
total_loss_4x = content_loss_4x + 0.2 * vgg_loss_4x
total_loss_4x.backward()
srresnet_optimizer.step()
save_img(sr4x_.cpu().data[0], os.path.join(t_save_dir.format("srresnet"), "{}.jpg".format(ii)))
# =============================================================== #
# ======================= srcnn training ======================== #
# =============================================================== #
sr4x_ = srcnn(bc4x_)
#============ calculate 4x loss ==============#
srcnn_optimizer.zero_grad()
#### Content Loss ####
content_loss_4x = MSE_loss(sr4x_, hr_)
#============ calculate scores ==============#
psnr_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr, PSNR)
psnr_srcnn_avg.add(psnr_4x_score)
ssim_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr, SSIM)
ssim_srcnn_avg.add(ssim_4x_score)
#============== loss backward ===============#
total_loss_4x = content_loss_4x
total_loss_4x.backward()
srcnn_optimizer.step()
save_img(sr4x_.cpu().data[0], os.path.join(t_save_dir.format("srcnn"), "{}.jpg".format(ii)))
#======================= Output result of total training processing =======================#
itcnt += 1
itbar.set_description("Epoch: [%2d] [%d/%d] PSNR_2x_Avg: %.6f, SSIM_2x_Avg: %.6f, PSNR_4x_Avg: %.6f, SSIM_4x_Avg: %.6f"
% ((epoch + 1), (ii + 1), len(train_data_loader),
psnr_2x_avg.value()[0], ssim_2x_avg.value()[
0],
psnr_4x_avg.value()[0], ssim_4x_avg.value()[0]))
if (ii+1) % self.plot_iter == self.plot_iter-1:
# test_ = deblurnet(torch.cat([y_.detach(), x_edge], 1))
hr_edge = edgenet(hr_)
sr2x_edge = edgenet(sr2x_)
sr4x_edge = edgenet(sr4x_)
vis.images(hr_edge.cpu().data, win='HR edge predict', opts=dict(
title='HR edge predict'))
vis.images(sr2x_edge.cpu().data, win='SR2X edge predict', opts=dict(
title='SR2X edge predict'))
vis.images(sr4x_edge.cpu().data, win='SR4X edge predict', opts=dict(
title='SR4X edge predict'))
sr4x_ = srresnet2x2(sr2x_)
vis.images(lr2x, win='LR2X image',
opts=dict(title='LR2X image'))
vis.images(lr4x, win='LR4X image',
opts=dict(title='LR4X image'))
vis.images(bc2x, win='BC2X image',
opts=dict(title='BC2X image'))
vis.images(bc4x, win='BC4X image',
opts=dict(title='BC4X image'))
vis.images(sr2x_.cpu().data, win='SR2X image',
opts=dict(title='SR2X image'))
vis.images(sr4x_.cpu().data, win='SR4X image',
opts=dict(title='SR4X image'))
vis.images(hr, win='HR image',
opts=dict(title='HR image'))
res = {
"bicubic PSNR": psnr_bicubic_avg.value()[0],
"bicubic SSIM": ssim_bicubic_avg.value()[0],
"srunit4x PSNR": psnr_4x_avg.value()[0],
"srunit4x SSIM": ssim_4x_avg.value()[0],
"2xbicubic PSNR": psnr_2xcubic_avg.value()[0],
"2xbicubic SSIM": ssim_2xcubic_avg.value()[0],
"4xbicubic PSNR": psnr_4xcubic_avg.value()[0],
"4xbicubic SSIM": ssim_4xcubic_avg.value()[0],
"srresnet PSNR": psnr_srresnet_avg.value()[0],
"srresnet SSIM": ssim_srresnet_avg.value()[0],
"srcnn PSNR": psnr_srcnn_avg.value()[0],
"srcnn SSIM": ssim_srcnn_avg.value()[0]
}
vis.metrics(res, "metrics")
if (epoch + 1) % self.save_epochs == 0:
self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
self.save_model(srresnet, os.path.join(self.save_dir, 'checkpoints', 'srresnet'), 'srresnet_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
self.save_model(srcnn, os.path.join(self.save_dir, 'checkpoints', 'srcnn'), 'srcnn_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
# Save final trained model and results
vis.save([self.env])
self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, self.num_epochs))
self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, self.num_epochs))
        self.save_model(srresnet, os.path.join(self.save_dir, 'checkpoints', 'srresnet'), 'srresnet_param_batch{}_lr{}_epoch{}'.
                        format(self.batch_size, self.lr, self.num_epochs))
self.save_model(srcnn, os.path.join(self.save_dir, 'checkpoints', 'srcnn'), 'srcnn_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, self.num_epochs))
def test(self, sr2x1_path=None, sr2x2_path=None):
test_data_dir = os.path.join(self.data_dir, self.test_dataset)
result_data_dir = os.path.join(self.save_dir, "test_results", "2x2UnitNet_SR_"+self.test_dataset)
if not os.path.exists(result_data_dir):
os.makedirs(result_data_dir)
# judge whether model exists
if not os.path.exists(sr2x1_path):
raise Exception('sr2x1 resnet model not exists')
if not os.path.exists(sr2x2_path):
raise Exception('sr2x2 resnet model not exists')
# load network params
srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x1.load_state_dict(torch.load(sr2x1_path))
srresnet2x2.load_state_dict(torch.load(sr2x2_path))
if USE_GPU:
srresnet2x1.cuda()
srresnet2x2.cuda()
import torchnet as tnt
from tqdm import tqdm
from PIL import Image
psnr_4x_avg = tnt.meter.AverageValueMeter()
ssim_4x_avg = tnt.meter.AverageValueMeter()
srresnet2x1.eval()
srresnet2x2.eval()
# processing test data
iterbar = tqdm(os.listdir(test_data_dir))
for img_name in iterbar:
img = Image.open(os.path.join(test_data_dir, img_name)).convert("RGB")
transform = Transforms.RandomCrop(self.crop_size)
img = transform(img)
w, h = img.size[0], img.size[1]
w_lr4x, h_lr4x = int(
w // self.scale_factor), int(h // self.scale_factor)
w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor
# transform tensor
hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)
lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)
hr_ = Transforms.ToTensor()(hr).unsqueeze(0)
lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)
if USE_GPU:
hr_ = hr_.cuda()
lr4x_ = lr4x_.cuda()
sr4x_ = srresnet2x2(srresnet2x1(lr4x_))
# calculate PSNR & SSIM
psnr_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr_, PSNR)
ssim_4x_score = batch_compare_filter(
sr4x_.cpu().data, hr_, SSIM)
psnr_4x_avg.add(psnr_4x_score)
ssim_4x_avg.add(ssim_4x_score)
# save image
save_img(sr4x_.cpu().data, os.path.join(result_data_dir, img_name))
print("final PSNR score: {}".format(psnr_4x_avg.value()[0]))
print("final SSIM score: {}".format(ssim_4x_avg.value()[0]))
def save_model(self, model, save_dir, model_name, mtype='pkl'):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if mtype == 'pkl':
save_path = os.path.join(save_dir, model_name+'.pkl')
torch.save(model.state_dict(), save_path)
elif mtype == 'pth':
save_path = os.path.join(save_dir, model_name+'.pth')
torch.save(model.state_dict(), save_path)
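# Minimal configuration sketch (field names come from Model.__init__ above; the
# values are placeholders, not the original experiment settings):
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        env='srunitnet', train_dataset='train', valid_dataset='valid', test_dataset='test',
        data_dir='data', save_dir='results', num_threads=4, num_epochs=100,
        save_epochs=10, pretrain_epochs=5, batch_size=16, valid_batch_size=1,
        test_batch_size=1, plot_iter=100, crop_size=128, scale_factor=4, lr=1e-4)
    model = Model(cfg)
    # model.train(edgenetpath='checkpoints/hed.pth')  # hypothetical path; needs data and models on disk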
|
11568160
|
import pytest
from pythondi import inject, Provider, configure_after_clear
class Repo:
def __init__(self):
pass
class SQLRepo:
def __init__(self):
pass
class Usecase:
def __init__(self):
pass
class UserUsecase:
def __init__(self):
pass
def test_sync_inject_without_parameter():
provider = Provider()
provider.bind(Repo, SQLRepo)
configure_after_clear(provider)
@inject()
def func(repo: Repo):
assert isinstance(repo, SQLRepo)
func()
def test_sync_inject_without_parameter_multiple_bind():
provider = Provider()
provider.bind(Repo, SQLRepo)
provider.bind(Usecase, UserUsecase)
configure_after_clear(provider)
@inject()
def func(repo: Repo, usecase: Usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
func()
def test_sync_inject_with_classes_argument():
provider = Provider()
provider.bind(classes={Repo: SQLRepo})
configure_after_clear(provider)
@inject()
def func(repo: Repo):
assert isinstance(repo, SQLRepo)
func()
def test_sync_inject_with_classes_argument_multiple_bind():
provider = Provider()
provider.bind(classes={Repo: SQLRepo, Usecase: UserUsecase})
configure_after_clear(provider)
@inject()
def func(repo: Repo, usecase: Usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
func()
def test_sync_inject_with_parameter():
provider = Provider()
provider.bind(Repo, SQLRepo)
configure_after_clear(provider)
@inject(repo=SQLRepo)
def func(repo):
assert isinstance(repo, SQLRepo)
func()
def test_sync_inject_with_parameter_multiple_bind():
provider = Provider()
configure_after_clear(provider)
@inject(repo=SQLRepo, usecase=UserUsecase)
def func(repo, usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
func()
@pytest.mark.asyncio
async def test_async_inject_without_parameter():
provider = Provider()
provider.bind(Repo, SQLRepo)
configure_after_clear(provider)
@inject()
async def func(repo: Repo):
assert isinstance(repo, SQLRepo)
await func()
@pytest.mark.asyncio
async def test_async_inject_without_parameter_multiple_bind():
provider = Provider()
provider.bind(Repo, SQLRepo)
provider.bind(Usecase, UserUsecase)
configure_after_clear(provider)
@inject()
async def func(repo: Repo, usecase: Usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
await func()
@pytest.mark.asyncio
async def test_async_inject_with_classes_argument():
provider = Provider()
provider.bind(classes={Repo: SQLRepo})
configure_after_clear(provider)
@inject()
async def func(repo: Repo):
assert isinstance(repo, SQLRepo)
await func()
@pytest.mark.asyncio
async def test_async_inject_with_classes_argument_multiple_bind():
provider = Provider()
provider.bind(classes={Repo: SQLRepo, Usecase: UserUsecase})
configure_after_clear(provider)
@inject()
async def func(repo: Repo, usecase: Usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
await func()
@pytest.mark.asyncio
async def test_async_inject_with_parameter():
provider = Provider()
provider.bind(Repo, SQLRepo)
configure_after_clear(provider)
@inject(repo=SQLRepo)
async def func(repo):
assert isinstance(repo, SQLRepo)
await func()
@pytest.mark.asyncio
async def test_async_inject_with_parameter_multiple_bind():
provider = Provider()
configure_after_clear(provider)
@inject(repo=SQLRepo, usecase=UserUsecase)
async def func(repo, usecase):
assert isinstance(repo, SQLRepo)
assert isinstance(usecase, UserUsecase)
await func()
@pytest.mark.asyncio
async def test_manual_provide_args_outside():
provider = Provider()
provider.bind(classes={Repo: SQLRepo})
configure_after_clear(provider)
class MockRepo:
pass
@inject()
async def func(repo: Repo):
return repo
result = await func(repo=MockRepo())
assert isinstance(result, MockRepo)
|
11568191
|
from typing import Optional, List, Dict, Any, Union
import discord
from personate.utils.logger import logger
import random
class UpdateableMessageWrapper:
'''This contains a discord.WebhookMessage or a discord.Message object, and has a method for updating its content, by passing down the kwargs from the call.'''
def __init__(self, message: discord.Message):
self.message = message
async def update(self, **kwargs):
return await self.message.edit(**kwargs)
class Face:
"""
This is maybe one of the simplest classes in the library. It manages webhooks and posts as the webhook with a specific appearance (avatar_url and username).
"""
def __init__(
self,
bot: discord.Bot,
avatar_url: str,
username: str,
loading_message: Optional[Union[List[str], str]] = None,
):
self.bot = bot
self.avatar_url = avatar_url
self.username = username
self.loading_message = loading_message
self.webhooks: Dict[int, discord.Webhook] = {}
logger.debug(
f"Face created with avatar_url: {avatar_url} and username: {username}"
)
async def get_webhook(self, channel_id: int) -> Optional[discord.Webhook]:
channel = self.bot.get_channel(channel_id)
if isinstance(channel, discord.TextChannel):
def predicate(webhook: discord.Webhook):
return webhook.user == self.bot.user
webhooks: List[discord.Webhook] = await channel.webhooks()
webhook = discord.utils.find(lambda m: predicate(m), webhooks)
if not webhook:
if self.bot.user:
try:
webhook = await channel.create_webhook(name=self.bot.user.name)
except discord.HTTPException:
pass
if webhook:
return webhook
logger.debug(
f"Was unable to find a webhook for channel: {channel_id}. This might be because the bot lacks the relevant permissions (Manage Webhooks), or for some other bizarre reason."
)
return None
async def send_custom(
self,
channel_id: int,
content: str,
avatar_url: str,
username: str,
**kwargs,
) -> UpdateableMessageWrapper:
"""Flexible method that handles different cases."""
channel = self.bot.get_channel(channel_id)
if not isinstance(channel, discord.TextChannel) or not channel:
raise ValueError(
f"Channel: {channel_id} is not a valid text channel. Please provide a valid text channel id."
)
webhook = await self.get_webhook(channel.id)
logger.debug(f"Sent message to channel: {channel.id} with content: {content}")
if webhook:
message = await webhook.send(
content=content,
avatar_url=avatar_url,
username=username,
wait=True,
**kwargs,
)
else:
message = await channel.send(content, **kwargs)
return UpdateableMessageWrapper(message)
async def send_loading(self, channel_id: int) -> UpdateableMessageWrapper:
if not self.loading_message:
self.loading_message = "...thinking..."
if isinstance(self.loading_message, str):
loading_message = self.loading_message
else:
loading_message = random.choice(self.loading_message)
logger.debug(
f"Sent loading message to channel: {int} with content: {loading_message}"
)
return await self.send_custom(
channel_id, loading_message, self.avatar_url, self.username
)
async def send(self, channel_id: int, content: str, **kwargs) -> UpdateableMessageWrapper:
logger.debug(f"Sent message to channel: {channel_id} with content: {content}")
return await self.send_custom(
channel_id, content, self.avatar_url, self.username, **kwargs
)
|
11568208
|
XK_kra = 0x3a2
XK_kappa = 0x3a2
XK_Rcedilla = 0x3a3
XK_Itilde = 0x3a5
XK_Lcedilla = 0x3a6
XK_Emacron = 0x3aa
XK_Gcedilla = 0x3ab
XK_Tslash = 0x3ac
XK_rcedilla = 0x3b3
XK_itilde = 0x3b5
XK_lcedilla = 0x3b6
XK_emacron = 0x3ba
XK_gcedilla = 0x3bb
XK_tslash = 0x3bc
XK_ENG = 0x3bd
XK_eng = 0x3bf
XK_Amacron = 0x3c0
XK_Iogonek = 0x3c7
XK_Eabovedot = 0x3cc
XK_Imacron = 0x3cf
XK_Ncedilla = 0x3d1
XK_Omacron = 0x3d2
XK_Kcedilla = 0x3d3
XK_Uogonek = 0x3d9
XK_Utilde = 0x3dd
XK_Umacron = 0x3de
XK_amacron = 0x3e0
XK_iogonek = 0x3e7
XK_eabovedot = 0x3ec
XK_imacron = 0x3ef
XK_ncedilla = 0x3f1
XK_omacron = 0x3f2
XK_kcedilla = 0x3f3
XK_uogonek = 0x3f9
XK_utilde = 0x3fd
XK_umacron = 0x3fe
|
11568224
|
from typing import Tuple
import tensorflow as tf
def get_inference_function(model: tf.keras.Model, input_shape: Tuple[int, int]):
"""Return convertible inference function with provided model."""
def inference_func(inputs):
return model(inputs, training=False)
tensor_spec = tf.TensorSpec(shape=(1, *input_shape, 3), dtype=tf.float32)
return tf.function(func=inference_func, input_signature=[tensor_spec])
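# Minimal usage sketch (the model below is a placeholder, not from the original source):
if __name__ == "__main__":
    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(224, 224, 3)),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(10),
    ])
    infer = get_inference_function(model, input_shape=(224, 224))
    logits = infer(tf.zeros((1, 224, 224, 3)))  # matches the (1, H, W, 3) TensorSpec
    print(logits.shape)  # (1, 10)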
|
11568252
|
import argparse
import logging
import sys
import os
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
from builders import builder
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Controller for the fuzzing framework.')
subparsers = parser.add_subparsers(help='sub-command help.', dest="command")
subparsers.required = True
minimize_parser = subparsers.add_parser("minimize", help="Minimize seeds for a binary")
minimize_parser.add_argument("-p", "--package", required=True, type=str,
help="The package to be examined. Must be a pacman package.")
minimize_parser.add_argument("-Q", dest="qemu", action="store_true", default=False,
help="Activate qemu mode when inferring file types.")
minimize_parser.add_argument("-param", "--parameter", required=False, type=str,
help="The parameter to the json file. Use = to pass hyphens(-)",
                                 default=None)  # Must exist in Docker
minimize_parser.add_argument("-s", "--seeds", required=True, type=str, help="Which seeds do we need?")
minimize_parser.add_argument("-b", "--binary", required=False, type=str, help="Path to the binary to fuzz.")
minimize_parser.add_argument("-v", "--output_volume", required=True, help="In which should the files be stored?")
minimize_parser.add_argument("-n", "--name", required=False, help="The name of the docker container", default=None)
minimize_parser.add_argument("-afile", "--afl_out_file", type=str, required=True,
help="Where should the afl configuration be stored?")
minimize_parser.add_argument("-tmintotal", "--tmin_total_time", type=int, required=False, default=None,
help="For how long should tmin run in total at max?")
minimize_parser.set_defaults(
which='minimize') # https://stackoverflow.com/questions/8250010/argparse-identify-which-subparser-was-used
fuzz_parser = subparsers.add_parser("fuzz", help="Fuzz the binary")
fuzz_parser.add_argument("-p", "--package", required=True, type=str,
help="The package to be examined. Must be a pacman package.")
fuzz_parser.add_argument("-t", "--timeout", required=False, type=float, help="The timeout for afl",
default=None) # Default timeout: 2 hours
fuzz_parser.add_argument("-Q", dest="qemu", action="store_true", default=False,
help="Activate qemu mode when inferring file types.")
fuzz_parser.add_argument("-param", "--parameter", required=False, type=str,
help="The parameter to the json file. Use = to pass hyphens(-)",
                             default=None) # Must exist in docker
fuzz_parser.add_argument("-s", "--seeds", required=False, type=str, help="Which seeds do we need?")
fuzz_parser.add_argument("-b", "--binary", required=False, type=str, help="Path to the binary to fuzz.")
fuzz_parser.add_argument("-v", "--output_volume", required=True, help="In which should the files be stored?")
fuzz_parser.add_argument("-n", "--name", required=False, help="The name of the docker container", default=None)
group = fuzz_parser.add_mutually_exclusive_group(required=True)
group.add_argument("-adir", "--afl_resume_dir", help="Resume from afl out dir.")
group.add_argument("-afile", "--afl_out_file", type=str,
help="Start over. Where should the afl configuration be stored/read from?")
fuzz_parser.set_defaults(
which='fuzz') # https://stackoverflow.com/questions/8250010/argparse-identify-which-subparser-was-used
eval_parser = subparsers.add_parser("evalfuzz", help="Evaluate the fuzzing for the binary")
eval_parser.add_argument("-p", "--package", required=False, type=str,
help="The package to be examined. Must be a pacman package.")
eval_parser.add_argument("-ft", "--fuzzer_timeout", required=False, type=float,
help="The timeout for afl (the whole fuzzer process)",
default=None) # Default timeout: None ( take the one from config)
eval_parser.add_argument("-t", "--timeout", required=False, type=float,
help="The timeout for afl (per run)",
default=None) # Default timeout: None ( take the one from config)
eval_parser.add_argument("-Q", dest="qemu", action="store_true", default=False,
help="Activate qemu mode when inferring file types.")
eval_parser.add_argument("-param", "--parameter", required=False, type=str,
help="The parameter to the json file. Use = to pass hyphens(-)",
                             default=None) # Must exist in docker
eval_parser.add_argument("-s", "--seeds", required=True, type=str, help="Which seeds do we need?")
eval_parser.add_argument("-b", "--binary", required=True, type=str, help="Path to the binary to fuzz.")
eval_parser.add_argument("-v", "--output_volume", required=True, help="In which should the files be stored?")
eval_parser.add_argument("-n", "--name", required=False, help="The name of the docker container", default=None)
group = eval_parser.add_mutually_exclusive_group(required=True)
group.add_argument("-afile", "--afl_out_file", type=str,
help="Start over. Where should the afl configuration be stored?")
eval_parser.set_defaults(which="evalfuzz")
args = parser.parse_args()
logfilename = os.path.join(args.output_volume, args.package + ".log")
logging.basicConfig(filename=logfilename, level=logging.INFO, format='%(levelname)s %(asctime)s: %(message)s',
filemode='a')
parameter = None
if args.parameter:
if args.parameter[0] == '"' and args.parameter[-1] == '"': # Parameter is enclosed by " ", we don't want that
parameter = args.parameter[1:-1]
else:
parameter = args.parameter
if args.which == "minimize" or args.which == "fuzz" or args.which == "eval":
use_qemu = args.qemu
b = builder.Builder(package=args.package, qemu=args.qemu)
if not b.install():
print("Could not install package, exiting")
sys.exit(-1)
use_qemu = b.qemu
if args.which == "minimize":
import minimzer
minimzer.minize(parameter=parameter, seeds_dir=args.seeds, binary_path=args.binary, package=args.package,
volume_path=args.output_volume, afl_config_file_name=args.afl_out_file,
qemu=use_qemu, name=args.name, tmin_total_time=args.tmin_total_time)
if args.which == "fuzz":
use_qemu = args.qemu
if args.afl_out_file:
import fuzzer_wrapper
res = fuzzer_wrapper.prepare_and_start_fuzzer(parameter=parameter, seeds_dir=args.seeds,
binary_path=args.binary, package=args.package,
volume_path=args.output_volume,
afl_config_file_name=args.afl_out_file, qemu=use_qemu,
name=args.name, timeout=args.timeout)
if not res:
sys.exit(-1)
else:
import fuzzer_wrapper
res = fuzzer_wrapper.resume_fuzzer(afl_dir=args.afl_resume_dir, binary_path=args.binary,
parameter=parameter, qemu=use_qemu, timeout=args.timeout)
if not res:
sys.exit(-1)
if args.which == "evalfuzz":
use_qemu = args.qemu
from evalscripts import eval_fuzzing
eval_fuzzing.eval_fuzzing(parameter=parameter, seeds=args.seeds, binary=args.binary, package=args.package,
output_volume=args.output_volume, afl_out_file=args.afl_out_file, qemu=use_qemu,
name=args.name, timeout=args.timeout, fuzzer_timeout=args.fuzzer_timeout)
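# Illustrative invocation sketch (the script name, package, and paths are
# hypothetical and not part of the original file):
#   python controller.py minimize -p tar -s /seeds -v /out -afile afl_config.json
#   python controller.py fuzz -p tar -s /seeds -v /out -afile afl_config.json -t 3600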
|
11568271
|
import sqlite3
import os
from ... import ErsiliaBase
SLUGDB_FILE = ".slug.db"
class SlugDb(ErsiliaBase):
def __init__(self, config_json=None):
ErsiliaBase.__init__(self, config_json=config_json)
self.file_path = os.path.join(self.eos_dir, SLUGDB_FILE)
self._table = "slugs"
self.create_table()
def _connect(self):
return sqlite3.connect(self.file_path)
def create_table(self):
if self._table is None:
return
sql = """
CREATE TABLE IF NOT EXISTS {0} (
model_id text,
slug text,
PRIMARY KEY (model_id, slug)
);
""".format(
self._table
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
def insert(self, model_id, slug):
if self._table is None:
return
sql = """
INSERT OR IGNORE INTO {0} (model_id, slug) VALUES ('{1}', '{2}')
""".format(
self._table, model_id, slug
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
def delete_by_model_id(self, model_id):
if self._table is None:
return
sql = """
DELETE FROM {0}
WHERE model_id = '{1}'
""".format(
self._table, model_id
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
def delete_by_slug(self, slug):
if self._table is None:
return
sql = """
DELETE FROM {0}
WHERE slug = '{1}'
""".format(
self._table, slug
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
def delete(self, model_id, slug):
if self._table is None:
return
sql = """
DELETE FROM {0}
WHERE model_id = '{1}' AND slug = '{2}'
""".format(
self._table, model_id, slug
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
def models_of_slug(self, slug):
sql = """
SELECT model_id FROM {0}
WHERE slug = '{1}'
""".format(
self._table, slug
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
res = {x[0] for x in c.fetchall()}
conn.close()
return res
def slugs_of_model(self, model_id):
sql = """
SELECT slug FROM {0}
WHERE model_id = '{1}'
""".format(
self._table, model_id
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
res = {x[0] for x in c.fetchall()}
conn.close()
return res
def clean(self):
if self._table is None:
return
sql = """
DELETE FROM {0}
""".format(
self._table
)
conn = self._connect()
c = conn.cursor()
c.execute(sql)
conn.commit()
conn.close()
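# Illustrative sketch (not part of the original class): the statements above
# interpolate values directly into the SQL text; an equivalent insert written
# with sqlite3 parameter binding would look roughly like this.
#
#     def insert(self, model_id, slug):
#         conn = self._connect()
#         conn.execute(
#             "INSERT OR IGNORE INTO {0} (model_id, slug) VALUES (?, ?)".format(self._table),
#             (model_id, slug),
#         )
#         conn.commit()
#         conn.close()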
|
11568343
|
import pykd
import re
from common.v_0_1_1.common_utils import *
pyLog = PyLog(r'C:\local\tmp\TTT-taskhost-get-addUrl.txt')
util = Util(pyLog)
pyLog.log2Scr('='*10 + ' Start ' + '='*10)
util.runCmd(r'bc *;g')
#util.runCmd(r'bp rpcrt4!Invoke')
#util.runCmd(r'bp wininet!s_UrlCacheAddUrl')
util.runCmd(r'bp wininet!CCacheContainer::AddUrl')
util.runCmd(r'bp wininet!WxVerifySameDirectory')
#util.runCmd(r'bp esent!ErrorIOMgrIssueIO')
util.runCmd(r'bd *;g-;be *')
util.runCmdLog(r'bl', False)
'''
while True:
ret = util.runCmd(r'g')
if Util.ttt_test2end(ret):
pyLog.log2Scr('='*10 + ' End ' + '='*10)
break
util.runCmd(r'.time')
ret = util.runCmd(r'kP3')
pyLog.log(ret)
pyLog.flush()
'''
#bpList = [r'bp rpcrt4!Invoke', 'bp wininet!s_UrlCacheAddUrl', r'bp wininet!CCacheContainer::AddUrl', r'bp esent!ErrorIOMgrIssueIO']
bpList = [r'bp wininet!CCacheContainer::AddUrl']
for bpStr in bpList:
util.runCmd(r'bc *;g;g-')
util.runCmd(bpStr)
util.runCmdLog(r'bl', False)
while True:
ret = util.runCmd(r'g')
if Util.ttt_test2end(ret):
pyLog.log('='*10 + ' End ' + '='*10)
break
util.runCmd(r'.time')
util.runCmdLog(r'kL3', False)
util.runCmd(r'bd *;pt')
ret = util.runCmd(r'r')
pyLog.log(ret)
pyLog.flush()
util.runCmd(r'be *')
'''
2 Time Travel Position: 258A40000268
41 Time Travel Position: 258A4000029D
80 Time Travel Position: 258A40000740
86 Time Travel Position: 25954000008F
47 Time Travel Position: 25954000009D
8 Time Travel Position: 2595400000A3
22 Time Travel Position: 73B880000268
60 Time Travel Position: 73B88000029D
99 Time Travel Position: 73B88000054E
105 Time Travel Position: 73F34000008F
66 Time Travel Position: 73F34000009D
28 Time Travel Position: 73F3400000A3
'''
pyLog.close()
|
11568361
|
import numpy as np
# Function to return the list of isotope indices for an input element
def get_list_iso_index(specie, inst):
specie_list = []
for i_gl in range(0,len(inst.history.isotopes)):
if (specie+'-') in inst.history.isotopes[i_gl]:
specie_list.append(i_gl)
return specie_list
# List of charge numbers (NEED to be as in the yields table)
Z_charge = []
for i in range(1,84):
# Exclude elements not considered in NuGrid yields
if (not i == 43) and (not i == 61):
Z_charge.append(i)
# List of elements (NuGrid yields)
elements = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', \
'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', \
'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', \
'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', \
'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Sm', \
'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', \
'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']
nb_elements = len(elements)
# List of atomic weights used to compute the mass fractions
atom_weigth = [1.0079, 4.0026, 6.941, 9.0122, 10.81, 12.011, 14.007, 15.999, 18.998, 20.180, \
22.990, 24.305, 26.982, 28.086, 30.974, 32.065, 35.453, 39.453, 39.098, \
40.078, 44.956, 47.867, 50.942, 51.996, 54.938, 55.845, 58.933, 58.693, \
63.546, 65.390, 69.723, 72.610, 74.922, 78.96, 79.904, 83.80, 85.468, \
87.62, 88.906, 91.224, 92.906, 95.94, 98.0, 101.07, 102.91, 106.42, 107.87, \
112.41, 114.82, 118.71, 121.76, 127.60, 126.90, 131.29, 132.91, 137.33, \
138.91, 140.12, 140.91, 144.24, 145.0, 150.36, 151.96, 157.25, 158.93, 162.50, \
164.93, 167.26, 168.93, 173.04, 174.97, 178.49, 180.95, 183.84, 186.21, \
190.23, 192.22, 195.08, 196.97, 200.59, 204.38, 207.2, 208.98]
# Read solar abundances (number densities)
solar_Z = []
solar_ab = []
solar_ab_path = 'Lodders_et_al_2009.txt'
with open("Lodders_et_al_2009.txt", 'r') as f:
not_finished = True
for line in f:
split_line = [str(x) for x in line.split()]
if not_finished:
solar_Z.append(int(split_line[0]))
solar_ab.append(10**(float(split_line[2])-12))
if split_line[1] == 'Bi':
not_finished = False
# Convert number of atoms into masses
for i in range(len(solar_ab)):
solar_ab[i] *= atom_weigth[i]
# Normalize to 1.0
norm = 1.0/sum(solar_ab)
for i in range(len(solar_ab)):
solar_ab[i] *= norm
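# Illustrative sanity check (not part of the original script): the normalized
# solar mass fractions should sum to one within floating-point error.
assert abs(sum(solar_ab) - 1.0) < 1e-10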
# Function to extract the contribution of individual sources
def get_individual_sources(inst, i_step_sol):
# Declare the abundances arrays
m_el_all = np.zeros(nb_elements)
m_el_agb = np.zeros(nb_elements)
m_el_massive = np.zeros(nb_elements)
m_el_sn1a = np.zeros(nb_elements)
m_el_nsm = np.zeros(nb_elements)
    # Get the mass distribution of individual sources
for i_el in range(0,nb_elements):
specie_list = get_list_iso_index(elements[i_el], inst)
for i_iso in range(0,len(specie_list)):
m_el_all[i_el] += inst.ymgal[i_step_sol][specie_list[i_iso]]
m_el_agb[i_el] += inst.ymgal_agb[i_step_sol][specie_list[i_iso]]
m_el_massive[i_el] += inst.ymgal_massive[i_step_sol][specie_list[i_iso]]
m_el_sn1a[i_el] += inst.ymgal_1a[i_step_sol][specie_list[i_iso]]
m_el_nsm[i_el] += inst.ymgal_nsm[i_step_sol][specie_list[i_iso]]
    # Normalize each source
norm_all_for_all = 1.0 / sum(m_el_all)
for i_el in range(0,nb_elements):
m_el_all[i_el] *= norm_all_for_all
m_el_agb[i_el] *= norm_all_for_all
m_el_massive[i_el] *= norm_all_for_all
m_el_sn1a[i_el] *= norm_all_for_all
m_el_nsm[i_el] *= norm_all_for_all
# Return abundances patterns
return m_el_all, m_el_agb, m_el_massive, m_el_sn1a, m_el_nsm
# Get the position of the element labels
yy = np.zeros(nb_elements)
for i in range(0,nb_elements):
yy[i] = 10**(np.log10(solar_ab[i])+1.0)
# Milky Way OMEGA+ parameters
#kwargs = {"special_timesteps":150, "t_star":1.0, "mgal":1.0,
# "m_DM_0":1.0e12, "sfe":3.0e-10, "mass_loading":0.5,
# "imf_yields_range":[1,30],
# "table":'yield_tables/agb_and_massive_stars_K10_K06_0.5HNe.txt',
# "exp_infall":[[100/2.2, 0.0, 0.68e9], [13.0/2.2, 1.0e9, 7.0e9]],
# "nsmerger_table":'yield_tables/r_process_arnould_2007.txt',
# "ns_merger_on":True, "nsm_dtd_power":[1e7, 10e9, -1]}
# Common parameters to all yields table
kwargs = dict()
kwargs["special_timesteps"] = 150
kwargs["t_star"] = 1.0
kwargs["mgal"] = 1.0
kwargs["m_DM_0"] = 1.0e12
kwargs["nsmerger_table"] = 'yield_tables/r_process_arnould_2007.txt'
kwargs["ns_merger_on"] = True
kwargs["nsm_dtd_power"] = [1e7, 10e9, -1]
kwargs["m_ej_nsm"] = 2.5e-2
# C15 LC18
kwargs["exp_infall"] = [[50.0, 0.0, 0.68e9], [7.0, 1.0e9, 7.0e9]]
kwargs["imf_yields_range"] = [1.0, 100.0]
kwargs["sfe"] = 2.2e-10
kwargs["mass_loading"] = 0.9
kwargs["transitionmass"] = 8.0
kwargs["Z_trans"] = -1
kwargs["table"]= "yield_tables/agb_and_massive_stars_C15_LC18_R_mix.txt"
# Timestep index where the Sun should approximately form.
# The index is only valid with "special_timesteps=120".
# Do not modify.
i_t_Sun = 143
|
11568365
|
import logging
# Config for logger
class Config:
base_url = "wss://api.bale.ai/v1/bots/"
request_timeout = 5
use_graylog = False
graylog_host = "127.0.01"
graylog_port = 12201
log_level = logging.DEBUG # DEBUG | INFO | ERROR | WARNING | CRITICAL
log_facility_name = "python_bale_bot"
source = "bot_name"
|
11568373
|
import re
import musicbrainzngs
from .base import BaseScraper
musicbrainzngs.set_useragent("salmon", "1.0", "<EMAIL>")
class MusicBrainzBase(BaseScraper):
url = site_url = "https://musicbrainz.org"
release_format = "/release/{rls_id}"
    regex = re.compile(r"^https?://(?:www\.)?musicbrainz\.org/release/([a-z0-9\-]+)$")
async def create_soup(self, url):
rls_id = re.search(r"/release/([a-f0-9\-]+)$", url)[1]
return musicbrainzngs.get_release_by_id(
rls_id,
[
"artists",
"labels",
"recordings",
"release-groups",
"media",
"artist-credits",
"artist-rels",
"recording-level-rels",
],
)["release"]
|
11568392
|
from football_packing.packing import packing
from football_packing.plot_packing import plot_packing
|
11568398
|
from ibis.expr.types import TableExpr
import pytest
from sql_to_ibis import query
from sql_to_ibis.tests.utils import (
assert_ibis_equal_show_diff,
assert_state_not_change,
join_params,
resolved_columns,
)
@assert_state_not_change
def test_distinct(forest_fires):
"""
Test use of the distinct keyword
:return:
"""
my_table = query("select distinct area, rain from forest_fires")
ibis_table = forest_fires[["area", "rain"]].distinct()
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_columns_maintain_order_chosen(forest_fires):
my_table = query("select area, rain from forest_fires")
ibis_table = forest_fires[["area", "rain"]]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_subquery(forest_fires):
"""
Test ability to perform subqueries
:return:
"""
my_table = query("select * from (select area, rain from forest_fires) rain_area")
ibis_table = forest_fires[["area", "rain"]]
assert_ibis_equal_show_diff(ibis_table, my_table)
@join_params
@assert_state_not_change
def test_joins(
digimon_move_mon_join_columns,
sql_join: str,
ibis_join: str,
digimon_move_list,
digimon_mon_list,
):
my_table = query(
f"select * from digimon_mon_list {sql_join} join "
"digimon_move_list on "
"digimon_mon_list.attribute = digimon_move_list.attribute"
)
ibis_table = digimon_mon_list.join(
digimon_move_list,
predicates=digimon_mon_list.Attribute == digimon_move_list.Attribute,
how=ibis_join,
)[digimon_move_mon_join_columns]
assert_ibis_equal_show_diff(ibis_table, my_table)
@join_params
@assert_state_not_change
def test_join_specify_selection(
sql_join: str, ibis_join: str, digimon_move_list, digimon_mon_list
):
"""
Test join
:return:
"""
my_table = query(
f"""select power from digimon_mon_list {sql_join} join
digimon_move_list
on digimon_mon_list.attribute = digimon_move_list.attribute"""
)
ibis_table = digimon_mon_list.join(
digimon_move_list,
predicates=digimon_mon_list.Attribute == digimon_move_list.Attribute,
how=ibis_join,
)[digimon_move_list.Power.name("power")]
assert_ibis_equal_show_diff(ibis_table, my_table)
@join_params
@assert_state_not_change
def test_join_wo_specifying_table(
digimon_move_mon_join_columns,
sql_join: str,
ibis_join: str,
digimon_move_list,
digimon_mon_list,
):
"""
Test join where table isn't specified in join
:return:
"""
my_table = query(
f"""
select * from digimon_mon_list {sql_join} join
digimon_move_list
on mon_attribute = move_attribute
"""
)
ibis_table = digimon_mon_list.join(
digimon_move_list,
predicates=digimon_mon_list.mon_attribute == digimon_move_list.move_attribute,
how=ibis_join,
)[digimon_move_mon_join_columns]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_cross_joins(
digimon_move_mon_join_columns, digimon_move_list, digimon_mon_list
):
"""
    Test cross join
:return:
"""
my_table = query(
"""select * from digimon_mon_list cross join
digimon_move_list"""
)
ibis_table = digimon_mon_list.cross_join(digimon_move_list)[
digimon_move_mon_join_columns
]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_cross_join_with_selection(digimon_move_list, digimon_mon_list):
"""
    Test cross join with a column selection
:return:
"""
my_table = query(
"""select power from digimon_mon_list cross join
digimon_move_list"""
)
ibis_table = digimon_mon_list.cross_join(digimon_move_list)[
digimon_move_list.Power.name("power")
]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_nested_subquery(forest_fires):
"""
Test nested subqueries
:return:
"""
my_table = query(
"""select * from
(select wind, rh from
(select * from forest_fires) fires) wind_rh"""
)
ibis_table = forest_fires[["wind", "RH"]].relabel({"RH": "rh"})
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_union(forest_fires):
"""
Test union in queries
:return:
"""
my_table = query(
"""
select * from forest_fires order by wind desc limit 5
union
select * from forest_fires order by wind asc limit 5
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(5)
ibis_table = ibis_table1.union(ibis_table2, distinct=True)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_union_distinct(forest_fires):
"""
Test union distinct in queries
:return:
"""
my_table = query(
"""
select * from forest_fires order by wind desc limit 5
union distinct
select * from forest_fires order by wind asc limit 5
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(5)
ibis_table = ibis_table1.union(ibis_table2, distinct=True)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_union_all(forest_fires):
"""
    Test union all in queries
:return:
"""
my_table = query(
"""
select * from forest_fires order by wind desc limit 5
union all
select * from forest_fires order by wind asc limit 5
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(5)
ibis_table = ibis_table1.union(ibis_table2)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
@pytest.mark.parametrize("set_op", ["intersect", "intersect distinct"])
def test_intersect_distinct(forest_fires, set_op: str):
"""
Test intersect in queries
:return:
"""
my_table = query(
f"""
select * from forest_fires order by wind desc limit 5
{set_op}
select * from forest_fires order by wind asc limit 3
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(3)
ibis_table = ibis_table1.intersect(ibis_table2)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_except_distinct(forest_fires):
"""
Test except distinct in queries
:return:
"""
my_table = query(
"""
select * from forest_fires order by wind desc limit 5
except distinct
select * from forest_fires order by wind asc limit 3
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(3)
ibis_table = ibis_table1.difference(ibis_table2).distinct()
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_except_all(forest_fires):
"""
    Test except all in queries
:return:
"""
my_table = query(
"""
select * from forest_fires order by wind desc limit 5
except all
select * from forest_fires order by wind asc limit 3
"""
)
ibis_table1 = forest_fires.sort_by(("wind", False)).head(5)
ibis_table2 = forest_fires.sort_by(("wind", True)).head(3)
ibis_table = ibis_table1.difference(ibis_table2)
assert_ibis_equal_show_diff(ibis_table, my_table)
@pytest.mark.xfail(reason="This needs to be solved", raises=AssertionError)
@pytest.mark.parametrize(
"sql",
[
"""
select * from
(
(select X, Y, rain from forest_fires) table1
join
(select X, Y, rain from forest_fires) table2
on table1.x = table2.x) sub
""",
"""
select * from
(select X, Y, rain from forest_fires) table1
join
(select X, Y, rain from forest_fires) table2
on table1.x = table2.x
""",
],
)
@assert_state_not_change
def test_joining_two_subqueries_with_overlapping_columns_same_table(sql, forest_fires):
my_table = query(sql)
columns = ["X", "Y", "rain"]
def get_select_rename_columns(alias: str):
my_columns = forest_fires.get_columns(columns)
renamed_columns = []
for i, column in enumerate(my_columns):
renamed_columns.append(column.name(f"{alias}.{columns[i]}"))
return my_columns, renamed_columns
select1, renamed1 = get_select_rename_columns("table1")
select2, renamed2 = get_select_rename_columns("table2")
subquery1 = forest_fires[select1]
subquery2 = forest_fires[select2]
joined = subquery1.join(
subquery2, predicates=subquery1.X == subquery2.X
).projection(renamed1 + renamed2)
assert_ibis_equal_show_diff(joined, my_table)
@pytest.mark.parametrize(
"sql",
[
"""
select * from
((select type, attribute, power from digimon_move_list) table1
join
(select type, attribute, digimon from
digimon_mon_list) table2
on table1.type = table2.type) sub
""",
"""
select * from
((select type, attribute, power from digimon_move_list) table1
join
(select type, attribute, digimon from digimon_mon_list) table2
on table1.type = table2.type) sub
""",
],
)
@assert_state_not_change
def test_joining_two_subqueries_with_overlapping_columns_different_tables(
sql, digimon_mon_list, digimon_move_list
):
my_table = query(sql)
subquery1 = digimon_move_list[
[
digimon_move_list.Type.name("type"),
digimon_move_list.Attribute.name("attribute"),
digimon_move_list.Power.name("power"),
]
]
subquery2 = digimon_mon_list[
[
digimon_mon_list.Type.name("type"),
digimon_mon_list.Attribute.name("attribute"),
digimon_mon_list.Digimon.name("digimon"),
]
]
ibis_table = subquery1.join(
subquery2, predicates=subquery1.type == subquery2.type
).projection(
[
subquery1.type.name("table1.type"),
subquery1.attribute.name("table1.attribute"),
subquery1.power.name("power"),
subquery2.type.name("table2.type"),
subquery2.attribute.name("table2.attribute"),
subquery2.digimon,
]
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_multi_column_joins(time_data):
my_table = query(
"""
SELECT
table1.team,
table1.start_time_count,
table2.start_time_count_d
FROM
(SELECT
team,
count(start_time)
AS start_time_count
FROM
time_data
GROUP BY team) table1
INNER JOIN
(SELECT team, count(start_time) AS start_time_count_d FROM
(SELECT distinct team, start_time FROM time_data) intermediate GROUP BY team
) table2
ON
table1.team = table2.team AND
table1.start_time_count = table2.start_time_count_d
"""
)
table1 = time_data.group_by(time_data.team).aggregate(
[time_data.start_time.count().name("start_time_count")]
)
intermediate = time_data.projection(
[time_data.team, time_data.start_time]
).distinct()
table2 = intermediate.group_by(intermediate.team).aggregate(
[intermediate.start_time.count().name("start_time_count_d")]
)
ibis_table = table1.join(
table2,
predicates=(
(table1.team == table2.team)
& (table1.start_time_count == table2.start_time_count_d)
),
how="inner",
).projection([table1.team, table1.start_time_count, table2.start_time_count_d])
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_column_values_in_subquery(digimon_move_list):
my_table = query(
"""
select move, type, power from
digimon_move_list
where
power in
( select max(power) as power
from digimon_move_list
group by type ) t1
"""
)
subquery = (
digimon_move_list.groupby(digimon_move_list.get_column("Type").name("type"))
.aggregate(digimon_move_list.Power.max().name("power"))
.drop(["type"])
)
ibis_table = digimon_move_list.filter(
digimon_move_list.Power.isin(subquery.get_column("power"))
).projection(
[
digimon_move_list.Move.name("move"),
digimon_move_list.Type.name("type"),
digimon_move_list.Power.name("power"),
]
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_column_values_in_other_table(digimon_move_list, digimon_mon_list):
my_table = query(
"""
select power from
digimon_move_list
where
type in
( select type
from digimon_mon_list ) t1
"""
)
digimon_mon_list_lower_name = digimon_mon_list.projection(
[digimon_mon_list.Type.name("type")]
)
ibis_table = digimon_move_list.filter(
digimon_move_list.Type.isin(digimon_mon_list_lower_name.type)
).projection(
[
digimon_move_list.Power.name("power"),
]
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_limit(forest_fires):
"""
Test limit clause
:return:
"""
my_table = query("""select * from forest_fires limit 10""")
ibis_table = forest_fires.head(10)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_join_with_overlapping_column_names(digimon_mon_list, digimon_move_list):
query_text = """
SELECT mon_list.attribute as attribute
FROM digimon_mon_list as mon_list
inner join digimon_move_list as move_list
on move_list.attribute=mon_list.attribute
where memory < 70
"""
my_table = query(query_text)
renamed_columns = resolved_columns(
digimon_mon_list, digimon_move_list, "mon_list", "move_list"
)
joined_tables = digimon_mon_list.join(
digimon_move_list,
predicates=digimon_move_list.Attribute == digimon_mon_list.Attribute,
how="inner",
)[renamed_columns]
filtered = joined_tables.filter(digimon_mon_list.Memory < 70)
ibis_table = filtered[[digimon_mon_list.Attribute.name("attribute")]]
assert_ibis_equal_show_diff(ibis_table, my_table)
def get_columns(table: TableExpr):
return table.get_columns(table.columns)
@join_params
@assert_state_not_change
def test_join_more_than_2_tables(
multitable_join_main_table,
multitable_join_lookup_table,
multitable_join_relationship_table,
multitable_join_promotion_table,
sql_join: str,
ibis_join: str,
):
query_text = f"""
SELECT multi_main.id
FROM multi_main
{sql_join} join multi_lookup
on multi_main.lookup_id = multi_lookup.id
{sql_join} join multi_relationship
on multi_main.relationship_id = multi_relationship.id
{sql_join} join multi_promotion
on multi_main.promotion_id = multi_promotion.id
"""
my_table = query(query_text)
join_type = ibis_join
ibis_table = (
multitable_join_main_table.join(
multitable_join_lookup_table,
predicates=multitable_join_main_table.lookup_id
== multitable_join_lookup_table.id,
how=join_type,
)
.join(
multitable_join_relationship_table,
predicates=multitable_join_main_table.relationship_id
== multitable_join_relationship_table.id,
how=join_type,
)
.join(
multitable_join_promotion_table,
predicates=multitable_join_main_table.promotion_id
== multitable_join_promotion_table.id,
how=join_type,
)
.projection([multitable_join_main_table.id])
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
@pytest.mark.skip("Need to implement this")
def test_cross_join_more_than_2_tables(
multitable_join_main_table,
multitable_join_lookup_table,
multitable_join_relationship_table,
multitable_join_promotion_table,
):
query_text = """
SELECT multi_main.id
FROM multi_main
cross join multi_lookup
cross join multi_relationship
cross join multi_promotion
"""
my_table = query(query_text)
ibis_table = (
multitable_join_main_table.cross_join(
multitable_join_lookup_table,
)
.cross_join(
multitable_join_relationship_table,
)
.cross_join(
multitable_join_promotion_table,
)
.projection([multitable_join_main_table.id])
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
@join_params
def test_join_without_overlapping_columns(
multitable_join_main_table,
multitable_join_promotion_table_no_overlap,
sql_join: str,
ibis_join: str,
):
my_table = query(
f"""
select id, promotion from multi_main {sql_join} join
multi_promotion_no_overlap
on id = other_id
"""
)
join_type = ibis_join
ibis_table = multitable_join_main_table.join(
multitable_join_promotion_table_no_overlap,
predicates=multitable_join_main_table.id
== multitable_join_promotion_table_no_overlap.other_id,
how=join_type,
)[
multitable_join_main_table.id,
multitable_join_promotion_table_no_overlap.promotion,
]
assert_ibis_equal_show_diff(ibis_table, my_table)
# @assert_state_not_change
# def test_join_with_alias():
# query_text = """
# SELECT mon_list.attribute as attribute
# FROM digimon_mon_list
# left join digimon_move_list
# on move_list.attribute=mon_list.attribute
# left join mon_list
# on mon_list.number=move_list.sp_cost
# """
# my_table = query(query_text)
|
11568450
|
import os
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module
class FolderDataset(BaseDataset):
extension_names = ['.jpg', '.png', '.bmp', '.jpeg']
def __init__(self, *args, **kwargs):
super(FolderDataset, self).__init__(*args, **kwargs)
@staticmethod
def parse_filename(text):
return text.split('_')[-1]
def get_name_list(self):
for item in os.listdir(self.root):
file_name, file_extension = os.path.splitext(item)
if file_extension in self.extension_names:
label = self.parse_filename(file_name)
if self.filter(label):
continue
else:
self.img_names.append(os.path.join(self.root, item))
self.gt_texts.append(label)
self.samples = len(self.gt_texts)
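# Illustrative note (not part of the original file): parse_filename takes the
# text after the last underscore of the file stem as the ground-truth label,
# e.g. "word_00042_hello.png" -> label "hello", so image files are expected to
# be named "<anything>_<label>.<extension>".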
|
11568474
|
import os
import sys
import shutil
import json
import time
import random
import datetime
import argparse
import torch
import numpy as np
import tensorboardX
import utils
from data import Tree, PartGraphShapesDataset
import trainer as trainer_def
import model_gen as gen_model_def
import model_dis as dis_model_def
### parameter setup
parser = argparse.ArgumentParser()
parser.add_argument('--category', type=str, help='data category')
parser.add_argument('--exp_suffix', type=str, help='name suffix of the training run', default='nothing')
#parser.add_argument('--seed', type=int, help='random seed (for reproducibility) [-1 means to randomly sample one]', default=3124256514)
parser.add_argument('--seed', type=int, help='random seed (for reproducibility) [-1 means to randomly sample one]', default=-1)
parser.add_argument("--resume_ckpt", type=str, help='if to resume, specify the ckpt file [default: None, meaning train from scratch]', default=None)
# parameters for gan training
parser.add_argument('--dataset_mode', type=str, help='[gan] dataset mode [sample_by_template, sample_by_shape]', default='sample_by_shape')
parser.add_argument("--X", type=int, help='[gan] number of templates in a batch', default=6)
parser.add_argument("--Y", type=int, help='[gan] number of shapes per template in a batch', default=3)
parser.add_argument("--lr", type=float, help='[gan] learning rate', default=0.0001)
parser.add_argument('--num_workers', type=int, help='[gan] number of worker threads for data loading', default=6)
parser.add_argument("--n_critic", type=int, help='[gan] number of dis training steps per gen training step', default=10)
parser.add_argument("--max_epochs", type=int, help='[gan] max number of epochs to train', default=1000000)
parser.add_argument("--epochs_per_eval", type=int, help='[gan] number of training epochs per evaluation', default=50)
parser.add_argument('--num_visu', type=int, help='[gan] number of generated outputs to visualize [default: None --> no visu output]', default=5)
# Metrics related parameters
parser.add_argument("--epochs_per_metric", type=int, help='[metric] number of training epochs per metric computation', default=10)
parser.add_argument("--num_fake_per_metric", type=int, help='[metric] number of fake examples to generate per metric evaluation', default=1000)
parser.add_argument("--fid_mode", type=str, help='[metric] FID score mode [PointNet, DGCNN]', default='PointNet')
parser.add_argument("--num_point_per_shape", type=int, help='[gen] number of points per shape for fpd', default=2048)
# parameters for generator
parser.add_argument("--z_dim", type=int, help='[gen] the gaussian noise z dimension', default=256)
parser.add_argument("--num_point_per_part", type=int, help='[gen] number of points per part', default=1000)
parser.add_argument("--max_part_per_parent", type=int, help='[gen] max part per parent', default=10)
parser.add_argument("--template_feat_len", type=int, help='[gen] template feature length', default=64)
parser.add_argument("--template_symmetric_type", type=str, help='[gen] template symmetric type', default='max')
# parameters for discriminator
parser.add_argument("--pointnet_dis_score_multiplier", type=float, help='[dis] pointnet_dis_score_multiplier', default=1.0)
parser.add_argument("--pooling_type", type=str, help='[dis] pooling type [max, avg, or mix]', default='max')
parser.add_argument("--symmetric_type", type=str, help='[dis] symmetric type', default='max')
parser.add_argument("--final_activation", action='store_true', help='[dis] final sigmoid activation', default=False)
# shared parameters
parser.add_argument("--feat_len", type=int, help='[gen/dis] feature length', default=256)
parser.add_argument("--hidden_len", type=int, help='[gen/dis] hidden length', default=256)
# loss weights
parser.add_argument('--loss_weight_gp', type=float, help='[gan] coefficient for gradient-penalty loss term', default=1.0)
# parse args
args = parser.parse_args()
# generate other parameters
args.exp_name = 'exp_%s_%s' % (args.category, args.exp_suffix)
args.data_dir = './data/%s_geo' % args.category
args.pg_dir = './stats/part_trees/%s_all_no_other_less_than_10_parts-train' % args.category
with open(os.path.join(args.pg_dir, 'visu_pg_list.txt'), 'r') as fin:
args.visu_pg_list = [int(l.rstrip()) for l in fin.readlines()]
### preparation
# load category information
Tree.load_category_info(args.category)
# check if a training run with this name already exists; if so, ask before overwriting it
if os.path.exists(os.path.join('log', args.exp_name)):
response = input('A training run named "%s" already exists, overwrite? (y/n) ' % args.exp_name)
if response != 'y':
sys.exit()
if os.path.exists(os.path.join('log', args.exp_name)):
shutil.rmtree(os.path.join('log', args.exp_name))
# create directories for this run
if not os.path.exists(os.path.join('log', args.exp_name)):
os.makedirs(os.path.join('log', args.exp_name))
# file log
flog = open(os.path.join('log', args.exp_name, 'train_log.txt'), 'w')
# backup command
flog.write(' '.join(sys.argv) + '\n')
# set training device
device = torch.device('cuda:0')
# control randomness
if args.seed < 0:
args.seed = random.randint(1, 10000)
print('Random Seed: %d' % args.seed)
flog.write(f'Random Seed: {args.seed}\n')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
# save config
torch.save(args, os.path.join('log', args.exp_name, 'conf.pth'))
### main procedure
# get models
generator = gen_model_def.Network(args, device).to(device)
utils.printout(flog, str(generator))
discriminator = dis_model_def.Network(args, device).to(device)
utils.printout(flog, str(discriminator))
# create dataset and dataloader
train_dataset = PartGraphShapesDataset(args.data_dir, args.pg_dir, device, args.Y, mode=args.dataset_mode)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.X, shuffle=True, \
num_workers=args.num_workers, worker_init_fn=utils.worker_init_fn, collate_fn=utils.collate_feats)
# tensorboard logger
logger = tensorboardX.SummaryWriter(log_dir=os.path.join('log', args.exp_name))
# get a gan trainer
trainer = trainer_def.Trainer(args.exp_name, generator, discriminator, \
args=args, device=device, flog=flog, logger=logger)
# if to resume
if args.resume_ckpt is None:
start_epoch = 0
else:
start_epoch = int(args.resume_ckpt.split('/')[-1].split('.')[0].split('_')[1])
start_iteration = start_epoch * (len(train_dataset) // args.X)
print('start_epoch: %d, start_iteration: %d' % (start_epoch, start_iteration))
flog.write('start_epoch: %d, start_iteration: %d\n' % (start_epoch, start_iteration))
if start_epoch != 0:
print("Loading checkpoints from: %s" % (args.resume_ckpt))
flog.write("Loading checkpoints from: %s\n" % (args.resume_ckpt))
trainer.load_model(args.resume_ckpt)
# train
trainer.train(train_dataset, train_dataloader, \
start_iteration=start_iteration, start_epoch=start_epoch)
# exit
flog.close()
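# Illustrative invocation sketch (the script name and category are hypothetical
# and not part of the original file):
#   python train_gan.py --category Chair --exp_suffix run1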
|
11568517
|
from typing import Optional
from ground.hints import (Maybe,
Scalar)
from reprit.base import generate_repr
from .angle import Angle
from .compound import (Compound,
Indexable,
Linear,
Location,
Relation,
Shaped)
from .geometry import (Coordinate,
Geometry)
from .iterable import non_negative_min
from .multipoint import Multipoint
from .packing import (MIN_MIX_NON_EMPTY_COMPONENTS,
pack_mix)
from .point import Point
class Mix(Indexable[Coordinate]):
__slots__ = '_components', '_discrete', '_linear', '_shaped'
def __init__(self,
discrete: Maybe[Multipoint[Coordinate]],
linear: Maybe[Linear[Coordinate]],
shaped: Maybe[Shaped[Coordinate]]) -> None:
"""
Initializes mix.
Time complexity:
``O(1)``
Memory complexity:
``O(1)``
"""
self._components = self._discrete, self._linear, self._shaped = (
discrete, linear, shaped)
__repr__ = generate_repr(__init__)
def __and__(self, other: Compound[Coordinate]) -> Compound[Coordinate]:
"""
Returns intersection of the mix with the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix & mix == mix
True
"""
discrete_part = self.discrete & other
linear_part = self.linear & other
shaped_part = self.shaped & other
context = self._context
if isinstance(linear_part, Multipoint):
shaped_part |= linear_part
linear_part = context.empty
elif isinstance(linear_part, Mix):
shaped_part |= linear_part.discrete
linear_part = linear_part.linear
if isinstance(shaped_part, Multipoint):
linear_part |= shaped_part
if isinstance(linear_part, Mix):
discrete_part |= linear_part.discrete
linear_part = linear_part.linear
shaped_part = context.empty
elif isinstance(shaped_part, Linear):
linear_part |= shaped_part
shaped_part = context.empty
elif isinstance(shaped_part, Mix):
linear_part = (linear_part | shaped_part.linear
| shaped_part.discrete)
shaped_part = shaped_part.shaped
if isinstance(linear_part, Multipoint):
discrete_part |= linear_part
linear_part = context.empty
elif isinstance(linear_part, Mix):
discrete_part |= linear_part.discrete
linear_part = linear_part.linear
return pack_mix(discrete_part, linear_part, shaped_part, context.empty,
context.mix_cls)
__rand__ = __and__
def __contains__(self, point: Point[Coordinate]) -> bool:
"""
Checks if the mix contains the point.
Time complexity:
``O(log elements_count)`` expected after indexing,
``O(elements_count)`` worst after indexing or without it
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> Point(0, 0) in mix
True
>>> Point(1, 1) in mix
True
>>> Point(2, 2) in mix
True
>>> Point(3, 3) in mix
True
>>> Point(4, 3) in mix
True
>>> Point(5, 2) in mix
True
>>> Point(6, 1) in mix
True
>>> Point(7, 0) in mix
False
"""
return bool(self.locate(point))
def __eq__(self, other: 'Mix[Coordinate]') -> bool:
"""
Checks if mixes are equal.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix == mix
True
"""
return self is other or (self._components == other._components
if isinstance(other, Mix)
else NotImplemented)
def __ge__(self, other: Compound[Coordinate]) -> bool:
"""
Checks if the mix is a superset of the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix >= mix
True
"""
return (other is self._context.empty
or self == other
or ((self.shaped is not self._context.empty
or not isinstance(other, Shaped)
and (not isinstance(other, Mix)
or other.shaped is self._context.empty))
and self.relate(other) in (Relation.EQUAL,
Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN)
if isinstance(other, Compound)
else NotImplemented))
def __gt__(self, other: Compound[Coordinate]) -> bool:
"""
Checks if the mix is a strict superset of the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix > mix
False
"""
return (other is self._context.empty
or self != other
and ((self.shaped is not self._context.empty
or not isinstance(other, Shaped)
and (not isinstance(other, Mix)
or other.shaped is self._context.empty))
and self.relate(other) in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN)
if isinstance(other, Compound)
else NotImplemented))
def __hash__(self) -> int:
"""
Returns hash value of the mix.
Time complexity:
            ``O(elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> hash(mix) == hash(mix)
True
"""
return hash(self._components)
def __le__(self, other: Compound[Coordinate]) -> bool:
"""
Checks if the mix is a subset of the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix <= mix
True
"""
return (self == other
or (not isinstance(other, Multipoint)
and (self.shaped is self._context.empty
or not isinstance(other, Linear)
and (not isinstance(other, Mix)
or other.shaped is not self._context.empty))
and self.relate(other) in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE,
Relation.EQUAL)
if isinstance(other, Compound)
else NotImplemented))
def __lt__(self, other: Compound[Coordinate]) -> bool:
"""
Checks if the mix is a strict subset of the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix < mix
False
"""
return (self != other
and (not isinstance(other, Multipoint)
and (self.shaped is self._context.empty
or not isinstance(other, Linear)
and (not isinstance(other, Mix)
or other.shaped is not self._context.empty))
and self.relate(other) in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE)
if isinstance(other, Compound)
else NotImplemented))
def __or__(self, other: Compound[Coordinate]) -> Compound[Coordinate]:
"""
Returns union of the mix with the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix | mix == mix
True
"""
context = self._context
if isinstance(other, Multipoint):
return context.mix_cls(self.discrete
| (other - self.shaped - self.linear),
self.linear, self.shaped)
elif isinstance(other, Linear):
discrete_part, linear_part = self.discrete, self.linear
shaped_part = self.shaped | other
if isinstance(shaped_part, Linear):
linear_part = linear_part | shaped_part | discrete_part
shaped_part = context.empty
elif isinstance(shaped_part, Mix):
linear_part = linear_part | shaped_part.linear | discrete_part
shaped_part = shaped_part.shaped
else:
# other is subset of the shaped component
return pack_mix(discrete_part, linear_part, shaped_part,
context.empty, context.mix_cls)
if isinstance(linear_part, Mix):
discrete_part, linear_part = (linear_part.discrete,
linear_part.linear)
else:
discrete_part = context.empty
return pack_mix(discrete_part, linear_part, shaped_part,
context.empty, context.mix_cls)
elif isinstance(other, (Shaped, Mix)):
return self.shaped | other | self.linear | self.discrete
else:
return NotImplemented
__ror__ = __or__
def __rsub__(self, other: Compound[Coordinate]) -> Compound[Coordinate]:
"""
Returns difference of the other geometry with the mix.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
"""
return ((other - self.discrete) & (other - self.linear)
& other - self.shaped)
def __sub__(self, other: Compound[Coordinate]) -> Compound[Coordinate]:
"""
Returns difference of the mix with the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (EMPTY, Contour, Mix, Multipoint, Point,
... Polygon, Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix - mix is EMPTY
True
"""
return pack_mix(self.discrete - other, self.linear - other,
self.shaped - other, self._context.empty,
self._context.mix_cls)
def __xor__(self, other: Compound[Coordinate]) -> Compound[Coordinate]:
"""
Returns symmetric difference of the mix with the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons)
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.linear, Multipolygon)
else [self.shaped]))
>>> from gon.base import (EMPTY, Contour, Mix, Multipoint, Point,
... Polygon, Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix ^ mix is EMPTY
True
"""
context = self._context
if isinstance(other, Multipoint):
rest_other = other - self.shaped - self.linear
return pack_mix(self.discrete ^ rest_other, self.linear,
self.shaped, context.empty, context.mix_cls)
elif isinstance(other, Linear):
discrete_part, linear_part = self.discrete, self.linear
shaped_part = self.shaped ^ other
if isinstance(shaped_part, Linear):
linear_part = linear_part ^ shaped_part ^ discrete_part
shaped_part = context.empty
elif isinstance(shaped_part, Mix):
linear_part = linear_part ^ shaped_part.linear ^ discrete_part
shaped_part = shaped_part.shaped
else:
# other is subset of the shaped component
return pack_mix(discrete_part, linear_part, shaped_part,
context.empty, context.mix_cls)
if isinstance(linear_part, Mix):
discrete_part, linear_part = (linear_part.discrete,
linear_part.linear)
else:
discrete_part = context.empty
return pack_mix(discrete_part, linear_part, shaped_part,
context.empty, context.mix_cls)
elif isinstance(other, (Shaped, Mix)):
return self.shaped ^ other ^ self.linear ^ self.discrete
else:
return NotImplemented
__rxor__ = __xor__
@property
def centroid(self) -> Point[Coordinate]:
"""
Returns centroid of the mix.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.centroid == Point(3, 3)
True
"""
return (self.linear
if self.shaped is self._context.empty
else self.shaped).centroid
@property
def discrete(self) -> Maybe[Multipoint[Coordinate]]:
"""
Returns discrete component of the mix.
Time complexity:
``O(1)``
Memory complexity:
``O(1)``
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.discrete == Multipoint([Point(3, 3)])
True
"""
return self._discrete
@property
def shaped(self) -> Maybe[Shaped[Coordinate]]:
"""
Returns shaped component of the mix.
Time complexity:
``O(1)``
Memory complexity:
``O(1)``
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.shaped == Polygon(Contour([Point(0, 0), Point(6, 0),
... Point(6, 6), Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4),
... Point(4, 4), Point(4, 2)])])
True
"""
return self._shaped
@property
def linear(self) -> Maybe[Linear[Coordinate]]:
"""
Returns linear component of the mix.
Time complexity:
``O(1)``
Memory complexity:
``O(1)``
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.linear == Segment(Point(6, 6), Point(6, 8))
True
"""
return self._linear
def distance_to(self, other: Geometry[Coordinate]) -> Scalar:
"""
Returns distance between the mix and the other geometry.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.distance_to(mix) == 0
True
"""
return non_negative_min(component.distance_to(other)
for component in self._components
if component is not self._context.empty)
def index(self) -> None:
"""
Pre-processes the mix to potentially improve queries.
Time complexity:
``O(elements_count * log elements_count)`` expected,
``O(elements_count ** 2)`` worst
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.index()
"""
if isinstance(self.discrete, Indexable):
self.discrete.index()
if isinstance(self.linear, Indexable):
self.linear.index()
if isinstance(self.shaped, Indexable):
self.shaped.index()
def locate(self, point: Point[Coordinate]) -> Location:
"""
Finds location of the point relative to the mix.
Time complexity:
``O(log elements_count)`` expected after indexing,
``O(elements_count)`` worst after indexing or without it
Memory complexity:
``O(1)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.locate(Point(0, 0)) is Location.BOUNDARY
True
>>> mix.locate(Point(1, 1)) is Location.INTERIOR
True
>>> mix.locate(Point(2, 2)) is Location.BOUNDARY
True
>>> mix.locate(Point(3, 3)) is Location.BOUNDARY
True
>>> mix.locate(Point(4, 3)) is Location.BOUNDARY
True
>>> mix.locate(Point(5, 2)) is Location.INTERIOR
True
>>> mix.locate(Point(6, 1)) is Location.BOUNDARY
True
>>> mix.locate(Point(7, 0)) is Location.EXTERIOR
True
"""
for candidate in self._components:
location = candidate.locate(point)
if location is not Location.EXTERIOR:
return location
return Location.EXTERIOR
def relate(self, other: Compound[Coordinate]) -> Relation:
"""
Finds relation between the mix and the other geometry.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.relate(mix) is Relation.EQUAL
True
"""
return (self._relate_discrete(other)
if isinstance(other, Multipoint)
else (self._relate_linear(other)
if isinstance(other, Linear)
else (self._relate_shaped(other)
if isinstance(other, Shaped)
else (self._relate_mix(other)
if isinstance(other, Mix)
else other.relate(self).complement))))
def rotate(self,
angle: Angle,
point: Optional[Point[Coordinate]] = None) -> 'Mix[Coordinate]':
"""
Rotates the mix by given angle around given point.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Angle, Contour, Mix, Multipoint, Point,
... Polygon, Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.rotate(Angle(1, 0)) == mix
True
>>> (mix.rotate(Angle(0, 1), Point(1, 1))
... == Mix(Multipoint([Point(-1, 3)]),
... Segment(Point(-4, 6), Point(-6, 6)),
... Polygon(Contour([Point(2, 0), Point(2, 6), Point(-4, 6),
... Point(-4, 0)]),
... [Contour([Point(0, 2), Point(-2, 2), Point(-2, 4),
... Point(0, 4)])])))
True
"""
return self._context.mix_cls(self.discrete.rotate(angle, point),
self.linear.rotate(angle, point),
self.shaped.rotate(angle, point))
def scale(self,
factor_x: Coordinate,
factor_y: Optional[Coordinate] = None) -> Compound[Coordinate]:
"""
Scales the mix by given factor.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.scale(1) == mix
True
>>> (mix.scale(1, 2)
... == Mix(Multipoint([Point(3, 6)]),
... Segment(Point(6, 12), Point(6, 16)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 12),
... Point(0, 12)]),
... [Contour([Point(2, 4), Point(2, 8), Point(4, 8),
... Point(4, 4)])])))
True
"""
if factor_y is None:
factor_y = factor_x
return (self._context.mix_cls(self.discrete.scale(factor_x, factor_y),
self.linear.scale(factor_x, factor_y),
self.shaped.scale(factor_x, factor_y))
if factor_x and factor_y
else ((self.discrete.scale(factor_x, factor_y)
| self.linear.scale(factor_x, factor_y)
| self.shaped.scale(factor_x, factor_y))
if factor_x or factor_y
else
self._context.multipoint_cls(
[self._context.point_cls(factor_x, factor_y)])))
def translate(self,
step_x: Coordinate,
step_y: Coordinate) -> 'Mix[Coordinate]':
"""
Translates the mix by given step.
Time complexity:
``O(elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> (mix.translate(1, 2)
... == Mix(Multipoint([Point(4, 5)]),
... Segment(Point(7, 8), Point(7, 10)),
... Polygon(Contour([Point(1, 2), Point(7, 2), Point(7, 8),
... Point(1, 8)]),
... [Contour([Point(3, 4), Point(3, 6), Point(5, 6),
... Point(5, 4)])])))
True
"""
return self._context.mix_cls(self.discrete.translate(step_x, step_y),
self.linear.translate(step_x, step_y),
self.shaped.translate(step_x, step_y))
def validate(self) -> None:
"""
Checks if the mix is valid.
Time complexity:
``O(elements_count * log elements_count)``
Memory complexity:
``O(elements_count)``
where
.. code-block:: python
elements_count = discrete_size + linear_size\
+ shaped_vertices_count
discrete_size = len(points)
linear_size = len(segments)
shaped_vertices_count = (sum(len(polygon.border.vertices)
+ sum(len(hole.vertices)
for hole in polygon.holes)
for polygon in polygons))
points = [] if self.discrete is EMPTY else self.discrete.points
segments = ([]
if self.linear is EMPTY
else ([self.linear]
if isinstance(self.linear, Segment)
else self.linear.segments))
polygons = ([]
if self.shaped is EMPTY
else (self.shaped.polygons
if isinstance(self.shaped, Multipolygon)
else [self.shaped]))
>>> from gon.base import (Contour, Mix, Multipoint, Point, Polygon,
... Segment)
>>> mix = Mix(Multipoint([Point(3, 3)]),
... Segment(Point(6, 6), Point(6, 8)),
... Polygon(Contour([Point(0, 0), Point(6, 0), Point(6, 6),
... Point(0, 6)]),
... [Contour([Point(2, 2), Point(2, 4), Point(4, 4),
... Point(4, 2)])]))
>>> mix.validate()
"""
if (sum(component is not self._context.empty for component in
self._components)
< MIN_MIX_NON_EMPTY_COMPONENTS):
raise ValueError('At least {count} components should not be empty.'
.format(count=MIN_MIX_NON_EMPTY_COMPONENTS))
for component in self._components:
component.validate()
if (not self.discrete.disjoint(self.linear)
or not self.discrete.disjoint(self.shaped)):
raise ValueError('Discrete component should be disjoint '
'from other components.')
shaped_linear_relation = self.shaped.relate(self.linear)
if shaped_linear_relation in (Relation.CROSS, Relation.COMPONENT,
Relation.ENCLOSED, Relation.WITHIN):
raise ValueError('Linear component should not {} shaped component.'
.format('cross'
if (shaped_linear_relation
is Relation.CROSS)
else 'be subset of'))
elif (shaped_linear_relation is Relation.TOUCH
and any(polygon.border.relate(self.linear)
in (Relation.OVERLAP, Relation.COMPOSITE)
or any(hole.relate(self.linear)
in (Relation.OVERLAP, Relation.COMPOSITE)
for hole in polygon.holes)
for polygon in (
self.shaped.polygons
if isinstance(self.shaped,
self._context.multipolygon_cls)
else [self.shaped]))):
raise ValueError('Linear component should not overlap '
'shaped component borders.')
def _relate_linear(self, other: Linear[Coordinate]) -> Relation:
if self.shaped is self._context.empty:
linear_relation = self.linear.relate(other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(other)
return (Relation.TOUCH
if discrete_relation is Relation.COMPOSITE
else discrete_relation)
elif linear_relation is Relation.COMPOSITE:
discrete_relation = self.discrete.relate(other)
return (linear_relation
if discrete_relation is linear_relation
else Relation.OVERLAP)
else:
return (Relation.COMPONENT
if linear_relation is Relation.EQUAL
else linear_relation)
else:
shaped_relation = self.shaped.relate(other)
if shaped_relation is Relation.DISJOINT:
linear_relation = self.linear.relate(other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(other)
return (Relation.TOUCH
if discrete_relation is Relation.COMPOSITE
else discrete_relation)
elif linear_relation in (Relation.TOUCH,
Relation.CROSS,
Relation.COMPONENT):
return linear_relation
else:
return (Relation.COMPONENT
if linear_relation is Relation.EQUAL
else Relation.TOUCH)
elif (shaped_relation is Relation.TOUCH
or shaped_relation is Relation.CROSS):
rest_other = other - self.shaped
linear_relation = self.linear.relate(rest_other)
return (Relation.COMPONENT
if (linear_relation is Relation.EQUAL
or linear_relation is Relation.COMPONENT)
else shaped_relation)
else:
return shaped_relation
def _relate_mix(self, other: 'Mix[Coordinate]') -> Relation:
if self.shaped is other.shaped is self._context.empty:
linear_components_relation = self.linear.relate(other.linear)
if linear_components_relation is Relation.DISJOINT:
return (linear_components_relation
if (self._relate_discrete(other.discrete)
is other._relate_discrete(self.discrete)
is linear_components_relation)
else Relation.TOUCH)
elif linear_components_relation is Relation.COMPOSITE:
discrete_relation = other._relate_discrete(self.discrete)
return (linear_components_relation
if discrete_relation is Relation.COMPONENT
else Relation.OVERLAP)
elif linear_components_relation is Relation.EQUAL:
other_discrete_relation = self.discrete.relate(other.discrete)
return (Relation.OVERLAP
if other_discrete_relation is Relation.DISJOINT
else other_discrete_relation)
elif linear_components_relation is Relation.COMPONENT:
other_discrete_relation = self._relate_discrete(other.discrete)
return (linear_components_relation
if other_discrete_relation is Relation.COMPONENT
else Relation.OVERLAP)
else:
return linear_components_relation
elif self.shaped is self._context.empty:
linear_relation = other._relate_linear(self.linear)
if linear_relation is Relation.CROSS:
return linear_relation
discrete_relation = other._relate_discrete(self.discrete)
if linear_relation is Relation.DISJOINT:
return (discrete_relation
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (Relation.TOUCH
if discrete_relation is Relation.COMPONENT
else Relation.CROSS))
elif linear_relation is Relation.TOUCH:
return (Relation.CROSS
if discrete_relation in (Relation.CROSS,
Relation.ENCLOSED,
Relation.WITHIN)
else linear_relation)
elif linear_relation is Relation.COMPONENT:
return (Relation.TOUCH
if discrete_relation is Relation.DISJOINT
else (discrete_relation
if (discrete_relation is Relation.TOUCH
or discrete_relation is Relation.CROSS)
else (Relation.COMPOSITE
if discrete_relation is Relation.COMPONENT
else Relation.ENCLOSES)))
else:
return (Relation.CROSS
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (Relation.COVER
if discrete_relation is Relation.WITHIN
else Relation.ENCLOSES))
elif other.shaped is self._context.empty:
other_linear_relation = self._relate_linear(other.linear)
if other_linear_relation is Relation.CROSS:
return other_linear_relation
other_discrete_relation = self._relate_discrete(other.discrete)
if other_linear_relation is Relation.DISJOINT:
return (other_discrete_relation
if other_discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (Relation.TOUCH
if other_discrete_relation is Relation.COMPONENT
else Relation.CROSS))
elif other_linear_relation is Relation.TOUCH:
return (Relation.CROSS
if other_discrete_relation in (Relation.CROSS,
Relation.ENCLOSED,
Relation.WITHIN)
else other_linear_relation)
elif other_linear_relation is Relation.COMPONENT:
return (Relation.TOUCH
if (other_discrete_relation is Relation.DISJOINT
or other_discrete_relation is Relation.TOUCH)
else (other_discrete_relation
if (other_discrete_relation is Relation.CROSS
or (other_discrete_relation
is Relation.COMPONENT))
else Relation.ENCLOSED))
elif other_linear_relation is Relation.ENCLOSED:
return (Relation.CROSS
if other_discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else other_linear_relation)
else:
return (Relation.CROSS
if other_discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (Relation.ENCLOSED
if other_discrete_relation is Relation.COMPONENT
else other_linear_relation))
shaped_components_relation = self.shaped.relate(other.shaped)
if (shaped_components_relation is Relation.DISJOINT
or shaped_components_relation is Relation.TOUCH):
if self.linear is other.linear is self._context.empty:
other_discrete_relation = self._relate_discrete(other.discrete)
if other_discrete_relation is Relation.CROSS:
return other_discrete_relation
elif (other_discrete_relation is Relation.ENCLOSED
or other_discrete_relation is Relation.WITHIN):
return Relation.CROSS
else:
discrete_relation = other._relate_discrete(self.discrete)
if (discrete_relation
is other_discrete_relation
is Relation.DISJOINT):
return shaped_components_relation
elif discrete_relation is Relation.CROSS:
return discrete_relation
elif (discrete_relation is Relation.ENCLOSED
or discrete_relation is Relation.WITHIN):
return Relation.CROSS
else:
return Relation.TOUCH
elif self.linear is self._context.empty:
other_linear_relation = self._relate_linear(other.linear)
if other_linear_relation is Relation.CROSS:
return other_linear_relation
elif (other_linear_relation is Relation.ENCLOSED
or other_linear_relation is Relation.WITHIN):
return Relation.CROSS
else:
discrete_relation = other._relate_discrete(self.discrete)
if discrete_relation is Relation.CROSS:
return discrete_relation
elif (discrete_relation is Relation.ENCLOSED
or discrete_relation is Relation.WITHIN):
return Relation.CROSS
elif other.discrete is self._context.empty:
return (shaped_components_relation
if (discrete_relation
is other_linear_relation
is Relation.DISJOINT)
else Relation.TOUCH)
else:
other_discrete_relation = self._relate_discrete(
other.discrete)
if other_discrete_relation is Relation.CROSS:
return other_discrete_relation
elif (other_discrete_relation is Relation.ENCLOSED
or other_discrete_relation is Relation.WITHIN):
return Relation.CROSS
elif (discrete_relation
is other_discrete_relation
is other_linear_relation
is Relation.DISJOINT):
return shaped_components_relation
else:
return Relation.TOUCH
elif other.linear is self._context.empty:
linear_relation = other._relate_linear(self.linear)
if linear_relation is Relation.CROSS:
return linear_relation
elif (linear_relation is Relation.ENCLOSED
or linear_relation is Relation.WITHIN):
return Relation.CROSS
else:
other_discrete_relation = self._relate_discrete(
other.discrete)
if other_discrete_relation is Relation.CROSS:
return other_discrete_relation
elif (other_discrete_relation is Relation.ENCLOSED
or other_discrete_relation is Relation.WITHIN):
return Relation.CROSS
elif self.discrete is self._context.empty:
return (shaped_components_relation
if (linear_relation
is other_discrete_relation
is Relation.DISJOINT)
else Relation.TOUCH)
else:
discrete_relation = other._relate_discrete(
self.discrete)
if discrete_relation is Relation.CROSS:
return discrete_relation
elif (discrete_relation is Relation.ENCLOSED
or discrete_relation is Relation.WITHIN):
return Relation.CROSS
elif (discrete_relation
is linear_relation
is other_discrete_relation
is Relation.DISJOINT):
return shaped_components_relation
else:
return Relation.TOUCH
else:
other_linear_relation = self._relate_linear(other.linear)
if other_linear_relation is Relation.CROSS:
return other_linear_relation
elif (other_linear_relation is Relation.ENCLOSED
or other_linear_relation is Relation.WITHIN):
return Relation.CROSS
else:
linear_relation = other._relate_linear(self.linear)
if linear_relation is Relation.CROSS:
return linear_relation
elif (linear_relation is Relation.ENCLOSED
or linear_relation is Relation.WITHIN):
return Relation.CROSS
elif self.discrete is self._context.empty:
other_discrete_relation = self._relate_discrete(
other.discrete)
return (other_discrete_relation
if other_discrete_relation is Relation.CROSS
else
(Relation.CROSS
if (other_discrete_relation
is Relation.ENCLOSED
or other_discrete_relation
is Relation.WITHIN)
else (shaped_components_relation
if (other_discrete_relation
is linear_relation
is other_linear_relation
is Relation.DISJOINT)
else Relation.TOUCH)))
elif other.discrete is self._context.empty:
discrete_relation = other._relate_discrete(
self.discrete)
return (discrete_relation
if discrete_relation is Relation.CROSS
else
(Relation.CROSS
if (discrete_relation is Relation.ENCLOSED
or discrete_relation is Relation.WITHIN)
else (shaped_components_relation
if (discrete_relation
is linear_relation
is other_linear_relation
is Relation.DISJOINT)
else Relation.TOUCH)))
else:
other_discrete_relation = self._relate_discrete(
other.discrete)
if other_discrete_relation is Relation.CROSS:
return other_discrete_relation
elif (other_discrete_relation is Relation.ENCLOSED
or other_discrete_relation is Relation.WITHIN):
return Relation.CROSS
else:
discrete_relation = other._relate_discrete(
self.discrete)
return (discrete_relation
if discrete_relation is Relation.CROSS
else (Relation.CROSS
if (discrete_relation
is Relation.ENCLOSED
or discrete_relation
is Relation.WITHIN)
else
(shaped_components_relation
if (discrete_relation
is linear_relation
is other_discrete_relation
is other_linear_relation
is Relation.DISJOINT)
else Relation.TOUCH)))
elif shaped_components_relation in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE):
if self.linear is self._context.empty:
discrete_relation = (other._relate_discrete(self.discrete)
.complement)
return (shaped_components_relation
if discrete_relation is shaped_components_relation
else (Relation.ENCLOSES
if discrete_relation in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE)
else Relation.OVERLAP))
else:
linear_relation = other._relate_linear(self.linear).complement
if linear_relation is shaped_components_relation:
if self.discrete is self._context.empty:
return shaped_components_relation
else:
discrete_relation = other._relate_discrete(
self.discrete).complement
return (shaped_components_relation
if (discrete_relation
is shaped_components_relation)
else
(Relation.ENCLOSES
if discrete_relation in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE)
else Relation.OVERLAP))
elif linear_relation in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE):
if self.discrete is self._context.empty:
return Relation.ENCLOSES
else:
discrete_relation = other._relate_discrete(
self.discrete).complement
return (Relation.ENCLOSES
if discrete_relation in (Relation.COVER,
Relation.ENCLOSES,
Relation.COMPOSITE)
else Relation.OVERLAP)
else:
return Relation.OVERLAP
elif shaped_components_relation is Relation.EQUAL:
linear_components_relation = self.linear.relate(other.linear)
if self.linear is other.linear is self._context.empty:
discrete_components_relation = self.discrete.relate(
other.discrete)
return (
shaped_components_relation
if (self.discrete is other.discrete is self._context.empty
or discrete_components_relation is Relation.EQUAL)
else
(discrete_components_relation
if (discrete_components_relation is Relation.COMPOSITE
or discrete_components_relation is Relation.COMPONENT)
else Relation.OVERLAP))
elif self.linear is self._context.empty:
discrete_components_relation = other._relate_discrete(
self.discrete)
return (
Relation.COMPOSITE
if (discrete_components_relation is Relation.EQUAL
or discrete_components_relation is Relation.COMPONENT)
else Relation.OVERLAP)
elif other.linear is self._context.empty:
discrete_components_relation = self._relate_discrete(
other.discrete)
return (
Relation.COMPONENT
if (discrete_components_relation is Relation.EQUAL
or discrete_components_relation is Relation.COMPONENT)
else Relation.OVERLAP)
elif linear_components_relation is Relation.COMPOSITE:
discrete_components_relation = other._relate_discrete(
self.discrete)
return (
linear_components_relation
if (self.discrete is self._context.empty
or discrete_components_relation is Relation.EQUAL
or discrete_components_relation is Relation.COMPONENT)
else Relation.OVERLAP)
elif linear_components_relation is Relation.EQUAL:
discrete_components_relation = self.discrete.relate(
other.discrete)
return (
shaped_components_relation
if (self.discrete is other.discrete is self._context.empty
or discrete_components_relation is Relation.EQUAL)
else
(Relation.COMPOSITE
if self.discrete is self._context.empty
else
(Relation.COMPONENT
if other.discrete is self._context.empty
else
(discrete_components_relation
if
(discrete_components_relation is Relation.COMPONENT
or discrete_components_relation is Relation.COMPOSITE)
else Relation.OVERLAP))))
elif linear_components_relation is Relation.COMPONENT:
discrete_components_relation = self._relate_discrete(
other.discrete)
return (
linear_components_relation
if (other.discrete is self._context.empty
or discrete_components_relation is Relation.EQUAL
or discrete_components_relation is Relation.COMPONENT)
else Relation.OVERLAP)
else:
return Relation.OVERLAP
elif shaped_components_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN):
if other.linear is self._context.empty:
discrete_relation = self._relate_discrete(other.discrete)
return (shaped_components_relation
if discrete_relation is shaped_components_relation
else (Relation.ENCLOSED
if discrete_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN)
else Relation.OVERLAP))
else:
linear_relation = self._relate_linear(other.linear)
if linear_relation is shaped_components_relation:
if other.discrete is self._context.empty:
return shaped_components_relation
else:
discrete_relation = self._relate_discrete(
other.discrete)
return (shaped_components_relation
if (discrete_relation
is shaped_components_relation)
else
(Relation.ENCLOSED
if discrete_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN)
else Relation.OVERLAP))
elif linear_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN):
if other.discrete is self._context.empty:
return Relation.ENCLOSED
else:
discrete_relation = self._relate_discrete(
other.discrete)
return (Relation.ENCLOSED
if discrete_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN)
else Relation.OVERLAP)
else:
return Relation.OVERLAP
else:
return shaped_components_relation
def _relate_discrete(self, other: Multipoint[Coordinate]) -> Relation:
if self.shaped is self._context.empty:
linear_relation = self.linear.relate(other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(other)
return (discrete_relation
if discrete_relation is Relation.DISJOINT
else (Relation.COMPONENT
if (discrete_relation is Relation.COMPONENT
or discrete_relation is Relation.EQUAL)
else Relation.TOUCH))
elif linear_relation is Relation.TOUCH:
rest_other = other - self.linear
discrete_relation = self.discrete.relate(rest_other)
return (Relation.COMPONENT
if (discrete_relation is Relation.EQUAL
or discrete_relation is Relation.COMPONENT)
else linear_relation)
else:
return linear_relation
else:
shaped_relation = self.shaped.relate(other)
if shaped_relation in (Relation.COMPONENT,
Relation.ENCLOSED,
Relation.WITHIN):
return shaped_relation
elif (shaped_relation is Relation.TOUCH
or shaped_relation is Relation.CROSS):
rest_other = other - self.shaped
if self.linear is self._context.empty:
discrete_relation = self.discrete.relate(rest_other)
return (Relation.COMPONENT
if (discrete_relation is Relation.EQUAL
or discrete_relation is Relation.COMPONENT)
else shaped_relation)
else:
linear_relation = self.linear.relate(rest_other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(rest_other)
return ((Relation.COMPONENT
if shaped_relation is Relation.TOUCH
else Relation.ENCLOSED)
if (discrete_relation is Relation.COMPONENT
or discrete_relation is Relation.EQUAL)
else shaped_relation)
elif linear_relation is Relation.TOUCH:
rest_other -= self.linear
discrete_relation = self.discrete.relate(rest_other)
return (Relation.COMPONENT
if (discrete_relation is Relation.COMPONENT
or discrete_relation is Relation.EQUAL)
else shaped_relation)
else:
return (Relation.COMPONENT
if shaped_relation is Relation.TOUCH
else Relation.ENCLOSED)
else:
linear_relation = self.linear.relate(other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(other)
return (shaped_relation
if discrete_relation is Relation.DISJOINT
else (Relation.COMPONENT
if (discrete_relation is Relation.COMPONENT
or discrete_relation is Relation.EQUAL)
else Relation.TOUCH))
elif linear_relation is Relation.TOUCH:
rest_other = other - self.linear
discrete_relation = self.discrete.relate(rest_other)
return (shaped_relation
if discrete_relation is Relation.DISJOINT
else (Relation.COMPONENT
if (discrete_relation is Relation.COMPONENT
or discrete_relation is Relation.EQUAL)
else Relation.TOUCH))
else:
return linear_relation
def _relate_shaped(self, other: Shaped[Coordinate]) -> Relation:
if self.shaped is self._context.empty:
linear_relation = self.linear.relate(other)
if (linear_relation is Relation.DISJOINT
or linear_relation is Relation.TOUCH):
discrete_relation = self.discrete.relate(other)
return (linear_relation
if discrete_relation is Relation.DISJOINT
else (discrete_relation
if (discrete_relation is Relation.TOUCH
or discrete_relation is Relation.CROSS)
else (Relation.TOUCH
if (discrete_relation
is Relation.COMPOSITE)
else Relation.CROSS)))
elif (linear_relation is Relation.COVER
or linear_relation is Relation.ENCLOSES):
discrete_relation = self.discrete.relate(other)
return (Relation.CROSS
if (discrete_relation is Relation.DISJOINT
or discrete_relation is Relation.TOUCH)
else (discrete_relation
if (discrete_relation is linear_relation
or discrete_relation is Relation.CROSS)
else Relation.ENCLOSES))
elif linear_relation is Relation.COMPOSITE:
discrete_relation = self.discrete.relate(other)
return (Relation.TOUCH
if discrete_relation is Relation.DISJOINT
else (discrete_relation
if (discrete_relation is Relation.TOUCH
or discrete_relation is Relation.CROSS)
else (linear_relation
if discrete_relation is linear_relation
else Relation.CROSS)))
else:
return linear_relation
else:
shaped_relation = self.shaped.relate(other)
if shaped_relation is Relation.DISJOINT:
linear_relation = self.linear.relate(other)
if linear_relation is Relation.DISJOINT:
discrete_relation = self.discrete.relate(other)
return (discrete_relation
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (Relation.TOUCH
if discrete_relation is Relation.COMPOSITE
else Relation.CROSS))
elif (linear_relation is Relation.TOUCH
or linear_relation is Relation.COMPOSITE):
discrete_relation = self.discrete.relate(other)
return (Relation.TOUCH
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.COMPOSITE)
else Relation.CROSS)
else:
return Relation.CROSS
elif shaped_relation is Relation.TOUCH:
linear_relation = self.linear.relate(other)
if linear_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.COMPOSITE):
discrete_relation = self.discrete.relate(other)
return (shaped_relation
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.COMPOSITE)
else Relation.CROSS)
else:
return Relation.CROSS
elif (shaped_relation is Relation.COVER
or shaped_relation is Relation.COMPOSITE):
if self.linear is self._context.empty:
discrete_relation = self.discrete.relate(other)
return (Relation.OVERLAP
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (shaped_relation
if discrete_relation is shaped_relation
else Relation.ENCLOSES))
else:
linear_relation = self.linear.relate(other)
if linear_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS):
return Relation.OVERLAP
elif self.discrete is self._context.empty:
return (shaped_relation
if linear_relation is shaped_relation
else Relation.ENCLOSES)
else:
discrete_relation = self.discrete.relate(other)
return (Relation.OVERLAP
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else (shaped_relation
if (discrete_relation
is linear_relation
is shaped_relation)
else Relation.ENCLOSES))
elif shaped_relation is Relation.ENCLOSES:
if self.linear is self._context.empty:
discrete_relation = self.discrete.relate(other)
return (Relation.OVERLAP
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else Relation.ENCLOSES)
else:
linear_relation = self.linear.relate(other)
if linear_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS):
return Relation.OVERLAP
elif self.discrete is self._context.empty:
return shaped_relation
else:
discrete_relation = self.discrete.relate(other)
return (Relation.OVERLAP
if discrete_relation in (Relation.DISJOINT,
Relation.TOUCH,
Relation.CROSS)
else Relation.ENCLOSES)
else:
return (Relation.COMPONENT
if shaped_relation is Relation.EQUAL
else shaped_relation)
|
11568548
|
from setuptools import setup, find_packages
import sys
import pathlib
import platform
parent = pathlib.Path(__file__).parent
# get the readme for use in our long description
readme = (parent / "README.md").read_text()
python_version = platform.python_version().rsplit('.', maxsplit=1)[0]
mac_v, _, _ = platform.mac_ver()
if mac_v != '':
mac_v_split = mac_v.split('.')
mac_major_version = mac_v_split[0]
mac_minor_version = mac_v_split[1]
mac_version = '.'.join([mac_major_version, mac_minor_version])
else:
mac_major_version = None
mac_version = None
requirements = [
"pillow~=8.3.1",
"requests"
]
tf_req = "tensorflow~=2.5.0;platform_machine!='armv7l'"
onnx_req = "onnxruntime~=1.8.1;platform_machine!='armv7l'"
tflite_req = None
# get the right TF Lite runtime packages based on OS and python version: https://www.tensorflow.org/lite/guide/python#install_just_the_tensorflow_lite_interpreter
tflite_python = None
tflite_platform = None
tflite_machine = None
# get the right python string for the version
if python_version == '3.6':
tflite_python = 'cp36-cp36m'
elif python_version == '3.7':
tflite_python = 'cp37-cp37m'
elif python_version == '3.8':
tflite_python = 'cp38-cp38'
elif python_version == '3.9':
tflite_python = 'cp39-cp39'
# get the right platform and machine strings for the tflite_runtime wheel URL
sys_platform = sys.platform.lower()
machine = platform.machine().lower()
if sys_platform == 'linux':
tflite_platform = sys_platform
tflite_machine = machine
elif sys_platform == 'win32':
tflite_platform = 'win'
tflite_machine = machine
elif sys_platform == 'darwin' and machine == 'x86_64':
if mac_version == '10.15':
tflite_platform = 'macosx_10_15'
elif mac_major_version == '11':
tflite_platform = 'macosx_11_0'
tflite_machine = machine
# add it to the requirements, or print the location to find the version to install
if tflite_python and tflite_platform and tflite_machine:
tflite_req = f"tflite_runtime @ https://github.com/google-coral/pycoral/releases/download/v2.0.0/tflite_runtime-2.5.0.post1-{tflite_python}-{tflite_platform}_{tflite_machine}.whl"
else:
print(
f"Couldn't find tflite_runtime for your platform {sys.platform}, machine {platform.machine()}, python version {python_version}, and mac version {mac_version}. If you are trying to use TensorFlow Lite, please see the install guide for the right version: https://www.tensorflow.org/lite/guide/python#install_just_the_tensorflow_lite_interpreter"
)
setup(
name="lobe",
version="0.5.0",
description="Lobe Python SDK",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/lobe/lobe-python",
license="MIT",
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=requirements,
extras_require={
'all': [tf_req, onnx_req],
'tf': [tf_req],
'onnx': [onnx_req],
#'tflite': [tflite_req],
}
)
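# Illustrative install commands for the extras declared above (a sketch, not part
# of the original file; the package name and extra names are taken from this setup()):
#
#     pip install lobe            # base install (Pillow + requests)
#     pip install lobe[tf]        # add the TensorFlow backend
#     pip install lobe[onnx]      # add the ONNX Runtime backend
#     pip install lobe[all]       # add both backends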
|
11568560
|
import subprocess
import sys
import os
import configparser
from utils import sync_process, get_machine_list
if __name__ == "__main__":
pool = list()
if len(sys.argv) < 2:
print("Usage: python cleanup.py <bin>")
sys.exit(-1)
binName = sys.argv[1]
print("Clean up " + binName)
config = configparser.ConfigParser()
config.read("conf/params.ini")
workdir = config['DEFAULT']['workdir']
hostPath = os.path.join(os.getcwd(), 'conf', 'hosts')
num_machines = int(config['DEFAULT']['number_machines'])
machines = get_machine_list(hostPath)
if num_machines > len(machines):
print("Specify too many number of machines")
sys.exit(-1)
for i in range(num_machines):
command = ["ssh", machines[i], "pkill " + binName]
print(command)
pool.append(subprocess.Popen(command))
sync_process(pool)
for i in range(num_machines):
command = ["ssh", machines[i], "rm -rf " + workdir + "/temp*"]
print(command)
pool.append(subprocess.Popen(command))
sync_process(pool)
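# Illustrative invocation and configuration (a sketch inferred from the code above;
# the exact host-file format depends on utils.get_machine_list):
#
#     python cleanup.py my_binary
#
# with conf/params.ini providing at least the keys read above, e.g.:
#
#     [DEFAULT]
#     workdir = /path/to/workdir
#     number_machines = 4
#
# and conf/hosts presumably listing one SSH-reachable hostname per machine.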
|
11568564
|
import unittest
import feed
import entities
import util
class TestStopTime(unittest.TestCase):
def test_point(self):
pass
def test_stops(self):
pass
|
11568568
|
from swissdutch.constants import FloatStatus, Colour, ColourPref
class Player:
def __init__(self, name, rating, title=None, pairing_no=None,
score=0, float_status=FloatStatus.none, opponents=(),
colour_hist=()):
self._name = name
self._rating = rating
self._title = title
self._pairing_no = pairing_no
self._score = score
self._float_status = float_status
self._opponents = opponents
self._colour_hist = colour_hist
def __eq__(self, other):
return (self._name == other.name
and self._rating == other.rating
and self._title == other.title
and self._pairing_no == other.pairing_no
and self._score == other.score
and self._float_status == other.float_status
and self._opponents == other.opponents
and self._colour_hist == other.colour_hist
if isinstance(other, Player) else NotImplemented)
def __repr__(self):
return ('sn:{0}, r:{1}, t:{2}, pn:{3}, s:{4}, f:{5}, op:{6}, ch:{7}'
.format(self._name, self._rating, self._title, self._pairing_no,
self._score, self._float_status, self._opponents, self._colour_hist))
def __hash__(self):
return hash(repr(self))
@property
def name(self):
return self._name
@property
def rating(self):
return self._rating
@property
def title(self):
return self._title
@property
def pairing_no(self):
return self._pairing_no
@pairing_no.setter
def pairing_no(self, n):
self._pairing_no = n
@property
def score(self):
return self._score
@property
def float_status(self):
return self._float_status
@property
def colour_hist(self):
return self._colour_hist
@property
def opponents(self):
return self._opponents
@property
def colour_preference(self):
cd = sum(self._colour_hist)
cd2 = sum([c for c in self._colour_hist if c != Colour.none][-2:])
cp = max(cd, cd2)
return ColourPref(cp)
@property
def expected_colour(self):
col = Colour.none
pref = self.colour_preference
if pref > 0:
col = Colour.black
elif pref < 0:
col = Colour.white
else:
last_col = next((c for c in reversed(self._colour_hist)
if c != Colour.none), Colour.none)
if last_col == Colour.white:
col = Colour.black
elif last_col == Colour.black:
col = Colour.white
return col
def pair_both(self, opponent, colour):
opp_col = Colour.black if colour == Colour.white else Colour.white
self.pair(opponent, colour)
opponent.pair(self, opp_col)
def pair(self, opponent, colour):
self._opponents += (opponent.pairing_no,)
self._colour_hist += (colour,)
float_stat = FloatStatus.none
if opponent.score > self._score:
float_stat = FloatStatus.up
elif opponent.score < self._score:
float_stat = FloatStatus.down
self._set_float_status(float_stat)
def bye(self, bye_value):
self._opponents += (0,)
self._colour_hist += (Colour.none,)
self._float_status = FloatStatus.down
self._score += bye_value
def _set_float_status(self, float_status):
if float_status != FloatStatus.none:
self._float_status = float_status
return
if self._float_status < 0:
self._float_status += 1
elif self._float_status > 0:
self._float_status -= 1
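# Minimal usage sketch (illustrative only; the names and ratings are made up, and only
# the Player API defined above plus the imported Colour enum are used):
#
#     white = Player('Alice', rating=2100, pairing_no=1)
#     black = Player('Bob', rating=1950, pairing_no=2)
#     white.pair_both(black, Colour.white)
#     white.opponents        # -> (2,)
#     black.colour_hist      # -> (Colour.black,)
#     black.bye(1)           # unpaired round: score increases, player floats down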
|
11568633
|
import os
import random
from lft.app.vote import DefaultVote
from lft.consensus.round import RoundMessages
def test_vote():
round_messages = RoundMessages()
assert not round_messages.votes
vote = _random_vote()
round_messages.add_vote(vote)
assert round_messages.votes
assert vote.id in round_messages.get_votes(vote.data_id)
assert vote.id not in round_messages.get_votes(os.urandom(16))
assert vote in round_messages
assert not (_random_vote() in round_messages)
assert vote.id not in round_messages
assert not(vote.id in round_messages)
for vote_id, vote in round_messages.votes:
assert vote_id == vote.id
assert vote is vote
def _random_vote():
return DefaultVote(id_=os.urandom(16),
data_id=os.urandom(16),
commit_id=os.urandom(16),
voter_id=os.urandom(16),
epoch_num=random.randint(0, 100),
round_num=random.randint(0, 100))
|
11568649
|
TIMEOPT_CONFIG_FILE = "cfg_softConstraints_talos.yaml"
from .common_talos import *
SCRIPT_PATH = "memmo"
ENV_NAME = "multicontact/ground"
kp_am = 1.
w_am = 0.5
DURATION_SS = 1.2
EFF_T_PREDEF = 0.2
EFF_T_DELAY = 0.05
p_max = 0.07
FEET_MAX_VEL = 100.
FEET_MAX_ANG_VEL = 100.
|
11568659
|
from sightseer import Sightseer
from sightseer.zoo import YOLOv3Client
yolo = YOLOv3Client()
yolo.load_model()
ss = Sightseer()
frames = ss.load_vidsource("./test_data/img/london.mp4")
print(frames.shape)
preds, det_frames = yolo.framewise_predict(frames, stride=10, verbose=False)
ss.render_footage(det_frames)
|
11568662
|
import unittest
import os
from pymongo import MongoClient, GEO2D, DESCENDING
from bson import json_util
from conversiontools.csv2geojson import *
from conversiontools.kml2geojson import *
from conversiontools.shp2geojson import *
from conversiontools.validategeojson import *
from conversiontools.geojson2mongo import *
from conversiontools.geojsonurl2mongo import *
class conversionTest(unittest.TestCase):
"""TestCase for the API conversion module"""
def setUp(self):
self.data_input_folder = "data-load/example/"
self.data_output_folder = "data-outputs/"
self.database = "testdatabase"
self.client = MongoClient('localhost', 27017)
self.db = self.client[self.database]
os.chdir("../conversiontools")
def validategeojson_test(self):
""" Testing that GeoJSON validation functions correctly """
valid_input_geojson = self.data_input_folder + "cupcakes.geojson"
invalid_input_geojson = self.data_input_folder + "cupcakesmalformed.geojson"
self.assertTrue(validate_geojson_from_file(valid_input_geojson))
self.assertFalse(validate_geojson_from_file(invalid_input_geojson))
def csv2geojson_test(self):
""" Testing that CSV correctly translates to GeoJSON """
input_csv = self.data_input_folder + "significantmonth.csv"
output_geojson = self.data_output_folder + "significantmonth.geojson"
csv_to_geojson(input_csv, output_geojson)
self.assertTrue(os.path.exists(output_geojson) == 1, msg="Current working directory is: {0}".format(os.getcwd()))
num_lines = sum(1 for line in open(output_geojson))
self.assertTrue(num_lines == 369)
self.assertTrue(validate_geojson_from_file(output_geojson))
def shp2geojson_test(self):
""" Testing that Shapefile correctly translates to GeoJSON """
input_shp = self.data_input_folder + "London Shapefile/Greater_London_Const_Region.shp"
output_geojson = self.data_output_folder + "Greater_London_Const_Region"
shapefile_to_geojson(input_shp, output_geojson)
output_geojson_suffixed = output_geojson + ".geojson"
self.assertTrue(os.path.exists(output_geojson_suffixed) == 1, msg="Current working directory is: {0}".format(os.getcwd()) )
num_lines = sum(1 for line in open(output_geojson_suffixed))
self.assertTrue(num_lines == 136195)
self.assertTrue(validate_geojson_from_file(output_geojson_suffixed))
def kml2geojson_test(self):
""" Testing that CSV correctly translates to GeoJSON """
input_kml = self.data_input_folder + "placemark.kml"
output_geojson = self.data_output_folder + "placemark.geojson"
kml_to_geojson(input_kml, output_geojson)
num_lines = sum(1 for line in open(output_geojson))
self.assertTrue(os.path.exists(output_geojson) == 1, msg="Current working directory is: {0}".format(os.getcwd()))
self.assertTrue(num_lines == 19)
self.assertTrue(validate_geojson_from_file(output_geojson))
def geojson2mongo_test(self):
""" Testing that GeoJSON correctly gets put into the MongoDB instance database """
database = self.database
db = self.db
input_geojson = self.data_input_folder + "cupcakes.geojson"
self.assertTrue(validate_geojson_from_file(input_geojson))
feature_collection_to_mongodb(database, input_geojson, "testcupcakes")
feature_collection = db["testcupcakes"].find({ })
count = feature_collection.count()
test_collection = json_util.dumps(feature_collection)
self.assertTrue(count == 74, msg="Record count is " + str(count))
def geojsonurl2mongo_test(self):
""" Testing that GeoJSON correctly gets put into the MongoDB instance database """
database = self.database
db = self.db
input_geojson_url = "https://raw.githubusercontent.com/lyzidiamond/learn-geojson/master/geojson/cupcakes.geojson"
feature_collection_endpoint_to_mongodb(database, input_geojson_url, "testurlcupcakes")
feature_collection = db["testurlcupcakes"].find({ })
count = feature_collection.count()
test_collection = json_util.dumps(feature_collection)
self.assertTrue(count == 74, msg="Record count is " + str(count))
def tearDown(self):
self.db["testcupcakes"].drop();
self.db["testurlcupcakes"].drop();
if __name__ == '__main__':
unittest.main()
|
11568674
|
from __future__ import absolute_import
import unittest
import numpy as np
from sklearn.datasets import load_iris as load_data
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from scikitplot.cluster import plot_elbow_curve
class TestPlotElbow(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.X, self.y = load_data(return_X_y=True)
p = np.random.permutation(len(self.X))
self.X, self.y = self.X[p], self.y[p]
def tearDown(self):
plt.close("all")
def test_n_clusters_in_clf(self):
np.random.seed(0)
class DummyClusterer:
def __init__(self):
pass
def fit(self):
pass
def fit_predict(self):
pass
clf = DummyClusterer()
self.assertRaises(TypeError, plot_elbow_curve, clf, self.X)
def test_cluster_ranges(self):
np.random.seed(0)
clf = KMeans()
plot_elbow_curve(clf, self.X, cluster_ranges=range(1, 10))
def test_ax(self):
np.random.seed(0)
clf = KMeans()
fig, ax = plt.subplots(1, 1)
out_ax = plot_elbow_curve(clf, self.X)
assert ax is not out_ax
out_ax = plot_elbow_curve(clf, self.X, ax=ax)
assert ax is out_ax
def test_n_jobs(self):
np.random.seed(0)
clf = KMeans()
plot_elbow_curve(clf, self.X, n_jobs=2)
def test_show_cluster_time(self):
np.random.seed(0)
clf = KMeans()
plot_elbow_curve(clf, self.X, show_cluster_time=False)
|
11568675
|
from __future__ import print_function
import sys
import glob
from vispy import app, visuals, scene
from vispy.scene import ViewBox
from vispy.scene.visuals import Markers, Line, XYZAxis
import vispy
vispy.app.use_app(backend_name="PyQt5", call_reuse=True)
import numpy as np
import mesh_lib
class canvasCreater:
def load_config(self):
"""
Reads desired measurements from "config.txt" file.
Structure described in file
"""
measurements = []
with open("config.txt", "r") as f:
for line in f:
if line.startswith("#"):
continue
if len(line) < 5:
continue
measurements.append(line.split())
return measurements
def debug(self, measurements, file_path, add_geodesic):
# Model object
model = mesh_lib.Model(file_path)
model_point_coordinates = model.get_coords()
canvas = scene.SceneCanvas(keys='interactive')
view = canvas.central_widget.add_view()
# all model - GREEN
points = Markers(parent=view.scene)
points.set_data(
pos=model_point_coordinates,
edge_color=None,
face_color=(0, 1, 0, .3),
size=5
)
for m in measurements: # measurements in config file
# parsing key vertexes and description text
point_1 = int(m[1]) + 1
point_2 = int(m[2]) + 1
point_3 = int(m[3]) + 1
text = " ".join(m[4:])
# coordinates of key vertexes
key_coords = model.get_coords((point_1, point_2, point_3))
# plane that goes through all three key vertexes
plane = mesh_lib.get_plane(key_coords)
# key vertexes WHITE
points = Markers()
points.set_data(
pos=key_coords,
edge_color=None,
face_color=(1, 1, 1, 1),
size=5
)
# "C" - circumference
if m[0] == "C":
# 3 segments of path (indexes)
p_1 = model.get_path(point_1, point_2)
p_2 = model.get_path(point_2, point_3)
p_3 = model.get_path(point_3, point_1)
# full path
path = p_1 + p_2[1:] + p_3[1:]
# "L" - Length
if m[0] == "L":
# 2 segments of path (indexes)
p_1 = model.get_path(point_1, point_2)
p_2 = model.get_path(point_2, point_3)
# full path
path = p_1 + p_2[1:]
# geodesic
geodesic_coordinates = model.get_coords(path)
geodesic_length = mesh_lib.get_length(geodesic_coordinates)
print("{0}:".format(text))
print(
" Geodesic distance: {0} cm".format(
round(100 * geodesic_length, 3)
)
)
if add_geodesic: # if debug_full
# geodesic line - RED
line = Line(parent=view.scene)
line.set_data(
pos=geodesic_coordinates,
color=(1, 0, 0, 1)
)
# approximated
flattened_coordinates = mesh_lib.get_projections(plane, geodesic_coordinates)
flattened_length = mesh_lib.get_length(flattened_coordinates)
print(
" Approximated distance: {0} cm".format(
round(100 * flattened_length, 3)
)
)
# flattened line - BLUE
line = Line(parent=view.scene)
line.set_data(
pos=flattened_coordinates,
color=(0, 0, 1, 1)
)
view.camera = 'turntable'
view.camera.fov = 45
view.camera.distance = 3
axis = XYZAxis(parent=view.scene)
return canvas
# app.run()
def calculate_all(self, measurements, models_folder, results_folder): # AAAAAAAllll of this
# for each *.obj file in models_folder
for model_path in glob.glob("{0}/*.obj".format(models_folder)):
# creating model object
model = mesh_lib.Model(model_path)
# parsing the filenames
model_name = model_path.split("\\")[1].split(".")[0]
results_file = "{0}/{1}.txt".format(results_folder, model_name)
# forming the results file
with open(results_file, "w") as fout:
### Segment almost identical to debug function
for m in measurements:
point_1 = int(m[1]) + 1
point_2 = int(m[2]) + 1
point_3 = int(m[3]) + 1
text = " ".join(m[4:])
key_coords = model.get_coords((point_1, point_2, point_3))
plane = mesh_lib.get_plane(key_coords)
if m[0] == "C":
p_1 = model.get_path(point_1, point_2)
p_2 = model.get_path(point_2, point_3)
p_3 = model.get_path(point_3, point_1)
path = p_1 + p_2[1:] + p_3[1:]
if m[0] == "L":
p_1 = model.get_path(point_1, point_2)
p_2 = model.get_path(point_2, point_3)
path = p_1 + p_2[1:]
geodesic_coordinates = model.get_coords(path)
flattened_coordinates = mesh_lib.get_projections(
plane,
geodesic_coordinates
)
flattened_length = mesh_lib.get_length(flattened_coordinates)
# output that is going to results file is formed here:
output = "{0}: {1} cm.\n".format(text, round(100 * flattened_length, 3))
# writing to file
fout.write(output)
# progress displayed by print messages
print("{0}.txt finished.".format(model_name))
if __name__ == "__main__":
# getting info from config file
canvasObj = canvasCreater()
measurements = canvasObj.load_config()
# folder names
m_f = "models"
r_f = "results"
# debug or all file calculations
if len(sys.argv) == 3 and sys.argv[1] == "debug":
canvasObj.debug(measurements, "{0}/{1}".format(m_f, sys.argv[2]), False)
elif len(sys.argv) == 3 and sys.argv[1] == "debug_full":
canvasObj.debug(measurements, "{0}/{1}".format(m_f, sys.argv[2]), True)
elif len(sys.argv) == 1:
canvasObj.calculate_all(measurements, m_f, r_f)
else:
print("Wrong input format, refer to instruction manual.")
|
11568684
|
from pagarme.resources import handler_request
from pagarme.resources.routes import bulk_anticipation_routes
def cancel(recipient_id, bulk_anticipation_id):
return \
handler_request.post(bulk_anticipation_routes.CANCEL_ANTICIPATION.format(recipient_id, bulk_anticipation_id))
def confirm(recipient_id, bulk_anticipation_id):
return \
handler_request.post(bulk_anticipation_routes.CONFIRM_ANTICIPATION.format(recipient_id, bulk_anticipation_id))
def create(recipient_id, dictionary):
return handler_request.post(bulk_anticipation_routes.BASE_URL.format(recipient_id), dictionary)
def delete(recipient_id, bulk_anticipation_id):
return \
handler_request.delete(bulk_anticipation_routes.DELETE_ANTICIPATION.format(recipient_id, bulk_anticipation_id))
def find_all(recipient_id):
return handler_request.get(bulk_anticipation_routes.GET_ALL_ANTICIPATIONS.format(recipient_id))
def limits(recipient_id, dictionary):
return handler_request.get(bulk_anticipation_routes.GET_ANTICIPATION_LIMITS.format(recipient_id), dictionary)
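# Illustrative usage sketch (recipient id, anticipation id and payload below are hypothetical,
# not part of this module):
#   available = limits('re_xxxxxxxx', {'payment_date': 1596081600000})
#   anticipation = create('re_xxxxxxxx', {'timeframe': 'start',
#                                         'payment_date': 1596081600000,
#                                         'requested_amount': 10000})
#   confirm('re_xxxxxxxx', anticipation['id'])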
|
11568810
|
from ipaddress import ip_address
import pytest
from ocflib.infra.net import ipv4_to_ipv6
from ocflib.infra.net import ipv6_to_ipv4
from ocflib.infra.net import is_ocf_ip
from ocflib.infra.net import OCF_DNS_RESOLVER
from ocflib.infra.net import OCF_GATEWAY_V4
from ocflib.infra.net import OCF_GATEWAY_V6
from ocflib.infra.net import OCF_SUBNET_V4
from ocflib.infra.net import OCF_SUBNET_V6
TEST_IPV4_IPV6 = (
('192.168.3.11', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
('192.168.3.11', 'fdf8:f53e:61e4::18'),
('192.168.3.11', 'fc00:db20:35b:7399::5'),
)
def test_constants_are_sane():
assert OCF_DNS_RESOLVER in OCF_SUBNET_V4
assert OCF_GATEWAY_V4 in OCF_SUBNET_V4
assert OCF_GATEWAY_V6 in OCF_SUBNET_V6
@pytest.mark.parametrize('ipv4,ipv6', TEST_IPV4_IPV6)
def test_4to6(ipv4, ipv6):
assert ipv4_to_ipv6(ip_address(ipv4)) == ip_address(ipv6)
@pytest.mark.parametrize('ipv4', [
# string not ok
'172.16.17.32',
# wrong subnet
ip_address('192.168.127.12'),
# wrong version
ip_address('fdf8:f53e:61e4::18'),
])
def test_4to6_failure(ipv4):
with pytest.raises(AssertionError):
ipv4_to_ipv6(ipv4)
@pytest.mark.parametrize('ipv4,ipv6', TEST_IPV4_IPV6)
def test_6to4(ipv4, ipv6):
assert ipv6_to_ipv4(ip_address(ipv6)) == ip_address(ipv4)
@pytest.mark.parametrize('ipv6', [
# string not ok
'fdf8:f53e:61e4::18',
# wrong version
ip_address('192.168.127.12'),
# wrong subnet entirely
ip_address('fdf8:f53e:61e4::18:10'),
# not in compat subnet
ip_address('fc00:db20:35b:7399::5'),
])
def test_6to4_failure(ipv6):
with pytest.raises(AssertionError):
ipv6_to_ipv4(ipv6)
@pytest.mark.parametrize('ip,expected', [
(ip_address('172.16.17.32'), True),
(ip_address('172.16.31.10'), True),
(ip_address('172.16.17.32'), True),
(ip_address('fc00:db20:35b:7399::5'), True),
(ip_address('fdf8:f53e:61e4::18'), True),
(ip_address('fc00:db20:35b:7399::5'), True),
(ip_address('8.8.8.8'), False),
(ip_address('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'), False),
(ip_address('fdf8:f53e:61e4::18:10'), False),
])
def test_is_ocf_ip(ip, expected):
assert is_ocf_ip(ip) is expected
def test_is_ocf_ip_failure():
with pytest.raises(AssertionError):
is_ocf_ip('172.16.17.32')
|
11568818
|
from .lang_identifier import LangIdentifier, identify_lang, identify_topn_langs
from . import models
|
11568826
|
import MQTTConst as mqttConst
from machine import Timer
import json
import os
import _thread
class _basicJSONParser:
def setString(self, srcString):
self._rawString = srcString
        self._dictionaryObject = None
def regenerateString(self):
return json.dumps(self._dictionaryObject)
def getAttributeValue(self, srcAttributeKey):
return self._dictionaryObject.get(srcAttributeKey)
def setAttributeValue(self, srcAttributeKey, srcAttributeValue):
self._dictionaryObject[srcAttributeKey] = srcAttributeValue
def validateJSON(self):
try:
self._dictionaryObject = json.loads(self._rawString)
except ValueError:
return False
return True
class deviceShadow:
def __init__(self, srcShadowName, srcIsPersistentSubscribe, srcShadowManager):
if srcShadowName is None or srcIsPersistentSubscribe is None or srcShadowManager is None:
raise TypeError("None type inputs detected.")
self._shadowName = srcShadowName
# Tool handler
self._shadowManagerHandler = srcShadowManager
self._basicJSONParserHandler = _basicJSONParser()
# Properties
self._isPersistentSubscribe = srcIsPersistentSubscribe
self._lastVersionInSync = -1 # -1 means not initialized
self._isGetSubscribed = False
self._isUpdateSubscribed = False
self._isDeleteSubscribed = False
self._shadowSubscribeCallbackTable = dict()
self._shadowSubscribeCallbackTable["get"] = None
self._shadowSubscribeCallbackTable["delete"] = None
self._shadowSubscribeCallbackTable["update"] = None
self._shadowSubscribeCallbackTable["delta"] = None
self._shadowSubscribeStatusTable = dict()
self._shadowSubscribeStatusTable["get"] = 0
self._shadowSubscribeStatusTable["delete"] = 0
self._shadowSubscribeStatusTable["update"] = 0
self._tokenPool = dict()
self._dataStructureLock = _thread.allocate_lock()
def _doNonPersistentUnsubscribe(self, currentAction):
self._shadowManagerHandler.shadowUnsubscribe(self._shadowName, currentAction)
def _generalCallback(self, client, userdata, message):
# In Py3.x, message.payload comes in as a bytes(string)
# json.loads needs a string input
self._dataStructureLock.acquire()
currentTopic = message.topic
currentAction = self._parseTopicAction(currentTopic) # get/delete/update/delta
currentType = self._parseTopicType(currentTopic) # accepted/rejected/delta
payloadUTF8String = message.payload.decode('utf-8')
# get/delete/update: Need to deal with token, timer and unsubscribe
if currentAction in ["get", "delete", "update"]:
# Check for token
self._basicJSONParserHandler.setString(payloadUTF8String)
if self._basicJSONParserHandler.validateJSON(): # Filter out invalid JSON
currentToken = self._basicJSONParserHandler.getAttributeValue(u"clientToken")
if currentToken is not None and currentToken in self._tokenPool.keys(): # Filter out JSON without the desired token
# Sync local version when it is an accepted response
if currentType == "accepted":
incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version")
# If it is get/update accepted response, we need to sync the local version
if incomingVersion is not None and incomingVersion > self._lastVersionInSync and currentAction != "delete":
self._lastVersionInSync = incomingVersion
# If it is a delete accepted, we need to reset the version
else:
self._lastVersionInSync = -1 # The version will always be synced for the next incoming delta/GU-accepted response
# Cancel the timer and clear the token
self._tokenPool[currentToken].cancel()
del self._tokenPool[currentToken]
# Need to unsubscribe?
self._shadowSubscribeStatusTable[currentAction] -= 1
if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(currentAction) <= 0:
self._shadowSubscribeStatusTable[currentAction] = 0
self._doNonPersistentUnsubscribe(currentAction)
# Custom callback
if self._shadowSubscribeCallbackTable.get(currentAction) is not None:
self._shadowManagerHandler.insertShadowCallback(self._shadowSubscribeCallbackTable[currentAction], payloadUTF8String, currentType, currentToken)
# delta: Watch for version
else:
currentType += "/" + self._parseTopicShadowName(currentTopic)
# Sync local version
self._basicJSONParserHandler.setString(payloadUTF8String)
if self._basicJSONParserHandler.validateJSON(): # Filter out JSON without version
incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version")
if incomingVersion is not None and incomingVersion > self._lastVersionInSync:
self._lastVersionInSync = incomingVersion
# Custom callback
if self._shadowSubscribeCallbackTable.get(currentAction) is not None:
self._shadowManagerHandler.insertShadowCallback(self._shadowSubscribeCallbackTable[currentAction], payloadUTF8String, currentType, None)
self._dataStructureLock.release()
def _parseTopicAction(self, srcTopic):
ret = None
fragments = srcTopic.decode('utf-8').split('/')
if fragments[5] == "delta":
ret = "delta"
else:
ret = fragments[4]
return ret
def _parseTopicType(self, srcTopic):
fragments = srcTopic.decode('utf-8').split('/')
return fragments[5]
def _parseTopicShadowName(self, srcTopic):
fragments = srcTopic.decode('utf-8').split('/')
return fragments[2]
def _timerHandler(self, args):
srcActionName = args[0]
srcToken = args[1]
self._dataStructureLock.acquire()
# Remove the token
del self._tokenPool[srcToken]
# Need to unsubscribe?
self._shadowSubscribeStatusTable[srcActionName] -= 1
if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(srcActionName) <= 0:
self._shadowSubscribeStatusTable[srcActionName] = 0
self._shadowManagerHandler.shadowUnsubscribe(self._shadowName, srcActionName)
# Notify time-out issue
if self._shadowSubscribeCallbackTable.get(srcActionName) is not None:
self._shadowSubscribeCallbackTable[srcActionName]("REQUEST TIME OUT", "timeout", srcToken)
self._dataStructureLock.release()
def shadowGet(self, srcCallback, srcTimeout):
self._dataStructureLock.acquire()
# Update callback data structure
self._shadowSubscribeCallbackTable["get"] = srcCallback
# Update number of pending feedback
self._shadowSubscribeStatusTable["get"] += 1
# clientToken
currentToken = mqttConst.UUID(bytes=os.urandom(16), version=4).urn[9:]
self._tokenPool[currentToken] = None
self._basicJSONParserHandler.setString("{}")
self._basicJSONParserHandler.validateJSON()
self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken)
currentPayload = self._basicJSONParserHandler.regenerateString()
self._dataStructureLock.release()
# Two subscriptions
if not self._isPersistentSubscribe or not self._isGetSubscribed:
self._shadowManagerHandler.shadowSubscribe(self._shadowName, "get", self._generalCallback)
self._isGetSubscribed = True
# One publish
self._shadowManagerHandler.shadowPublish(self._shadowName, "get", currentPayload)
# Start the timer
self._tokenPool[currentToken] = Timer.Alarm(self._timerHandler, srcTimeout,arg=("get", currentToken),periodic=False)
return currentToken
def shadowDelete(self, srcCallback, srcTimeout):
self._dataStructureLock.acquire()
# Update callback data structure
self._shadowSubscribeCallbackTable["delete"] = srcCallback
# Update number of pending feedback
self._shadowSubscribeStatusTable["delete"] += 1
# clientToken
currentToken = mqttConst.UUID(bytes=os.urandom(16), version=4).urn[9:]
self._tokenPool[currentToken] = None
self._basicJSONParserHandler.setString("{}")
self._basicJSONParserHandler.validateJSON()
self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken)
currentPayload = self._basicJSONParserHandler.regenerateString()
self._dataStructureLock.release()
# Two subscriptions
if not self._isPersistentSubscribe or not self._isDeleteSubscribed:
self._shadowManagerHandler.shadowSubscribe(self._shadowName, "delete", self._generalCallback)
self._isDeleteSubscribed = True
# One publish
self._shadowManagerHandler.shadowPublish(self._shadowName, "delete", currentPayload)
# Start the timer
self._tokenPool[currentToken] = Timer.Alarm(self._timerHandler,srcTimeout, arg=("delete", currentToken), periodic=False)
return currentToken
def shadowUpdate(self, srcJSONPayload, srcCallback, srcTimeout):
# Validate JSON
JSONPayloadWithToken = None
currentToken = None
self._basicJSONParserHandler.setString(srcJSONPayload)
if self._basicJSONParserHandler.validateJSON():
self._dataStructureLock.acquire()
# clientToken
currentToken = mqttConst.UUID(bytes=os.urandom(16), version=4).urn[9:]
self._tokenPool[currentToken] = None
self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken)
JSONPayloadWithToken = self._basicJSONParserHandler.regenerateString()
# Update callback data structure
self._shadowSubscribeCallbackTable["update"] = srcCallback
# Update number of pending feedback
self._shadowSubscribeStatusTable["update"] += 1
self._dataStructureLock.release()
# Two subscriptions
if not self._isPersistentSubscribe or not self._isUpdateSubscribed:
self._shadowManagerHandler.shadowSubscribe(self._shadowName, "update", self._generalCallback)
self._isUpdateSubscribed = True
# One publish
self._shadowManagerHandler.shadowPublish(self._shadowName, "update", JSONPayloadWithToken)
# Start the timer
self._tokenPool[currentToken] = Timer.Alarm(self._timerHandler, srcTimeout, arg=("update", currentToken), periodic=False)
else:
raise ValueError("Invalid JSON file.")
return currentToken
def shadowRegisterDeltaCallback(self, srcCallback):
self._dataStructureLock.acquire()
# Update callback data structure
self._shadowSubscribeCallbackTable["delta"] = srcCallback
self._dataStructureLock.release()
# One subscription
self._shadowManagerHandler.shadowSubscribe(self._shadowName, "delta", self._generalCallback)
def shadowUnregisterDeltaCallback(self):
self._dataStructureLock.acquire()
# Update callback data structure
del self._shadowSubscribeCallbackTable["delta"]
self._dataStructureLock.release()
# One unsubscription
self._shadowManagerHandler.shadowUnsubscribe(self._shadowName, "delta")
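# Illustrative usage sketch (the shadow manager instance and callback are hypothetical,
# not part of this module):
#   def on_response(payload, responseStatus, token):
#       print(responseStatus, payload)
#   shadow = deviceShadow("myThing", True, shadow_manager)  # shadow_manager: a configured manager
#   token = shadow.shadowGet(on_response, 5)                 # request the shadow document, 5 s timeout
#   shadow.shadowRegisterDeltaCallback(on_response)          # listen for desired/reported deltas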
|
11568834
|
import azure.functions as func
from .globals import getLogger, EXTERNAL_DEPENDENCY_URL
from .utils import call_internal_api, call_external_api
logger = getLogger(__name__)
class FunctionLogic:
@classmethod
def run(cls, req: func.HttpRequest) -> func.HttpResponse:
"""Azure Function business logic
Args:
req([HttpRequest]): [Incoming HTTP request]
"""
logger.info("new invocation received")
# TRACK DEPENDENCY (IN-PROC):
# This long running code is logged as a dependency
# it uses a method decorator
call_internal_api(delay=3.0)
# TRACK DEPENDENCY (HTTP):
# This uses Opencensus' requests extension
call_external_api(url=EXTERNAL_DEPENDENCY_URL)
# TRACES (SEVERITY)
# create log entries with different severity levels (warning, exception)
logger.warning("log warning message")
try:
assert 1 == 0
except Exception as ex:
logger.exception(ex)
return func.HttpResponse(
"This HTTP triggered function executed successfully.",
status_code=200,
)
|
11568851
|
from classicML.api.models import BaseModel
from classicML.api.models import AveragedOneDependentEstimator
from classicML.api.models import AODE
from classicML.api.models import BackPropagationNeuralNetwork
from classicML.api.models import BPNN
from classicML.api.models import DecisionTreeClassifier
from classicML.api.models import LinearDiscriminantAnalysis
from classicML.api.models import LDA
from classicML.api.models import LogisticRegression
from classicML.api.models import NaiveBayesClassifier
from classicML.api.models import NB
from classicML.api.models import RadialBasisFunctionNetwork
from classicML.api.models import RBF
from classicML.api.models import SuperParentOneDependentEstimator
from classicML.api.models import SPODE
from classicML.api.models import SupportVectorClassifier
from classicML.api.models import SVC
|
11568916
|
from typing import Union, Tuple, List
import torch
from skrl.memories.torch import Memory # from .base import Memory
class CustomMemory(Memory):
def __init__(self, memory_size: int, num_envs: int = 1, device: Union[str, torch.device] = "cuda:0") -> None:
"""
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: 1)
:type num_envs: int, optional
:param device: Device on which a torch tensor is or will be allocated (default: "cuda:0")
:type device: str or torch.device, optional
"""
super().__init__(memory_size, num_envs, device)
def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[torch.Tensor]]:
"""Sample a batch from memory
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
        :param batch_size: Number of elements to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: 1)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
        :rtype: list of lists of torch.Tensor
"""
# ================================
# - sample a batch from memory.
# It is possible to generate only the sampling indexes and call self.sample_by_index(...)
# ================================
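        # Minimal sampling sketch (an assumption, not skrl's reference implementation):
        # draw uniform random indexes over the stored transitions and delegate to the base
        # class helper. This assumes the base Memory provides __len__ (number of stored
        # transitions), a `device` attribute and sample_by_index(names, indexes, mini_batches).
        indexes = torch.randint(0, len(self), (batch_size,), device=self.device)
        return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)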
|
11568945
|
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import TimestampType
from visions.types.date_time import DateTime
@DateTime.contains_op.register
def datetime_contains(sequence: DataFrame, state: dict) -> bool:
if len(sequence.schema) != 1:
return False
dtype = sequence.schema[0].dataType
return isinstance(dtype, TimestampType)
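# Illustrative usage sketch (assumes a local SparkSession is available; not part of this module):
#   from pyspark.sql import SparkSession
#   from datetime import datetime
#   spark = SparkSession.builder.master("local[1]").getOrCreate()
#   df = spark.createDataFrame([(datetime(2020, 1, 1),)], ["ts"])
#   assert datetime_contains(df, state={}) is True   # single TimestampType column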
|
11568966
|
from invariances.model.cinn import ConditionalTransformer
def get_model(name):
# TODO: add the other models
_models = {
"alexnet_conv5_animals": lambda: ConditionalTransformer.from_pretrained("animals"),
}
return _models[name]()
|
11568968
|
from .prop import prop
from ramda.private.asserts import assert_equal
class TestObject:
def __init__(self, val):
self.val = val
test_object = TestObject("foo")
def prop_nocurry_test():
assert_equal(prop("val", test_object), "foo")
def prop_curry_test():
assert_equal(prop("val")(test_object), "foo")
def prop_nocurry_property_test():
assert_equal(prop("val")({"val": "foo"}), "foo")
|
11569008
|
import os
from pathlib import Path
import requests
from picktrue.meta import UA, ImageItem
from picktrue.utils import retry
def normalize_proxy_string(proxy):
if 'socks5' in proxy:
if 'socks5h' not in proxy:
proxy = proxy.replace('socks5', 'socks5h')
return proxy
def get_proxy(proxy_string=None):
if proxy_string is None:
return {}
proxy = normalize_proxy_string(proxy_string)
proxies = {
'proxies': {
'http': proxy,
'https': proxy,
}
}
return proxies
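# Illustrative usage sketch (the proxy address is hypothetical): get_proxy returns keyword
# arguments shaped for the fetcher below, so it is typically unpacked with **:
#   fetcher = DummyFetcher(**get_proxy('socks5://127.0.0.1:1080'))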
class DummySite:
@property
def dir_name(self):
raise NotImplementedError()
@property
def fetcher(self):
raise NotImplementedError()
@property
def tasks(self):
raise NotImplementedError()
class DummyFetcher:
def __init__(self, proxies=None):
self.session = requests.session()
if proxies is not None:
self.session.proxies = proxies
self.session.headers.update(UA)
@staticmethod
def _safe_name(name):
name = name.replace("/", " ")
name = name.replace("\\", " ")
name = name.strip()
name = name.replace(" ", '-')
return name
@staticmethod
def _safe_path(path):
return Path(path).absolute()
@retry()
def get(self, url, **kwargs):
"""
:rtype: requests.Response
"""
if 'timeout' in kwargs:
kwargs.pop('timeout')
return self.session.get(url, timeout=(2, 30), **kwargs)
def get_save_path(self, base_path, image_name, image: ImageItem):
save_path = os.path.join(
base_path,
image_name,
)
return save_path
def save(self, content, task_item):
"""
:type content: bytearray
:type task_item: picktrue.meta.TaskItem
"""
image = task_item.image
image_name = image.name
if callable(image.name):
image_name = image.name(image.url, content)
save_path = self.get_save_path(
base_path=task_item.base_save_path,
image_name=image_name,
image=image,
)
save_path = self._safe_path(save_path)
if os.path.exists(save_path):
return
with open(save_path, "wb") as f:
f.write(content)
f.flush()
|
11569054
|
import logging
import string
from collections import Counter
from typing import Dict, List, Any
import numpy as np
import pandas as pd
import torch
from transfer_nlp.common.tokenizers import CustomTokenizer
from transfer_nlp.embeddings.embeddings import Embedding
from transfer_nlp.loaders.loaders import DatasetSplits, DataFrameDataset
from transfer_nlp.loaders.vectorizers import Vectorizer
from transfer_nlp.loaders.vocabulary import Vocabulary, SequenceVocabulary
from transfer_nlp.plugins.config import register_plugin
from transfer_nlp.plugins.predictors import PredictorABC
logger = logging.getLogger(__name__)
# Vectorizer class
@register_plugin
class NewsVectorizer(Vectorizer):
def __init__(self, data_file: str, cutoff: int):
super().__init__(data_file=data_file)
self.cutoff = cutoff
self.tokenizer = CustomTokenizer()
df = pd.read_csv(data_file)
target_vocab = Vocabulary(add_unk=False)
for category in sorted(set(df.category)):
target_vocab.add_token(category)
word_counts = Counter()
max_title = 0
for title in df.title:
tokens = self.tokenizer.tokenize(text=title)
max_title = max(max_title, len(tokens))
for token in tokens:
if token not in string.punctuation:
word_counts[token] += 1
data_vocab = SequenceVocabulary()
for word, word_count in word_counts.items():
if word_count >= self.cutoff:
data_vocab.add_token(word)
self.data_vocab = data_vocab
self.target_vocab = target_vocab
self.max_title = max_title + 2
def vectorize(self, title: str) -> np.array:
tokens = self.tokenizer.tokenize(text=title)
indices = [self.data_vocab.begin_seq_index]
indices.extend(self.data_vocab.lookup_token(token)
for token in tokens)
indices.append(self.data_vocab.end_seq_index)
vector_length = self.max_title
out_vector = np.zeros(vector_length, dtype=np.int64)
out_vector[:len(indices)] = indices
out_vector[len(indices):] = self.data_vocab.mask_index
return out_vector
# Dataset class
@register_plugin
class NewsDataset(DatasetSplits):
def __init__(self, data_file: str, batch_size: int, vectorizer: Vectorizer):
self.df = pd.read_csv(data_file)
# preprocessing
self.vectorizer: Vectorizer = vectorizer
self.df['x_in'] = self.df.apply(lambda row: self.vectorizer.vectorize(row.title), axis=1)
self.df['y_target'] = self.df.apply(lambda row: self.vectorizer.target_vocab.lookup_token(row.category), axis=1)
train_df = self.df[self.df.split == 'train'][['x_in', 'y_target']]
val_df = self.df[self.df.split == 'val'][['x_in', 'y_target']]
test_df = self.df[self.df.split == 'test'][['x_in', 'y_target']]
super().__init__(train_set=DataFrameDataset(train_df), train_batch_size=batch_size,
val_set=DataFrameDataset(val_df), val_batch_size=batch_size,
test_set=DataFrameDataset(test_df), test_batch_size=batch_size)
@register_plugin
class NewsClassifier(torch.nn.Module):
def __init__(self, data: DatasetSplits, embedding_size: int, num_channels: int,
hidden_dim: int, dropout_p: float, padding_idx: int = 0, glove_path: str = None):
super(NewsClassifier, self).__init__()
self.num_embeddings = len(data.vectorizer.data_vocab)
self.num_classes = len(data.vectorizer.target_vocab)
self.num_channels: int = num_channels
self.embedding_size: int = embedding_size
self.hidden_dim: int = hidden_dim
self.padding_idx: int = padding_idx
if glove_path:
logger.info("Using pre-trained word embeddings...")
self.embeddings = Embedding(glove_filepath=glove_path, data=data).embeddings
self.embeddings = torch.from_numpy(self.embeddings).float()
glove_size = len(self.embeddings[0])
self.emb: torch.nn.Embedding = torch.nn.Embedding(embedding_dim=glove_size,
num_embeddings=self.num_embeddings,
padding_idx=self.padding_idx,
_weight=self.embeddings)
else:
logger.info("Not using pre-trained word embeddings...")
self.emb: torch.nn.Embedding = torch.nn.Embedding(embedding_dim=self.embedding_size,
num_embeddings=self.num_embeddings,
padding_idx=self.padding_idx)
self.convnet = torch.nn.Sequential(
torch.nn.Conv1d(in_channels=self.embedding_size,
out_channels=self.num_channels, kernel_size=3),
torch.nn.ELU(),
torch.nn.Conv1d(in_channels=self.num_channels, out_channels=self.num_channels,
kernel_size=3, stride=2),
torch.nn.ELU(),
torch.nn.Conv1d(in_channels=self.num_channels, out_channels=self.num_channels,
kernel_size=3, stride=1),
torch.nn.ELU(),
torch.nn.Conv1d(in_channels=self.num_channels, out_channels=self.num_channels,
kernel_size=3), # Experimental change from 3 to 2
torch.nn.ELU()
)
self._dropout_p: float = dropout_p
self.dropout = torch.nn.Dropout(p=dropout_p)
self.fc1: torch.nn.Linear = torch.nn.Linear(self.num_channels, self.hidden_dim)
self.fc2: torch.nn.Linear = torch.nn.Linear(self.hidden_dim, self.num_classes)
def forward(self, x_in: torch.Tensor, apply_softmax: bool = False) -> torch.Tensor:
"""
:param x_in: input data tensor
:param apply_softmax: flag for the softmax activation
should be false if used with the Cross Entropy losses
:return: the resulting tensor. tensor.shape should be (batch, num_classes)
"""
# embed and permute so features are channels
x_embedded = self.emb(x_in).permute(0, 2, 1)
features = self.convnet(x_embedded)
# average and remove the extra dimension
remaining_size = features.size(dim=2)
features = torch.nn.functional.avg_pool1d(features, remaining_size).squeeze(dim=2)
features = self.dropout(features)
# mlp classifier
intermediate_vector = torch.nn.functional.relu(self.dropout(self.fc1(features)))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = torch.nn.functional.softmax(prediction_vector, dim=1)
return prediction_vector
# Predictors
@register_plugin
class NewsPredictor(PredictorABC):
"""
Toy example: we want to make predictions on inputs of the form {"inputs": ["hello world", "foo", "bar"]}
"""
def __init__(self, data: DatasetSplits, model: torch.nn.Module):
super().__init__(vectorizer=data.vectorizer, model=model)
def json_to_data(self, input_json: Dict) -> Dict:
return {
'x_in': torch.LongTensor([self.vectorizer.vectorize(title=input_string) for input_string in input_json['inputs']])}
def output_to_json(self, outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
return {
"outputs": outputs}
def decode(self, output: torch.tensor) -> List[Dict[str, Any]]:
probabilities = torch.nn.functional.softmax(output, dim=1)
probability_values, indices = probabilities.max(dim=1)
return [{
"class": self.vectorizer.target_vocab.lookup_index(index=int(res[1])),
"probability": float(res[0])} for res in zip(probability_values, indices)]
|
11569101
|
DEBUG = True
SECRET_KEY = 'insecurekeyfordev'
# Change localhost to your Docker Machine IP if you're using Docker Toolbox.
SERVER_NAME = 'localhost:8000'
|
11569115
|
from django.urls import path
from .apps import LibiAccountConfig
from .views import (
AccountView,
TokenView,
)
app_name = LibiAccountConfig.name
urlpatterns = [
path('', AccountView.as_view(), name='account'),
path('token', TokenView.as_view(), name='token'),
]
|