| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
1614524
|
import numpy as np
import pandas as pd
import pytest
import tabmat as tm
@pytest.fixture()
def X():
df = pd.read_pickle("tests/real_matrix.pkl")
X_split = tm.from_pandas(df, np.float64)
wts = np.ones(df.shape[0]) / df.shape[0]
X_std = X_split.standardize(wts, True, True)[0]
return X_std
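# Note (illustrative, not from the original file): tabmat's sandwich(d) forms the
# weighted Gram matrix X.T @ diag(d) @ X; the tests below check the optimized
# split/standardized implementations against a plain DenseMatrix reference.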
def test_full_sandwich(X):
X_dense = tm.DenseMatrix(X.toarray())
r = np.random.rand(X.shape[0])
simple = X_dense.sandwich(r)
fancy = X.sandwich(r)
np.testing.assert_almost_equal(simple, fancy, 12)
def test_split_sandwich_rows_cols(X):
X_split = X.mat
X_split_dense = tm.DenseMatrix(X_split.toarray())
r = np.random.rand(X.shape[0])
rows = np.arange(X.shape[0])
cols = np.arange(X.shape[1])
simple = X_split_dense.sandwich(r, rows, cols)
fancy = X_split.sandwich(r, rows, cols)
np.testing.assert_almost_equal(simple, fancy, 12)
|
1614545
|
class Attr:
COURSES = "courses"
DEPT = "dept"
INSTRUCTORS = "instructors"
NAME = "name"
NUMBER = "number"
PATH = "path"
PAGES = "pages"
COURSE_SURVEY_TRANSPARENCY_PAGE_PATHS = [
"course_surveys_authentication",
"course_surveys_infrastructure",
"course_surveys_upload",
"course_surveys_visualization",
"course_surveys_database",
"course_surveys_management",
"course_surveys_permissions",
"course_surveys_search",
]
COURSE_SURVEY_PREFIX = "Course surveys "
|
1614557
|
import inspect
import re
from hashlib import sha256
from typing import List
from .csv import csv
from .json import json
from .pandas import pandas
from .parquet import parquet
from .text import text
def hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
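# For example (illustrative), pure-comment lines are dropped before hashing,
# so these two inputs produce the same digest:
#   hash_python_lines(["# header", "x = 1"]) == hash_python_lines(["x = 1"])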
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, hash_python_lines(inspect.getsource(parquet).splitlines())),
"text": (text.__name__, hash_python_lines(inspect.getsource(text).splitlines())),
}
_EXTENSION_TO_MODULE = {
"csv": "csv",
"tsv": "csv",
"json": "json",
"jsonl": "json",
"parquet": "parquet",
"txt": "text",
}
|
1614559
|
from itertools import count
import logging
import os
def iter_files(compilation_unit):
'''Yield all file paths in the given compilation unit.
Yield absolute paths if possible. Paths are not guaranteed to be unique.'''
topdie = compilation_unit.get_top_DIE()
def iter_files_raw():
# Yield base file (main source file, given as parameter to C compiler)
yield topdie.attributes['DW_AT_name'].value
# Get the DWARF line program
lineprog = compilation_unit.dwarfinfo.line_program_for_CU(compilation_unit)
# Yield other files (usually headers)
for file_entry in lineprog.header.file_entry:
filename = file_entry.name
# Add directory prefix
dir_idx = file_entry.dir_index
if dir_idx:
directory = lineprog.header.include_directory[dir_idx - 1]
filename = os.path.join(directory, filename)
yield filename
try:
basedir = topdie.attributes['DW_AT_comp_dir'].value
except KeyError:
# Base directory cannot be determined (no DW_AT_comp_dir); fall back to paths relative to the current directory
logging.warn('compilation unit %s uses relative file paths', topdie.attributes['DW_AT_name'].value)
basedir = '.'
# yield postprocessed paths
return (os.path.normpath(os.path.join(basedir, x)) for x in iter_files_raw())
class Locations(object):
def __init__(self, debuginfo):
self.debuginfo = debuginfo
self.locations = {}
self.id_gen = count()
# Map compilation units to the list of their paths
self.cufiles = { compilation_unit.cu_offset : list(iter_files(compilation_unit))
for compilation_unit in debuginfo.dwarf.iter_CUs() }
# Map file paths to their IDs, don't store duplicates
files = sorted(set(sum(self.cufiles.itervalues(), [])))
self.files = { path: idx for idx, path in enumerate(files) }
for each in self.files:
if not os.path.isfile(each):
logging.warn('source file does not exist: %s', each)
def get_source_file(self, compilation_unit, idx):
'''Get the path of the source file with the given ID from the compilation unit.
This method uses the original symbol indexes from the DWARF file. The returned paths are normalized and
converted to absolute (if possible).
The first parameter, compilation_unit, is the compilation unit object supplied by elftools.'''
return self.cufiles[compilation_unit.cu_offset][idx]
def insert_flc(self, filename, line, column):
'''Insert a new location (file-line-column) and return its ID in the database.
If the same location already exists in the database, the ID of the existing location is returned and no new
records are created.'''
flc = (self.files[filename], line, column)
try:
ret = self.locations[flc]
except KeyError:
ret = self.id_gen.next()
self.locations[flc] = ret
return ret
def insert_DIE_flc(self, die):
'''Insert a new location basing on the location attributes of the given DIE and return its ID in the database.
Internally, this method uses insert_flc.'''
def get_attr_val(what):
try:
return die.attributes[what].value
except KeyError:
return None
file_idx = get_attr_val('DW_AT_decl_file')
if file_idx is None:
return None
line = get_attr_val('DW_AT_decl_line') or 0
column = get_attr_val('DW_AT_decl_column') or 0
filename = self.get_source_file(die.cu, file_idx)
return self.insert_flc(filename, line, column)
def getfile(self, idx):
if idx is None:
return None
for k, v in self.files.iteritems():
if v == idx:
return k
raise KeyError
def get(self, idx):
if idx is None:
return (None, 0, 0)
for k, v in self.locations.iteritems():
if v == idx:
f, l, c = k
return self.getfile(f), l, c
raise KeyError
def getstr(self, idx):
e = [str(x) for x in self.get(idx) if x]
if e:
return ':'.join(e)
else:
return '<unknown location>'
def store(self, conn):
logging.debug('Storing %i files and %i locations.', len(self.files), len(self.locations))
# Store files
query = 'insert into files (path, id) values (?, ?)'
items = self.files.iteritems()
conn.executemany(query, items)
conn.commit()
# Store locations
query = 'insert into locations (id, file, line, col) values (?, ?, ?, ?)'
items = ((lid, file_id, line, col) for (file_id, line, col), lid in self.locations.iteritems())
conn.executemany(query, items)
conn.commit()
|
1614560
|
from .sink import AvroSink
from .source import AvroSource
from .serializer import AvroSerializer
from .deserializer import AvroDeserializer
__all__ = (
'AvroSink',
'AvroSource',
'AvroSerializer',
'AvroDeserializer',
)
|
1614583
|
import numpy as np
import scipy.linalg as spla
import __builtin__
try:
profile = __builtin__.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func): return func
def chol2inv(chol):
return spla.cho_solve((chol, False), np.eye(chol.shape[0]))
def matrixInverse(M):
return chol2inv(spla.cholesky(M, lower=False))
def PCA_reduce(X, Q):
"""
A helpful function for linearly reducing the dimensionality of the data X
to Q.
:param X: data array of size N (number of points) x D (dimensions)
:param Q: Number of latent dimensions, Q < D
:return: PCA projection array of size N x Q.
"""
assert Q <= X.shape[1], 'Cannot have more latent dimensions than observed'
# np.linalg.eigh returns eigenvalues first, then eigenvectors
evals, evecs = np.linalg.eigh(np.cov(X.T))
i = np.argsort(evals)[::-1]
W = evecs[:, i]
W = W[:, :Q]
return (X - X.mean(0)).dot(W)
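# Minimal usage sketch (illustrative only; the toy data below is not part of the module):
#   X_demo = np.random.randn(100, 5)
#   X_low = PCA_reduce(X_demo, 2)   # -> array of shape (100, 2)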
class ObjectiveWrapper(object):
def __init__(self):
self.previous_x = None
def __call__(self, params, params_args, obj, idxs, alpha, prop_mode):
params_dict = unflatten_dict(params, params_args)
f, grad_dict = obj.objective_function(
params_dict, idxs, alpha=alpha, prop_mode=prop_mode)
g, _ = flatten_dict(grad_dict)
g_is_fin = np.isfinite(g)
if np.all(g_is_fin):
self.previous_x = params
return f, g
else:
print("Warning: inf or nan in gradient: replacing with zeros")
return f, np.where(g_is_fin, g, 0.)
# def objective_wrapper(params, params_args, obj, idxs, alpha):
# params_dict = unflatten_dict(params, params_args)
# f, grad_dict = obj.objective_function(
# params_dict, idxs, alpha=alpha)
# g, _ = flatten_dict(grad_dict)
# g_is_fin = np.isfinite(g)
# if np.all(g_is_fin):
# return f, g
# else:
# print("Warning: inf or nan in gradient: replacing with zeros")
# return f, np.where(g_is_fin, g, 0.)
def flatten_dict(params):
keys = params.keys()
shapes = {}
ind = np.zeros(len(keys), dtype=int)
vec = np.array([])
for i, key in enumerate(sorted(keys)):
val = params[key]
shapes[key] = val.shape
val_vec = val.ravel()
vec = np.concatenate((vec, val_vec))
ind[i] = val_vec.shape[0]
indices = np.cumsum(ind)[:-1]
return vec, (keys, indices, shapes)
def unflatten_dict(params, params_args):
keys, indices, shapes = params_args[0], params_args[1], params_args[2]
vals = np.split(params, indices)
params_dict = {}
for i, key in enumerate(sorted(keys)):
params_dict[key] = np.reshape(vals[i], shapes[key])
return params_dict
def adam(func, init_params, callback=None, maxiter=1000,
step_size=0.001, b1=0.9, b2=0.999, eps=1e-8,
args=None, disp=True, return_cost=False):
"""Adam as described in http://arxiv.org/pdf/1412.6980.pdf."""
x = init_params
m = np.zeros_like(x)
v = np.zeros_like(x)
fs = []
for i in range(maxiter):
f, g = func(x, *args)
if disp and i % 10 == 0:
print 'iter %d \t obj %.3f' % (i, f)
# print '%.3f' % f
if callback:
callback(x, i, args)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x = x - step_size * mhat / (np.sqrt(vhat) + eps)
fs.append(f)
if return_cost:
return x, np.array(fs)
else:
return x
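# Minimal usage sketch for adam (illustrative only, not from the original module):
# minimise a 1-D quadratic f(x) = x**2, whose gradient is 2*x.
#   quad = lambda x: (np.sum(x**2), 2 * x)
#   x_opt = adam(quad, np.array([5.0]), args=(), maxiter=200, disp=False)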
|
1614603
|
from tests.utils import logging
from airflow_kubernetes_job_operator.kube_api import KubeResourceKind
from airflow_kubernetes_job_operator.kube_api import KubeApiRestClient
from airflow_kubernetes_job_operator.kube_api import GetPodLogs
from airflow_kubernetes_job_operator.kube_api import kube_logger
KubeResourceKind.register_global_kind(
KubeResourceKind("HCjob", "hc.dto.cbsinteractive.com/v1alpha1", parse_kind_state=KubeResourceKind.parse_state_job)
)
kube_logger.level = logging.DEBUG
client = KubeApiRestClient()
logger = GetPodLogs(
name="logs-tester",
namespace=client.get_default_namespace(),
follow=True,
)
logger.pipe_to_logger()
rslt = client.query_async(logger)
label = f"{logger.namespace}/{logger.name}"
logging.info(f"Waiting for logs @ {label}...")
logger.wait_until_running()
logging.info(f"Starting logs watch @ {label}...")
logger.join()
logging.info(f"Watch for logs ended @ {label}")
|
1614635
|
from objective_functions.recon import elbo_loss, sigmloss1dcentercrop
from unimodals.MVAE import LeNetEncoder, DeLeNet
from training_structures.MVAE_mixed import train_MVAE, test_MVAE
from datasets.avmnist.get_data import get_dataloader
import torch
from torch import nn
from unimodals.common_models import MLP
from fusions.MVAE import ProductOfExperts
import sys
import os
sys.path.append(os.getcwd())
traindata, validdata, testdata = get_dataloader(
'/data/yiwei/avmnist/_MFAS/avmnist')
classes = 10
n_latent = 200
fuse = ProductOfExperts((1, 40, n_latent))
channels = 6
encoders = [LeNetEncoder(1, channels, 3, n_latent).cuda(
), LeNetEncoder(1, channels, 5, n_latent).cuda()]
decoders = [DeLeNet(1, channels, 3, n_latent).cuda(),
DeLeNet(1, channels, 5, n_latent).cuda()]
head = MLP(n_latent, 40, classes).cuda()
elbo = elbo_loss([sigmloss1dcentercrop(28, 34),
sigmloss1dcentercrop(112, 130)], [1.0, 1.0], 0.0)
train_MVAE(encoders, decoders, head, fuse, traindata, validdata, elbo, 20)
mvae = torch.load('best1.pt')
head = torch.load('best2.pt')
test_MVAE(mvae, head, testdata)
|
1614658
|
from .gradient_descent_2d import GradientDescent2D
class Momentum2D(GradientDescent2D):
def __init__(self, alpha=3e-2, max_iterations=150, start_point=1.0,
epsilon=1e-3, momentum=0.3, random=False):
self.momentum = momentum
super().__init__(alpha=alpha, max_iterations=max_iterations,
start_point=start_point, epsilon=epsilon, random=random)
def printStats(self):
print('=' * 80)
print('[INFO]\t\tHyperparameters for Momentum 2D')
print('=' * 80)
print(f'[INFO] Learning Rate: {self.alpha}')
print(f'[INFO] Maximum Iterations: {self.maxIterations}')
print(f'[INFO] Starting Point of x: {self.x}')
print(f'[INFO] Epsilon for checking convergence: {self.epsilon}')
print(f'[INFO] Momentum: {self.momentum}')
print('=' * 80)
def run(self):
# log the starting point of x
self.history['x'].append(self.x)
change = 0.0
for i in range(self.maxIterations):
# keeping track of prev X for checking convergence
self.prevX = self.x
gradient = self.grad_f(self.x)
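# Classical momentum update (descriptive note): v_t = alpha * grad + momentum * v_{t-1},
# then x_{t+1} = x_t - v_t; `change` carries v_{t-1} between iterations.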
newChange = self.alpha * gradient + self.momentum * change
self.x = self.x - (newChange)
change = newChange
# log metrics
self.history['x'].append(self.x)
self.history['grads'].append(gradient)
if self.isConverged():
print('[INFO] Gradient Descent using Momentum converged at iteration', i + 1)
break
|
1614665
|
import sys
if sys.version_info >= (3, 0):
from .diff_match_patch import __author__, __doc__, diff_match_patch, patch_obj
else:
from .diff_match_patch_py2 import __author__, __doc__, diff_match_patch, patch_obj
__version__ = "20200713"
__packager__ = "<NAME> (<EMAIL>)"
|
1614671
|
import argparse
import os
from functools import partial
from multiprocessing.pool import Pool
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from tqdm import tqdm
import cv2
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
from preprocessing.utils import get_original_video_paths
from PIL import Image
from facenet_pytorch.models.mtcnn import MTCNN
import numpy as np
detector = MTCNN(margin=0, thresholds=[0.65, 0.75, 0.75], device="cpu")
def save_landmarks(ori_id, root_dir):
ori_id = ori_id[:-4]
ori_dir = os.path.join(root_dir, "crops", ori_id)
landmark_dir = os.path.join(root_dir, "landmarks", ori_id)
os.makedirs(landmark_dir, exist_ok=True)
for frame in range(320):
if frame % 10 != 0:
continue
for actor in range(2):
image_id = "{}_{}.png".format(frame, actor)
landmarks_id = "{}_{}".format(frame, actor)
ori_path = os.path.join(ori_dir, image_id)
landmark_path = os.path.join(landmark_dir, landmarks_id)
if os.path.exists(ori_path):
try:
image_ori = cv2.imread(ori_path, cv2.IMREAD_COLOR)[...,::-1]
frame_img = Image.fromarray(image_ori)
batch_boxes, conf, landmarks = detector.detect(frame_img, landmarks=True)
if landmarks is not None:
landmarks = np.around(landmarks[0]).astype(np.int16)
np.save(landmark_path, landmarks)
except Exception as e:
print(e)
def parse_args():
parser = argparse.ArgumentParser(
description="Extract image landmarks")
parser.add_argument("--root-dir", help="root directory", default="/mnt/sota/datasets/deepfake")
args = parser.parse_args()
return args
def main():
args = parse_args()
ids = get_original_video_paths(args.root_dir, basename=True)
os.makedirs(os.path.join(args.root_dir, "landmarks"), exist_ok=True)
with Pool(processes=os.cpu_count()) as p:
with tqdm(total=len(ids)) as pbar:
func = partial(save_landmarks, root_dir=args.root_dir)
for v in p.imap_unordered(func, ids):
pbar.update()
if __name__ == '__main__':
main()
|
1614672
|
from __future__ import annotations
from typing import Dict, List, Optional, Tuple, Union
import warp.yul.ast as ast
from warp.yul.AstMapper import AstMapper
Scope = Dict[str, Optional[ast.Literal]]
class VariableInliner(AstMapper):
"""This class inlines the value of variables by tracking their values
across declarations and assignments.
"""
def __init__(self):
super().__init__()
self.scope: List[Scope] = []
def visit_block(self, node: ast.Block) -> ast.Block:
self.scope.append(dict())
visited_node = super().visit_block(node)
self.scope.pop()
return visited_node
def visit_variable_declaration(
self, node: ast.VariableDeclaration
) -> ast.VariableDeclaration:
current_scope = self.scope[-1]
if isinstance(node.value, ast.Literal):
assert len(node.variables) == 1
current_scope[node.variables[0].name] = node.value
elif isinstance(node.value, ast.Identifier):
assert len(node.variables) == 1
current_scope[node.variables[0].name] = self._val_lookup(node.value.name)
elif node.value is None:
for var in node.variables:
current_scope[var.name] = ast.Literal(0)
else:
assert isinstance(node.value, ast.FunctionCall)
return ast.VariableDeclaration(
variables=node.variables,
value=self.visit(node.value) if node.value is not None else None,
)
def visit_assignment(self, node: ast.Assignment) -> ast.Assignment:
if isinstance(node.value, ast.Literal):
assert len(node.variable_names) == 1
var_name = node.variable_names[0].name
self._assign_value(var_name, node.value)
elif isinstance(node.value, ast.Identifier):
assert len(node.variable_names) == 1
var_name = node.variable_names[0].name
ident_value = self._val_lookup(node.value.name)
self._assign_value(var_name, ident_value)
else:
assert isinstance(node.value, ast.FunctionCall)
for var in node.variable_names:
# Can't calculate the value, invalidate the old one
self._assign_value(var.name, None)
return ast.Assignment(
variable_names=node.variable_names,
value=self.visit(node.value),
)
def visit_identifier(
self, node: ast.Identifier
) -> Union[ast.Literal, ast.Identifier]:
return self._val_lookup(node.name) or node
def _info_lookup(self, var_name: str) -> Optional[Tuple[ast.Literal, Scope]]:
for scope in reversed(self.scope):
value = scope.get(var_name)
if value:
return (value, scope)
return None
def _val_lookup(self, var_name: str) -> Optional[ast.Literal]:
val_and_scope = self._info_lookup(var_name)
if val_and_scope:
return val_and_scope[0]
return None
def _scope_lookup(self, var_name: str) -> Optional[Scope]:
val_and_scope = self._info_lookup(var_name)
if val_and_scope:
return val_and_scope[1]
return None
def _assign_value(self, var_name: str, value: Optional[ast.Literal]):
definition_scope = self._scope_lookup(var_name)
if definition_scope:
definition_scope[var_name] = None
self.scope[-1][var_name] = value
|
1614715
|
import pytest
from spacy.cli.project.run import project_run
from spacy.cli.project.assets import project_assets
from pathlib import Path
@pytest.mark.skip(reason="Import currently fails")
def test_fastapi_project():
root = Path(__file__).parent
project_assets(root)
project_run(root, "install", capture=True)
# This is ugly, but we only have the dependency here
from fastapi.testclient import TestClient
from scripts.main import app, ModelName
model_names = [model.value for model in ModelName]
assert model_names
client = TestClient(app)
response = client.get("/models")
assert response.status_code == 200
assert response.json() == model_names
articles = [{"text": "This is a text"}, {"text": "This is another text"}]
data = {"articles": articles, "model": model_names[0]}
response = client.post("/process/", json=data)
assert response.status_code == 200
result = response.json()["result"]
assert len(result) == len(articles)
assert [{"text": entry["text"]} for entry in result] == articles
assert all("ents" in entry for entry in result)
|
1614719
|
import numpy as np
import pandas as pd
class CellDischargeData:
"""
Battery cell data from discharge test.
"""
def __init__(self, path):
"""
Initialize with path to discharge data file.
Parameters
----------
path : str
Path to discharge data file.
Attributes
----------
time : vector
Time vector for battery test data [s]
current : vector
Current from battery during test [A]
voltage : vector
Voltage from battery during test [V]
data : vector
Data flags from battery test [-]
dt : vector
Time step [s]
"""
df = pd.read_csv(path)
self.time = df['Time(s)'].values
self.current = df['Current(A)'].values
self.voltage = df['Voltage(V)'].values
self.data = df['Data'].fillna(' ').values
self.ti = 0
self.tf = 0
def get_ids(self):
"""
Find indices in the data flags that contain the `S` flag. Start and stop
procedures in the experiment are marked by the `S` flag.
Returns
-------
ids : vector
Indices of start and stop points in data.
"""
ids = np.where(self.data == 'S')[0]
return ids
def get_idx(self):
"""
Find indices in discharge data that represent a single section.
Returns
-------
id0, id1, id2, id3 : tuple
Indices representing section of discharge data.
id0 = start of discharge
id1 = end of discharge
id2 = start of charge
id3 = end of charge
"""
ids = self.get_ids()
if max(abs(self.current)) > 35:
# 2c and 3c discharge tests
id0 = ids[3]
id1 = ids[4]
id2 = ids[5]
id3 = ids[6]
else:
# 1c discharge test
id0 = ids[2]
id1 = ids[3]
id2 = ids[4]
id3 = ids[5]
return id0, id1, id2, id3
@classmethod
def process(cls, path):
"""
Process the original discharge data for one section. This section of
data is used for model development.
"""
data = cls(path)
id0, id1, id2, id3 = data.get_idx()
data.ti = data.time[id0]
data.tf = data.time[id2]
data.current = data.current[id0:id2 + 1]
data.voltage = data.voltage[id0:id2 + 1]
data.time = data.time[id0:id2 + 1] - data.time[id0:id2 + 1].min()
return data
@classmethod
def process_discharge_only(cls, path):
"""
Process the original discharge data for just the discharge portion.
"""
data = cls(path)
id0, id1, _, _ = data.get_idx()
data.ti = data.time[id0]
data.tf = data.time[id1]
data.current = data.current[id0:id1 + 1]
data.voltage = data.voltage[id0:id1 + 1]
data.time = data.time[id0:id1 + 1] - data.time[id0:id1 + 1][0]
return data
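# Minimal usage sketch (the path below is hypothetical, for illustration only):
#   data = CellDischargeData.process('data/discharge_1c.csv')
#   print(data.ti, data.tf, len(data.time))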
|
1614749
|
import unittest
import pubsub
class TestPubSub(unittest.TestCase):
def test_subscribe(self):
sub = pubsub.subscribe('test')
pubsub.publish('test', 'hello world')
self.assertEqual(next(sub.listen())['data'], 'hello world')
def test_unsubscribe(self):
sub = pubsub.subscribe('test')
pubsub.publish('test', 'hello world 1')
sub.unsubscribe()
pubsub.publish('test', 'hello world 2')
msgs = list(sub.listen(block=False))
self.assertEqual(len(msgs), 1)
self.assertEqual(msgs[0]['data'], 'hello world 1')
if __name__ == '__main__':
unittest.main()
|
1614766
|
import densecap.util
def test_IoU_overlap():
box0 = (0, 0, 100, 100)
box1 = (50, 50, 100, 100)
box2 = (0, 50, 100, 100)
box3 = (50, 0, 100, 100)
assert densecap.util.iou(box0, box1) == 50 ** 2 / (2 * 100 ** 2 - 50 ** 2)
assert densecap.util.iou(box0, box2) == (50 * 100) / (2 * 100 ** 2 - 50 * 100)
assert densecap.util.iou(box0, box3) == (50 * 100) / (2 * 100 ** 2 - 50 * 100)
def test_IoU_no_overlap():
box1 = (0, 0, 100, 100)
box2 = (100, 100, 20, 20)
box3 = (0, 120, 40, 40)
assert densecap.util.iou(box1, box2) == 0.0
assert densecap.util.iou(box1, box3) == 0.0
def test_IoU_inside():
box1 = (0, 0, 100, 100)
box2 = (10, 10, 20, 20)
box3 = (0, 30, 40, 40)
assert densecap.util.iou(box1, box2) == 20 ** 2 / 100 ** 2
assert densecap.util.iou(box2, box1) == 20 ** 2 / 100 ** 2
assert densecap.util.iou(box3, box1) == 40 ** 2 / 100 ** 2
def test_IoU_with_self():
box1 = (0, 0, 10, 10)
assert densecap.util.iou(box1, box1) == 1.0
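# Reference sketch of the IoU these tests assume, with boxes given as (x, y, w, h);
# the actual densecap.util.iou implementation may differ:
#   def iou(a, b):
#       ax, ay, aw, ah = a
#       bx, by, bw, bh = b
#       iw = max(0, min(ax + aw, bx + bw) - max(ax, bx))
#       ih = max(0, min(ay + ah, by + bh) - max(ay, by))
#       inter = iw * ih
#       return inter / (aw * ah + bw * bh - inter)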
|
1614787
|
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
#import data
data = pd.read_csv('airline_tweets.csv')
print(data)
#show one column
print(data['text'])
#show two columns
print(data[['text','airline_sentiment']])
#show one record
print(data.loc[0,:])
#show some more records
print(data.loc[0:5,:])
#show one column of one record
print(data.loc[0,'text'])
#select columns
data = data[['tweet_id','text','airline_sentiment','airline']]
print(data)
#convert sentiment into numbers
def sentiment2int(sentiment):
if sentiment == 'positive':
return 1
elif sentiment == 'neutral':
return 0
elif sentiment == 'negative':
return -1
else:
return np.NaN
data['rating'] = data['airline_sentiment'].apply(sentiment2int)
print(data)
#alternatively, use encoder
encoder = LabelEncoder()
encoder.fit(data['airline_sentiment'])
data['encoded'] = encoder.transform(data['airline_sentiment'])
print(data)
#average sentiment of airlines
filter = data['airline'] == 'Virgin America'
virgin = data[filter]
print(virgin['rating'].mean())
#tfidf
vectorizer = TfidfVectorizer(min_df=3, stop_words='english',ngram_range=(1, 2))
vectorizer.fit(data['text'])
X = vectorizer.transform(data['text'])
#get labels
y = np.array(data['rating'])
print(X)
print(X.shape)
print(y)
print(y.shape)
#test train split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, stratify=y, random_state=1234)
print(X_train.shape)
print(X_test.shape)
#naive bayes
nb = MultinomialNB()
nb.fit(X_train,y_train)
print(nb.score(X_test,y_test))
nb_preds = nb.predict(X_test)
print(nb_preds)
#logistic regression
lr = LogisticRegression(penalty='l1', C=1, solver='liblinear')  # liblinear supports the l1 penalty
lr.fit(X_train,y_train)
print(lr.score(X_test,y_test))
lr_preds = lr.predict(X_test)
print(lr_preds)
#random forest
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train,y_train)
print(rf.score(X_test,y_test))
rf_preds = rf.predict(X_test)
print(rf_preds)
|
1614799
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import (
CONF_ID,
CONF_TEMPERATURE,
DEVICE_CLASS_TEMPERATURE,
ICON_BRIEFCASE_DOWNLOAD,
STATE_CLASS_MEASUREMENT,
UNIT_METER_PER_SECOND_SQUARED,
ICON_SCREEN_ROTATION,
UNIT_DEGREE_PER_SECOND,
UNIT_CELSIUS,
)
CODEOWNERS = ["@fabaff"]
DEPENDENCIES = ["i2c"]
CONF_ACCEL_X = "accel_x"
CONF_ACCEL_Y = "accel_y"
CONF_ACCEL_Z = "accel_z"
CONF_GYRO_X = "gyro_x"
CONF_GYRO_Y = "gyro_y"
CONF_GYRO_Z = "gyro_z"
mpu6886_ns = cg.esphome_ns.namespace("mpu6886")
MPU6886Component = mpu6886_ns.class_(
"MPU6886Component", cg.PollingComponent, i2c.I2CDevice
)
accel_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_METER_PER_SECOND_SQUARED,
icon=ICON_BRIEFCASE_DOWNLOAD,
accuracy_decimals=2,
state_class=STATE_CLASS_MEASUREMENT,
)
gyro_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_DEGREE_PER_SECOND,
icon=ICON_SCREEN_ROTATION,
accuracy_decimals=2,
state_class=STATE_CLASS_MEASUREMENT,
)
temperature_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_CELSIUS,
accuracy_decimals=1,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(MPU6886Component),
cv.Optional(CONF_ACCEL_X): accel_schema,
cv.Optional(CONF_ACCEL_Y): accel_schema,
cv.Optional(CONF_ACCEL_Z): accel_schema,
cv.Optional(CONF_GYRO_X): gyro_schema,
cv.Optional(CONF_GYRO_Y): gyro_schema,
cv.Optional(CONF_GYRO_Z): gyro_schema,
cv.Optional(CONF_TEMPERATURE): temperature_schema,
}
)
.extend(cv.polling_component_schema("60s"))
.extend(i2c.i2c_device_schema(0x68))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
for d in ["x", "y", "z"]:
accel_key = f"accel_{d}"
if accel_key in config:
sens = await sensor.new_sensor(config[accel_key])
cg.add(getattr(var, f"set_accel_{d}_sensor")(sens))
accel_key = f"gyro_{d}"
if accel_key in config:
sens = await sensor.new_sensor(config[accel_key])
cg.add(getattr(var, f"set_gyro_{d}_sensor")(sens))
if CONF_TEMPERATURE in config:
sens = await sensor.new_sensor(config[CONF_TEMPERATURE])
cg.add(var.set_temperature_sensor(sens))
|
1614804
|
from .organizations import ActionBatchOrganizations
from .networks import ActionBatchNetworks
from .devices import ActionBatchDevices
from .appliance import ActionBatchAppliance
from .camera import ActionBatchCamera
from .cellularGateway import ActionBatchCellularGateway
from .insight import ActionBatchInsight
from .sm import ActionBatchSm
from .switch import ActionBatchSwitch
from .wireless import ActionBatchWireless
# Batch class
class Batch:
def __init__(self):
# Action Batch helper API endpoints by section
self.organizations = ActionBatchOrganizations()
self.networks = ActionBatchNetworks()
self.devices = ActionBatchDevices()
self.appliance = ActionBatchAppliance()
self.camera = ActionBatchCamera()
self.cellularGateway = ActionBatchCellularGateway()
self.insight = ActionBatchInsight()
self.sm = ActionBatchSm()
self.switch = ActionBatchSwitch()
self.wireless = ActionBatchWireless()
|
1614813
|
from retrieval.elastic_reranking_retriever import ElasticRerankingRetriever
import click
@click.command()
@click.option('--delete/--no-delete', type=bool, help='')
@click.option('--load/--no-load', type=bool, help='')
@click.option('--search', default='', type=str, help='')
def run(delete, load, search):
ret = ElasticRerankingRetriever('tcp://localhost:8786')
if load:
ret.build_index('/ssd/ankur/contracts/c_pdfs.parquet', '/ssd/ankur/contracts/c_sections.parquet')
if delete:
ret.delete(dataset_id='o')
if search != '':
results = ret.search(search)[:10]
for ind, result in enumerate(results):
c = result['context']
print(f'{ind}. Score: {result["score"]} Contexts: {c[:150] if len(c) > 150 else c}')  # truncate long contexts
if __name__ == '__main__':
run()
|
1614827
|
from conans import ConanFile, CMake, tools
import os
class OverpeekEngineConan(ConanFile):
name = "overpeek-engine"
version = "0.1"
license = "MIT"
author = "<NAME>"
url = "https://github.com/Overpeek/overpeek-engine"
description = "A minimal 2D game engine/library."
topics = ("game-engine", "cpp17", "opengl")
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "oe_build_tests": [
True, False], "oe_build_mode": ["opengl", "shaderc", "vulkan"]}
generators = "cmake"
requires = "glad/0.1.33", "box2d/2.4.0", "libzip/1.7.3", "enet/1.3.16", "ms-gsl/3.1.0", "entt/3.5.2", "fmt/7.0.3", "spdlog/1.8.0", "stb/20200203", "minimp3/20200304", "glm/0.9.9.5", "gcem/1.12.0", "nlohmann_json/3.9.1"
default_options = {"shared": False, "oe_build_tests": True, "oe_build_mode": "opengl",
"glad:gl_version": "4.6", "libzip:crypto": False, "fmt:header_only": True, "spdlog:header_only": True}
keep_imports = True
exports_sources = "*"
def requirements(self):
# Or add a new requirement!
if self.options.oe_build_mode == "shaderc" or self.options.oe_build_mode == "vulkan":
self.requires("shaderc/2019.0")
if self.options.oe_build_mode == "vulkan":
self.requires("vulkan-headers/1.1.101.0")
if self.settings.os != "Emscripten":
self.requires("glfw/3.3.2")
def source(self):
# This small hack might be useful to guarantee proper /MT /MD linkage
# in MSVC if the packaged project doesn't have variables to set it
# properly
tools.replace_in_file("CMakeLists.txt",
'include(${CMAKE_CURRENT_BINARY_DIR}/conanbuildinfo.cmake)',
'include(conanbuildinfo.cmake)')
def build(self):
cmake = CMake(self)
defs = {"OE_BUILD_MODE": 0, "OE_BUILD_TESTS": True}
if self.options.oe_build_mode == "vulkan":
defs["OE_BUILD_MODE"] = 2
elif self.options.oe_build_mode == "shaderc":
defs["OE_BUILD_MODE"] = 1
defs["OE_BUILD_TESTS"] = self.options.oe_build_tests
cmake.configure(defs=defs)
cmake.build()
if tools.get_env("OE_RUN_TESTS", False):
self.run(
"cd tests && ctest -j{} --output-on-failure".format(tools.cpu_count()))
def imports(self):
""" self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dll", dst="tests", keep_path=False)"""
self.copy("*/license*", dst="licenses", folder=True, ignore_case=True)
def package(self):
self.copy("*.hpp", dst="include/engine", src="src/engine")
self.copy("bin/*", dst="bin", keep_path=False)
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["engine"]
|
1614891
|
from solver import *
from armatures import *
from models import *
import numpy as np
import config
np.random.seed(20160923)
pose_glb = np.zeros([1, 3]) # global rotation
########################## mano settings #########################
n_pose = 12 # number of pose pca coefficients, in mano the maximum is 45
n_shape = 10 # number of shape pca coefficients
pose_pca = np.random.normal(size=n_pose)
shape = np.random.normal(size=n_shape)
mesh = KinematicModel(config.MANO_MODEL_PATH, MANOArmature, scale=1000)
########################## smpl settings ##########################
# note that in smpl and smpl-h no pca for pose is provided
# therefore in the model we fake an identity matrix as the pca coefficients
# to make the code compatible
# n_pose = 23 * 3 # degrees of freedom, (n_joints - 1) * 3
# n_shape = 10
# pose_pca = np.random.uniform(-0.2, 0.2, size=n_pose)
# shape = np.random.normal(size=n_shape)
# mesh = KinematicModel(config.SMPL_MODEL_PATH, SMPLArmature, scale=10)
########################## smpl-h settings ##########################
# n_pose = 51 * 3
# n_shape = 16
# pose_pca = np.random.uniform(-0.2, 0.2, size=n_pose)
# shape = np.random.normal(size=n_shape)
# mesh = KinematicModel(config.SMPLH_MODEL_PATH, SMPLHArmature, scale=10)
########################## solving example ############################
wrapper = KinematicPCAWrapper(mesh, n_pose=n_pose)
solver = Solver(verbose=True)
_, keypoints = \
mesh.set_params(pose_pca=pose_pca, pose_glb=pose_glb, shape=shape)
params_est = solver.solve(wrapper, keypoints)
shape_est, pose_pca_est, pose_glb_est = wrapper.decode(params_est)
print('----------------------------------------------------------------------')
print('ground truth parameters')
print('pose pca coefficients:', pose_pca)
print('pose global rotation:', pose_glb)
print('shape: pca coefficients:', shape)
print('----------------------------------------------------------------------')
print('estimated parameters')
print('pose pca coefficients:', pose_pca_est)
print('pose global rotation:', pose_glb_est)
print('shape: pca coefficients:', shape_est)
mesh.set_params(pose_pca=pose_pca)
mesh.save_obj('./gt.obj')
mesh.set_params(pose_pca=pose_pca_est)
mesh.save_obj('./est.obj')
print('ground truth and estimated meshes are saved into gt.obj and est.obj')
|
1614927
|
import unittest
from tda.orders.common import *
from tda.orders.equities import *
from .utils import has_diff, no_duplicates
import imp
from unittest.mock import patch
class EquityOrderBuilderLegacy(unittest.TestCase):
def test_import_EquityOrderBuilder(self):
import sys
assert sys.version_info[0] == 3
if sys.version_info[1] >= 7:
with self.assertRaisesRegex(
ImportError, 'EquityOrderBuilder has been deleted'):
from tda.orders import EquityOrderBuilder
def test_import_EquityOrderBuilder_pre_3_7(self):
import sys
assert sys.version_info[0] == 3
if sys.version_info[1] < 7:
from tda import orders
imp.reload(orders)
from tda.orders import EquityOrderBuilder
with self.assertRaisesRegex(NotImplementedError,
'EquityOrderBuilder has been deleted'):
EquityOrderBuilder('args')
def test_other_import(self):
with self.assertRaisesRegex(ImportError, 'bogus'):
from tda.orders import bogus
def test_attribute_access(self):
with self.assertRaisesRegex(AttributeError, 'bogus'):
import tda
print(tda.orders.bogus)
class BuilderTemplates(unittest.TestCase):
def test_equity_buy_market(self):
self.assertFalse(has_diff({
'orderType': 'MARKET',
'session': 'NORMAL',
'duration': 'DAY',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'BUY',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_buy_market('GOOG', 10).build()))
def test_equity_buy_limit(self):
self.assertFalse(has_diff({
'orderType': 'LIMIT',
'session': 'NORMAL',
'duration': 'DAY',
'price': '199.99',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'BUY',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_buy_limit('GOOG', 10, 199.99).build()))
def test_equity_sell_market(self):
self.assertFalse(has_diff({
'orderType': 'MARKET',
'session': 'NORMAL',
'duration': 'DAY',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'SELL',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_sell_market('GOOG', 10).build()))
def test_equity_sell_limit(self):
self.assertFalse(has_diff({
'orderType': 'LIMIT',
'session': 'NORMAL',
'duration': 'DAY',
'price': '199.99',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'SELL',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_sell_limit('GOOG', 10, 199.99).build()))
def test_equity_sell_short_market(self):
self.assertFalse(has_diff({
'orderType': 'MARKET',
'session': 'NORMAL',
'duration': 'DAY',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'SELL_SHORT',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_sell_short_market('GOOG', 10).build()))
def test_equity_sell_short_limit(self):
self.assertFalse(has_diff({
'orderType': 'LIMIT',
'session': 'NORMAL',
'duration': 'DAY',
'price': '199.99',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'SELL_SHORT',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_sell_short_limit('GOOG', 10, 199.99).build()))
def test_equity_buy_to_cover_market(self):
self.assertFalse(has_diff({
'orderType': 'MARKET',
'session': 'NORMAL',
'duration': 'DAY',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'BUY_TO_COVER',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_buy_to_cover_market('GOOG', 10).build()))
def test_equity_buy_to_cover_limit(self):
self.assertFalse(has_diff({
'orderType': 'LIMIT',
'session': 'NORMAL',
'duration': 'DAY',
'price': '199.99',
'orderStrategyType': 'SINGLE',
'orderLegCollection': [{
'instruction': 'BUY_TO_COVER',
'quantity': 10,
'instrument': {
'symbol': 'GOOG',
'assetType': 'EQUITY',
}
}]
}, equity_buy_to_cover_limit('GOOG', 10, 199.99).build()))
|
1614934
|
import re
import logging
from python_terraform import Terraform, IsFlagged
from terrestrial.errors import TerrestrialFatalError
from .tfconfig import TerraformConfig
KWARGS_MAPPING = {
'plan': {
'input': False
},
'apply': {
'input': False,
'auto_approve': True
},
'destroy': {
'input': False,
'auto_approve': True
}
}
class TerraformWorker:
def __init__(self, config_path, workspace, isolate=True, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.isolate = isolate
self.config_path = config_path
self.tf = Terraform(working_dir=self.config_path)
self.workspace = workspace
def __getattr__(self, item):
def wrapper(*args, **kwargs):
kwargs.update({'no_color': IsFlagged})
if item in KWARGS_MAPPING:
kwargs.update(KWARGS_MAPPING[item])
rc, stdout, stderr = self.tf.cmd(
item, *args, **kwargs)
return rc, stdout.strip(), stderr.strip()
return wrapper
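# Descriptive note: unknown attributes become terraform subcommands, e.g.
# worker.plan() or worker.apply() dispatch to `terraform plan` / `terraform apply`
# through python_terraform, with the kwargs mapped above merged in.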
@property
def config_path(self):
return self._config_path
@config_path.setter
def config_path(self, c):
self._config = TerraformConfig(c)
if self.isolate:
self._config_path = self._config.clone()
else:
self._config_path = self._config.path
@property
def workspace(self):
return self._workspace
@workspace.setter
def workspace(self, w):
expr = re.compile(r'^[a-z0-9\-_.~]{1,255}$', re.IGNORECASE)
if not re.match(expr, w):
raise TerrestrialFatalError(
'Workspace name must contain only URL safe characters.')
rc, stdout, stderr = self.tf.cmd(
'workspace', 'new', w, '-no-color')
if rc != 0:
if 'already exists' in stderr:
rc, stdout, stderr = self.tf.cmd(
'workspace', 'select', w, '-no-color')
if rc != 0:
raise TerrestrialFatalError(
f'Failed to set workspace to {w}')
self.logger.debug(
f'Switched workspace to {w}')
self._workspace = w
def __enter__(self):
return self
def __exit__(self, exc_t, exc_v, traceback):
self._config.close()
|
1614984
|
import os
from os import listdir
from os.path import isfile, join
import logging
import numpy as np
from ase import Atoms
import mff
from mff import models, calculators, utility
from mff import configurations as cfg
def get_potential(confs):
pot = 0
for conf in confs:
el1 = conf[:, 3]
el2 = conf[:, 4]
dist = np.sum(conf[:, :3]**2, axis=1)**0.5
pot += np.sum(el1**0.5*el2**0.5*pot_profile(dist))
return pot
def pot_profile(dist):
return ((dist-1)**2 - 0.5)*np.exp(-dist)
def force_profile(dist):
a = (dist-1)**2 - 0.5
da = 2*(dist-1)
b = np.exp(-dist)
db = -np.exp(-dist)
return a*db+b*da
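# Descriptive note: force_profile is the radial derivative of pot_profile,
# d/dr [((r-1)**2 - 0.5) * exp(-r)], expanded via the product rule
# with a = (r-1)**2 - 0.5 and b = exp(-r), i.e. a*db + b*da.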
def get_potentials(many_confs):
pots = np.zeros(len(many_confs))
for i, confs in enumerate(many_confs):
pots[i] = get_potential(confs)
return pots
def get_force(conf):
el1 = conf[:, 3]
el2 = conf[:, 4]
dist = np.sum(conf[:, :3]**2, axis=1)**0.5
vers = conf[:, :3]/dist[:, None]
force = np.sum(vers * (el1[:, None]**0.5*el2[:, None]
** 0.5*(force_profile(dist[:, None]))), axis=0)
return force
def get_forces(many_confs):
forces = np.zeros((len(many_confs), 3))
for i, confs in enumerate(many_confs):
forces[i] = get_force(confs)
return forces
def generate_confs(n, elements, r_cut):
phi = np.random.uniform(0, 2*np.pi, size=n*2)
costheta = np.random.uniform(-1, 1, size=n*2)
u = np.random.uniform(0, 1, size=n*2)
theta = np.arccos(costheta)
r = r_cut * u**(1/3)
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
xyz = np.vstack((x, y, z)).T
glob_confs = []
loc_confs = []
for i in range(n):
conf1 = np.zeros((2, 5))
conf2 = np.zeros((2, 5))
conf3 = np.zeros((2, 5))
conf1[0, :3] = xyz[2*i]
conf1[1, :3] = xyz[2*i+1]
conf2[0, :3] = -xyz[2*i]
conf2[1, :3] = xyz[2*i+1] - xyz[2*i]
conf3[0, :3] = xyz[2*i] - xyz[2*i+1]
conf3[1, :3] = -xyz[2*i+1]
if len(elements) == 1:
conf1[:, 3] = elements
conf1[:, 4] = elements
conf2[:, 3] = elements
conf2[:, 4] = elements
conf3[:, 3] = elements
conf3[:, 4] = elements
elif len(elements) >= 2:
a, b, c = np.random.choice(elements), np.random.choice(
elements), np.random.choice(elements)
conf1[:, 3] = a
conf1[0, 4] = b
conf1[1, 4] = c
conf2[:, 3] = b
conf2[0, 4] = a
conf2[1, 4] = c
conf3[:, 3] = c
conf3[0, 4] = b
conf3[1, 4] = a
this_conf = np.array([conf1, conf2, conf3])
glob_confs.append(this_conf)
loc_confs.append(conf1)
loc_confs.append(conf2)
loc_confs.append(conf3)
loc_confs = np.array(loc_confs)
glob_confs = np.array(glob_confs)
return (glob_confs, loc_confs)
def fit_test(m, loc_confs, forces, glob_confs, energies, ntr, ntest, elements, fit_type, r_cut, ncores = 1):
if fit_type == 'force':
m.fit(loc_confs[:ntr], forces[:ntr], ncores=ncores)
elif fit_type == 'energy':
m.fit_energy(glob_confs[:ntr], energies[:ntr], ncores=ncores)
elif fit_type == 'force_and_energy':
m.fit_force_and_energy(
loc_confs[:ntr], forces[:ntr], glob_confs[:ntr], energies[:ntr], ncores=ncores)
pred_forces = m.predict(loc_confs[-ntest:], ncores=ncores)
pred_energies = m.predict_energy(glob_confs[-ntest:], ncores=ncores)
# print("MAEF: %.4f eV/A " %(np.mean(np.sum(forces[-ntest:] - pred_forces, axis = 1)**2)**0.5))
# print("MAEE: %.4f eV" %( np.mean(abs(energies[-ntest:] - pred_energies))))
mtype = str(type(m)).split('.')[-1].split("'")[0]
if mtype == "TwoBodySingleSpeciesModel" or mtype == "ThreeBodySingleSpeciesModel" or mtype == "TwoBodyManySpeciesModel" or mtype == "ThreeBodyManySpeciesModel":
m.build_grid(0.0, 5, ncores=2)
if mtype == "TwoBodySingleSpeciesModel":
calc = calculators.TwoBodySingleSpecies(r_cut*2, m.grid)
elif mtype == "ThreeBodySingleSpeciesModel":
calc = calculators.ThreeBodySingleSpecies(r_cut*2, m.grid)
elif mtype == "TwoBodyManySpeciesModel":
calc = calculators.TwoBodyManySpecies(r_cut*2, elements, m.grid)
elif mtype == "ThreeBodyManySpeciesModel":
calc = calculators.ThreeBodyManySpecies(r_cut*2, elements, m.grid)
elif mtype == "CombinedSingleSpeciesModel" or mtype == "CombinedManySpeciesModel":
m.build_grid(0.0, 5, 5, ncores=2)
if mtype == "CombinedSingleSpeciesModel":
calc = calculators.CombinedSingleSpecies(
r_cut*2, m.grid_2b, m.grid_3b)
elif mtype == "CombinedManySpeciesModel":
calc = calculators.CombinedManySpecies(
r_cut*2, elements, m.grid_2b, m.grid_3b)
elif mtype == "EamSingleSpeciesModel" or mtype == "EamManySpeciesModel":
m.build_grid(5, ncores=2)
if mtype == "EamSingleSpeciesModel":
calc = calculators.EamSingleSpecies(
r_cut*2, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3])
elif mtype == "EamManySpeciesModel":
calc = calculators.EamManySpecies(
r_cut*2, elements, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3])
elif mtype == "TwoThreeEamSingleSpeciesModel" or mtype == "TwoThreeEamManySpeciesModel":
m.build_grid(0, 5, 5, 5, ncores=2)
if mtype == "TwoThreeEamSingleSpeciesModel":
calc = calculators.TwoThreeEamSingleSpecies(
r_cut*2, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
elif mtype == "TwoThreeEamManySpeciesModel":
calc = calculators.TwoThreeEamManySpecies(
r_cut*2, elements, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
map_forces = np.zeros((len(pred_forces), 3))
map_energies = np.zeros_like(pred_energies)
for i in np.arange(ntest):
coords = np.vstack(([0, 0, 0], glob_confs[-ntest:][i][0, 0:3, 0:3]))
atoms = Atoms(positions=coords + 20)
atoms.set_atomic_numbers([glob_confs[-ntest:][i][0, 0, 3],
glob_confs[-ntest:][i][0, 0, 4], glob_confs[-ntest:][i][0, 1, 4]])
atoms.set_cell([100, 100, 100])
atoms.set_calculator(calc)
map_energies[i] = atoms.get_potential_energy()
for i in np.arange(ntest):
coords = np.vstack(([0, 0, 0], loc_confs[-ntest:][i][0:3, 0:3]))
atoms = Atoms(positions=coords + 20)
atoms.set_atomic_numbers(
[loc_confs[-ntest:][i][0, 3], loc_confs[-ntest:][i][0, 4], loc_confs[-ntest:][i][1, 4]])
atoms.set_cell([100, 100, 100])
atoms.set_calculator(calc)
map_forces[i] = atoms.get_forces()[0, :]
error_f = np.sum((pred_forces - map_forces)**2, axis=1)**0.5
error_e = pred_energies - map_energies
# print("Force Error: %.4f eV/A Energy Error: %.4f eV " %(np.mean(error_f), np.mean(error_e)))
m.save('MODELS/')
class Tests():
def __init__(self, elements, noise, sigma, r_cut, theta, ntr_f,
ntr_e, ntest, alpha, r0, ncores):
self.elements = elements
self.noise = noise
self.sigma = sigma
self.r_cut = r_cut
self.theta = theta
self.ntr_f = ntr_f
self.ntr_e = ntr_e
self.ntest = ntest
self.alpha = alpha
self.r0 = r0
self.ncores = ncores
self.glob_confs, self.loc_confs = generate_confs(self.ntr_f+self.ntr_e+self.ntest,
self.elements, self.r_cut)
self.forces = get_forces(self.loc_confs)
self.energies = get_potentials(self.glob_confs)
def test_2_body_single(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.TwoBodySingleSpeciesModel(
element=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2,
theta=self.theta, rep_sig=0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 2-body Single %s fit" % (fit_type))
def test_3_body_single(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.ThreeBodySingleSpeciesModel(
element=self.elements, noise=self.noise, sigma=self.sigma,
r_cut=self.r_cut*2, theta=self.theta)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 3-body Single %s fit" % (fit_type))
def test_combined_body_single(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.CombinedSingleSpeciesModel(
element=self.elements, noise=self.noise, sigma_2b=self.sigma, sigma_3b=self.sigma, r_cut=self.r_cut*2, theta_2b=self.theta, theta_3b=self.theta, rep_sig=0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in combined Single %s fit" % (fit_type))
def test_eam_single(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.EamSingleSpeciesModel(
element=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in Eam Single %s fit" % (fit_type))
def test_23eam_single(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.TwoThreeEamSingleSpeciesModel(
self.elements, self.r_cut*2, self.sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self.r0, self.noise, 0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 23 Eam Single %s fit" % (fit_type))
def test_2_body_many(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.TwoBodyManySpeciesModel(
elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta, rep_sig=0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 2-body Many %s fit" % (fit_type))
def test_3_body_many(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.ThreeBodyManySpeciesModel(
elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 3-body Many %s fit" % (fit_type))
def test_combined_body_many(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.CombinedManySpeciesModel(elements=self.elements, noise=self.noise, sigma_2b=self.sigma,
sigma_3b=self.sigma, r_cut=self.r_cut*2, theta_2b=self.theta, theta_3b=self.theta, rep_sig=0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in combined Many %s fit" % (fit_type))
def test_eam_many(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.EamManySpeciesModel(
elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in eam Many %s fit" % (fit_type))
def test_23eam_many(self):
for fit_type in ("force", "energy", "force_and_energy"):
m = models.TwoThreeEamManySpeciesModel(
self.elements, self.r_cut*2, self.sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self.r0, self.noise, 0)
try:
fit_test(m, self.loc_confs, self.forces, self.glob_confs,
self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores)
except:
print("ERROR in 23 eam Many %s fit" % (fit_type))
def test_load(self):
onlyfiles = [f for f in listdir("MODELS") if isfile(join("MODELS", f))]
for file in onlyfiles:
if file.endswith(".json"):
try:
m2 = utility.load_model("MODELS/" + file)
except:
print("ERROR: %s not loaded" % (file))
if __name__ == '__main__':
# GP Parameters
sigma = 1.0 # Angstrom - typical value 0.2-0.6
noise = .001 # Number - Typical values 0.01 - 0.0001
theta = 0.1 # Cutoff decay lengthscale in Angstrom - Typical value r_cut/5 - r_cut/10
r_cut = 3.0
ntr_f = 10
ntr_e = 10
ntest = 10
elements = [1]
ncores = 2
alpha = 1
r0 = 10
test = Tests(elements, noise, sigma, r_cut, theta, ntr_f,
ntr_e, ntest, alpha, r0, ncores)
test.test_2_body_single()
test.test_3_body_single()
test.test_combined_body_single()
test.test_eam_single()
test.test_23eam_single()
test.test_2_body_many()
test.test_3_body_many()
test.test_combined_body_many()
test.test_eam_many()
test.test_23eam_many()
test.test_load()
|
1614985
|
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import inspect
import re
import os
import collections
import pickle
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, cvt_rgb=True):
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1 and cvt_rgb:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
def tensor2vec(vector_tensor):
numpy_vec = vector_tensor.data.cpu().numpy()
if numpy_vec.ndim == 4:
return numpy_vec[:, :, 0, 0]
else:
return numpy_vec
def pickle_load(file_name):
data = None
with open(file_name, 'rb') as f:
data = pickle.load(f)
return data
def pickle_save(file_name, data):
with open(file_name, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def interp_z(z0, z1, num_frames, interp_mode='linear'):
zs = []
if interp_mode == 'linear':
for n in range(num_frames):
ratio = n / float(num_frames - 1)
z_t = (1 - ratio) * z0 + ratio * z1
zs.append(z_t[np.newaxis, :])
zs = np.concatenate(zs, axis=0).astype(np.float32)
if interp_mode == 'slerp':
# st()
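# Spherical linear interpolation (descriptive note): with omega the angle between the
# normalised endpoints, z_t = sin((1-t)*omega)/sin(omega) * z0 + sin(t*omega)/sin(omega) * z1,
# falling back to linear interpolation when sin(omega) is close to zero.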
z0_n = z0 / (np.linalg.norm(z0)+1e-10)
z1_n = z1 / (np.linalg.norm(z1)+1e-10)
omega = np.arccos(np.dot(z0_n, z1_n))
sin_omega = np.sin(omega)
if sin_omega < 1e-10 and sin_omega > -1e-10:
zs = interp_z(z0, z1, num_frames, interp_mode='linear')
else:
for n in range(num_frames):
ratio = n / float(num_frames - 1)
z_t = np.sin((1 - ratio) * omega) / sin_omega * z0 + np.sin(ratio * omega) / sin_omega * z1
zs.append(z_t[np.newaxis, :])
zs = np.concatenate(zs, axis=0).astype(np.float32)
return zs
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path, 'JPEG', quality=100)
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def normalize_tensor(in_feat, eps=1e-10):
norm_factor = torch.sqrt(torch.sum(in_feat**2, dim=1)).repeat(1, in_feat.size()[1], 1, 1)
return in_feat / (norm_factor+eps)
def cos_sim(in0, in1):
in0_norm = normalize_tensor(in0)
in1_norm = normalize_tensor(in1)
return torch.mean(torch.sum(in0_norm*in1_norm, dim=1))
|
1615045
|
import random
import string
import json
import requests
from . import logger
from ..engine.decorators import Plugin
from ..engine.download import DownloadBase
@Plugin.download(regexp=r'(?:https?://)?(?:(?:www|m|live)\.)?acfun\.cn')
class Acfun(DownloadBase):
def __init__(self, fname, url, suffix='flv'):
super().__init__(fname, url, suffix)
def check_stream(self):
if len(self.url.split("acfun.cn/live/")) < 2:
logger.debug("直播间地址错误")
return False
rid = self.url.split("acfun.cn/live/")[1]
did = "web_"+get_random_name(16)
headers1 = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.67"
}
cookies1 = dict(_did=did)
data1 = {'sid': 'acfun.api.visitor'}
r1 = requests.post("https://id.app.acfun.cn/rest/app/visitor/login",
headers=headers1, data=data1, cookies=cookies1)
userId = r1.json()['userId']
visitorSt = r1.json()['acfun.api.visitor_st']
params = {
"subBiz": "mainApp",
"kpn": "ACFUN_APP",
"kpf": "PC_WEB",
"userId": str(userId),
"did": did,
"acfun.api.visitor_st": visitorSt
}
data2 = {'authorId': rid, 'pullStreamType': 'FLV'}
headers2 = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.67",
"Referer": "https://live.acfun.cn/"
}
r2 = requests.post("https://api.kuaishouzt.com/rest/zt/live/web/startPlay",
headers=headers2, data=data2, params=params)
if r2.json()['result'] != 1:
logger.debug(r2.json()['error_msg'])
return False
d = r2.json()['data']['videoPlayRes']
e = json.loads(d)['liveAdaptiveManifest'][0]['adaptationSet']['representation']
self.raw_stream_url = e[-1]['url']
return True
def get_random_name(l):
return random.choice(string.ascii_lowercase) + \
''.join(random.sample(string.ascii_letters + string.digits, l - 1))
|
1615073
|
import timeit, functools
# dist_test is not called below; it appears to be a leftover from an earlier timeit setup.
def dist_test():
pp_sketchlib.queryDatabase("listeria", "listeria", names, names, kmers, 1)
setup = """
import sys
sys.path.insert(0, "build/lib.macosx-10.9-x86_64-3.7")
import pp_sketchlib
"""
if __name__ == '__main__':
import numpy as np
import sys
sys.path.insert(0, "build/lib.macosx-10.9-x86_64-3.7")
import pp_sketchlib
#from __main__ import dist_test
kmers = np.arange(15, 30, 3)
names = []
sequences = []
with open("rfiles.txt", 'r') as refFile:
for refLine in refFile:
refFields = refLine.rstrip().split("\t")
names.append(refFields[0])
sequences.append(list(refFields[1:]))
t = timeit.Timer(functools.partial(pp_sketchlib.queryDatabase, "listeria", "listeria", names, names, kmers, 1), setup=setup)
print(t.timeit(100))
|
1615079
|
def test(name, input0, input1, input2, output0, input0_data, input1_data, input2_data, output_data):
model = Model().Operation("SELECT_V2_EX", input0, input1, input2).To(output0)
example = Example({
input0: input0_data,
input1: input1_data,
input2: input2_data,
output0: output_data,
}, model=model, name=name)
test(
name="float",
input0=Input("input0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
input1=Input("input1", "TENSOR_FLOAT32", "{1, 1, 1, 4}"),
input2=Input("input2", "TENSOR_FLOAT32", "{1, 1, 1, 4}"),
output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 1, 4}"),
input0_data=[True, False, True, False],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6, 7, 8],
output_data=[1, 6, 3, 8],
)
test(
name="broadcast_1d_single_value",
input0=Input("input0", "TENSOR_BOOL8", "{1}"),
input1=Input("input1", "TENSOR_FLOAT32", "{1, 2, 2, 1}"),
input2=Input("input2", "TENSOR_FLOAT32", "{1, 2, 2, 1}"),
output0=Output("output0", "TENSOR_FLOAT32", "{1, 2, 2,1 }"),
input0_data=[False],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6, 7, 8],
output_data=[5, 6, 7, 8],
)
test(
name="broadcast_less_4d",
input0=Input("input0", "TENSOR_BOOL8", "{1, 2}"),
input1=Input("input1", "TENSOR_FLOAT32", "{1, 2, 2}"),
input2=Input("input2", "TENSOR_FLOAT32", "{1, 2, 2}"),
output0=Output("output0", "TENSOR_FLOAT32", "{1, 2, 2}"),
input0_data=[False, True],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6, 7, 8],
output_data=[5, 2, 7, 4],
)
test(
name="broadcast_2d_one",
input0=Input("input0", "TENSOR_BOOL8", "{1, 1}"),
input1=Input("input1", "TENSOR_FLOAT32", "{1, 1, 2, 2}"),
input2=Input("input2", "TENSOR_FLOAT32", "{1, 1, 2, 2}"),
output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 2, 2}"),
input0_data=[False],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6, 7, 8],
output_data=[5, 6, 7, 8],
)
test(
name="broadcast_2d_two",
input0=Input("input0", "TENSOR_BOOL8", "{1, 2}"),
input1=Input("input1", "TENSOR_FLOAT32", "{1, 2, 2}"),
input2=Input("input2", "TENSOR_FLOAT32", "{1, 2, 1}"),
output0=Output("output0", "TENSOR_FLOAT32", "{1, 2, 2}"),
input0_data=[False, True],
input1_data=[1, 2, 3, 4],
input2_data=[5, 6], # 5 5 6 6
output_data=[5, 2, 6, 4],
)
|
1615090
|
import sys, re, os
file = sys.argv[1]
ver = sys.argv[2]
pattern = re.compile("(SikuliVersion = )\".*\"(;)")
#print file, ver
f = open(file, 'r')
output = open(file+".tmp", 'w')
for line in f:
output.write(pattern.sub(r'\1"'+ver+r'"\2', line))
output.close()
f.close()
os.remove(file)
os.rename(file+".tmp", file)
|
1615113
|
import numpy as np
import matplotlib.pyplot as plt
img = plt.imread('../data/elephant.png')
print(img.shape, img.dtype)
# (200, 300, 3) dtype('float32')
plt.imshow(img)
plt.savefig('plot.png')
plt.show()
plt.imsave('red_elephant.png', img[:,:,0], cmap=plt.cm.gray)
# This saved only one channel (of RGB)
plt.imshow(plt.imread('red_elephant.png'))
plt.show()
# Other libraries:
from scipy.misc import imsave  # removed in SciPy >= 1.2; imageio.imwrite is the usual replacement
imsave('tiny_elephant.png', img[::6,::6])
plt.imshow(plt.imread('tiny_elephant.png'), interpolation='nearest')
plt.show()
|
1615137
|
import argparse
import codecs
import csv
import datetime
import errno
import importlib
import json
import logging
import os
import shutil
import subprocess
import sys
import traceback
from functools import singledispatch
from pathlib import Path
from typing import (
Any,
Iterable,
List,
Tuple,
Union,
cast,
)
from types import ModuleType
from urllib.error import URLError
import publicsuffix
import requests
import strict_rfc3339
MANDATORY_SCANNER_PROPERTIES = (
"headers",
"to_rows"
)
# global in-memory cache
suffix_list = None
# Time Conveniences #
# Now, in UTC, in seconds (with decimal microseconds).
def local_now() -> float:
return datetime.datetime.now().timestamp()
def format_datetime(obj) -> Union[str, None]:
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, str):
return obj
else:
return None
# Cut off floating point errors, always output duration down to
# microseconds.
def just_microseconds(duration: float) -> str:
if duration is None:
return None
return "%.6f" % duration
# RFC 3339 timestamp for a given UTC time.
# seconds can be a float, down to microseconds.
# A given time needs to be passed in *as* UTC already.
def utc_timestamp(seconds: Union[float, int]) -> Union[str, None]:
if not seconds:
return None
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(seconds)
# /Time Conveniences #
# Filesystem Conveniences #
# mkdir -p in python, from:
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path: str) -> None:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
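# Note: on Python >= 3.2 this is equivalent to os.makedirs(path, exist_ok=True).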
def read(source):
with open(source) as f:
contents = f.read()
return contents
def write(content: Union[bytes, str], destination: str,
binary: bool=False) -> None:
mkdir_p(os.path.dirname(destination))
if binary:
binary_content = cast(bytes, content) # mypy wrangling
with open(destination, "bw") as fb:
fb.write(binary_content)
else:
string_content = cast(str, content) # mypy wrangling
with open(destination, "w", encoding="utf-8") as fs:
fs.write(string_content)
# /Filesystem Conveniences #
# Error Conveniences #
def format_last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
# Error Conveniences #
# Command Line Conveniences #
def scan(command: List[str], env: dict=None,
allowed_return_codes: list=[]) -> Union[str, None]:
try:
response = subprocess.check_output(
command,
stderr=subprocess.STDOUT,
shell=False, env=env
)
return str(response, encoding='UTF-8')
except subprocess.CalledProcessError as exc:
if exc.returncode in allowed_return_codes:
return str(exc.stdout, encoding='UTF-8')
else:
logging.warning("Error running %s." % (str(command)))
logging.warning("Error running %s." % (str(exc.output)))
logging.warning(format_last_exception())
return None
# test if a command exists, don't print output
def try_command(command):
try:
subprocess.check_call(["which", command], shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return True
except subprocess.CalledProcessError:
logging.warning(format_last_exception())
logging.warning("No command found: %s" % (str(command)))
return False
# /Command Line Conveniences #
# JSON Conveniences #
# Format datetimes, sort keys, pretty-print.
def json_for(object: object) -> str:
return json.dumps(object, sort_keys=True, indent=2, default=format_datetime)
# Mirror image of json_for.
def from_json(string):
return json.loads(string)
# /JSON Conveniences #
# Logging Conveniences #
def configure_logging(options: Union[dict, None]=None) -> None:
options = {} if not options else options
if options.get('debug', False):
log_level = "debug"
else:
log_level = options.get("log", "warn")
if log_level not in ["debug", "info", "warn", "error"]:
print("Invalid log level (specify: debug, info, warn, error).")
sys.exit(1)
logging.basicConfig(format='%(message)s', level=log_level.upper())
# /Logging Conveniences #
# CSV Handling #
# Sort a CSV by domain name, "in-place" (by making a temporary copy).
# This loads the whole thing into memory: it's not a great solution for
# super-large lists of domains.
def sort_csv(input_filename):
logging.warning("Sorting %s..." % input_filename)
input_file = open(input_filename, encoding='utf-8', newline='')
tmp_filename = "%s.tmp" % input_filename
tmp_file = open(tmp_filename, 'w', newline='')
tmp_writer = csv.writer(tmp_file)
# store list of domains, to sort at the end
domains = []
# index rows by domain
rows = {}
header = None
for row in csv.reader(input_file):
# keep the header around
if (row[0].lower() == "domain"):
header = row
continue
# index domain for later reference
domain = row[0]
domains.append(domain)
rows[domain] = row
# straight alphabet sort
domains.sort()
# write out to a new file
tmp_writer.writerow(header)
for domain in domains:
tmp_writer.writerow(rows[domain])
# close the file handles
input_file.close()
tmp_file.close()
# replace the original
shutil.move(tmp_filename, input_filename)
def write_rows(rows, domain, base_domain, scanner, csv_writer, meta={}):
# If we didn't get any info, we'll still output information about why the scan failed.
if rows is None:
empty_row = [None] * len(scanner.headers)
rows = [empty_row]
# Always output Domain and Base Domain.
standard_prefix = [
domain,
base_domain,
]
# If requested, add local and Lambda scan data.
meta_fields = []
if bool(meta):
meta_fields.append(" ".join(meta.get('errors', [])))
meta_fields.append(utc_timestamp(meta.get("start_time")))
meta_fields.append(utc_timestamp(meta.get("end_time")))
meta_fields.append(just_microseconds(meta.get("duration")))
if meta.get("lambda") is not None:
meta_fields.append(meta['lambda'].get('request_id'))
meta_fields.append(meta['lambda'].get('log_group_name'))
meta_fields.append(meta['lambda'].get('log_stream_name'))
meta_fields.append(utc_timestamp(meta['lambda'].get('start_time')))
meta_fields.append(utc_timestamp(meta['lambda'].get('end_time')))
meta_fields.append(meta['lambda'].get('memory_limit'))
meta_fields.append(just_microseconds(meta['lambda'].get('measured_duration')))
# Write out prefix, scan data, and meta scan data.
for row in rows:
csv_writer.writerow(standard_prefix + row + meta_fields)
# CSV Handling #
# Cache Handling #
def cache_single(filename, cache_dir="./cache"):
return os.path.join(cache_dir, filename)
# Predictable cache path for a domain and operation.
def cache_path(domain, operation, ext="json", cache_dir="./cache"):
return os.path.join(cache_dir, operation, ("%s.%s" % (domain, ext)))
# Used to quickly get cached data for a domain.
def data_for(domain, operation, cache_dir="./cache"):
path = cache_path(domain, operation, cache_dir=cache_dir)
if os.path.exists(path):
raw = read(path)
data = json.loads(raw)
if isinstance(data, dict) and (data.get('invalid', False)):
return None
else:
return data
else:
return {}
# marker for a cached invalid response
def invalid(data=None):
if data is None:
data = {}
data['invalid'] = True
return json_for(data)
# Return base domain for a subdomain, factoring in the Public Suffix List.
def base_domain_for(subdomain, cache_dir="./cache"):
    """
    For "x.y.domain.gov", return "domain.gov".
    If suffix_list is None, the caches have not been initialized, so do that.
    """
    global suffix_list
if suffix_list is None:
suffix_list, discard = load_suffix_list(cache_dir=cache_dir)
if suffix_list is None:
logging.warning("Error downloading the PSL.")
exit(1)
return suffix_list.get_public_suffix(subdomain)
# Returns an instantiated PublicSuffixList object, and the
# list of lines read from the file.
def load_suffix_list(cache_dir="./cache"):
cached_psl = cache_single("public-suffix-list.txt", cache_dir=cache_dir)
if os.path.exists(cached_psl):
logging.debug("Using cached Public Suffix List...")
with codecs.open(cached_psl, encoding='utf-8') as psl_file:
suffixes = publicsuffix.PublicSuffixList(psl_file)
content = psl_file.readlines()
else:
# File does not exist, download current list and cache it at given location.
logging.debug("Downloading the Public Suffix List...")
try:
cache_file = publicsuffix.fetch()
except URLError as err:
logging.warning("Unable to download the Public Suffix List...")
logging.debug("{}".format(err))
return None, None
content = cache_file.readlines()
suffixes = publicsuffix.PublicSuffixList(content)
# Cache for later.
write(''.join(content), cached_psl)
return suffixes, content
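# Illustrative (hedged) use of the helpers above, assuming the Public Suffix
# List is cached or can be downloaded:
#   base_domain_for("www.example.gov")  # -> "example.gov"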
# /Cache Handling #
# Argument Parsing #
class ArgumentParser(argparse.ArgumentParser):
"""
This lets us test for errors from argparse by overriding the error method.
See https://stackoverflow.com/questions/5943249
"""
def _get_action_from_name(self, name):
"""Given a name, get the Action instance registered with this parser.
If only it were made available in the ArgumentError object. It is
passed as its first arg...
"""
container = self._actions
if name is None:
return None
for action in container:
if '/'.join(action.option_strings) == name:
return action
elif action.metavar == name:
return action
elif action.dest == name:
return action
def error(self, message):
exc = sys.exc_info()[1]
if exc:
exc.argument = self._get_action_from_name(exc.argument_name)
raise exc
super(ArgumentParser, self).error(message)
def build_scan_options_parser() -> ArgumentParser:
""" Builds the argparse parser object. """
parser = ArgumentParser(prefix_chars="--")
parser.add_argument("domains", help="".join([
"Either a comma-separated list of domains or the url of a CSV ",
"file/path to a local CSV file containing the domains to be ",
"domains to be scanned. The CSV's header row will be ignored ",
"if the first cell starts with \"Domain\" (case-insensitive).",
]))
parser.add_argument("--cache", action="store_true", help="".join([
"Use previously cached scan data to avoid scans hitting the network ",
"where possible.",
]))
parser.add_argument("--debug", action="store_true",
help="Print out more stuff. Useful with '--serial'")
parser.add_argument("--lambda", action="store_true", help="".join([
"Run certain scanners inside Amazon Lambda instead of locally.",
]))
parser.add_argument("--lambda-profile", nargs=1, help="".join([
"When running Lambda-related commands, use a specified AWS named ",
"profile. Credentials/config for this named profile should already ",
"be configured separately in the execution environment.",
]))
parser.add_argument("--lambda-retries", type=int, help="".join([
"The maximum number of times to retry a Lambda job that fails. ",
"If not specified then the value 0 is used."
]))
parser.add_argument("--meta", action="store_true", help="".join([
"Append some additional columns to each row with information about ",
"the scan itself. This includes start/end times and durations, as ",
"well as any encountered errors. When also using '--lambda', ",
"additional, Lambda-specific information will be appended.",
]))
parser.add_argument("--scan", nargs=1, required=True,
help="Comma-separated list of scanners (required).")
parser.add_argument("--sort", action="store_true", help="".join([
"Sort result CSVs by domain name, alphabetically. (Note: this causes ",
"the entire dataset to be read into memory.)",
]))
parser.add_argument("--serial", action="store_true", help="".join([
"Disable parallelization, force each task to be done simultaneously. ",
"Helpful for testing and debugging.",
]))
parser.add_argument("--suffix", nargs=1, help="".join([
"Add a suffix to all input domains. For example, a --suffix of ",
"'virginia.gov' will add '.virginia.gov' to the end of all ",
"input domains."
]))
parser.add_argument("--output", nargs=1, default=["./"], help="".join([
"Where to output the 'cache/' and 'results/' directories. ",
"Defaults to './'.",
]))
parser.add_argument("--workers", nargs=1,
help="Limit parallel threads per-scanner to a number.")
# TODO: Should workers have a default value?
parser.add_argument("--no-fast-cache", action="store_true", help="".join([
"Do not use fast caching even if a scanner supports it. This option ",
"will cause domain-scan to use less memory, but some (possibly ",
"expensive) network activity or other operations may be repeated."
]))
# TODO: Move the scanner-specific argument parsing to each scanner's code.
# a11y:
parser.add_argument("--a11y-config",
help="a11y: Location of pa11y config file (used with a11y scanner.")
parser.add_argument("--a11y-redirects",
help="a11y: Location of YAML file with redirects to inform the a11y scanner.")
# pshtt:
parser.add_argument("--ca_file",
help="ca_file: Location of PEM file of trust store to verify certs with.")
parser.add_argument("--pt_int_ca_file",
help="pt_int_ca_file: Location of PEM file of public trust store with any needed intermediate certificates to verify certs with.")
# sslyze:
parser.add_argument("--sslyze-serial",
help="sslyze: If set, will use a synchronous (single-threaded in-process) scanner. Defaults to true.")
parser.add_argument("--sslyze-certs",
help="sslyze: If set, will use the CertificateInfoScanner and return certificate info. Defaults to true.")
parser.add_argument("--sslyze-reneg",
help="sslyze: If set, will use the SessionRenegotiationScanner and return session renegotiation info. Defaults to true.")
# trustymail:
parser.add_argument("--starttls", action='store_true', help="".join([
"trustymail: Only check mx records and STARTTLS support. ",
"(Implies --mx.)"
]))
parser.add_argument("--timeout", help="".join([
"trustymail: The DNS lookup timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-timeout", help="".join([
"trustymail: The SMTP connection timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-localhost", help="".join([
"trustymail: The hostname to use when connecting to SMTP ",
"servers. (Default is the FQDN of the host from ",
"which trustymail is being run.)"
]))
parser.add_argument("--smtp-ports", help="".join([
"trustymail: A comma-delimited list of ports at which to look ",
"for SMTP servers. (Default is '25,465,587'.)"
]))
parser.add_argument("--dns", help="".join([
"trustymail: A comma-delimited list of DNS servers to query ",
"against. For example, if you want to use ",
"Google's DNS then you would use the ",
"value --dns-hostnames='8.8.8.8,8.8.4.4'. By ",
"default the DNS configuration of the host OS ",
"(/etc/resolv.conf) is used. Note that ",
"the host's DNS configuration is not used at all ",
"if this option is used."
]))
parser.add_argument("--no-smtp-cache", help="".join([
"trustymail: Do not cache SMTP results during the run. This",
"may results in slower scans due to testing the ",
"same mail servers multiple times."
]))
parser.add_argument("--mx", action='store_true', help="".join([
"trustymail: Only check MX records"
]))
parser.add_argument("--spf", action='store_true', help="".join([
"trustymail: Only check SPF records"
]))
parser.add_argument("--dmarc", action='store_true', help="".join([
"trustymail: Only check DMARC records"
]))
return parser
def options() -> Tuple[dict, list]:
"""
Parse options for the ``scan`` command.
Impure
Reads from sys.argv.
"""
parser = build_scan_options_parser()
parsed, unknown = parser.parse_known_args()
opts = {k: v for k, v in vars(parsed).items() if v is not None}
if opts.get("lambda_profile") and not opts.get("lambda"):
raise argparse.ArgumentTypeError(
"Can't set lambda profile unless lambda flag is set.")
# We know we want one value, but the ``nargs`` flag means we get a list.
should_be_singles = (
"lambda_profile",
"output",
"scan",
"suffix",
"workers",
)
opts = make_values_single(opts, should_be_singles)
# Derive some options not set directly at CLI:
opts["_"] = {
"cache_dir": os.path.join(opts.get("output", "./"), "cache"),
"report_dir": opts.get("output", "./"),
"results_dir": os.path.join(opts.get("output", "./"), "results"),
}
return (opts, unknown)
def make_values_single(dct: dict, should_be_singles: Iterable[str]) -> dict:
for key in (k for k in should_be_singles if k in dct):
dct[key] = dct[key][0]
return dct
def handle_scanner_arguments(scans: List[ModuleType], opts: dict, unknown: List[str]):
for scan in scans:
if hasattr(scan, "handle_scanner_args"):
scan_opts, unknown = scan.handle_scanner_args(unknown, opts) # type: ignore
opts.update(scan_opts)
return (opts, unknown)
# /Argument Parsing #
def build_scanner_list(names: List[str],
mod: str="scanners") -> List[ModuleType]:
"""
Given a list of names, load modules corresponding to those names from the
scanners directory. Also verify that they have the required properties.
"""
scans = []
for name in names:
try:
scan = importlib.import_module(
"%s.%s" % (mod, name))
verify_scanner_properties(scan)
except ImportError:
exc_type, exc_value, exc_traceback = sys.exc_info()
errmsg = "\n".join([
"[%s] Scanner not found, or had an error during loading." % name,
"\tERROR: %s" % exc_type,
"\t%s" % exc_value,
])
logging.error(errmsg)
raise ImportError(errmsg)
scans.append(scan)
return scans
def verify_scanner_properties(scanner: ModuleType) -> None:
name = scanner.__name__
for prop in MANDATORY_SCANNER_PROPERTIES:
if not hasattr(scanner, prop):
raise ImportError("%s lacks required %s property" % (name, prop))
# If the scan has a canonical command, make sure it exists.
# mypy doesn't handle optional properties well, it seems.
if hasattr(scan, "command") and scan.command and (not try_command(scan.command)): # type: ignore
errmsg = "[%s] Command not found: %s" % (name, scan.command) # type: ignore
logging.error(errmsg)
raise ImportError(errmsg)
def begin_csv_writing(scanner: ModuleType, options: dict,
base_hdrs: Tuple[List[str], List[str], List[str]]) -> dict:
"""
Determine the CSV output file path for the scanner, open the file at that
path, instantiate a CSV writer for it, determine whether or not to use
lambda, determine what the headers are, write the headers to the CSV.
Return a dict containing the above.
"""
PREFIX_HEADERS, LOCAL_HEADERS, LAMBDA_HEADERS = base_hdrs
name = scanner.__name__.split(".")[-1] # e.g. 'pshtt'
results_dir = options["_"]["results_dir"]
meta = options.get("meta")
lambda_mode = options.get("lambda")
use_lambda = lambda_mode and \
hasattr(scanner, "lambda_support") and \
scanner.lambda_support # type: ignore # it's an optional variable.
# Write the header row, factoring in Lambda detail if needed.
headers = PREFIX_HEADERS + scanner.headers # type: ignore # optional again
# Local scan timing/errors.
if meta:
headers += LOCAL_HEADERS
# Lambda scan timing/errors. (At this step, only partial fields.)
if meta and use_lambda:
headers += LAMBDA_HEADERS
scanner_csv_path = Path(results_dir, "%s.csv" % name).resolve()
scanner_file = scanner_csv_path.open('w', newline='')
scanner_writer = csv.writer(scanner_file)
scanner_writer.writerow(headers)
return {
'name': name,
'file': scanner_file,
'filename': str(scanner_csv_path),
'writer': scanner_writer,
'headers': headers,
'use_lambda': use_lambda,
}
def determine_scan_workers(scanner: ModuleType, options: dict, w_default: int,
w_max: int) -> int:
"""
Given a number of inputs, determines the right number of workers to set
when running scans.
"""
if options.get("serial"):
workers = 1
elif hasattr(scanner, "workers"):
workers = scanner.workers # type: ignore # The subclass objects set this sometimes.
else:
# mypy has trouble with this, presumably because we're using a dict
workers = int(options.get("workers", w_default)) # type: ignore
# Enforce a local worker maximum as a safety valve.
return min(workers, w_max)
# Yield domain names from a single string, or a CSV of them.
@singledispatch
def domains_from(arg: Any, domain_suffix=None) -> Iterable[str]:
raise TypeError("'%s' is not a recognized source for domains." % arg)
@domains_from.register(str)
def _df_str(arg: str, domain_suffix: Union[str, None]=None) -> Iterable[str]:
# TODO: how do we handle domain_suffix here?
if domain_suffix is not None:
errmsg = "Passing in domains at CLI not compatible with --suffix."
raise argparse.ArgumentError(errmsg)
for x in arg.split(","):
yield x
@domains_from.register(Path)
def _df_path(arg: Path, domain_suffix: Union[str, None]=None) -> Iterable[str]:
if arg.suffix == ".csv":
with arg.open(encoding='utf-8', newline='') as csvfile:
for row in csv.reader(csvfile):
if (not row) or (not row[0]) or (row[0].lower() == "domain") or (row[0].lower() == "domain name"):
continue
domain = row[0].lower()
if domain_suffix:
sep = "."
if domain_suffix.startswith("."):
sep = ""
yield "%s%s%s" % (domain, sep, domain_suffix)
else:
yield domain
else:
# Note: the path referred to below will be the path to the local cached
# download and not to the original URL. It shouldn't be possible to get
# here with that being a problem, but noting it anyway.
msg = "\n".join([
"Domains should be specified as a comma-separated list ",
"or as the URL or path to a .csv file. ",
"%s does not appear to be any of those." % arg
])
raise TypeError(msg)
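# Illustrative (hedged) usage of the single-dispatch readers above:
#   list(domains_from("example.gov,example.com"))   # -> ["example.gov", "example.com"]
#   list(domains_from(Path("domains.csv"), "gov"))  # appends ".gov" to each row's domain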
def handle_domains_argument(domains: str, cache_dir: Path) -> Union[Path, str]:
# `domains` can be either a path or a domain name.
# It can also be a URL, and if it is we want to download it now,
# and then adjust the value to be the path of the cached download.
# Note that the cache_dir is basically guaranteed to exist by the time
# we reach this point in the execution path.
if domains.startswith("http:") or domains.startswith("https:"):
domains_path = Path(cache_dir, "domains.csv")
try:
response = requests.get(domains)
write(response.text, str(domains_path))
except requests.exceptions.RequestException as err:
msg = "\n".join([
"Domains URL not downloaded successfully; RequestException",
str(err),
])
logging.error(msg)
raise IOError(msg)
return domains_path
elif domains.endswith(".csv"):
# Assume file is either absolute or relative from current dir.
try:
domains_path = Path(os.path.curdir, domains).resolve()
if not domains_path.exists():
raise FileNotFoundError
return domains_path
except FileNotFoundError as err:
msg = "\n".join([
"Domains CSV file not found.",
"(Curdir: %s CSV file: %s)" % (os.path.curdir, domains),
str(err),
])
logging.error(msg)
raise FileNotFoundError(msg)
return domains
|
1615143
|
from .. import config
from .. import serializers
from ..repositories.common import CreateResource, ListResources, DeleteResource, GetResource
from ..sdk_exceptions import ResourceFetchingError
class GetBaseProjectsApiUrlMixin(object):
def _get_api_url(self, **_):
return config.config.CONFIG_HOST
class CreateProject(GetBaseProjectsApiUrlMixin, CreateResource):
SERIALIZER_CLS = serializers.Project
def get_request_url(self, **_):
return "/projects/"
class ListProjects(GetBaseProjectsApiUrlMixin, ListResources):
SERIALIZER_CLS = serializers.Project
def get_request_url(self, **kwargs):
return "/projects/"
def _get_instance_dicts(self, data, **kwargs):
project_dict_list = data["data"]
return project_dict_list
def _get_request_params(self, kwargs):
params = {
"filter": """{"offset":0,"where":{"dtDeleted":null},"order":"dtCreated desc"}"""
}
tags = kwargs.get("tags")
if tags:
params["tagFilter"] = tags
return params
class DeleteProject(GetBaseProjectsApiUrlMixin, DeleteResource):
def get_request_url(self, **kwargs):
return "/projects/{}/deleteProject".format(kwargs.get("id"))
def _send_request(self, client, url, json_data=None):
response = client.post(url, json=json_data)
return response
class GetProject(GetBaseProjectsApiUrlMixin, GetResource):
SERIALIZER_CLS = serializers.Project
def get_request_url(self, **kwargs):
return "/projects/"
def _get_request_params(self, kwargs):
id_ = kwargs["id"]
params = {
"filter": """{"where":{"handle":"%s"}}""" % id_
}
return params
def _parse_object(self, instance_dict, **kwargs):
try:
instance_dict = instance_dict["data"][0]
except IndexError:
raise ResourceFetchingError("Project not found")
instance_dict = super(GetProject, self)._parse_object(instance_dict, **kwargs)
return instance_dict
|
1615195
|
import numpy
from numpy.testing import assert_raises, assert_equal, assert_allclose
from fuel.datasets import Iris
from tests import skip_if_not_available
def test_iris_all():
skip_if_not_available(datasets=['iris.hdf5'])
dataset = Iris(('all',), load_in_memory=False)
handle = dataset.open()
data, labels = dataset.get_data(handle, slice(0, 10))
assert data.dtype == 'float32'
assert data.shape == (10, 4)
assert labels.shape == (10, 1)
known = numpy.array([5.1, 3.5, 1.4, 0.2])
assert_allclose(data[0], known)
assert labels[0][0] == 0
assert dataset.num_examples == 150
dataset.close(handle)
def test_iris_axes():
skip_if_not_available(datasets=['iris.hdf5'])
dataset = Iris(('all',), load_in_memory=False)
assert_equal(dataset.axis_labels['features'],
('batch', 'feature'))
def test_iris_invalid_split():
skip_if_not_available(datasets=['iris.hdf5'])
assert_raises(ValueError, Iris, ('dummy',))
|
1615209
|
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import escape
import re
register = template.Library()
rx = re.compile(r'(%(\([^\s\)]*\))?[sd])')
def format_message(message):
return mark_safe(rx.sub('<code>\\1</code>', escape(message).replace(r'\n','<br />\n')))
format_message=register.filter(format_message)
def lines_count(message):
    return 1 + sum(len(line) // 50 for line in message.split('\n'))
lines_count=register.filter(lines_count)
def mult(a,b):
return int(a)*int(b)
mult=register.filter(mult)
def minus(a,b):
try:
return int(a) - int(b)
    except (TypeError, ValueError):
return 0
minus=register.filter(minus)
def gt(a,b):
try:
return int(a) > int(b)
    except (TypeError, ValueError):
return False
gt=register.filter(gt)
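# Illustrative (hedged) template usage of the filters above:
#   {{ message|format_message }}
#   {{ message|lines_count }}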
|
1615254
|
from boa3.builtin import public
@public
def Main(*a: int) -> int:
c, *b = a # not implemented, won't compile
return c
|
1615282
|
import os
# toolchains options
ARCH ='risc-v'
CPU ='e906'
CPUNAME ='e906f'
VENDOR ='t-head'
CROSS_TOOL ='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'/home/xinge/tools/riscv64-elf-x86_64-20200616-1.9.6/bin'
else:
    print('Please make sure your toolchain is GNU GCC!')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
# BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'riscv64-unknown-elf-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'g++'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
STRIP = PREFIX + 'strip'
if CPUNAME == 'e906fd':
DEVICE = ' -march=rv32imafdcxthead -mabi=ilp32d'
if CPUNAME == 'e906f':
DEVICE = ' -march=rv32imafcxthead -mabi=ilp32f'
if CPUNAME == 'e906':
DEVICE = ' -march=rv32imacxthead -mabi=ilp32'
CFLAGS = DEVICE + ' -c -g -ffunction-sections -fdata-sections -Wall -mcmodel=medlow'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--no-whole-archive -T gcc_csky.ld -lm -lc -lgcc -Wl,-gc-sections -Wl,-zmax-page-size=1024'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -g2'
CXXFLAGS = CFLAGS
# M_CFLAGS = DEVICE + ' -EL -G0 -O2 -mno-abicalls -fno-common -fno-exceptions -fno-omit-frame-pointer -mlong-calls -fno-pic '
# M_CXXFLAGS = M_CFLAGS
# M_LFLAGS = DEVICE + ' -EL -r -Wl,--gc-sections,-z,max-page-size=0x4' +\
# ' -nostartfiles -static-libgcc'
# M_POST_ACTION = STRIP + ' -R .hash $TARGET\n' + SIZE + ' $TARGET \n'
DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
|
1615286
|
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml import Pipeline, Model
from pyspark.ml.classification import RandomForestClassifier
import json
''' Read data with Spark SQL '''
spark = SparkSession.builder.getOrCreate()
df_data = spark.read.csv(path="german_credit_data_biased_training.csv", sep=",", header=True, inferSchema=True)
df_data.head()
spark_df = df_data
(train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24)
print("Number of records for training: " + str(train_data.count()))
print("Number of records for evaluation: " + str(test_data.count()))
spark_df.printSchema()
si_CheckingStatus = StringIndexer(inputCol = 'CheckingStatus', outputCol = 'CheckingStatus_IX')
si_CreditHistory = StringIndexer(inputCol = 'CreditHistory', outputCol = 'CreditHistory_IX')
si_LoanPurpose = StringIndexer(inputCol = 'LoanPurpose', outputCol = 'LoanPurpose_IX')
si_ExistingSavings = StringIndexer(inputCol = 'ExistingSavings', outputCol = 'ExistingSavings_IX')
si_EmploymentDuration = StringIndexer(inputCol = 'EmploymentDuration', outputCol = 'EmploymentDuration_IX')
si_Sex = StringIndexer(inputCol = 'Sex', outputCol = 'Sex_IX')
si_OthersOnLoan = StringIndexer(inputCol = 'OthersOnLoan', outputCol = 'OthersOnLoan_IX')
si_OwnsProperty = StringIndexer(inputCol = 'OwnsProperty', outputCol = 'OwnsProperty_IX')
si_InstallmentPlans = StringIndexer(inputCol = 'InstallmentPlans', outputCol = 'InstallmentPlans_IX')
si_Housing = StringIndexer(inputCol = 'Housing', outputCol = 'Housing_IX')
si_Job = StringIndexer(inputCol = 'Job', outputCol = 'Job_IX')
si_Telephone = StringIndexer(inputCol = 'Telephone', outputCol = 'Telephone_IX')
si_ForeignWorker = StringIndexer(inputCol = 'ForeignWorker', outputCol = 'ForeignWorker_IX')
si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df)
label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels)
va_features = VectorAssembler(inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX", "EmploymentDuration_IX", "Sex_IX", \
"OthersOnLoan_IX", "OwnsProperty_IX", "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX", \
"LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration", "LoanDuration", "Age", "ExistingCreditsCount", \
"Dependents"], outputCol="features")
''' Train Model with RF classifier '''
classifier = RandomForestClassifier(featuresCol="features")
pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\
si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter])
model = pipeline.fit(train_data)
predictions = model.transform(test_data)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction")
area_under_curve = evaluatorDT.evaluate(predictions)
# default evaluation is areaUnderROC
print("areaUnderROC = %g" % area_under_curve)
print(model)
print(predictions)
# Persistent model, pipeline, and training data
model.write().overwrite().save('model')
train_data.write.option("header", "true").mode("overwrite").csv('train_data')
evaluation_metrics = {
'metrics': [
{
"name": "areaUnderROC",
"value": area_under_curve,
"threshold": 0.7
}
]
}
with open('evaluation.json', 'w') as f:
    json.dump(evaluation_metrics, f, indent=2)
|
1615320
|
import cv2
import torch
import scipy.special
import numpy as np
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from enum import Enum
from scipy.spatial.distance import cdist
from ultrafastLaneDetector.model import parsingNet
lane_colors = [(0,0,255),(0,255,0),(255,0,0),(0,255,255)]
tusimple_row_anchor = [ 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112,
116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164,
168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216,
220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268,
272, 276, 280, 284]
culane_row_anchor = [121, 131, 141, 150, 160, 170, 180, 189, 199, 209, 219, 228, 238, 248, 258, 267, 277, 287]
class ModelType(Enum):
TUSIMPLE = 0
CULANE = 1
class ModelConfig():
def __init__(self, model_type):
if model_type == ModelType.TUSIMPLE:
self.init_tusimple_config()
else:
self.init_culane_config()
def init_tusimple_config(self):
self.img_w = 1280
self.img_h = 720
self.row_anchor = tusimple_row_anchor
self.griding_num = 100
self.cls_num_per_lane = 56
def init_culane_config(self):
self.img_w = 1640
self.img_h = 590
self.row_anchor = culane_row_anchor
self.griding_num = 200
self.cls_num_per_lane = 18
class UltrafastLaneDetector():
def __init__(self, model_path, model_type=ModelType.TUSIMPLE, use_gpu=False):
self.use_gpu = use_gpu
# Load model configuration based on the model type
self.cfg = ModelConfig(model_type)
# Initialize model
self.model = self.initialize_model(model_path, self.cfg, use_gpu)
# Initialize image transformation
self.img_transform = self.initialize_image_transform()
@staticmethod
def initialize_model(model_path, cfg, use_gpu):
# Load the model architecture
net = parsingNet(pretrained = False, backbone='18', cls_dim = (cfg.griding_num+1,cfg.cls_num_per_lane,4),
                        use_aux=False) # we don't need auxiliary segmentation at test time
# Load the weights from the downloaded model
if use_gpu:
net = net.cuda()
state_dict = torch.load(model_path, map_location='cuda')['model'] # CUDA
else:
state_dict = torch.load(model_path, map_location='cpu')['model'] # CPU
compatible_state_dict = {}
for k, v in state_dict.items():
if 'module.' in k:
compatible_state_dict[k[7:]] = v
else:
compatible_state_dict[k] = v
# Load the weights into the model
net.load_state_dict(compatible_state_dict, strict=False)
net.eval()
return net
@staticmethod
def initialize_image_transform():
        # Create transform operation to resize and normalize the input images
img_transforms = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
return img_transforms
def detect_lanes(self, image, draw_points=True):
input_tensor = self.prepare_input(image)
# Perform inference on the image
output = self.inference(input_tensor)
# Process output data
self.lanes_points, self.lanes_detected = self.process_output(output, self.cfg)
        # Draw detected lanes on the image
visualization_img = self.draw_lanes(image, self.lanes_points, self.lanes_detected, self.cfg, draw_points)
return visualization_img
def prepare_input(self, img):
# Transform the image for inference
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_pil = Image.fromarray(img)
input_img = self.img_transform(img_pil)
input_tensor = input_img[None, ...]
if self.use_gpu:
input_tensor = input_tensor.cuda()
return input_tensor
def inference(self, input_tensor):
with torch.no_grad():
output = self.model(input_tensor)
return output
@staticmethod
def process_output(output, cfg):
# Parse the output of the model
processed_output = output[0].data.cpu().numpy()
processed_output = processed_output[:, ::-1, :]
prob = scipy.special.softmax(processed_output[:-1, :, :], axis=0)
idx = np.arange(cfg.griding_num) + 1
idx = idx.reshape(-1, 1, 1)
loc = np.sum(prob * idx, axis=0)
processed_output = np.argmax(processed_output, axis=0)
loc[processed_output == cfg.griding_num] = 0
processed_output = loc
col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
col_sample_w = col_sample[1] - col_sample[0]
lanes_points = []
lanes_detected = []
max_lanes = processed_output.shape[1]
for lane_num in range(max_lanes):
lane_points = []
# Check if there are any points detected in the lane
if np.sum(processed_output[:, lane_num] != 0) > 2:
lanes_detected.append(True)
# Process each of the points for each lane
for point_num in range(processed_output.shape[0]):
if processed_output[point_num, lane_num] > 0:
lane_point = [int(processed_output[point_num, lane_num] * col_sample_w * cfg.img_w / 800) - 1, int(cfg.img_h * (cfg.row_anchor[cfg.cls_num_per_lane-1-point_num]/288)) - 1 ]
lane_points.append(lane_point)
else:
lanes_detected.append(False)
lanes_points.append(lane_points)
        # lanes_points is ragged (each lane has a different point count), so use dtype=object
        return np.array(lanes_points, dtype=object), np.array(lanes_detected)
@staticmethod
def draw_lanes(input_img, lanes_points, lanes_detected, cfg, draw_points=True):
# Write the detected line points in the image
visualization_img = cv2.resize(input_img, (cfg.img_w, cfg.img_h), interpolation = cv2.INTER_AREA)
# Draw a mask for the current lane
if(lanes_detected[1] and lanes_detected[2]):
lane_segment_img = visualization_img.copy()
cv2.fillPoly(lane_segment_img, pts = [np.vstack((lanes_points[1],np.flipud(lanes_points[2])))], color =(255,191,0))
visualization_img = cv2.addWeighted(visualization_img, 0.7, lane_segment_img, 0.3, 0)
if(draw_points):
for lane_num,lane_points in enumerate(lanes_points):
for lane_point in lane_points:
cv2.circle(visualization_img, (lane_point[0],lane_point[1]), 3, lane_colors[lane_num], -1)
return visualization_img
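# Hedged usage sketch; the weight and image paths below are hypothetical:
# if __name__ == "__main__":
#     detector = UltrafastLaneDetector("tusimple_18.pth", ModelType.TUSIMPLE, use_gpu=False)
#     frame = cv2.imread("road.jpg")
#     vis = detector.detect_lanes(frame, draw_points=True)
#     cv2.imwrite("lanes_vis.jpg", vis)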
|
1615323
|
from tests.hypergol_test_case import DataClass1
from tests.hypergol_test_case import HypergolTestCase
class TestDatasetDefFile(HypergolTestCase):
def __init__(self, methodName='runTest'):
super(TestDatasetDefFile, self).__init__(
location='test_dataset_def_file_location',
projectName='test_dataset_def_file',
branch='branch',
chunkCount=16,
methodName=methodName
)
def setUp(self):
super().setUp()
self.expectedObjects = {DataClass1(id_=k, value1=k) for k in range(100)}
self.dataset = self.create_test_dataset(
dataset=self.datasetFactory.get(dataType=DataClass1, name='data_class'),
content=self.expectedObjects
)
self.datasetNew = self.datasetFactory.get(dataType=DataClass1, name='data_class_new')
def tearDown(self):
super().tearDown()
self.delete_if_exists(dataset=self.dataset)
self.delete_if_exists(dataset=self.datasetNew)
self.clean_directories()
def test_dataset_correctly_locates_def_file(self):
self.assertEqual(self.dataset.defFile.defFilename, f'{self.location}/{self.projectName}/{self.branch}/data_class/data_class.def')
def test_dataset_check_def_file_returns_true_if_correct(self):
self.assertEqual(self.dataset.defFile.check_def_file(), True)
|
1615325
|
from unittest import TestCase
from app.fila import Fila
class TestFila(TestCase):
@classmethod
def setUpClass(cls):
cls.arquivo = 'arquivo.txt'
print('setUpClass')
@classmethod
def tearDownClass(cls):
print('tearDownClass')
from os import remove
remove(cls.arquivo)
def setUp(self):
self.fila = Fila()
print('setup')
def test_cria_arquivo(self):
        open(self.arquivo, 'w').close()
def tearDown(self):
print('tearDown')
def test_1_quando_5_entrar_na_fila_5_deve_estar_no_final_da_fila(self):
entrada = 5
saida_esperada = 5
        # when 5 enters the queue
self.fila.entrar(entrada)
        # then 5 should be in the queue
self.assertEqual(saida_esperada, self.fila[-1])
print(self.fila)
# print('test_quando_5_entrar_na_fila_5_deve_estar_no_final_da_fila')
def test_quando_10_entrar_na_fila_10_deve_estar_no_final_da_fila(self):
entrada = 10
saida_esperada = 10
        # when 10 enters the queue
self.fila.entrar(entrada)
        # then 10 should be in the queue
self.assertEqual(saida_esperada, self.fila[-1])
print(self.fila)
# print('test_quando_10_entrar_na_fila_10_deve_estar_no_final_da_fila')
def test_quando_10_entrar_na_fila_seguido_de_5_10_deve_estar_no_comeco_da_fila(self):
entrada1 = 10
entrada2 = 5
saida_esperada = 10
self.fila.entrar(entrada1)
self.fila.entrar(entrada2)
self.assertEqual(saida_esperada, self.fila[0])
print(self.fila)
# print('test_quando_10_entrar_na_fila_seguido_de_5_10_deve_estar_no_comeco_da_fila')
|
1615353
|
import numpy as np
import numba as nb
_signatures = [
(nb.float32[:], nb.float32[:], nb.float32[:]),
(nb.float64[:], nb.float64[:], nb.float64[:]),
]
@nb.njit(_signatures, cache=True)
def _de_castlejau(z, beta, res):
# De Casteljau algorithm, numerically stable
n = len(beta)
if n == 0:
res[:] = np.nan
else:
betai = np.empty_like(beta)
for iz, zi in enumerate(z):
azi = 1.0 - zi
betai[:] = beta
for j in range(1, n):
for k in range(n - j):
betai[k] = betai[k] * azi + betai[k + 1] * zi
res[iz] = betai[0]
return res
_signatures = [
nb.float32[:](nb.float32[:]),
nb.float64[:](nb.float64[:]),
]
@nb.njit(_signatures, cache=True)
def _beta_int(beta):
n = len(beta)
r = np.zeros(n + 1, dtype=beta.dtype)
for j in range(1, n + 1):
for k in range(j):
r[j] += beta[k]
r *= 1.0 / n
return r
@nb.njit(cache=True)
def _prepare_z_beta(x, xmin, xmax, beta):
inverse_scale = 1 / (xmax - xmin)
z = x.copy()
z -= xmin
z *= inverse_scale
# beta = beta.copy()
# inverse_scale /= len(beta) + 1
# beta *= inverse_scale
return z, beta
def _prepare_array(x):
x = np.atleast_1d(x)
if x.dtype.kind != "f":
x = x.astype(np.float64)
return x
_signatures = [
(nb.float32[:], nb.float32[:], nb.float32[:], nb.float32[:], nb.float32[:]),
(nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:]),
]
@nb.guvectorize(_signatures, "(),(n),(),()->()", cache=True)
def scaled_pdf(x, beta, xmin, xmax, res):
z, beta = _prepare_z_beta(x, xmin, xmax, beta)
_de_castlejau(z, beta, res)
@nb.guvectorize(_signatures, "(),(n),(),()->()", cache=True)
def scaled_cdf(x, beta, xmin, xmax, res):
z, beta = _prepare_z_beta(x, xmin, xmax, beta)
beta = _beta_int(beta)
_de_castlejau(z, beta, res)
@nb.extending.overload(scaled_pdf)
def bernstein_scaled_pdf_ol(x, beta, xmin, xmax):
from numba.core.errors import TypingError
from numba.types import Array, Float
if not isinstance(x, Array):
raise TypingError("x must be a Numpy array")
if not isinstance(beta, Array):
raise TypingError("beta must be a Numpy array")
if not isinstance(xmin, Float):
raise TypingError("xmin must be float")
if not isinstance(xmax, Float):
raise TypingError("xmax must be float")
def impl(x, beta, xmin, xmax):
z, beta = _prepare_z_beta(x, xmin, xmax, beta)
res = np.empty_like(z)
_de_castlejau(z, beta, res)
return res
return impl
@nb.extending.overload(scaled_cdf)
def bernstein_scaled_cdf_ol(x, beta, xmin, xmax):
from numba.core.errors import TypingError
from numba.types import Array, Float
if not isinstance(x, Array):
raise TypingError("x must be a Numpy array")
if not isinstance(beta, Array):
raise TypingError("beta must be a Numpy array")
if not isinstance(xmin, Float):
raise TypingError("xmin must be float")
if not isinstance(xmax, Float):
raise TypingError("xmax must be float")
def impl(x, beta, xmin, xmax):
z, beta = _prepare_z_beta(x, xmin, xmax, beta)
beta = _beta_int(beta)
res = np.empty_like(z)
_de_castlejau(z, beta, res)
return res
return impl
density = scaled_pdf
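# Hedged usage sketch: evaluate the Bernstein density and CDF on [0, 1].
# if __name__ == "__main__":
#     x = np.linspace(0.0, 1.0, 5)
#     beta = np.array([1.0, 2.0, 1.0])  # three Bernstein coefficients
#     print(scaled_pdf(x, beta, 0.0, 1.0))
#     print(scaled_cdf(x, beta, 0.0, 1.0))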
|
1615368
|
from xendit.models._base_model import BaseModel
from xendit._api_requestor import _APIRequestor
from xendit._extract_params import _extract_params
from xendit.xendit_error import XenditError
class RecurringPayment(BaseModel):
"""RecurringPayment class (API Reference: RecurringPayment)
Static Methods:
- RecurringPayment.create (API Reference: /Create Recurring Payment)
- RecurringPayment.get (API Reference: /Get Recurring Payment)
- RecurringPayment.edit (API Reference: /Edit Recurring Payment)
- RecurringPayment.stop (API Reference: /Stop Recurring Payment)
- RecurringPayment.pause (API Reference: /Pause Recurring Payment)
- RecurringPayment.resume (API Reference: /Resume Recurring Payment)
Static Methods for Object Creation:
- RecurringPayment.helper_create_installment (For Installment in create_authorization and create_charge)
Attributes:
- id (str)
- user_id (str)
- external_id (str)
- status (str)
- amount (float)
- payer_email (str)
- description (str)
- should_send_email (bool)
- interval (str)
- interval_count (int)
- recurrence_progress (int)
- last_created_invoice_url (str)
- credit_card_token (str)
- created (str)
- updated (str)
- recharge (bool)
- payment_method_id (str)
Optional Attributes:
- success_redirect_url (str)
- failure_redirect_url (str)
- invoice_duration (int)
- charge_immediately (bool)
- currency (str)
"""
id: str
user_id: str
external_id: str
status: str
amount: float
payer_email: str
description: str
should_send_email: bool
interval: str
interval_count: int
recurrence_progress: int
last_created_invoice_url: str
credit_card_token: str
created: str
updated: str
recharge: bool
payment_method_id: str
# Optional
success_redirect_url: str
failure_redirect_url: str
invoice_duration: int
charge_immediately: bool
currency: str
@staticmethod
def create(
*,
external_id,
payer_email,
description,
amount,
interval,
interval_count,
total_recurrence=None,
invoice_duration=None,
should_send_email=None,
missed_payment_action=None,
credit_card_token=None,
start_date=None,
success_redirect_url=None,
failure_redirect_url=None,
recharge=None,
charge_immediately=None,
payment_method_id=None,
currency=None,
x_idempotency_key=None,
for_user_id=None,
x_api_version=None,
**kwargs,
):
"""Send POST Request to create refund for Credit Card (API Reference: Credit Card/Create Refund)
Args:
- external_id (str)
- payer_email (str)
- description (str)
- amount (float)
- interval (str)
- interval_count (int)
- **total_recurrence (int)
- **invoice_duration (int)
- **should_send_email (int)
- **missed_payment_action (str)
- **credit_card_token (str)
- **start_date (str)
- **success_redirect_url (str)
- **failure_redirect_url (str)
- **recharge (bool)
- **charge_immediately (bool)
- **payment_method_id (str)
- **currency (str)
- **x_idempotency_key (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = "/recurring_payments"
headers, body = _extract_params(
locals(),
func_object=RecurringPayment.create,
headers_params=["for_user_id", "x_idempotency_key", "x_api_version"],
)
kwargs["headers"] = headers
kwargs["body"] = body
resp = _APIRequestor.post(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
@staticmethod
def get(
*, id, for_user_id=None, x_api_version=None, **kwargs,
):
"""Get Recurring Payment by ID (API Reference: Recurring Payment/Get Recurring Payment)
Args:
- id (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = f"/recurring_payments/{id}"
headers, _ = _extract_params(
locals(),
func_object=RecurringPayment.get,
headers_params=["for_user_id", "x_api_version"],
)
kwargs["headers"] = headers
resp = _APIRequestor.get(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
@staticmethod
def edit(
*,
id,
amount=None,
credit_card_token=None,
interval=None,
interval_count=None,
should_send_email=None,
invoice_duration=None,
missed_payment_action=None,
payment_method_id=None,
customer_id=None,
x_idempotency_key=None,
for_user_id=None,
x_api_version=None,
**kwargs,
):
"""Edit Recurring Payment Data (API Reference: Recurring Payment/Edit Recurring Payment)
Args:
- id (str)
- **amount (int)
- **credit_card_token (str)
- **interval (str)
- **interval_count (int)
- **should_send_email (bool)
- **invoice_duration (int)
- **missed_payment_action (str)
- **payment_method_id (str)
- **customer_id (str)
- **x_idempotency_key (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = f"/recurring_payments/{id}"
headers, body = _extract_params(
locals(),
func_object=RecurringPayment.edit,
headers_params=["for_user_id", "x_idempotency_key", "x_api_version"],
ignore_params=["id"],
)
kwargs["headers"] = headers
kwargs["body"] = body
resp = _APIRequestor.patch(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
@staticmethod
def stop(
*, id, x_idempotency_key=None, for_user_id=None, x_api_version=None, **kwargs,
):
"""Stop Recurring Payment (API Reference: Recurring Payment/Stop Recurring Payment)
Args:
- id (str)
- **x_idempotency_key (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = f"/recurring_payments/{id}/stop!"
headers, body = _extract_params(
locals(),
func_object=RecurringPayment.stop,
headers_params=["for_user_id", "x_idempotency_key", "x_api_version"],
ignore_params=["id"],
)
kwargs["headers"] = headers
kwargs["body"] = body
resp = _APIRequestor.post(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
@staticmethod
def pause(
*, id, x_idempotency_key=None, for_user_id=None, x_api_version=None, **kwargs,
):
"""Pause Recurring Payment (API Reference: Recurring Payment/Pause Recurring Payment)
Args:
- id (str)
- **x_idempotency_key (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = f"/recurring_payments/{id}/pause!"
headers, body = _extract_params(
locals(),
            func_object=RecurringPayment.pause,
headers_params=["for_user_id", "x_idempotency_key", "x_api_version"],
ignore_params=["id"],
)
kwargs["headers"] = headers
kwargs["body"] = body
resp = _APIRequestor.post(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
@staticmethod
def resume(
*, id, x_idempotency_key=None, for_user_id=None, x_api_version=None, **kwargs,
):
"""Pause Recurring Payment (API Reference: Recurring Payment/Pause Recurring Payment)
Args:
- id (str)
- **x_idempotency_key (str)
- **for_user_id (str)
- **x_api_version (str)
Returns:
RecurringPayment
Raises:
XenditError
"""
url = f"/recurring_payments/{id}/resume!"
headers, body = _extract_params(
locals(),
            func_object=RecurringPayment.resume,
headers_params=["for_user_id", "x_idempotency_key", "x_api_version"],
ignore_params=["id"],
)
kwargs["headers"] = headers
kwargs["body"] = body
resp = _APIRequestor.post(url, **kwargs)
if resp.status_code >= 200 and resp.status_code < 300:
return RecurringPayment(**resp.body)
else:
raise XenditError(resp)
|
1615372
|
from nose import tools as nt
from tests.base import AdminTestCase
from tests.test_conferences import ConferenceFactory
from admin.meetings.serializers import serialize_meeting
class TestsSerializeMeeting(AdminTestCase):
def setUp(self):
super(TestsSerializeMeeting, self).setUp()
self.conf = ConferenceFactory()
def test_serialize(self):
res = serialize_meeting(self.conf)
nt.assert_is_instance(res, dict)
nt.assert_equal(res['endpoint'], self.conf.endpoint)
nt.assert_equal(res['name'], self.conf.name)
nt.assert_equal(res['info_url'], self.conf.info_url)
nt.assert_equal(res['logo_url'], self.conf.logo_url)
nt.assert_equal(res['active'], self.conf.active)
nt.assert_equal(res['public_projects'], self.conf.public_projects)
nt.assert_equal(res['poster'], self.conf.poster)
nt.assert_equal(res['talk'], self.conf.talk)
nt.assert_equal(res['num_submissions'], self.conf.valid_submissions.count())
|
1615392
|
from girder import plugin
from .views import RabbitUserQueue
class GirderPlugin(plugin.GirderPlugin):
def load(self, info):
info["apiRoot"].rabbit_user_queues = RabbitUserQueue()
|
1615425
|
import os
import sys
import glob
import html
import fnmatch
from os import path
import coverage
OUTPUT_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Spec Coverage</title>
<link rel="stylesheet" href="style.css" type="text/css">
<style>
.covered {
}
.missed {
background-color: lightcoral;
}
code {
margin: 0;
padding: 0;
display:block;
white-space:pre-wrap;
}
</style>
</head>
<body>
%head
<div><pre>
%body
</pre></div>
</body>
</html>
"""
LINE_TEMPLATE = "<code class=\"%class\">%lineno| %source</code>"
def write_report(data, source_file, output_file):
module_name, executable_lines, excluded_lines, missing_lines, _ = data
missing_lines = set(missing_lines)
with open(output_file, "w") as output, open(source_file, "r") as source:
lines = source.readlines()
file_report = []
padding = len(str(len(lines)))
for index, line in enumerate(lines):
line = line[0:-1]
lineno = index + 1
line_number = str(lineno).rjust(padding)
covered = lineno not in missing_lines
line_class = 'covered' if covered else 'missed'
formatted_line = (LINE_TEMPLATE.replace('%class', line_class)
.replace('%lineno', line_number)
.replace('%source', html.escape(line)))
file_report.append(formatted_line)
report_body = ''.join(file_report)
report_header = ''
report = (OUTPUT_TEMPLATE.replace('%head', report_header)
.replace('%body', report_body))
output.write(report)
def main(argv):
parsing_path = path.normpath(path.join(path.dirname(__file__), ".."))
files = argv[1:]
if not files:
files = [os.path.join(root, file) for root, _, files in os.walk(parsing_path)
for file in fnmatch.filter(files, '*.vtt')]
    cov = coverage.Coverage()
    cov.start()
    # import under coverage so the parser module's execution is traced,
    # and so parser is still bound even when `files` is empty
    import parser
    for file_path in files:
        with open(file_path, "r") as file:
            source = file.read()
        p = parser.VTTParser(source)
        p.parse()
cov.stop()
data = cov.analysis2(parser.__file__)
write_report(data, parser.__file__, "report.html")
if __name__ == '__main__':
main(sys.argv)
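# Hedged usage note: run this script directly to build an HTML coverage report for the
# parser module. The script and fixture names below are placeholders, not real paths.
#
#   python spec_coverage.py tests/cue-settings.vtt tests/timestamps.vtt
#
# With no arguments, every *.vtt file found under the parent directory is parsed, and
# the result is written to report.html in the current working directory.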
|
1615446
|
UNKNOWN = "Unknown"
MANUAL = "Manual"
TESTDRAFT = "TestDraft"
SCHEDULED = "Scheduled"
WEBHOOK = "Webhook"
INTERNAL = "Internal"
WATCHER = "Watcher"
UNKNOWN_ENUM_INDEX = 0
MANUAL_ENUM_INDEX = 1
TESTDRAFT_ENUM_INDEX = 2
SCHEDULED_ENUM_INDEX = 3
WEBHOOK_ENUM_INDEX = 4
INTERNAL_ENUM_INDEX = 5
WATCHER_ENUM_INDEX = 6
mapping = {
UNKNOWN_ENUM_INDEX: UNKNOWN,
MANUAL_ENUM_INDEX: MANUAL,
TESTDRAFT_ENUM_INDEX: TESTDRAFT,
SCHEDULED_ENUM_INDEX: SCHEDULED,
WEBHOOK_ENUM_INDEX: WEBHOOK,
INTERNAL_ENUM_INDEX: INTERNAL,
WATCHER_ENUM_INDEX: WATCHER
}
|
1615486
|
import os
import sys
import argparse
import tensorflow as tf
from misc.helpers import *
from misc.digits import Digits
###################################################################
# Model #
###################################################################
@print_info
def linear_model(x):
with tf.name_scope("Model"):
pred = tf.layers.dense(inputs=x, units=10,
activation=tf.nn.softmax)
return tf.identity(pred, name="prediction")
@print_info
def cnn_model(x):
conv1 = tf.layers.conv2d(inputs=tf.reshape(x, [-1, 28, 28, 1]),
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
with tf.name_scope('Model'):
pred = tf.layers.dense(inputs=dense, units=10, activation=tf.nn.softmax)
return tf.identity(pred, name="prediction")
###################################################################
# Training #
###################################################################
@print_info
def train_model(x, y, cost, optimizer, accuracy, learning_rate, batch_size, epochs, data_dir, outputs_dir, logs_dir):
# get run
try:
run = Run.get_context()
    except Exception:
run = None
    # log parameters
aml_log(run, learning_rate=learning_rate,
batch_size=batch_size, epochs=epochs,
data_dir=data_dir, outputs_dir=outputs_dir,
logs_dir=logs_dir)
info('Initializing Devices')
print(' ')
# load MNIST data (if not available)
digits = Digits(data_dir, batch_size)
test_x, test_y = digits.test
# Create a summary to monitor cost tensor
tf.summary.scalar("cost", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(str(logs_dir), graph=tf.get_default_graph())
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
acc = 0.
info('Training')
# epochs to run
for epoch in range(epochs):
print("Epoch {}".format(epoch+1))
avg_cost = 0.
# loop over all batches
for i, (train_x, train_y) in enumerate(digits):
# Run optimization, cost, and summary
_, c, summary = sess.run([optimizer, cost, merged_summary_op],
feed_dict={x: train_x, y: train_y})
# Write logs at every iteration
summary_writer.add_summary(summary, epoch * digits.total + i)
# Compute average loss
avg_cost += c / digits.total
print("\r Batch {}/{} - Cost {:5.4f}".format(i+1, digits.total, avg_cost), end="")
acc = accuracy.eval({x: test_x, y: test_y})
print("\r Cost: {:5.4f}, Accuracy: {:5.4f}\n".format(avg_cost, acc))
# aml log
aml_log(run, cost=avg_cost, accuracy=acc)
# save model
info("Saving Model")
save_model(sess, outputs_dir, 'Model/prediction')
def main(settings):
# resetting graph
tf.reset_default_graph()
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='x')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='y')
# model
hx = cnn_model(x)
# accuracy
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hx, 1), tf.argmax(y, 1)), tf.float32))
# cost / loss
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=hx))
# optimizer
optimizer = tf.train.AdamOptimizer(settings.lr).minimize(cost)
# training session
train_model(x, y, cost, optimizer, accuracy,
settings.lr, settings.batch, settings.epochs,
settings.data, settings.outputs, settings.logs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CNN Training for Image Recognition.')
parser.add_argument('-d', '--data', help='directory to training and test data', default='data')
parser.add_argument('-e', '--epochs', help='number of epochs', default=10, type=int)
parser.add_argument('-b', '--batch', help='batch size', default=100, type=int)
parser.add_argument('-l', '--lr', help='learning rate', default=0.001, type=float)
parser.add_argument('-g', '--logs', help='log directory', default='logs')
parser.add_argument('-o', '--outputs', help='output directory', default='outputs')
args = parser.parse_args()
args.data = check_dir(args.data).resolve()
args.outputs = check_dir(args.outputs).resolve()
args.logs = check_dir(args.logs).resolve()
main(args)
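# Hedged usage sketch: an example command line for this script (the script file name and
# directory paths are placeholders; the defaults are those defined in the argparse setup above).
#
#   python train.py --data data --outputs outputs --logs logs --epochs 10 --batch 100 --lr 0.001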
|
1615491
|
import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler, autocast
from torchattacks.attack import Attack
class FastBIM(Attack):
def __init__(self, model, eps=4/255, alpha=1/255, steps=0):
super().__init__("FastBIM", model)
self.eps = eps
self.alpha = alpha
if steps == 0:
self.steps = int(min(eps*255 + 4, 1.25*eps*255))
else:
self.steps = steps
self._supported_mode = ['default', 'targeted']
self.scaler = GradScaler()
def forward(self, images, labels):
r"""
Overridden.
"""
images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
if self._targeted:
target_labels = self._get_target_label(images, labels)
loss = nn.CrossEntropyLoss()
ori_images = images.clone().detach()
for _ in range(self.steps):
images.requires_grad = True
# Accelerating forward propagation
with autocast():
outputs = self.model(images)
# Calculate loss
if self._targeted:
cost = -loss(outputs, target_labels)
else:
cost = loss(outputs, labels)
# Update adversarial images with gradient scaler applied
scaled_loss = self.scaler.scale(cost)
# Update adversarial images
grad = torch.autograd.grad(scaled_loss, images,
retain_graph=False,
create_graph=False)[0]
adv_images = images + self.alpha*grad.sign()
a = torch.clamp(ori_images - self.eps, min=0)
b = (adv_images >= a).float()*adv_images \
+ (adv_images < a).float()*a
c = (b > ori_images+self.eps).float()*(ori_images+self.eps) \
+ (b <= ori_images + self.eps).float()*b
images = torch.clamp(c, max=1).detach()
return images
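# A minimal, hedged usage sketch: `model` and `loader` are assumptions standing in for a
# trained classifier and a DataLoader of images normalized to [0, 1]; torchattacks makes
# attack objects callable, dispatching to the forward() defined above.
#
#   atk = FastBIM(model, eps=8/255, alpha=2/255, steps=10)
#   for images, labels in loader:
#       adv_images = atk(images, labels)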
|
1615535
|
import logging
import multiprocessing as mp
from argparse import Namespace
from typing import Optional, List
from arango import ArangoClient
from cklib.args import ArgumentParser
from cklib.jwt import add_args as jwt_add_args
from core import async_extensions
from core.db.arangodb_extensions import ArangoHTTPClient
from core.db.db_access import DbAccess
from core.message_bus import MessageBus
from core.model.adjust_node import DirectAdjuster
from core.task.task_handler import TaskHandler
from core.util import shutdown_process
log = logging.getLogger(__name__)
def parse_args(args: Optional[List[str]] = None, namespace: Optional[str] = None) -> Namespace:
parser = ArgumentParser(
env_args_prefix="CKCORE_",
description="Maintains graphs of resources of any shape.",
epilog="Keeps all the things.",
)
jwt_add_args(parser)
parser.add_argument(
"--log-level",
default="info",
help="Log level (default: info)",
)
parser.add_argument(
"--graphdb-server",
default="http://localhost:8529",
dest="graphdb_server",
help="Graph database server (default: http://localhost:8529)",
)
parser.add_argument(
"--graphdb-database",
default="cloudkeeper",
dest="graphdb_database",
help="Graph database name (default: cloudkeeper)",
)
parser.add_argument(
"--graphdb-username",
default="cloudkeeper",
dest="graphdb_username",
help="Graph database login (default: cloudkeeper)",
)
parser.add_argument(
"--graphdb-password",
default="",
dest="graphdb_password",
help='Graph database password (default: "")',
)
parser.add_argument(
"--graphdb-type",
default="arangodb",
dest="graphdb_type",
help="Graph database type (default: arangodb)",
)
parser.add_argument(
"--graphdb-no-ssl-verify",
action="store_true",
dest="graphdb_no_ssl_verify",
help="If the connection should not be verified (default: False)",
)
parser.add_argument(
"--graphdb-request-timeout",
type=int,
default=900,
dest="graphdb_request_timeout",
help="Request timeout in seconds (default: 900)",
)
parser.add_argument(
"--plantuml-server",
default="https://www.plantuml.com/plantuml",
help="PlantUML server URI for UML image rendering (default: https://www.plantuml.com/plantuml)",
)
parser.add_argument(
"--host",
type=str,
default="localhost",
nargs="+",
help="TCP host(s) to bind on (default: localhost)",
)
parser.add_argument(
"--port",
type=int,
default=8900,
help="TCP port to bind on (default: 8900)",
)
parser.add_argument(
"--merge_max_wait_time_seconds",
type=int,
default=3600,
help="Max waiting time to complete a merge graph action.",
)
TaskHandler.add_args(parser)
return parser.parse_args(args, namespace) # type: ignore
# Note: this method should be called from every started process as early as possible
def setup_process(args: Namespace, child_process: Optional[str] = None) -> None:
# Note: if another appender than the log appender is used, proper multiprocess logging needs to be enabled.
# See https://docs.python.org/3/howto/logging-cookbook.html#logging-to-a-single-file-from-multiple-processes
log_format = "%(asctime)s|ckcore|%(levelname)5s|%(process)d|%(threadName)10s %(message)s"
logging.basicConfig(
format=log_format, datefmt="%y-%m-%d %H:%M:%S", level=logging.getLevelName(args.log_level.upper())
)
# set/reset process creation method
reset_process_start_method()
# reset global async thread pool (forked processes need to create a fresh pool)
async_extensions.GlobalAsyncPool = None
def reset_process_start_method() -> None:
preferred = "spawn"
current = mp.get_start_method(True)
if current != preferred:
if preferred in mp.get_all_start_methods():
log.info(f"Set process start method to {preferred}")
mp.set_start_method(preferred, True)
return
log.warning(f"{preferred} method not available. Have {mp.get_all_start_methods()}. Use {current}")
def db_access(args: Namespace, message_bus: MessageBus) -> DbAccess:
    if args.graphdb_type != "arangodb":
log.fatal(f"Unknown Graph DB type {args.graphdb_type}")
shutdown_process(1)
http_client = ArangoHTTPClient(args.graphdb_request_timeout, not args.graphdb_no_ssl_verify)
client = ArangoClient(hosts=args.graphdb_server, http_client=http_client)
database = client.db(args.graphdb_database, username=args.graphdb_username, password=args.graphdb_password)
adjuster = DirectAdjuster()
return DbAccess(database, message_bus, adjuster)
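# Hedged usage sketch: wiring the pieces above together. The MessageBus constructor is
# assumed to take no arguments here; adjust to the real core.message_bus API if it differs.
#
#   args = parse_args(["--graphdb-server", "http://localhost:8529"])
#   setup_process(args)
#   access = db_access(args, MessageBus())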
|
1615557
|
from bip import *
import idc
import pytest
"""
Test for all classes used for representing ast nodes are tested by this
file, this is also used for testing the visitors.
Are tested in this file the following:
* :class:`AbstractCItem` from ``bip/hexrays/astnode.py``
* :class:`CNode`, :class:`CNodeExpr` and :class:`CNodeStmt` from
``bip/hexrays/cnode.py``
* :class:`HxCItem`, :class:`HxCExpr` and :class:`HxCStmt` from
``bip/hexrays/hx_citem.py``
* classes in ``bip/hexrays/hx_cexpr.py`` and ``bip/hexrays/hx_cstmt.py``
and their equivalent dynamoically create by ``bip/hexrays/cnode.py``
* visitors functions in ``bip/hexrays/hx_visitor.py`` and
``bip/hexrays/cnode_visitor.py`` (indirectly).
This also use the function from ``test/genst_hxast.py`` for performing
test on all nodes through visitors.
The function starting by ``gentst_`` are made to be able to run on which
ever node which inherit from this class and check generic properties which
should be valid for all node. Those are use for allowing to get more test
executed when using the visitors.
"""
from genst_hxast import *
def test_bipabstractcitem00():
## fix abstract citem test, made on the root_node
hxf = HxCFunc.from_addr(0x01800D2FF0)
aci = hxf.root_node
gentst_abstractcitem(aci) # generic test for all abstractcitem
# base
#assert aci.ea == 0x1800D300B # first instruction after the header
assert aci.is_expr == False
assert aci.is_statement == True
assert aci._ctype == HxCType.CIT_BLOCK
# equality
assert id(aci) != id(hxf.root_node)
assert aci == hxf.root_node
assert aci != hxf.root_node.stmt_children[0]
assert aci.__eq__(0x10) == NotImplemented
assert aci.__ne__(0x10) == NotImplemented
assert aci != 0x10
def test_bipcnode00():
## fix CNode, CNodeExpr and CNodeStmt test, made from the root_node
hxf = HxCFunc.from_addr(0x01800D2FF0)
cn = hxf.root_node
assert isinstance(cn, CNodeStmtBlock) # root node is always a block
assert cn.is_statement
assert not cn.is_expr
gentst_cnode(cn)
gentst_cnodestmt(cn)
cnc = cn.stmt_children[0] # first child, this should be a CNodeStmtExpr
assert isinstance(cnc, CNodeStmtExpr)
assert cnc.is_statement
assert not cnc.is_expr
gentst_cnode(cnc)
gentst_cnodestmt(cnc)
cna = cnc.value # first asg
assert isinstance(cna, CNodeExprAsg)
assert cna.is_expr
assert not cna.is_statement
gentst_cnodeexpr(cna)
# base
#assert cn.closest_ea == 0x1800D300B
# access
assert cn.has_parent == False
assert cnc.has_parent == True
with pytest.raises(RuntimeError): cn.parent
assert cnc.parent == cn
assert cn.hxcfunc == hxf
# comment
assert cna.comment is None
cna.comment = "cmt4test"
assert cna.comment == "cmt4test"
# cnodeExpr
assert len(cna.ops) == 2
assert isinstance(cna.find_final_left_node(), CNodeExprVar)
# cnodeStmt
assert len(cn.stmt_children) != 0
assert len(cnc.stmt_children) == 0
assert len(cn.expr_children) == 0
assert len(cnc.expr_children) == 1
hxf2 = HxCFunc.from_addr(0x0180002524)
assert isinstance(hxf2.get_cnode_label(6), CNode)
assert hxf2.get_cnode_label(42) is None
cnl = hxf2.cnodes_with_label
assert isinstance(cnl, list)
for cn in cnl:
assert isinstance(cn, CNode)
assert cn.has_label == True
def test_biphxcitem00():
## fix HxCItem test, just apply generic as most is the same as in the test_bipcnode00
hxf = HxCFunc.from_addr(0x01800D2FF0)
hi = hxf.hx_root_stmt
gentst_hxcstmt(hi)
    hic = hi.stmt_children[0] # first child, this should be an HxCStmtExpr
assert isinstance(hic, HxCStmtExpr)
gentst_hxcstmt(hic)
hia = hic.value # first asg
assert isinstance(hia, HxCExprAsg)
gentst_hxcexpr(hia)
hxf2 = HxCFunc.from_addr(0x0180002524)
assert isinstance(hxf2.hx_get_label(6), HxCItem)
def test_biphxvisitor00():
# test for the HxCItem visitors
hxf = HxCFunc.from_addr(0x01800D2FF0)
hxf.hx_visit_expr(genst_all)
hxf.hx_visit_list_expr([HxCExprCall], genst_all)
hxf.hx_visit_stmt(genst_all)
hxf.hx_visit_list_stmt([HxCStmtExpr], genst_all)
hxf.hx_visit_all(genst_all)
hxf.hx_visit_list_all([HxCExprCall, HxCStmtExpr], genst_all)
def test_bipcnodevisitor00():
# Visitor for the cnode, as visitor functions in HxCFunc are wrapper on top
# of the CNode functions this is considered enough. Internally those
# use the functions in cnode_visitor.py
hxf = HxCFunc.from_addr(0x01800D2FF0)
hxf.visit_cnode(genst_all)
def _intern_testfilter(cn):
assert isinstance(cn, (CNodeExprCall, CNodeStmtExpr))
genst_all(cn)
hxf.visit_cnode_filterlist(_intern_testfilter, [CNodeExprCall, CNodeStmtExpr])
hxf = HxCFunc.from_addr(0x0180002524)
hxf.visit_cnode(genst_all)
ln = hxf.get_cnode_filter_type([CNodeStmtReturn])
for cnr in ln:
cn = cnr.value
assert isinstance(cn, CNodeExpr)
assert len(cn.get_cnode_filter(lambda x: True)) <= 8 # 8 should be more than sufficient
hxf = HxCFunc.from_addr(0x0180078F20)
def _intern_testfilter2(cn): # return the call to an Helper function
return isinstance(cn, CNodeExprCall) and isinstance(cn.caller, CNodeExprHelper)
ln = hxf.get_cnode_filter(_intern_testfilter2)
assert isinstance(ln, list)
assert len(ln) == 1
assert isinstance(ln[0], CNodeExprCall) and isinstance(ln[0].caller, CNodeExprHelper)
ln = hxf.get_cnode_filter_type([CNodeExprHelper])
assert isinstance(ln, list)
assert len(ln) == 1
assert isinstance(ln[0], CNodeExprHelper)
hxf = HxCFunc.from_addr(0x018009BF50)
hxf.visit_cnode(genst_all)
|
1615564
|
import os
from pathlib import Path
import shutil
from ament_index_python.packages import get_package_share_directory, get_package_prefix
import launch
import launch_ros.actions
def generate_launch_description():
if not "tesseract_collision" in os.environ["AMENT_PREFIX_PATH"]:
# workaround for pluginlib ClassLoader bug: manually add tesseract_collision to the AMENT_PREFIX_PATH env variable
head, tail = os.path.split(get_package_prefix('crs_support'))
path = os.path.join(head, 'tesseract_collision')
os.environ["AMENT_PREFIX_PATH"] += os.pathsep + path
urdf = os.path.join(get_package_share_directory('crs_support'), 'urdf', 'swri_demo.urdf')
srdf = os.path.join(get_package_share_directory('crs_support'), 'urdf', 'ur10e_robot.srdf')
gzworld = os.path.join(get_package_share_directory('crs_support'), 'worlds', 'crs.world')
try:
crs_models_dir = str(Path.home().joinpath('.gazebo', 'models', 'crs_support').resolve(strict=True))
except FileNotFoundError: #os.path.exists(crs_models_dir):
gazebo_path = str(Path.home().joinpath('.gazebo', 'models').resolve())
os.mkdir(gazebo_path + "/crs_support")
        shutil.copytree(os.path.join(get_package_share_directory('crs_support'), 'meshes'),
                        str(Path.home().joinpath('.gazebo', 'models', 'crs_support', 'meshes')))
tesseract_env = launch_ros.actions.Node(
node_name='env_node',
package='tesseract_monitoring',
node_executable='tesseract_monitoring_environment_node',
output='screen',
parameters=[{'use_sim_time': 'true',
'desc_param': 'robot_description',
'robot_description': urdf,
'robot_description_semantic': srdf}])
gzserver = launch.actions.ExecuteProcess(
cmd=['gazebo', '--verbose', '-s', 'libgazebo_ros_factory.so', '--world', gzworld],
output='screen'
)
spawner1 = launch_ros.actions.Node(
node_name='spawn_node',
package='gazebo_ros',
node_executable='spawn_entity.py',
arguments=['-entity', 'test', '-x', '0', '-y', '0', '-z', '0.05', '-file', urdf])
return launch.LaunchDescription([
# environment
tesseract_env,
# gazebo
gzserver,
spawner1
])
|
1615568
|
import math
from time import time as now
from collections import deque
#from sorted_value_dict import SortedValueDict
SortedValueDict = dict  # temp debug: plain dict stand-in for SortedValueDict
from processors.properties.property import Property
class BlockchainState(Property):
def __init__(self):
super().__init__()
self.requires = ['trace']#, 'tx', 'log']
self.requiresHistoricalData = True
self.returnsData = False
self._init_state()
self._clear_state()
def _init_state(self):
self.recentLengthDays = 3
self.recentLength = self.recentLengthDays*86400 # 3 days in seconds
self.topXFraction = 0.000005
self.topUsedFraction = 0.000005
self.initialSupply = 72_000_000 * 1000000000000000000 #72M ETH in wei. The ETH created at genesis
#total ETH in existence
#we start at genesis and add every reward to the total supply
self.totalETH = self.initialSupply
#total amount of times where an account has been the initiator or receiver of an action
self.totalTimesUsed = 0
#contract lookup
self.isContract = {}
#balance lookup
self.balances = {}
#amount of transactions this acc participates in
self.timesUsed = {}
#total accs
self.totalAccountsAmount = 0
#recently active/created accounts.
#We push to the front the most recent accounts and pop from the back the older ones
#self.recentlyActive = deque()
#self.isRecentlyActive = {}
self.recentlyCreated = deque()
self.isRecentlyCreated = {}
#dicts to hold account timestamps. Used when calculating recently active/created
self.dateCreated = {}
#self.dateActive = {}
        #in order for ETHRecentlyExchanged to be calculated, we keep a list with one entry per
        #processed tick over the recent window; each element is the amount of ETH exchanged in
        #that tick, and the property value is the sum of that list
self.ETHRecentlyExchangedList = [0] * self.recentLengthDays
#clears everything that has to be cleared for every processTick call
def _clear_state(self):
#TOP IN BALANCE/USED GLOBAL/LOCAL CONTRACTS/ACCOUNTS LIST/MAP
#IS TOP X FROM THE ACTIVE ACCOUNTS
#THIS IS THE ONLY WAY
#top x% lookup (list & map)
self.isTopX = {}
self.topX = []
#for contracts
self.isContractTopX = {}
self.contractTopX = []
#local top X lookup
self.isLocalTopX = {}
self.localTopX = []
#for contracts
self.isContractLocalTopX = {}
self.contractLocalTopX = []
#top x% in usage (list & map)
self.isTopUsed = {}
self.topUsed = []
#for contracts
self.isContractTopUsed = {}
self.contractTopUsed = []
#local top X in usage lookup
self.isLocalTopUsed = {}
self.localTopUsed = []
#for contracts
self.isContractLocalTopUsed = {}
self.contractLocalTopUsed = []
#local structures to achieve local top scores
self.localBalances = SortedValueDict()
self.localTotalETH = 0
self.localTimesUsed = SortedValueDict()
self.localTotalTimesUsed = 0
#list of local accounts
self.isLocalAccount = {}
self.localAccounts = []
#list/map of local new accounts/contracts
self.isAccountNew = {}
self.newAccounts = []
self.isContractNew = {}
self.newContracts = []
#address -> list of transactions/traces/logs
self.localTransactionsOf = {}
self.localTracesOf = {}
self.localLogsOf = {}
# tx hash -> true/undefined whether this transaction exists
self.isTransaction = {}
#block N -> bool
self.isBlock = {}
#address -> gas used total in this period
self.gasUsedBy = {}
#address -> amount and list of sent/received in this period
self.amountSentBy = {}
self.senders = []
self.amountReceivedBy = {}
self.receivers = []
#active accs
self.localAccountsAmount = 0
#amount of ETH that has been recently exchanged
self.ETHRecentlyExchanged = 0
#time of last block
self.lastTimestamp = 0
#TODO: Avg bal of just created accs (avg value of first tx to acc?)
#TODO: when working with ETH, also have a USD version of the same property
def processTick(self, data):
t = now()
self._clear_state()
print("Clear state took %4fs" % (now() - t))
ethExchanged = 0
        if 'trace' in data:
            t = now()
            # column position of 'from' (+1 because itertuples() puts the Index first)
            fromI = data['trace'].columns.get_loc('from')+1
for trace in data['trace'].itertuples():
sender = getattr(trace, '_'+str(fromI))
receiver = trace.to
sender = self.noneIfInf(sender)
receiver = self.noneIfInf(receiver)
value = int(trace.value, 0)
gasUsed = self.noneIfInf(trace.gasUsed)
if gasUsed is not None:
gasUsed = int(trace.gasUsed, 0)
timestamp = trace.Index.value // 10**9 #EPOCH time
ethExchanged += value
self.isTransaction[trace.transactionHash] = True
self.isBlock[trace.blockNumber] = True
if gasUsed is not None:
self.gasUsedBy[sender] = self.gasUsedBy.get(sender, 0) + gasUsed
if trace.type == 'create':
assert(receiver not in self.isContract)
self.isContract[receiver] = True
self.isContractNew[receiver] = True
self.newContracts.append(receiver)
elif trace.type == 'reward':
#rewards can be either block or uncle, each of which is increasing the total supply of Ethereum
self.totalETH += value
for acc in [sender, receiver]:
if acc is None:
continue
if acc not in self.balances:
self.totalAccountsAmount += 1
self.dateCreated[acc] = timestamp
self.recentlyCreated.appendleft(acc)
self.balances[acc] = 0
self.timesUsed[acc] = 0
self.isAccountNew[acc] = True
self.newAccounts.append(acc)
if acc == sender:
currBal = self.balances[acc]
self.balances[acc] = 0 if currBal < value else currBal - value
#this case is possible because we don't have the genesis transfers
#TODO: Get the genesis transfers from Etherscan's API or make a web scrape
self.localTracesOf.setdefault(acc, [])
self.localTracesOf[acc].append(trace)
if acc not in self.amountSentBy:
self.amountSentBy[acc] = 0
self.senders.append(acc)
self.amountSentBy[acc] += value
if acc == receiver:
self.balances[receiver] += value
if acc not in self.amountReceivedBy:
self.amountReceivedBy[acc] = 0
self.receivers.append(acc)
self.amountReceivedBy[acc] += value
#update local accounts
if acc not in self.isLocalAccount:
self.isLocalAccount[acc] = True
self.localAccounts.append(acc)
#update usage info
self.totalTimesUsed += 1
self.timesUsed[acc] += 1
self.localTimesUsed.setdefault(acc, 0)
self.localTimesUsed[acc] += 1
#update active times
#lastActive = self.dateActive.get(acc, 0)
#assert(lastActive <= timestamp)
#if lastActive < timestamp:
# self.dateActive[acc] = timestamp
# self.recentlyActive.push_left(acc)
assert(self.lastTimestamp <= timestamp)
self.lastTimestamp = timestamp
print("Replaying traces took %4fs" % (now() - t))
if 'tx' in data:
t = now()
fromITX = data['tx'].columns.get_loc('from')+1
for tx in data['tx'].itertuples():
sender = self.noneIfInf(getattr(tx, '_'+str(fromITX)))
if sender is not None:
self.localTransactionsOf.setdefault(sender, [])
self.localTransactionsOf[sender].append(tx)
print("Iterating TXs took %4fs" % (now() - t))
if 'log' in data:
t = now()
for log in data['log'].itertuples():
adr = self.noneIfInf(log.address)
if adr is not None:
self.localLogsOf.setdefault(adr, [])
self.localLogsOf[adr].append(log)
print("Iterating logs took %4fs" % (now() - t))
t = now()
self.localAccountsAmount = len(self.localAccounts)
print("Getting recent records took %4fs" % (now() - t))
t = now()
#update local balances
for acc in self.localAccounts:
val = self.balances[acc]
self.localBalances[acc] = val
self.localTotalETH += val
#update top records
#TODO: Hold the previous top records and remove the no-longer top records before proceeding
#maybe by having a sorted dict and remove the last ones or just sort and remove?
if self.isTop(self.balances[acc], self.topXFraction, self.totalETH):
if self.isContract.get(acc, False):
self.isContractTopX[acc] = True
self.contractTopX.append(acc)
else:
self.isTopX[acc] = True
self.topX.append(acc)
if self.isTop(self.balances[acc], self.topXFraction, self.localTotalETH):
if self.isContract.get(acc, False):
self.isContractLocalTopX[acc] = True
self.contractLocalTopX.append(acc)
else:
self.isLocalTopX[acc] = True
self.localTopX.append(acc)
if self.isTop(self.timesUsed[acc], self.topUsedFraction, self.totalTimesUsed):
if self.isContract.get(acc, False):
self.isContractTopUsed[acc] = True
self.contractTopUsed.append(acc)
else:
self.isTopUsed[acc] = True
self.topUsed.append(acc)
if self.isTop(self.timesUsed[acc], self.topUsedFraction, self.localTotalTimesUsed):
if self.isContract.get(acc, False):
self.isContractLocalTopUsed[acc] = True
self.contractLocalTopUsed.append(acc)
else:
self.isLocalTopUsed[acc] = True
self.localTopUsed.append(acc)
print("Local balance iteration took %4fs" % (now() - t))
t = now()
#update recently created by removing the last part of accounts
self.recentlyCreated = self.removeOldDequeRecords(self.recentlyCreated, self.lastTimestamp - self.recentLength, self.dateCreated)
self.isRecentlyCreated = self.listToTrueDict(self.recentlyCreated)
        #update recently exchanged ETH
# remove the oldest tick and append the latest one
self.ETHRecentlyExchangedList.pop()
self.ETHRecentlyExchangedList.insert(0, ethExchanged)
self.ETHRecentlyExchanged = sum(self.ETHRecentlyExchangedList)
self.printDebug()
print("Debug & recently exchanged took %4fs" % (now() - t))
def noneIfInf(self, a):
if isinstance(a, float) and math.isnan(a):
return None
return a
    def removeOldDequeRecords(self, dq, minVal, valDict=None):
        #the dq is ordered newest (left) -> oldest (right), so pop from the right and
        #stop at the first record that is still within the window (otherwise the loop
        #would pop and re-append the same recent record forever)
        while dq:
            val = dq.pop()
            if valDict is not None:
                valCmp = valDict[val]
            else:
                valCmp = val
            if valCmp >= minVal:
                dq.append(val)
                break
        return dq
def getRecentRecords(self, dct, interval, currentTime=None):
target = None
if currentTime is not None:
target = currentTime - interval
res = []
prevVal = None
for key in dct:
val = dct[key]
if currentTime is None:
currentTime = val
target = currentTime - interval
res.append(key)
assert(prevVal is None or val <= prevVal)
prevVal = val
if val < target:
break
return res
def isTop(self, value, fraction, total):
minTarget = total * fraction
return value >= minTarget
def getTopRecords(self, dct, fraction, total, restrictorDict=None, inverseRestriction=False):
#we should return all records with values higher than min target
minTarget = total * fraction
res = []
prevVal = None
for key in dct:
val = dct[key]
if val >= minTarget and (restrictorDict is None or restrictorDict.get(key, False) is not inverseRestriction):
res.append(key)
else:
break
#assert(prevVal is None or val <= prevVal)
prevVal = val
return res
def listToTrueDict(self, lst, dct=None):
if dct is None:
dct = {}
for el in lst:
dct[el] = True
return dct
def printDebug(self):
print("Amount of topX: %d" % len(self.topX))
print("Amount of topX contracts: %d" % len(self.contractTopX))
print("Amount of local topX: %d" % len(self.localTopX))
print("Amount of local topX contracts: %d" % len(self.contractLocalTopX))
print("Amount of top used: %d" % len(self.topUsed))
print("Amount of contract top used: %d" % len(self.contractTopUsed))
print("Amount of local top used: %d" % len(self.localTopUsed))
print("Amount of contract local top used: %d" % len(self.contractLocalTopUsed))
print("ETH recently exchanged: %d" % self.ETHRecentlyExchanged)
#print("Recently active accounts: %d" % len(self.recentlyActive))
print("Recently created accounts: %d" % len(self.recentlyCreated))
if self.localAccountsAmount > 0:
print("Senders %d, with sent val of first being %d" % (len(self.senders), self.amountSentBy[self.senders[0]]))
print("Receivers %d, with received val of first being %d" % (len(self.receivers), self.amountReceivedBy[self.receivers[0]]))
acc = self.localAccounts[0]
print("Local accounts %d, traces from first account %d, logs %d and TXs %d" % (len(self.localAccounts), len(self.localTracesOf.get(acc, [])), \
len(self.localLogsOf.get(acc, [])), len(self.localTransactionsOf.get(acc, [])) ))
print("%d new accounts and %d new contracts" % (len(self.newAccounts), len(self.newContracts)))
print("%d times locally used and %d total local balance" % (self.localTotalTimesUsed, self.localTotalETH))
print("%d total accounts, %d total times used and %d total ETH" % (self.totalAccountsAmount, self.totalTimesUsed, self.totalETH))
#singleton
state = BlockchainState()
|
1615597
|
from adobe_analytics import Client, ReportDefinition
client = Client.from_json("my_credentials.json")
suites = client.suites()
suite = suites["my_report_suite_id"]
# for classifications a simple string for the dimension_id isn't sufficient anymore
# you need to specify the id and the classification name in a dictionary
report_definition = ReportDefinition(
dimensions=[
{"id": "product", "classification": "Product Name"}
],
metrics=["visits", "orders"], # similar for metrics
date_from="2017-01-01",
date_to="2017-12-31",
granularity="day"
)
dataframe = suite.download(report_definition)
print(dataframe.head())
|
1615600
|
def merge(output_dir, scan_name, threshold, motion_f, power_f, flag):
"""
Method to merge power parameters and motion
parameters file
"""
import os
import re
    if threshold is None:
filename = scan_name + "_all_params.csv"
filename = filename.lstrip("_")
outfile = os.path.join(output_dir, filename)
threshold_val = 0.0
else:
filename = scan_name + threshold + "_all_params.csv"
filename = filename.lstrip("_")
outfile = os.path.join(output_dir, filename)
threshold_val = float(re.sub(r"[a-zA-Z_]", '', threshold))
# Read in the motion and power parameters files
try:
motion = open(motion_f, 'r').readlines()
except Exception as e:
err_string = "\n\n[!] CPAC says: Could not read the motion " \
"parameters file.\n\nFilepath: %s\n\nError details: %s" \
"\n\n" % (motion_f, e)
raise Exception(err_string)
try:
power = open(power_f, 'r').readlines()
except Exception as e:
err_string = "\n\n[!] CPAC says: Could not read the power " \
"parameters file.\n\nFilepath: %s\n\nError details: %s" \
"\n\n" % (power_f, e)
raise Exception(err_string)
# Write the combined motion and power parameters CSV file
try:
if flag:
f = open(outfile, 'w')
m = motion[0].strip("\n")
p = ','.join(power[0].split(",")[1:])
f.write(m+p)
else:
f = open(outfile, 'a')
m = motion[1]
p = ','.join(power[1].split(",")[2:])
f.write(m+p+"\n")
f.close()
except Exception as e:
err_string = "\n\n[!] CPAC says: Could not create or open the motion "\
"and power parameters CSV file. Ensure you have write " \
"permissions for the directory it is writing to.\n\n" \
"Attempted write path: %s\n\nError details: %s\n\n" \
% (outfile, e)
raise Exception(err_string)
def grab(output_dir, scrubbing):
"""
Method to grab all the motion parameters
and power parameters file from each subject
for each pipeline and merge them
Parameters
----------
output_dir : string
Path to the datasink output directory of CPAC
"""
import glob
import os
import re
pipelines = glob.glob(os.path.join(output_dir, 'pipeline*'))
for p in pipelines:
scan_list = []
threshold_list = []
pattern1 = re.compile(r'(\w)*scan(\w)*(\d)*(\w)*[/]')
pattern2 = re.compile(r'(\w)*threshold_[-+]?([0-9]*\.[0-9]+|[0-9]+)')
scans = glob.glob(os.path.join(p, '*/power_params/*/*'))
#get the unique scans and threshold value
for s in scans:
val = re.search(pattern1, s)
if val:
scan_list.append(val.group(0).rstrip("/"))
val = re.search(pattern2, s)
if val:
threshold_list.append(val.group(0))
        scan_list = set(scan_list)
        threshold_list = set(threshold_list)
for scan in scan_list:
for threshold in threshold_list:
Flag = 1
#merge files for each subject
for sub in os.listdir(p):
sub = os.path.join(p, sub)
motion_file = os.path.join(sub, 'motion_params', scan,
'motion_parameters.txt')
power_file = os.path.join(sub, 'power_params', scan,
threshold, 'pow_params.txt')
if os.path.exists(motion_file) and \
os.path.exists(power_file):
merge(p, scan, threshold,
motion_file, power_file, Flag)
Flag = 0
if 0 in scrubbing:
for sub in os.listdir(p):
sub = os.path.join(p, sub)
motion_file = os.path.join(sub, 'motion_params', scan,
'motion_parameters.txt')
power_file = os.path.join(sub, 'power_params', scan,
'pow_params.txt')
if os.path.exists(motion_file) and \
os.path.exists(power_file):
threshold = None
merge(p, scan, threshold, motion_file,
power_file, Flag)
Flag = 0
return threshold
def run(output_path, scrubbing):
threshold = grab(output_path, scrubbing)
return threshold
if __name__ == '__main__':
import sys
if (len(sys.argv) == 2):
grab(sys.argv[1], [0])
else:
print('Usage: python extract_parameters.py /path/to/output/dir')
|
1615622
|
import argparse
class Config():
def __init__(self):
pass
def parse(self):
parser = argparse.ArgumentParser(description='GAN generation')
###parsing
parser.add_argument('--input_nc_G_parsing', type=int, default=36, help='# of input image channels: 3 for RGB and 1 for grayscale') # [3,3,20] 36 / 23 / 26 parsing/gen/MPV gen/
parser.add_argument('--input_nc_D_parsing', type=int, default=56, help='# of input image channels: 3 for RGB and 1 for grayscale') # 40 / 6 / 6
parser.add_argument('--output_nc_parsing', type=int, default=20, help='# of output image channels: 3 for RGB and 1 for grayscale') # 20 / 3 / 3
parser.add_argument('--netD_parsing', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG_parsing', type=str, default='unet_256', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
###appearance
parser.add_argument('--input_nc_G_app', type=int, default=26, help='# of input image channels: 3 for RGB and 1 for grayscale') # [3,3,20] 36 / 23 / 26 parsing/gen/MPV gen/
parser.add_argument('--input_nc_D_app', type=int, default=6, help='# of input image channels: 3 for RGB and 1 for grayscale') # 40 / 6 / 6
parser.add_argument('--output_nc_app', type=int, default=4, help='# of output image channels: 3 for RGB and 1 for grayscale') # 20 / 3 / 3
parser.add_argument('--netD_app', type=str, default='resnet_blocks', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG_app', type=str, default='treeresnet', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
###face
parser.add_argument('--input_nc_G_face', type=int, default=6, help='# of input image channels: 3 for RGB and 1 for grayscale') # [3,3,20] 36 / 23 / 26 parsing/gen/MPV gen/
parser.add_argument('--input_nc_D_face', type=int, default=6, help='# of input image channels: 3 for RGB and 1 for grayscale') # 40 / 6 / 6
parser.add_argument('--output_nc_face', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') # 20 / 3 / 3
parser.add_argument('--netD_face', type=str, default='resnet_blocks', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG_face', type=str, default='treeresnet', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') #default False || not dropout
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--decay_iters', type=int, default=10, help='epochs for learning rate decay to zero')
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--gpu_ids', type=list, default=[0,1,2,3])
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--start_epoch', type=int, default=0)
parser.add_argument('--epoch', type=int, default=200)
parser.add_argument('--size', type=tuple, default=(256,192))
parser.add_argument('--num_workers', type=int, default=16)
        parser.add_argument('--gan_mode', type=str, default='lsgan') # lsgan or vanilla; lsgan works better here than the vanilla (BCE) GAN loss
parser.add_argument('--save_epoch_freq', type=int, default=1)
parser.add_argument('--print_freq', type=int, default=10)
parser.add_argument('--val_freq', type=int, default=200)
parser.add_argument('--batch_size_t', type=int, default=128)
parser.add_argument('--batch_size_v', type=int, default=16)
parser.add_argument('--suffix', default='', type=str)
parser.add_argument('--train_mode', default='parsing', type=str)
parser.add_argument('--dataset', default='MPV', type=str)
parser.add_argument('--dataset_mode', default='regular', type=str)
parser.add_argument('--lambda_L1', type=float, default=1)
parser.add_argument('--G_GAN', type=float, default=1)
parser.add_argument('--G_VGG', type=float, default=1)
parser.add_argument('--mask', type=float, default=1)
parser.add_argument('--G_nn', type=float, default=1) # nnloss
parser.add_argument('--face_vgg', type=float, default=1)
parser.add_argument('--face_L1', type=float, default=10)
parser.add_argument('--face_img_L1', type=float, default=1)
parser.add_argument('--face_gan', type=float, default=3) # gan loss
parser.add_argument('--use_gmm', default=False, action='store_true')
parser.add_argument('--grid_size', type=int, default=5) # the same as cpvton
parser.add_argument('--fine_height', type=int, default=256)
parser.add_argument('--fine_width', type=int, default=192)
parser.add_argument('--joint', default=False, action='store_true')
parser.add_argument('--joint_all', default=False, action='store_true')
# forward
parser.add_argument('--forward', default='normal', type=str)
parser.add_argument('--isdemo', default=False, action='store_true')
parser.add_argument('--isval', default=False, action='store_true')
parser.add_argument('--forward_save_path', default='end2end', type=str)
parser.add_argument('--save_time', default=False, action='store_true')
# for edgetoshoe
parser.add_argument('--dataroot', default=False, action='store_true')
parser.add_argument('--pool_size', type=int, default=100)
### resume dir
parser.add_argument('--resume_gmm', default="pretrained_checkpoint/step_009000.pth", type=str)
parser.add_argument('--resume_G_parse', default='pretrained_checkpoint/parsing.tar', type=str)
parser.add_argument('--resume_G_app', default='pretrained_checkpoint/app.tar', type=str) #pretrained_checkpoint/app.tar
parser.add_argument('--resume_G_face', default='pretrained_checkpoint/face.tar', type=str)
parser.add_argument('--resume_D_parse', default='', type=str)
parser.add_argument('--resume_D_app', default='', type=str)
parser.add_argument('--resume_D_face', default='', type=str)
### face refinement
parser.add_argument('--face_residual', default=False, action='store_true')
### joint with parsing loss
parser.add_argument('--joint_parse_loss', default=False, action='store_true')
parser.add_argument('--joint_G_parsing', type=float, default=1)
parser.add_argument('--mask_tvloss', default=False, action='store_true')
### train | val | demo
parser.add_argument('--warp_cloth', default=False, action='store_true')
args = parser.parse_args()
print(args)
return args
if __name__ == "__main__":
pass
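# Hedged usage sketch: the training scripts are expected to obtain their options through
# Config().parse(), which reads sys.argv, prints the resulting Namespace, and returns it.
#
#   config = Config()
#   args = config.parse()
#   print(args.train_mode, args.batch_size_t)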
|
1615624
|
import os
import torch
from torchvision.datasets import CelebA, CIFAR10, LSUN, ImageFolder
from torch.utils.data import Dataset, DataLoader, random_split, Subset
from utils import CropTransform
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
import cv2
from PIL import Image
# Change the below to the actual dataset root folders
celeba_root = 'datasets/CelebA'
ffhq_root = 'datasets/FFHQ'
shoes_root = 'datasets/edges2shoes'
class Shoes(Dataset):
"""
Dataset format is the same as used in pix2pix. We take only trainB and testB.
"""
def __init__(self, root_dir, split='train', transform=None):
self.root_dir = root_dir
self.transform = transform
self.split = split
self.im_list = [f for f in os.listdir(os.path.join(root_dir, split+'B')) if f.endswith('jpg')]
print('Got {} shoes in split {}.'.format(len(self.im_list), split))
def __len__(self):
return len(self.im_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = os.path.join(self.root_dir, self.split+'B', self.im_list[idx])
image = Image.open(img_path)
if not image.mode == 'RGB':
image = image.convert('RGB')
if self.transform:
image = self.transform(image)
return image
class FFHQ(Dataset):
"""
FFHQ folder should contain images1024x1024 and thumbnails128x128
"""
def __init__(self, root_dir, split='train', transform=None, use_thumbnails=False):
self.root_dir = root_dir
self.transform = transform
self.split = split
self.use_thumbnails = use_thumbnails
self.split_ranges = {'train': (0, 60000), 'test': (60000, 70000)}
def __len__(self):
return self.split_ranges[self.split][1] - self.split_ranges[self.split][0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
subfolder = 'thumbnails128x128' if self.use_thumbnails else 'images1024x1024'
img_name = os.path.join(self.root_dir, subfolder, '%05i.png' % (idx+self.split_ranges[self.split][0]))
image = Image.open(img_name)
if self.transform:
image = self.transform(image)
return image
def load_data(dataset, num_samples=None, w=128, shuffle=True, has_cls=False):
if num_samples:
if shuffle:
dataset = random_split(dataset, [num_samples, len(dataset)-num_samples])[0]
else:
dataset = Subset(dataset, np.arange(num_samples))
loader = DataLoader(dataset, shuffle=shuffle, num_workers=8)
if has_cls:
return np.vstack([x.numpy() for x, _ in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3)
return np.vstack([x.numpy() for x in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3)
def get_ffhq_data(split='train', num_samples=None, w=128, shuffle=True):
ffhq = FFHQ(ffhq_root, split=split, transform=transforms.Compose([transforms.Resize(w), transforms.ToTensor()]),
use_thumbnails=(w <= 128))
return load_data(ffhq, num_samples, w, shuffle)
def get_celeba_data(split='train', num_samples=None, w=128, attr_num=None, attr_value=None, shuffle=True):
celeba = CelebA(root=celeba_root, split=split, download=False, target_type='attr',
transform=transforms.Compose([CropTransform((25, 50, 25+128, 50+128)),
transforms.Resize(w),
transforms.ToTensor()]))
return load_data(celeba, num_samples, w, shuffle, has_cls=True)
def get_shoes_data(split='train', num_samples=None, w=128, shuffle=True):
shoes = Shoes(shoes_root, split=split, transform=transforms.Compose([transforms.CenterCrop((256, 256)),
transforms.Resize((w, w)),
transforms.ToTensor()]))
return load_data(shoes, num_samples, w, shuffle)
def true_transform(X, ttype='identity', w=128):
"""
Apply a synthetic transformation to a set of images
:param X: Images (ch last) flattened - each image as row vector in X
:param ttype: The required transformation
:param w: The image resolution (w=h)
:return: Transformed images
"""
X = X.reshape(-1, w, w, 3)
if ttype == 'rot90':
X = np.rot90(X, k=1, axes=(1, 2))
elif ttype == 'inpaint':
mask = cv2.imread('data/inpaint_mask_simple.png').astype(np.float32)/255.0
# mask = cv2.imread('data/inpaint_mask.png').astype(np.float32)/255.0
# mask[:, 64:, :] = 1.0 - mask[:, 64:, :]
if not mask.shape[0] == w:
mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_NEAREST)
X = X.copy() * mask.reshape(1, w, w, 3)
elif ttype == 'vflip':
X = X[:, ::-1]
elif ttype == 'colorize':
X = np.repeat(np.mean(X, axis=3, keepdims=True), 3, axis=3)
elif ttype == 'edges':
ksize = 1 if w == 64 else 3
X = np.stack([cv2.Laplacian(X[i], cv2.CV_32F, ksize=ksize) for i in range(X.shape[0])])
elif ttype == 'Canny-edges':
edges = np.stack([cv2.Canny((np.mean(X[i], axis=2)*255.0).astype(np.uint8), 80, 200) for i in range(X.shape[0])])
X = np.repeat(np.expand_dims(edges.astype(np.float32)*(1.0/255.0), 3), 3, axis=3)
elif ttype == 'super-res':
X = np.stack([cv2.resize(cv2.resize(X[i], (w//8, w//8), interpolation=cv2.INTER_LINEAR), (w, w),
interpolation=cv2.INTER_LINEAR) for i in range(X.shape[0])])
elif ttype == 'identity':
pass
else:
assert False, ttype
return X.reshape(-1, w*w*3)
def get_data(args):
"""
Load samples from a dataset and apply a synthetic transformation to half of the data ("A")
:param args: Relevant options are:
dataset: Name of the dataset to be loaded
n_train: Number of training images
n_test: Number of test images
resolution: Images will be resized to [resolution x resolution]
pairing: 'paired' = supervised - X_A[i] = T(X_B[i])
'matching' = The same original images are used for X_A and X_B, but in different random order
'nonmatching' = X_A and X_B are disjoint sets (i.e. split the dataset to two parts)
'few-matches' = Only 1/8 of the images in X_A and X_B match
a_transform: The synthetic transformation applied to X_A (see function true_transform)
:return: X_A, X_B, X_A_test, X_B_test
"""
if args.dataset == 'celeba':
train_x = get_celeba_data(num_samples=args.n_train, w=args.resolution)
test_x = get_celeba_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False)
elif args.dataset == 'ffhq':
train_x = get_ffhq_data(num_samples=args.n_train, w=args.resolution)
test_x = get_ffhq_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False)
elif args.dataset == 'shoes':
train_x = get_shoes_data(num_samples=args.n_train, w=args.resolution)
test_x = get_shoes_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False)
n_train = train_x.shape[0]
if args.pairing == 'nonmatching':
X_A = train_x[:n_train//2]
X_B = train_x[n_train//2:]
elif args.pairing == 'few-matches':
n_matches = n_train//8
if (n_train-n_matches) % 2 == 1:
n_matches += 1
print('Inserting {}/{} matching pairs...'.format(n_matches, n_train))
n_per_part = (n_train-n_matches) // 2
X_A = train_x[:(n_per_part+n_matches)].copy()
X_B = train_x[n_per_part:]
else:
X_A = train_x
X_B = train_x.copy()
if not args.pairing == 'paired':
np.random.shuffle(X_B)
X_A = true_transform(X_A, ttype=args.a_transform, w=args.resolution)
X_B_test = test_x.copy()
X_A_test = true_transform(test_x, ttype=args.a_transform, w=args.resolution)
return X_A, X_B, X_A_test, X_B_test
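# Hedged usage sketch: the Namespace fields mirror the options documented in get_data's
# docstring; the concrete values are illustrative only and require the dataset roots
# configured at the top of this file to exist.
#
#   from argparse import Namespace
#   opts = Namespace(dataset='shoes', n_train=1000, n_test=100, resolution=64,
#                    pairing='nonmatching', a_transform='edges')
#   X_A, X_B, X_A_test, X_B_test = get_data(opts)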
|
1615636
|
import logging.config
from kombu import Queue
from celery import Celery
from celery.result import AsyncResult
from celery.signals import setup_logging, task_postrun
from functools import partial
from lightflow.queue.const import DefaultJobQueueName
from lightflow.queue.pickle import patch_celery
from lightflow.models.exceptions import ConfigOverwriteError
LIGHTFLOW_INCLUDE = ['lightflow.queue.jobs', 'lightflow.models']
def create_app(config):
""" Create a fully configured Celery application object.
Args:
config (Config): A reference to a lightflow configuration object.
Returns:
Celery: A fully configured Celery application object.
"""
# configure the celery logging system with the lightflow settings
setup_logging.connect(partial(_initialize_logging, config), weak=False)
task_postrun.connect(partial(_cleanup_workflow, config), weak=False)
# patch Celery to use cloudpickle instead of pickle for serialisation
patch_celery()
# create the main celery app and load the configuration
app = Celery('lightflow')
app.conf.update(**config.celery)
# overwrite user supplied settings to make sure celery works with lightflow
app.conf.update(
task_serializer='pickle',
accept_content=['pickle'],
result_serializer='pickle',
task_default_queue=DefaultJobQueueName.Task
)
if isinstance(app.conf.include, list):
app.conf.include.extend(LIGHTFLOW_INCLUDE)
else:
if len(app.conf.include) > 0:
raise ConfigOverwriteError(
'The content in the include config will be overwritten')
app.conf.include = LIGHTFLOW_INCLUDE
return app
def _initialize_logging(config, **kwargs):
""" Hook into the logging system of celery.
Connects the local logging system to the celery logging system such that both systems
can coexist next to each other.
Args:
config (Config): Reference to the configuration object from which the
logging settings are retrieved.
**kwargs: Keyword arguments from the hook.
"""
logging.config.dictConfig(config.logging)
def _cleanup_workflow(config, task_id, args, **kwargs):
""" Cleanup the results of a workflow when it finished.
Connects to the postrun signal of Celery. If the signal was sent by a workflow,
remove the result from the result backend.
Args:
task_id (str): The id of the task.
args (tuple): The arguments the task was started with.
**kwargs: Keyword arguments from the hook.
"""
from lightflow.models import Workflow
if isinstance(args[0], Workflow):
if config.celery['result_expires'] == 0:
AsyncResult(task_id).forget()
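# Hedged usage sketch: create_app expects a lightflow Config object (how that object is
# built lives outside this module and is treated as an assumption here):
#
#   celery_app = create_app(config)   # config: a lightflow Config instance
#   celery_app.worker_main(['worker', '--loglevel=info'])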
|
1615662
|
import logging
import traceback
from unittest import mock
from .common import BuiltinTest
from bfg9000.builtins import core # noqa
from bfg9000 import exceptions
from bfg9000.path import Path, Root
from bfg9000.safe_str import safe_str, safe_format
class TestCore(BuiltinTest):
def test_warning(self):
with mock.patch('warnings.warn') as warn:
self.context['warning']('message')
warn.assert_called_once_with('message')
with mock.patch('warnings.warn') as warn:
self.context['warning']('message', 1, Path('path'), 'bar')
warn.assert_called_once_with(
'message 1 ' + repr(Path('path')) + ' bar'
)
def test_info(self):
with mock.patch('logging.log') as log:
self.context['info']('message')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.INFO, 'message', extra={
'full_stack': tb, 'show_stack': False
})
with mock.patch('logging.log') as log:
self.context['info']('message', 1, Path('path'), 'bar')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(
logging.INFO, 'message 1 ' + repr(Path('path')) + ' bar',
extra={
'full_stack': tb, 'show_stack': False
}
)
with mock.patch('logging.log') as log:
self.context['info']('message', show_stack=True)
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.INFO, 'message', extra={
'full_stack': tb, 'show_stack': True
})
def test_debug(self):
with mock.patch('logging.log') as log:
self.context['debug']('message')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.DEBUG, 'message', extra={
'full_stack': tb, 'show_stack': True
})
with mock.patch('logging.log') as log:
self.context['debug']('message', 1, Path('path'), 'bar')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(
logging.DEBUG, 'message 1 ' + repr(Path('path')) + ' bar',
extra={
'full_stack': tb, 'show_stack': True
}
)
with mock.patch('logging.log') as log:
self.context['debug']('message', show_stack=False)
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.DEBUG, 'message', extra={
'full_stack': tb, 'show_stack': False
})
def test_exceptions(self):
for name in dir(exceptions):
t = getattr(exceptions, name)
if isinstance(t, type):
self.assertIs(self.context[name], t)
def test_safe_str(self):
self.assertIs(self.context['safe_str'], safe_str)
self.assertIs(self.context['safe_format'], safe_format)
def test_submodule(self):
def mock_execute(context, path):
return context.PathEntry(path)
with mock.patch('bfg9000.build.execute_file',
mock.MagicMock(wraps=mock_execute)) as m:
self.assertEqual(self.context['submodule']('dir'), {})
m.assert_called_once_with(self.context,
Path('dir/build.bfg', Root.srcdir))
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)), \
mock.patch('bfg9000.build.execute_file',
mock.MagicMock(wraps=mock_execute)) as m: # noqa
self.assertEqual(self.context['submodule']('sub'), {})
m.assert_called_once_with(self.context,
Path('dir/sub/build.bfg', Root.srcdir))
def test_export(self):
with self.context.push_path(Path('foo/build.bfg', Root.srcdir)) as p:
self.context['export'](foo='foo')
self.assertEqual(p.exports, {'foo': 'foo'})
self.assertRaises(ValueError, self.context['export'], bar='bar')
|
1615670
|
from itertools import groupby
class AmoebaDivTwo:
def count(self, table, K):
def count(r):
return sum(max(len(list(g)) - K + 1, 0) for k, g in groupby(r) if k == "A")
return sum(count(r) for r in table) + (
sum(count(r) for r in zip(*table)) if K > 1 else 0
)
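# Hedged worked example (values chosen for illustration): for a 1xK amoeba, each maximal
# run of 'A' of length L in a row contributes max(L - K + 1, 0) horizontal placements;
# vertical placements come from the transposed table, and the `if K > 1` guard keeps
# single cells from being counted twice when K == 1.
#
#   solver = AmoebaDivTwo()
#   placements = solver.count(["AABAA", "AAAAA"], 2)   # expected: 6 horizontal + 4 vertical = 10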
|
1615684
|
from django.urls import re_path, include, path
from .views import Classify, ClassifyTags, classify_stats
urlpatterns = [
re_path('^classify/$', Classify.as_view(), name='classify'),
re_path('^tags/$', ClassifyTags.as_view(), name='tags'),
re_path('^classify_stats/$', classify_stats),
]
|
1615740
|
import numpy as np
import os, pylab
import itertools as itl
from PIL import Image, ImageDraw, ImageFont
import util as ut
import scipy.misc, scipy.misc.pilutil # not sure if this is necessary
import scipy.ndimage
from StringIO import StringIO
#import cv
def show(*args, **kwargs):
import imtable
return imtable.show(*args, **kwargs)
# Functional code for drawing on images:
def draw_on(f, im):
pil = to_pil(im)
draw = ImageDraw.ImageDraw(pil)
f(draw)
return from_pil(pil)
def color_from_string(s):
""" todo: add more, see matplotlib.colors.cnames """
colors = {'r' : (255, 0, 0), 'g' : (0, 255, 0), 'b' : (0, 0, 255)}
if s in colors:
return colors[s]
else:
ut.fail('unknown color: %s' % s)
def parse_color(c):
if type(c) == type((0,)) or type(c) == type(np.array([1])):
return c
elif type(c) == type(''):
return color_from_string(c)
def colors_from_input(color_input, default, n):
""" Parse color given as input argument; gives user several options """
# todo: generalize this to non-colors
expanded = None
if color_input is None:
expanded = [default] * n
elif (type(color_input) == type((1,))) and map(type, color_input) == [int, int, int]:
# expand (r, g, b) -> [(r, g, b), (r, g, b), ..]
expanded = [color_input] * n
else:
# general case: [(r1, g1, b1), (r2, g2, b2), ...]
expanded = color_input
expanded = map(parse_color, expanded)
return expanded
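# Hedged examples of the accepted color inputs (Python 2 semantics, matching this module):
#   colors_from_input(None, (255, 0, 0), 3)          -> [(255, 0, 0), (255, 0, 0), (255, 0, 0)]
#   colors_from_input((0, 255, 0), None, 2)          -> [(0, 255, 0), (0, 255, 0)]
#   colors_from_input(['r', (0, 0, 255)], None, 2)   -> [(255, 0, 0), (0, 0, 255)]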
def draw_rects(im, rects, outlines = None, fills = None, texts = None, text_colors = None, line_widths = None, as_oval = False):
rects = list(rects)
outlines = colors_from_input(outlines, (0, 0, 255), len(rects))
text_colors = colors_from_input(text_colors, (255, 255, 255), len(rects))
fills = colors_from_input(fills, None, len(rects))
if texts is None: texts = [None] * len(rects)
if line_widths is None: line_widths = [None] * len(rects)
def check_size(x, s): ut.check(x is None or len(x) == len(rects), "%s different size from rects" % s)
check_size(outlines, 'outlines')
check_size(fills, 'fills')
check_size(texts, 'texts')
    check_size(text_colors, 'text_colors')
def f(draw):
for (x, y, w, h), outline, fill, text, text_color, lw in itl.izip(rects, outlines, fills, texts, text_colors, line_widths):
if lw is None:
if as_oval:
draw.ellipse((x, y, x + w, y + h), outline = outline, fill = fill)
else:
draw.rectangle((x, y, x + w, y + h), outline = outline, fill = fill)
else:
# TODO: to do this right, we need to find where PIL draws the corners
# x -= lw
# y -= lw
# w += 2*lw
# h += 2*lw
# pts = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]
# for i in xrange(len(pts)):
# #draw.line(pts[i] + pts[(i+1)%4], fill = outline, width = lw)
# draw.rectangle(pts[i] + pts[(i+1)%4], fill = outline, width = lw)
d = int(np.ceil(lw/2))
draw.rectangle((x-d, y-d, x+w+d, y+d), fill = outline)
draw.rectangle((x-d, y-d, x+d, y+h+d), fill = outline)
draw.rectangle((x+w+d, y+h+d, x-d, y+h-d), fill = outline)
draw.rectangle((x+w+d, y+h+d, x+w-d, y-d), fill = outline)
if text is not None:
# draw text inside rectangle outline
border_width = 2
draw.text((border_width + x, y), text, fill = text_color)
return draw_on(f, im)
def draw_rects_scale(sc, im, rects, outlines = None, fills = None, texts = None, text_colors = None):
scaled_rects = []
for r in rects:
r = np.array(r)
sr = r * sc
if r[2] >= 1 and r[3] >= 1:
sr[2:] = np.maximum(sr[2:], 1.)
scaled_rects.append(sr)
return draw_rects(scale(im, sc), scaled_rects, outlines, fills, texts, text_colors)
def draw_pts(im, points, colors = None, width = 1, texts = None):
#ut.check(colors is None or len(colors) == len(points))
points = list(points)
colors = colors_from_input(colors, (255, 0, 0), len(points))
rects = [(p[0] - width/2, p[1] - width/2, width, width) for p in points]
return draw_rects(im, rects, fills = colors, outlines = [None]*len(points), texts = texts)
def draw_lines(im, pts1, pts2, colors = None, width = 0):
ut.check(len(pts1) == len(pts2), 'Line endpoints different sizes')
colors = colors_from_input(colors, None, len(pts1))
def f(draw):
for p1, p2, c in itl.izip(pts1, pts2, colors):
draw.line(ut.int_tuple(p1) + ut.int_tuple(p2), fill = c, width = width)
return draw_on(f, im)
def draw_text(im, texts, pts, colors, font_size = None, bold = False):
im = rgb_from_gray(im)
# todo: add fonts, call from draw_rects
ut.check(len(pts) == len(texts))
#ut.check((colors is None) or len(colors) == len(texts))
colors = colors_from_input(colors, (0, 0, 0), len(texts))
def f(draw):
if font_size is None:
font = None
else:
#font_name = '/usr/share/fonts/truetype/ttf-liberation/LiberationMono-Regular.ttf'
font_choices = ['/usr/share/fonts/truetype/freefont/FreeMono%s.ttf' % ('Bold' if bold else ''), '/Library/Fonts/PTMono.ttc']
for font_name in font_choices:
if os.path.exists(font_name):
break
else:
raise RuntimeError('could not find a suitable font on this machine (please edit paths in img.py)')
font = ImageFont.truetype(font_name, size = font_size)
for pt, text, color in itl.izip(pts, texts, colors):
draw.text(ut.int_tuple(pt), text, fill = color, font = font)
return draw_on(f, im)
def draw_text_ul(im, text, color = (0, 255, 0), font_size = 25):
return draw_text(im, [text], [(0, 0)], [color], font_size = font_size)
def luminance(im):
if len(im.shape) == 2:
return im
else:
# see http://www.mathworks.com/help/toolbox/images/ref/rgb2gray.html
return np.uint8(np.round(0.2989 * im[:,:,0] + 0.587 * im[:,:,1] + 0.114 * im[:,:,2]))
#def sub_img(im, x_or_rect, y = None, w = None, h = None):
def sub_img(im, x_or_rect, y = None, w = None, h = None):
if x_or_rect is None:
return im
elif y is None:
x, y, w, h = x_or_rect
else:
x = x_or_rect
return im[y : y + h, x : x + w]
def sub_img_frac(im, x_or_rect, y = None, w = None, h = None):
if y is None:
x, y, w, h = x_or_rect
else:
x = x_or_rect
x = int(x*im.shape[1])
y = int(y*im.shape[0])
w = int(w*im.shape[1])
h = int(h*im.shape[0])
return im[y : y + h, x : x + w]
# def stack_img_pair(im1, im2):
# h1, w1 = im1.shape[:2]
# h2, w2 = im2.shape[:2]
# im3 = np.zeros((max(h1, h2), w1 + w2, 3), dtype = im1.dtype)
# im3[:h1, :w1, :] = rgb_from_gray(im1)
# im3[:h2, w1:, :] = rgb_from_gray(im2)
# return im3
# def stack_imgs(ims):
# """ slow, should rewrite """
# assert len(ims) > 0
# res = ims[0]
# for im in ims[1:]:
# res = stack_img_pair(res, im)
# return res
# def hstack_ims(ims):
# max_h = max(im.shape[0] for im in ims)
# result = []
# for im in ims:
# frame = np.zeros((max_h, im.shape[1], 3))
# frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
# result.append(frame)
# return np.hstack(result)
def hstack_ims(ims, bg_color = (0, 0, 0)):
max_h = max([im.shape[0] for im in ims])
result = []
for im in ims:
#frame = np.zeros((max_h, im.shape[1], 3))
frame = make(im.shape[1], max_h, bg_color)
frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
result.append(frame)
return np.hstack(result)
# def hstack_ims_mult(*all_ims):
# max_h = max(max(im.shape[0] for im in ims) for ims in all_ims)
# result = []
# for im in ims:
# frame = np.zeros((max_h, im.shape[1], 3))
# frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
# result.append(frame)
# return np.hstack(result)
def vstack_ims(ims, bg_color = (0, 0, 0)):
if len(ims) == 0:
return make(0, 0)
max_w = max([im.shape[1] for im in ims])
result = []
for im in ims:
#frame = np.zeros((im.shape[0], max_w, 3))
frame = make(max_w, im.shape[0], bg_color)
frame[:im.shape[0],:im.shape[1]] = rgb_from_gray(im)
result.append(frame)
return np.vstack(result)
def make_rgb(im):
im = rgb_from_gray(im, False)
if im.shape[2] < 3:
raise RuntimeError()
elif im.shape[2] > 3:
im = im[:, :, :3]
return im
def rgb_from_gray(img, copy = True, remove_alpha = True):
if img.ndim == 3 and img.shape[2] == 3:
return img.copy() if copy else img
elif img.ndim == 3 and img.shape[2] == 4:
return (img.copy() if copy else img)[..., :3]
elif img.ndim == 3 and img.shape[2] == 1:
return np.tile(img, (1,1,3))
elif img.ndim == 2:
return np.tile(img[:,:,np.newaxis], (1,1,3))
else:
raise RuntimeError('Cannot convert to rgb. Shape: ' + str(img.shape))
def load(im_fname, gray = False):
if im_fname.endswith('.gif'):
print "GIFs don't load correctly for some reason"
ut.fail('fail')
im = from_pil(Image.open(im_fname))
# use imread, then flip upside down
#im = np.array(list(reversed(pylab.imread(im_fname)[:,:,:3])))
if gray:
return luminance(im)
elif not gray and np.ndim(im) == 2:
return rgb_from_gray(im)
else:
return im
imread = load
def loadsc(fname, scale, gray = False):
return resize(load(fname, gray = gray), scale)
def save(img_fname, a):
if img_fname.endswith('jpg'):
return Image.fromarray(np.uint8(a)).save(img_fname, quality = 100)
else:
#return Image.fromarray(np.uint8(a)).save(img_fname)
return Image.fromarray(np.uint8(a)).save(img_fname, quality = 100)
# def make_temp_file(ext):
# fd, fname = tempfile.mkstemp(ext)
# # shouldn't delete file
# os.close(fd)
# return fname
# def make_pretty(img):
# if img.dtype == 'bool':
# return img * 255
# elif (0 <= np.min(img)) and (np.max(img) <= 1.0):
# return img*255
# return img
def show_html(html):
page = ut.make_temp('.html')
ut.make_file(page, html)
print 'opening', page
webbrowser.open(page)
# # http://opencv.willowgarage.com/wiki/PythonInterface
# def cv2array(im):
# depth2dtype = {
# cv.IPL_DEPTH_8U: 'uint8',
# cv.IPL_DEPTH_8S: 'int8',
# cv.IPL_DEPTH_16U: 'uint16',
# cv.IPL_DEPTH_16S: 'int16',
# cv.IPL_DEPTH_32S: 'int32',
# cv.IPL_DEPTH_32F: 'float32',
# cv.IPL_DEPTH_64F: 'float64',
# }
# arrdtype=im.depth
# a = np.fromstring(
# im.tostring(),
# dtype=depth2dtype[im.depth],
# count=im.width*im.height*im.nChannels)
# a.shape = (im.height,im.width,im.nChannels)
# return a
# def to_cv(a):
# dtype2depth = {
# 'uint8': cv.IPL_DEPTH_8U,
# 'int8': cv.IPL_DEPTH_8S,
# 'uint16': cv.IPL_DEPTH_16U,
# 'int16': cv.IPL_DEPTH_16S,
# 'int32': cv.IPL_DEPTH_32S,
# 'float32': cv.IPL_DEPTH_32F,
# 'float64': cv.IPL_DEPTH_64F,
# }
# try:
# nChannels = a.shape[2]
# except:
# nChannels = 1
# cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
# dtype2depth[str(a.dtype)],
# nChannels)
# cv.SetData(cv_im, a.tostring(),
# a.dtype.itemsize*nChannels*a.shape[1])
# return cv_im
#def to_pil(im): return Image.fromarray(np.uint8(im))
def to_pil(im):
#print im.dtype
return Image.fromarray(np.uint8(im))
def from_pil(pil):
#print pil
return np.array(pil)
def to_pylab(a): return np.uint8(a)
def test_draw_text():
im = 255 + np.zeros((300, 300, 3))
show([draw_text(im, ['hello', 'world'], [(100, 200), (0, 0)], [(255, 0, 0), (0, 255, 0)]),
draw_text(im, ['hello', 'world'], [(100, 100), (0, 0)], [(255, 0, 0), (0, 255, 0)], font_size = 12)])
def save_tmp(im, encoding = '.png', dir = None):
fname = ut.make_temp(encoding, dir = dir)
save(fname, im)
return fname
def save_tmp_nfs(im, encoding = '.png'):
return save_tmp(im, encoding, '/csail/vision-billf5/aho/tmp')
# def resize(im, size):
# if type(size) == type(1):
# size = float(size)
# #return scipy.misc.pilutil.imresize(im, size)
# return scipy.misc.imresize(im, size)
#def resize(im, scale, order = 3, hires = 'auto'):
def resize(im, scale, order = 3, hires = False):
if hires == 'auto':
hires = (im.dtype == np.uint8)
if np.ndim(scale) == 0:
new_scale = [scale, scale]
# interpret scale as dimensions; convert integer size to a fractional scale
elif ((scale[0] is None) or type(scale[0]) == type(0)) \
and ((scale[1] is None) or type(scale[1]) == type(0)) \
and (not (scale[0] is None and scale[1] is None)):
# if the size of only one dimension is provided, scale the other to maintain the right aspect ratio
if scale[0] is None:
dims = (int(float(im.shape[0])/im.shape[1]*scale[1]), scale[1])
elif scale[1] is None:
dims = (scale[0], int(float(im.shape[1])/im.shape[0]*scale[0]))
else:
dims = scale[:2]
new_scale = [float(dims[0] + 0.4)/im.shape[0], float(dims[1] + 0.4)/im.shape[1]]
# a test to make sure we set the floating point scale correctly
result_dims = [int(new_scale[0]*im.shape[0]), int(new_scale[1]*im.shape[1])]
assert tuple(result_dims) == tuple(dims)
elif type(scale[0]) == type(0.) and type(scale[1]) == type(0.):
new_scale = scale
#new_scale = scale[1], scale[0]
else:
raise RuntimeError("don't know how to interpret scale: %s" % (scale,))
# want new scale' to be such that
# int(scale'[0]*im.shape[0]) = scale[0], etc. (that's how zoom computes the new shape)
# todo: any more numerical issues?
#print 'scale before', im.shape, scale
# print 'scale after', scale
# print 'new image size', [int(scale[0]*im.shape[0]),int(scale[1]*im.shape[1])]
#scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
if hires:
#sz = map(int, (scale_param*im.shape[1], scale_param*im.shape[0]))
sz = map(int, (scale_param[1]*im.shape[1], scale_param[0]*im.shape[0]))
return from_pil(to_pil(im).resize(sz, Image.ANTIALIAS))
else:
res = scipy.ndimage.zoom(im, scale_param, order = order)
# verify that zoom() returned an image of the desired size
if (np.ndim(scale) != 0) and type(scale[0]) == type(0) and type(scale[1]) == type(0):
assert res.shape[:2] == (scale[0], scale[1])
return res
scale = resize
# import skimage
# resize = skimage.imresize
def test_resize():
im = make(44, 44)
assert resize(im, (121, 120, 't')).shape[:2] == (121, 120)
assert resize(im, (2., 0.5, 't')).shape[:2] == (88, 22)
def show_file(fname):
show(load(fname))
def img_extensions():
return ['png', 'gif', 'jpg', 'jpeg', 'bmp', 'ppm', 'pgm']
def is_img_file(fname):
return any(fname.lower().endswith(ext) for ext in img_extensions())
def blur(im, sigma):
if np.ndim(im) == 2:
return scipy.ndimage.filters.gaussian_filter(im, sigma)
else:
return np.concatenate([scipy.ndimage.filters.gaussian_filter(im[:, :, i], sigma)[:, :, np.newaxis] for i in xrange(im.shape[2])], axis = 2)
def blit(src, dst, x, y, opt = None):
if opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
dst[dy : dy + dh, dx : dx + dw] = src[sy : sy + dh, sx : sx + dw]
def weighted_add(src, dst, x, y, src_weight, dst_weight, opt = None):
if opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
dst[dy : dy + dh, dx : dx + dw] = dst[dy : dy + dh, dx : dx + dw]*dst_weight + src[sy : sy + dh, sx : sx + dw]*src_weight
def make(w, h, fill = (0,0,0)):
return np.uint8(np.tile([[fill]], (h, w, 1)))
def luminance_rgb(im): return rgb_from_gray(luminance(im))
def rotate(img, angle, fill = 0):
""" Rotate image around its center by the given angle (in
radians). No interpolation is used; indices are rounded. The
returned image may be larger than the original, but the middle
pixel corresponds to the middle of the original. Pixels with no
correspondence are filled as 'fill'.
Also returns mapping from original image to rotated. """
r = int(np.ceil(np.sqrt(img.shape[0]**2 + img.shape[1]**2)))
X, Y = np.mgrid[0:r, 0:r]
X = X.flatten()
Y = Y.flatten()
X2 = np.array(np.round(img.shape[1]/2 + np.cos(angle) * (X - r/2) - np.sin(angle) * (Y - r/2)), dtype = int)
Y2 = np.array(np.round(img.shape[0]/2 + np.sin(angle) * (X - r/2) + np.cos(angle) * (Y - r/2)), dtype = int)
good = ut.logical_and_many(X2 >= 0, X2 < img.shape[1], Y2 >= 0, Y2 < img.shape[0])
out = fill + np.zeros((r, r) if img.ndim == 2 else (r, r, img.shape[2]), dtype = img.dtype)
out[Y[good], X[good]] = img[Y2[good], X2[good]]
T = np.dot(np.dot(ut.rigid_transform(np.eye(2), [img.shape[1]/2, img.shape[0]/2]),
ut.rigid_transform(ut.rotation_matrix2(angle))),
ut.rigid_transform(np.eye(2), [-r/2, -r/2]))
return out, np.linalg.inv(T)
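# Illustrative usage sketch (added; not part of the original module): rotate a
# small test image by 90 degrees. The returned matrix is assumed to be a 3x3
# homogeneous transform taking original (x, y, 1) coordinates into the rotated
# output, as the docstring above describes.
def _example_rotate():
    img = np.zeros((10, 20), dtype=np.uint8)
    img[2, 3] = 255
    rotated, orig_to_rotated = rotate(img, np.pi / 2)
    # rotated is square (side = ceil(sqrt(10**2 + 20**2))); the bright pixel has
    # moved according to orig_to_rotated.
    return rotated, orig_to_rotated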
def map_img(f, im, dtype = None, components = None):
new_im = np.zeros(im.shape if components is None else im.shape + (components,), \
dtype = im.dtype if dtype is None else dtype)
for y in xrange(im.shape[0]):
for x in xrange(im.shape[1]):
new_im[y,x] = f(im[y,x])
return new_im
def add_border(img, w, h, color = (0, 0, 0)):
assert 0 <= w
assert 0 <= h
out = make(img.shape[1] + 2*w, img.shape[0] + 2*h, color)
out[h:(h + img.shape[0]), w : (w + img.shape[1])] = img
return out
def pad_corner(im, pw, ph, color = (0, 0, 0)):
out = make(im.shape[1] + pw, im.shape[0] + ph, color)
out[:im.shape[0], :im.shape[1]] = im
return out
def expand(im, new_shape, opt = 'center'):
if type(new_shape) == type(0.):
new_w = int(im.shape[1]*new_shape)
new_h = int(im.shape[0]*new_shape)
elif type(new_shape) == type((1,)):
new_shape = new_shape[:2]
new_h, new_w = new_shape
else:
raise RuntimeError("Don't know how to interpret shape")
if im.shape[0] >= new_h and im.shape[1] >= new_w:
return im.copy()
else:
im = rgb_from_gray(im)
r = make(new_w, new_h)
if opt == 'center':
blit(im, r, im.shape[1]/2, im.shape[0]/2, opt = 'center')
elif opt == 'corner':
r[:im.shape[0], :im.shape[1]] = im
return r
def combine_rgb(r, g, b):
a = np.zeros(r.shape + (3,))
a[:,:,0] = r
a[:,:,1] = g
a[:,:,2] = b
return a
def compute_pyramid(ptm, interval, min_size):
# based on pff's featpyramid.m
# todo: upsample one level
sc = 2**(1.0/interval)
    imsize = ptm.shape[:2]
max_scale = int(1 + np.floor(np.log(np.min(imsize)/min_size)/np.log(sc)))
ims = [None]*max_scale
scale = [None]*len(ims)
# skipping 2x scale
for i in xrange(1, interval+1):
im_scaled = resize(ptm, 1/sc**(i-1))
ims[-1 + i] = im_scaled
scale[-1 + i] = 1/sc**(i-1)
for j in xrange(i+interval, max_scale+1, interval):
im_scaled = resize(im_scaled, 0.5)
ims[-1 + j] = im_scaled
scale[-1 + j] = 0.5*scale[-1 + j - interval]
assert None not in ims
return ims, scale
#imrotate = scipy.misc.imrotate
def imrotate(*args):
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return scipy.misc.imrotate(*args)
def from_fig_slow(fig = None, tight = True):
ext = 'png'
if fig is None:
fig = pylab.gcf()
IO = StringIO()
if tight:
pylab.savefig(IO, format = ext, bbox_inches = 'tight')
else:
pylab.savefig(IO, format = ext)
IO.seek(0)
return from_pil(Image.open(IO))
# def from_fig_fast(fig = None, tight = True):
# ext = 'raw'
# if fig is None:
# fig = pylab.gcf()
# IO = StringIO()
# pylab.savefig(IO, format = ext)
# IO.seek(0)
# w, h = fig.canvas.get_width_height()
# return np.fromstring(IO.buf, dtype = np.uint8).reshape((600, -1, 4))
# def from_fig(fig = None):
# """
# http://www.icare.univ-lille1.fr/wiki/index.php/How_to_convert_a_matplotlib_figure_to_a_numpy_array_or_a_PIL_image
# @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
# @param fig a matplotlib figure
# @return a numpy 3D array of RGBA values
# """
# if fig is None:
# fig = pylab.gcf()
# # draw the renderer
# fig.canvas.draw()
# # Get the RGBA buffer from the figure
# w,h = fig.canvas.get_width_height()
# buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
# buf.shape = (h, w, 4)
# # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
# buf = np.roll(buf, 3, axis = 2)
# return buf
# #return buf[..., 1:]
# def from_fig(fig = None):
# """
# @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
# @param fig a matplotlib figure
# @return a numpy 3D array of RGBA values
# http://www.icare.univ-lille1.fr/wiki/index.php/How_to_convert_a_matplotlib_figure_to_a_numpy_array_or_a_PIL_image
# """
# if fig is None:
# fig = pylab.gcf()
# # draw the renderer
# fig.canvas.draw()
# # Get the RGBA buffer from the figure
# w,h = fig.canvas.get_width_height()
# buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
# buf.shape = (h, w, 4)
# # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
# buf = np.roll(buf, 3, axis = 2)
# # not sure how to set the background to white
# p = buf[:, :, 3] / 255.
# buf = np.array(buf[:, :, :3] * p[:, :, np.newaxis] + (1 - p)[:, :, np.newaxis]*255, 'uint8')
# return buf
def from_fig(fig = None, size_inches = None):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
http://www.icare.univ-lille1.fr/wiki/index.php/How_to_convert_a_matplotlib_figure_to_a_numpy_array_or_a_PIL_image
"""
if fig is None:
fig = pylab.gcf()
if size_inches is not None:
fig.set_size_inches(*size_inches)
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w,h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (h, w, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis = 2)
# not sure how to set the background to white
p = buf[:, :, 3] / 255.
buf = np.array(buf[:, :, :3] * p[:, :, np.newaxis] + (1 - p)[:, :, np.newaxis]*255, 'uint8')
return buf
def show_fig():
show(from_fig())
def scale_vals(A, lo, hi):
return np.uint8(255*(np.clip(A, lo, hi) - lo) / float(hi - lo))
def merge_ims(srcs, pts_or_rects, bg, opt = None):
""" Makes a new image where each image in patches is copied at a
corresponding pixel location. Overlapping images are averaged
together. """
dst = rgb_from_gray(bg)
layer = np.zeros(dst.shape)
#counts = np.zeros(dst.shape[:2], 'l')
counts = np.zeros(dst.shape[:2], 'd')
for src, r in itl.izip(srcs, pts_or_rects):
r = ut.int_tuple(r)
x, y = r[:2]
# rescale if we're given a rectangle, and it has a different size
if len(r) > 2:
assert len(r) == 4
assert opt != 'center'
if src.shape[:2] != (r[3], r[2]):
src = resize(src, (r[3], r[2]))
elif opt == 'center':
x -= src.shape[1]/2
y -= src.shape[0]/2
# crop intersecting
dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
sx = dx - x
sy = dy - y
layer[dy : dy + dh, dx : dx + dw] += src[sy : sy + dh, sx : sx + dw, :3]
if np.ndim(src) == 3 and src.shape[2] == 4:
counts[dy : dy + dh, dx : dx + dw] += np.array(src[sy : sy + dh, sx : sx + dw, 3],'d')/255.
else:
counts[dy : dy + dh, dx : dx + dw] += 1
dst[counts > 0] = layer[counts > 0] / counts[counts > 0][:, np.newaxis]
return dst
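# Illustrative usage sketch (added; not part of the original module): paste two
# copies of a red patch onto a gray background; where they overlap, merge_ims
# averages the contributions.
def _example_merge_ims():
    bg = make(100, 100, (128, 128, 128))
    patch = make(20, 20, (255, 0, 0))
    return merge_ims([patch, patch], [(10, 10), (20, 20)], bg)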
def label_im(im, text, color = (0, 255, 0)):
return draw_text(im, [text], [(25, im.shape[0] - 25)], [color])
def remap_color(im, xy):
assert im.shape[:2] == xy.shape[:2]
assert xy.shape[2] == 2
vals = []
for i in xrange(im.shape[2]):
dx = xy[..., 0].flatten()[np.newaxis, :]
dy = xy[..., 1].flatten()[np.newaxis, :]
v = scipy.ndimage.map_coordinates(im[..., i], np.concatenate([dy, dx]))
vals.append(v.reshape(im.shape[:2] + (1,)))
return np.concatenate(vals, axis = 2)
def stack_meshgrid(xs, ys, dtype = 'l'):
x, y = np.meshgrid(xs, ys)
return np.array(np.concatenate([x[..., np.newaxis], y[..., np.newaxis]], axis = 2), dtype = dtype)
def sub_img_pad(im, (x, y, w, h), oob = 0):
if len(im.shape) == 2:
dst = np.zeros((h, w))
else:
dst = np.zeros((h, w, im.shape[2]))
dst[:] = oob
sx, sy, sw, sh = ut.crop_rect_to_img((x, y, w, h), im)
dst[(sy - y) : (sy - y) + sh,
(sx - x) : (sx - x) + sw] = im[sy : sy + sh, sx : sx + sw]
return dst
def sub_img_reflect(im, (x, y, w, h)):
x, y, w, h = map(ut.iround, [x, y, w, h])
yy, xx = np.mgrid[y : y + h, x : x + w]
vals = np.uint8(lookup_bilinear(im, xx.flatten(), yy.flatten(), order = 0, mode = 'reflect'))
return vals.reshape((h, w, im.shape[2]))
def compress(im, format = 'png'):
out = StringIO()
im = to_pil(im)
im.save(out, format = format)
c = out.getvalue()
out.close()
return c
def compress_jpeg(im, format = 'jpeg'):
return compress(im, format)
def uncompress(s):
return from_pil(Image.open(StringIO(s)))
# def cv_uncompress(s):
# import cv2
# #return cv2.imdecode(s)
# a = cv2.imdecode(np.fromstring(s, np.uint8), cv2.IMREAD_COLOR)#
# b = uncompress(s)
# print a.shape, b.shape
# print np.mean(np.abs(a.astype('float32')-b.astype('float32')))
# return a
def test_compress():
im = load('/afs/csail.mit.edu/u/a/aho/bear.jpg')
print 'orig', ut.guess_bytes(im)
s = compress(im)
print 'comp', ut.guess_bytes(s)
assert(np.all(im == uncompress(s)))
def mix_ims(im1, im2, mask, alpha = 0.5):
im1 = im1.copy()
im2 = np.asarray(im2)
if len(im2) == 3:
# single color
im1[mask] = im1[mask]*alpha + im2*(1-alpha)
else:
im1[mask] = im1[mask]*alpha + im2[mask]*(1-alpha)
return im1
#def lookup_bilinear(im, x, y, order = 3, mode = 'constant', cval = 0.0):
def lookup_bilinear(im, x, y, order = 1, mode = 'constant', cval = 0.0):
yx = np.array([y, x])
if np.ndim(im) == 2:
return scipy.ndimage.map_coordinates(im, yx, order = order, mode = mode, cval = cval)
else:
return np.concatenate([scipy.ndimage.map_coordinates(im[:, :, i], yx, order = order, mode = mode)[:, np.newaxis] \
for i in xrange(im.shape[2])], axis = 1)
def map_helper((xs, i, order, mode)):
return scipy.ndimage.map_coordinates(xs, np.array([i]), order = order, mode = mode)[:, np.newaxis]
def lookup_bilinear1d(xs, i, order = 4, mode = 'constant', cval = 0.0, par = 0):
if np.ndim(xs) == 1:
return scipy.ndimage.map_coordinates(xs, i, order = order, mode = mode, cval = cval)
else:
if par:
vals = [(xs[:, j], i, order, mode) for j in xrange(xs.shape[1])]
return np.concatenate(ut.parmap(map_helper, vals), axis = 1)
else:
return np.concatenate([scipy.ndimage.map_coordinates(xs[:, j], np.array([i]), order = order, mode = mode)[:, np.newaxis] \
for j in xrange(xs.shape[1])], axis = 1)
#def pixels_in_bounds(im, xs, ys):
def pixels_in_bounds(im_shape, xs, ys):
return ut.land(0 <= xs, xs < im_shape[1],
0 <= ys, ys < im_shape[0])
def im2float(im):
im = np.array(im, 'float32')
im /= 255.
return im
def try_load_img(fname, default_size = (256, 256)):
try:
return load(fname)
except IOError:
print 'Failed to load:', fname
return make(default_size[0], default_size[1])
def exif(fname, use_jhead = True):
if use_jhead:
pass
else:
with Image.open(fname) as img:
if not hasattr(img, '_getexif'):
return None
return img._getexif()
def exif_shape(fname):
ex = exif(fname)
yid = 40963
xid = 40962
if ex is None or xid not in ex or yid not in ex:
return None
return ex[yid], ex[xid]
|
1615745
|
import tensorflow as tf
import numpy as np
import input_data
def sample_prob(probs):
return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))
# return tf.select((tf.random_uniform(tf.shape(probs), 0, 1) - probs) > 0.5, tf.ones(tf.shape(probs)), tf.zeros(tf.shape(probs)))
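# Note (added): sample_prob draws a Bernoulli sample per entry, since
# floor(p + u) with u ~ Uniform[0, 1) equals 1 exactly when u >= 1 - p,
# which happens with probability p.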
learning_rate = 0.1
momentum = 0.9
batchsize = 100
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX = mnist.train.images
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
rbm_w = tf.placeholder("float", [784, 500])
rbm_vb = tf.placeholder("float", [784])
rbm_hb = tf.placeholder("float", [500])
rbm_w_inc = tf.placeholder("float", [784, 500])
rbm_vb_inc = tf.placeholder("float", [784])
rbm_hb_inc = tf.placeholder("float", [500])
h0_a = tf.nn.sigmoid(tf.matmul(X, rbm_w) + rbm_hb)
h0 = sample_prob(h0_a)
v1_a = tf.nn.sigmoid(tf.matmul(h0, tf.transpose(rbm_w)) + rbm_vb)
v1 = sample_prob(v1_a)
h1_a = tf.nn.sigmoid(tf.matmul(v1, rbm_w) + rbm_hb)
# h1 = sample_prob(h1_a)
w_positive_grad = tf.matmul(tf.transpose(X), h0_a)
w_negative_grad = tf.matmul(tf.transpose(v1_a), h1_a)
grad_w = (w_positive_grad - w_negative_grad) / tf.to_float(tf.shape(X)[0])
grad_vb = tf.reduce_mean(X - v1_a, 0)
grad_hb = tf.reduce_mean(h0 - h1_a, 0)
update_w_inc = momentum * rbm_w_inc + (learning_rate / batchsize) * grad_w
update_vb_inc = momentum * rbm_vb_inc + (learning_rate / batchsize) * grad_vb
update_hb_inc = momentum * rbm_hb_inc + (learning_rate / batchsize) * grad_hb
update_w = rbm_w + update_w_inc
update_vb = rbm_vb + update_vb_inc
update_hb = rbm_hb + update_hb_inc
err = X - v1_a
err_sum = tf.reduce_mean(err * err)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
o_w = np.zeros([784, 500], np.float32)
o_vb = np.zeros([784], np.float32)
o_hb = np.zeros([500], np.float32)
o_w_inc = np.zeros([784, 500], np.float32)
o_vb_inc = np.zeros([784], np.float32)
o_hb_inc = np.zeros([500], np.float32)
print(sess.run(err_sum, feed_dict={X: trX, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb}))
for e in range(0, 50):
for start, end in zip(range(0, len(trX), batchsize), range(batchsize, len(trX), batchsize)):
batch = trX[start:end]
o_w_inc, o_vb_inc, o_hb_inc, o_w, o_vb, o_hb = sess.run([update_w_inc, update_vb_inc, update_hb_inc, update_w, update_vb, update_hb], feed_dict={X: batch, rbm_w_inc: o_w_inc, rbm_vb_inc: o_vb_inc, rbm_hb_inc: o_hb_inc, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb})
print(sess.run(err_sum, feed_dict={X: trX, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb}))
|
1615750
|
import numpy as np
import pandas as pd
from copy import deepcopy
def super_str(x):
if isinstance(x,np.int64):
x=float(x)
if isinstance(x,int):
x=float(x)
ans=str(x)
return ans
def convert_to_array(x):
if isinstance(x, np.ndarray):
return x
else:
return np.array(x)
def special_sort(a, order='ascending'):
n=len(a)
if order=='ascending':
for i in range(1,n):
j=deepcopy(i)
while j>0 and a[j][1]<a[j-1][1]:
temp=a[j-1]
a[j-1]=a[j]
a[j]=temp
j=j-1
elif order=='descending':
for i in range(1,n):
j=deepcopy(i)
while j>0 and a[j][1]>a[j-1][1]:
temp=a[j-1]
a[j-1]=a[j]
a[j]=temp
j=j-1
return a
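# Illustrative usage sketch (added; not part of the original module):
# special_sort orders (label, score) pairs in place by their second element.
def _example_special_sort():
    pairs = [['a', 3], ['b', 1], ['c', 2]]
    assert special_sort(pairs) == [['b', 1], ['c', 2], ['a', 3]]
    assert special_sort(pairs, order='descending') == [['a', 3], ['c', 2], ['b', 1]]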
def dissimilarity(arr1, arr2, weighted):
n=arr1.shape[0]
s=0
if weighted==True:
for i in range(0,n):
diff=abs(arr1[i]-arr2[i])
s = s + (diff*(n-i)/n)
else:
for i in range(0,n):
diff=abs(arr1[i]-arr2[i])
s = s + (diff)
return s
def create_utility_matrix(data, formatizer = {'user':0, 'item': 1, 'value': 2}):
"""
:param data: pandas dataframe, 2D, nx3
:param formatizer: dict having the column name or ids for users, items and ratings/values
:return: 1. the utility matrix. (2D, n x m, n=users, m=items)
2. list of users (in order with the utility matrix rows)
3. list of items (in order with the utility matrix columns)
"""
itemField = formatizer['item']
userField = formatizer['user']
valueField = formatizer['value']
userList = data.ix[:,userField].tolist()
itemList = data.ix[:,itemField].tolist()
valueList = data.ix[:,valueField].tolist()
users = list(set(data.ix[:,userField]))
items = list(set(data.ix[:,itemField]))
users_index = {users[i]: i for i in range(len(users))}
pd_dict = {item: [np.nan for i in range(len(users))] for item in items}
for i in range(0,len(data)):
item = itemList[i]
user = userList[i]
value = valueList[i]
pd_dict[item][users_index[user]] = value
X = pd.DataFrame(pd_dict)
X.index = users
users = list(X.index)
items = list(X.columns)
return np.array(X), users, items
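# Illustrative usage sketch (added; not part of the original module): build a
# utility matrix from a tiny ratings frame. The column names and sample values
# below are assumptions made purely for demonstration.
def _example_create_utility_matrix():
    ratings = pd.DataFrame({'user': ['u1', 'u1', 'u2'],
                            'item': ['i1', 'i2', 'i1'],
                            'value': [5.0, 3.0, 4.0]})
    matrix, users, items = create_utility_matrix(
        ratings, formatizer={'user': 'user', 'item': 'item', 'value': 'value'})
    # matrix is a len(users) x len(items) array with np.nan for unrated pairs
    return matrix, users, items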
|
1615755
|
from running_modes.reinforcement_learning.configurations.learning_strategy_configuration import LearningStrategyConfiguration
from running_modes.reinforcement_learning.learning_strategy import BaseLearningStrategy
from running_modes.reinforcement_learning.learning_strategy import DAPStrategy
from running_modes.reinforcement_learning.learning_strategy import LearningStrategyEnum
from running_modes.reinforcement_learning.learning_strategy import MASCOFStrategy
from running_modes.reinforcement_learning.learning_strategy import MAULIStrategy
from running_modes.reinforcement_learning.learning_strategy import SDAPStrategy
class LearningStrategy:
def __new__(cls, critic_model, optimizer, configuration: LearningStrategyConfiguration, logger=None) \
-> BaseLearningStrategy:
learning_strategy_enum = LearningStrategyEnum()
if learning_strategy_enum.DAP == configuration.name:
return DAPStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.MAULI == configuration.name:
return MAULIStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.MASCOF == configuration.name:
return MASCOFStrategy(critic_model, optimizer, configuration, logger)
if learning_strategy_enum.SDAP == configuration.name:
return SDAPStrategy(critic_model, optimizer, configuration, logger)
|
1615769
|
from PSpeedChatQuestTerminal import decodeSCQuestMsg
from PSpeedChatQuestTerminal import decodeSCQuestMsgInt
from otp.speedchat.SCDecoders import *
|
1615780
|
import numpy as np
import pandas as pd
from main.data import SETTINGS, IN_PAPER_NAMES, VAD, BE5, SHORT_COLUMNS
from framework.util import get_average_result_from_df, save_tsv, no_zeros_formatter, load_tsv
import datetime
import framework.util as util
directions=['be2vad', 'vad2be']
models=['baseline', 'reference_LM', 'Reference_KNN', 'my_model']
VARS=VAD+BE5
df=pd.DataFrame(index=[setting.name for setting in SETTINGS],
columns=VARS)
for d in directions:
for s in SETTINGS:
results=load_tsv('results/{}/{}/my_model.tsv'.format(d, s.name))
for var in VARS:
if var in list(results):
df.loc[s.name, var]=results.loc['Average', var]
df.rename(index=IN_PAPER_NAMES, inplace=True)
df.rename(index=str, columns=SHORT_COLUMNS, inplace=True)
save_tsv(df, 'overview_individual.tsv')
# read normalized split half reliabilites to make larger values bold
df_shr=load_tsv('../../analysis/shr/shr_normalized.tsv')
df_greater=df>df_shr
df_lesser=df<df_shr
print(df_greater)
print(df_lesser)
outperformed=0
not_outperformed=0
# add cell colour
df=df.round(3)
print(df)
lines=[]
lines.append('%%%%%% Automatic Python output from {} &%%%%%%%%%%'.format(datetime.datetime.now()))
lines.append('\\begin{tabular}{|l|rrr|rrrrr|}')
lines.append('\hline')
lines.append(' & '.join(['{}']+list(df))+'\\\\')
lines.append('\hline\hline')
for i in range(df.shape[0]):
row_list=[]
row_list.append(df.index[i].replace('_','\_'))
for j in range(df.shape[1]):
cell=''
        if df_greater.iloc[i,j]==True:
            cell+='\cellcolor{blue!25} '
            outperformed+=1
        elif df_lesser.iloc[i,j]==True:
            cell+='\cellcolor{lightred} '
            not_outperformed+=1
cell+=util.no_zeros_formatter(df.iloc[i,j])
row_list.append(cell)
lines.append(' & '.join(row_list)+'\\\\')
lines.append('\hline')
# add average values
row=['Avg.']
avg=df.mean(axis=0)
for i in avg:
row.append(util.no_zeros_formatter(i))
row=' & '.join(row)+'\\\\'
lines.append(row)
lines.append('\hline')
lines.append('\end{tabular}')
lines.append('%%%%%%%%%%%%%%%%%%%%%%%%')
string='\n'.join(lines)
string=string.replace('nan', '---')
print('\n', string, '\n')
print('System was superior to SHR in {} of {} cases!\n'.format(outperformed,outperformed+not_outperformed))
|
1615783
|
from hyperparams import Hyperparams as hp
import codecs
import os
import regex
from collections import Counter
def make_vocab(fpath, fname):
"""Constructs vocabulary.
Args:
fpath: A string. Input file path.
fname: A string. Output file name.
Writes vocabulary line by line to `preprocessed/fname`
"""
text = codecs.open(fpath, "r", "utf-8").read()
text = regex.sub("[^\s\p{L}']", "", text)
words = text.split()
word2cnt = Counter(words)
if not os.path.exists("preprocessed"):
os.mkdir("preprocessed")
with codecs.open("preprocessed/{}".format(fname), "w", "utf-8") as fout:
fout.write(
"{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format(
"<PAD>", "<UNK>", "<S>", "</S>"
)
)
for word, cnt in word2cnt.most_common(len(word2cnt)):
fout.write(u"{}\t{}\n".format(word, cnt))
if __name__ == "__main__":
make_vocab(hp.source_train, "cn.txt.vocab.tsv")
make_vocab(hp.target_train, "en.txt.vocab.tsv")
print("Done")
|
1615801
|
import unittest
import geoio
import dgsamples
class TestDownsample(unittest.TestCase):
"""Test accuracy of downsampling routines.
"""
def setUp(self):
# Setup test gdal object
self.test_img = dgsamples.wv2_longmont_1k.ms
self.img = geoio.GeoImage(self.test_img)
def tearDown(self):
# Remove gdal image object
self.img = None
def test_GeoImage_files_meta_exists(self):
pass # TBD
#self.assertIsInstance(self.img.files,tt.bunch.OrderedBunch)
if __name__ == '__main__':
unittest.main()
|
1615805
|
from scipy.optimize import minimize
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
import math
def f(x):
""" Function that returns x_0^2 + e^{0.5*x_0} + 10*sin(x_1) + x_1^2. """
return x[0] ** 2 + math.exp(0.5 * x[0]) + 10 * math.sin(x[1]) + x[1] ** 2
def fprime(x):
""" The derivative of f. """
ddx0 = 2 * x[0] + 0.5 * math.exp(0.5 * x[0])
ddx1 = 10 * math.cos(x[1]) + 2 * x[1]
return np.array([ddx0, ddx1])
opt_out = minimize(f, x0=np.array(
[10, 10]), jac=fprime, tol=1e-8, method='BFGS', options={'disp': True})
# Plotting
pl.close('all')
r = 6
x_range = np.linspace(-r, r)
y_range = np.linspace(-r, r)
X, Y = np.meshgrid(x_range, y_range)
Z = np.zeros(X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
Z[i, j] = f(np.array([X[i, j], Y[i, j]]))
fig = pl.figure('Cost function')
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=pl.cm.coolwarm, alpha=0.6)
ax.scatter(opt_out.x[0], opt_out.x[1], f(opt_out.x), c='r', s=50)
pl.show(block=False)
|
1615828
|
import time
import ppp4py.hdlc
import sys
import serial
import select
import binascii
import struct
surf_dev = '/dev/ttyUSB0'
surf = serial.Serial(surf_dev, baudrate=115200, timeout=5)
poller = select.poll()
poller.register(surf.fileno())
compress_ac = True
print 'Reading boot status (one line):'
while True:
t = surf.readline().strip()
while t.startswith('\0'):
t = t[1:]
print t
if t.startswith('#'):
break
if t.startswith('!'):
(flag, value) = t[1:].split()
value = int(value)
print 'flag "%s" value "%d"' % (flag, value)
if 'compress_ac' == flag:
compress_ac = (0 != value)
else:
print 'Unrecognized flag %s' % (flag,)
print 'Address/control compression: %s' % (compress_ac,)
framer = ppp4py.hdlc.HDLCforPPP(compress_ac=compress_ac)
#framer.setFrameCheckSequenceHelper(ppp4py.hdlc.FrameCheckSequenceNull)
framer.setFrameCheckSequenceHelper(ppp4py.hdlc.FrameCheckSequence16)
framer.updateReceivingACCM(0)
tests = [ 'a', 'a', 'b', "a\nb", "1\x7e2", "\x7e\x7d\x7d\x72", '12345678', '', '123' ]
def ProcessResponse_Text (framer, test):
response = ''
while True:
if poller.poll(None):
c = surf.read()
if "\n" == c:
print response
response = ''
return
else:
response += c
def ProcessResponse (framer, tx_text):
return ProcessResponse_Text(framer, tx_text)
timeout = 5000
while poller.poll(timeout):
c = surf.read()
print 'RX %s' % (binascii.hexlify(c),)
framer.putBytes(c)
pkt = framer.getPacket()
if (pkt is not None) and (0 < len(pkt)):
(rx_len,) = struct.unpack('B', pkt[0])
rx_text = pkt[1:]
if rx_len == len(tx_text):
if rx_text == tx_text:
print 'PASS rx echoed %d bytes correctly' % (rx_len)
else:
print 'FAIL tx %s rx %s content error' % (binascii.hexlify(tx_text), binascii.hexlify(rx_text))
else:
print 'FAIL tx len %d text %s, rx len %d text %s' % (len(tx_text), binascii.hexlify(tx_text), rx_len, binascii.hexlify(rx_text))
return
for t in tests:
framed = framer.framePacket(t)
print '\nTest: %s' % (binascii.hexlify(t),)
print 'Frame: %s' % (binascii.hexlify(framed),)
rv = surf.write(framed)
ProcessResponse(framer, t)
|
1615839
|
import sys
sys.path.append("../../")
from duckietown_rl.gym_duckietown.simulator import Simulator
from keras.models import load_model
import cv2
env = Simulator(seed=123, map_name="zigzag_dists", max_steps=5000001, domain_rand=True, camera_width=640,
camera_height=480, accept_start_angle_deg=4, full_transparency=True, distortion=True,
randomize_maps_on_reset=False, draw_curve=False, draw_bbox=False, frame_skip=1, draw_DDPG_features=False)
# TODO: Put your model name here!
model = load_model("trained_models/#YOUR_MODEL_NAME.h5")
observation = env.reset()
env.render()
cumulative_reward = 0.0
EPISODES = 10
STEPS = 1000
for episode in range(0, EPISODES):
for steps in range(0, STEPS):
# Cut the horizon: obs.shape = (480,640,3) --> (300,640,3)
observation = observation[150:450, :]
# we can resize the image here
observation = cv2.resize(observation, (120, 60))
# NOTICE: OpenCV changes the order of the channels !!!
observation = cv2.cvtColor(observation, cv2.COLOR_BGR2RGB)
# Rescale the image
observation = observation * 1.0/255
action = model.predict(observation.reshape(1, 60, 120, 3))[0]
observation, reward, done, info = env.step(action)
cumulative_reward += reward
if done:
env.reset()
print(f"DONE! after {steps}/{STEPS} steps!")
break
# print(f"Reward: {reward:.2f}",
# f"\t| Action: [{action[0]:.3f}, {action[1]:.3f}]",
# f"\t| Speed: {env.speed:.2f}")
#
# cv2.imshow("obs", observation)
# if cv2.waitKey() & 0xFF == ord('q'):
# break
env.render()
env.reset()
print('total reward: {}, mean reward: {}'.format(cumulative_reward, cumulative_reward / EPISODES))
env.close()
|
1615849
|
import pytest
pytestmark = [pytest.mark.django_db]
@pytest.fixture
def course(mixer):
return mixer.blend('products.Course')
@pytest.fixture
def order(factory, course, user):
order = factory.order(user=user, item=course)
order.set_paid()
return order
@pytest.fixture
def another_order(factory, user):
order = factory.order(user=user)
order.set_paid()
return order
@pytest.mark.usefixtures('user')
def test_nothing(course):
assert len(course.get_purchased_users()) == 0
@pytest.mark.usefixtures('order')
def test_one_user(course, user):
assert user in course.get_purchased_users()
def test_single_user_in_two_orders(course, order, another_order):
another_order.set_item(order.course)
assert len(course.get_purchased_users()) == 1
def test_non_purchased(course, order):
order.set_not_paid()
assert len(course.get_purchased_users()) == 0
@pytest.mark.usefixtures('another_order')
def test_another_order(course):
assert len(course.get_purchased_users()) == 0
|
1615855
|
import numpy as np
import infotheory
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
TEST_HEADER = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
SUCCESS = bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC
FAILED = bcolors.FAIL + "FAILED" + bcolors.ENDC
def _except(e):
print("\n" + FAILED)
print(e)
exit(1)
def do_matching(base_str, result, target, name, decimals=5):
result = np.round(result, decimals=decimals)
target = np.round(target, decimals=decimals)
if result == target:
print(base_str, name, result, target, SUCCESS)
else:
raise Exception(
"{} not equal to expected value. Expected = {}, Actual = {}".format(
name, target, result
)
)
def decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data):
try:
# creating the object and adding data
it_par = infotheory.InfoTools(dims, nreps)
it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
it_par.add_data(data)
# PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([1, 2, 3, 0])
unique_1 = it_par.unique_info([1, 2, 3, 0])
unique_2 = it_par.unique_info([2, 1, 3, 0])
unique_3 = it_par.unique_info([2, 3, 1, 0])
synergy = it_par.synergy([1, 2, 3, 0])
targets = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy]
# Alternate PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([2, 1, 3, 0])
unique_1 = it_par.unique_info([1, 3, 2, 0])
unique_2 = it_par.unique_info([3, 1, 2, 0])
unique_3 = it_par.unique_info([3, 2, 1, 0])
synergy = it_par.synergy([2, 1, 3, 0])
base_str = "Decomposition equivalence | "
do_matching(base_str, total_mi, targets[0], "Total MI")
do_matching(base_str, redundant_info, targets[1], "Redundant info | ")
do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ")
do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ")
do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ")
do_matching(base_str, synergy, targets[5], "Synergistic info | ")
except Exception as e:
_except(e)
def decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets):
""" testing if 4D PID matches expected values """
try:
# creating the object and adding data
it_par = infotheory.InfoTools(dims, nreps)
it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
it_par.add_data(data)
# PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([1, 2, 3, 0])
unique_1 = it_par.unique_info([1, 2, 3, 0])
unique_2 = it_par.unique_info([2, 1, 3, 0])
unique_3 = it_par.unique_info([2, 3, 1, 0])
synergy = it_par.synergy([1, 2, 3, 0])
results = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy]
base_str = "Decomposition test | "
do_matching(base_str, total_mi, targets[0], "Total MI")
do_matching(base_str, redundant_info, targets[1], "Redundant info | ")
do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ")
do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ")
do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ")
do_matching(base_str, synergy, targets[5], "Synergistic info | ")
except Exception as e:
_except(e)
def pid_test_3D(dims, nreps, nbins, data_ranges, data):
""" testing sum of pid == total_mi """
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
mi = it.mutual_info([1, 1, 0])
redundant_info = it.redundant_info([1, 2, 0])
unique_1 = it.unique_info([1, 2, 0])
unique_2 = it.unique_info([2, 1, 0])
synergy = it.synergy([1, 2, 0])
# total_pid
total_pid = np.sum(
np.round([redundant_info, unique_1, unique_2, synergy], decimals=6)
)
# mi
total_mi = np.round(mi, decimals=6)
        if abs(total_pid - total_mi) < 1e-5:
print(total_pid, total_mi, SUCCESS)
else:
raise Exception(
"Total PID does not equal MI: total_mi = {}; total_pid = {}".format(
total_pid, total_mi
)
)
except Exception as e:
_except(e)
def decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
redundant_info_1 = it.redundant_info([1, 2, 0])
synergy_1 = it.synergy([1, 2, 0])
redundant_info_2 = it.redundant_info([2, 1, 0])
synergy_2 = it.synergy([2, 1, 0])
base_str = "Decomposition equivalence | "
do_matching(base_str, redundant_info_1, redundant_info_2, "Redundant info | ")
do_matching(base_str, synergy_1, synergy_2, "Synergy | ")
except Exception as e:
_except(e)
def decomposition_test_3D(dims, nreps, nbins, data_ranges, data, results):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
redundant_info = it.redundant_info([1, 2, 0])
unique_1 = it.unique_info([1, 2, 0])
unique_2 = it.unique_info([2, 1, 0])
synergy = it.synergy([1, 2, 0])
if all(
np.round([redundant_info, unique_1, unique_2, synergy], decimals=2)
== results
):
print(synergy, SUCCESS)
else:
raise Exception("PID computation error")
except Exception as e:
_except(e)
def uniform_random_mi_test(dims, nreps, nbins, data_ranges, num_samples=1000):
print(
"Testing mutual info with uniform random variables. MI = ", end="", flush=True
)
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(np.random.rand(num_samples, dims))
# ...alternatively,
# for _ in range(num_samples):
# it.add_data_point(np.random.rand(dims))
# estimating mutual information
mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins)))
print(mi, SUCCESS)
except Exception as e:
print(e)
_except(e)
def identical_random_mi_test(
dims, nreps, nbins, data_ranges, add_noise=False, num_samples=1000
):
print("Testing mutual info with identical random variables", end="", flush=True)
if add_noise:
print(" with noise. MI = ", end="", flush=True)
else:
print(". MI = ", end="", flush=True)
try:
# creating the object
if dims % 2 != 0:
dims += 1
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
p_dims = int(dims / 2)
# adding points
for _ in range(num_samples):
point1 = np.random.rand(p_dims)
if add_noise:
point2 = point1 + (np.random.rand(p_dims) / 30)
else:
point2 = point1
it.add_data_point(np.concatenate((point1, point2)))
# computing mutual information
mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins)))
print(mi, SUCCESS)
except Exception as e:
_except(e)
def entropy_test(dims, nreps, nbins, data_ranges, data_sampler, num_samples=1000):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
for _ in range(num_samples):
it.add_data_point([data_sampler()])
# estimate entropy
print(it.entropy([0]), SUCCESS)
except Exception as e:
_except(e)
def test_pid_4D():
""" Testing
    4D PI-decomposition
1. sanity for each PI measure
2. known PIDs for even parity
"""
print("\n" + bcolors.TEST_HEADER + "PID-4D" + bcolors.ENDC)
## Testing PID by value
dims = 4
nreps = 0
nbins = [2] * dims
data_ranges = [[0] * dims, [1] * dims]
# Even parity check
data = [
[0, 0, 0, 0],
[0, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
]
targets = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
print("Testing PID with even parity checker")
decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets)
# random data
print("Testing PID with uniform random data")
dims = 4
    nreps = 0
nbins = [50] * dims
data_ranges = [[0] * dims, [1] * dims]
data = np.random.rand(5000, dims)
decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data)
def test_pid_3D():
""" Testing
1. sum(PID) == mi
2. known PIDs for logic gates
3. synergy([0,1,2]) == synergy([0,2,1])?
"""
print("\n" + bcolors.TEST_HEADER + "PID-3D" + bcolors.ENDC)
## Testing PID by value
dims = 3
    nreps = 0
nbins = [2] * dims
data_ranges = [[0] * dims, [1] * dims]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
print("Testing total PID with total mi | AND gate = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
print("Testing total PID with total mi | XOR gate = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
# random data
dims = 3
    nreps = 0
nbins = [50] * 3
data_ranges = [[0] * 3, [1] * 3]
data = np.random.rand(500, dims)
print("Testing total PID with total mi | random data = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
## Testing PI decomposition
dims = 3
    nreps = 0
nbins = [2] * 3
data_ranges = [[0] * 3, [1] * 3]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
print("Testing decomposition with AND gate = ", end="", flush=True)
decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.31, 0.0, 0.0, 0.5])
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
print("Testing decomposition with XOR gate = ", end="", flush=True)
decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.0, 0.0, 0.0, 1.0])
## Testing decomposition equivalence
dims = 3
    nreps = 0
nbins = [2] * 3
data_ranges = [[0] * 3, [1] * 3]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
print("Testing redundant and synergistic equivalence | AND gate")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
print("Testing redundant and synergistic equivalence | XOR gate")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
# random data
dims = 3
    nreps = 0
nbins = [50] * 3
data_ranges = [[0] * 3, [1] * 3]
data = np.random.rand(500, dims)
print("Testing redundant and synergistic equivalence | random data")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
def test_mutual_info(dims, nreps, nbins, data_ranges):
""" Testing mutual information under three conditions
1. two uniform random variables (low MI)
2. two identical random variables (high MI)
    3. one random variable and a noisy version of the same (medium MI)
"""
print("\n" + bcolors.TEST_HEADER + "MUTUAL INFORMATION" + bcolors.ENDC)
uniform_random_mi_test(dims, nreps, nbins, data_ranges)
identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=False)
identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=True)
def test_entropy(dims, nreps, nbins, data_ranges):
""" Testing entropy under two conditions
1. A uniform random variable (high entropy)
2. A gaussian with low std. dev. (low entropy)
"""
print("\n" + bcolors.TEST_HEADER + "ENTROPY" + bcolors.ENDC)
print("Testing entropy with uniform distribution = ", end="", flush=True)
entropy_test(dims, nreps, nbins, data_ranges, lambda: np.random.uniform())
print("Testing entropy with normal distribution = ", end="", flush=True)
entropy_test(
dims, nreps, nbins, data_ranges, lambda: np.random.normal(loc=0.5, scale=0.01)
)
def test_binning(dims, nreps, nbins, data_ranges):
""" Test execution of both types of binning
1. Equal interval
2. Manual specification
"""
print("\n" + bcolors.TEST_HEADER + "BINNING" + bcolors.ENDC)
mi_eq = mi_mb = None
# resetting for this test
dims = 2
    # generating a common set of datapoints
datapoints = []
for _ in range(1000):
point1 = np.random.rand()
point2 = point1 + (np.random.rand() / 30)
datapoints.append([point1, point2])
# Equal interval binning
try:
print("Estimating MI using equal interval binning = ", end="", flush=True)
it = infotheory.InfoTools(dims, nreps)
# set bin boundaries
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(datapoints)
# computing mutual information
mi_eq = it.mutual_info([0, 1])
print(mi_eq, SUCCESS)
except Exception as e:
_except(e)
# Manual binning
try:
print("Estimating MI using manually specified binning = ", end="", flush=True)
it = infotheory.InfoTools(dims, nreps)
# set bin boundaries
it.set_bin_boundaries([[0.3333, 0.6666], [0.3333, 0.6666]])
# adding points
it.add_data(datapoints)
# computing mutual information
mi_mb = it.mutual_info([0, 1])
print(mi_mb, SUCCESS)
except Exception as e:
_except(e)
# mi_eq == mi_mb?
print(
"Tested both binning methods. Difference in result = {}".format(mi_eq - mi_mb),
SUCCESS,
)
def test_creation(dims, nreps, nbins, data_ranges):
print("Testing creating an object. ", end="", flush=True)
try:
# creating object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
print(bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC)
except Exception as e:
_except(e)
def run_tests(dims, nreps, nbins, data_ranges):
""" runs all tests """
print(bcolors.HEADER + "************ Starting tests ************" + bcolors.ENDC)
test_creation(dims, nreps, nbins, data_ranges)
test_binning(dims, nreps, [3, 3], data_ranges)
test_entropy(1, nreps, [50], [[0], [1]])
test_mutual_info(dims, nreps, nbins, data_ranges)
test_pid_3D()
test_pid_4D()
print(
"\n"
+ bcolors.HEADER
+ "************ Tests completed ************"
+ bcolors.ENDC
)
def manual_test(m, n):
it = infotheory.InfoTools(2, 1, [2, 2], [0, 0], [1, 1])
it.add_data([[0, 0]] * m + [[1, 1]] * n)
print("m = ", m, " n = ", n, " MI = ", it.mutual_info([0, 1]))
if __name__ == "__main__":
dims = 2
nreps = 0
nbins = [50] * dims
data_ranges = [[0] * dims, [1] * dims]
# for m,n in zip([1,2,2,3,500,499,200],[1,1,2,2,500,500,500]):
# manual_test(m,n)
run_tests(dims, nreps, nbins, data_ranges)
|
1615893
|
import numpy as np
from skimage.io import imread
# import pdb
def add_patch(img,trigger):
flag=False
if img.max()>1.:
img=img/255.
flag=True
if trigger.max()>1.:
trigger=trigger/255.
# x,y=np.random.randint(10,20,size=(2,))
x,y = np.random.choice([3, 28]), np.random.choice([3, 28])
m,n,_=trigger.shape
#img[x-int(m/2):x+m-int(m/2),y-int(n/2):y+n-int(n/2),:]=img[x-int(m/2):x+m-int(m/2),
# y-int(n/2):y+n-int(n/2),:]*(1-trigger)+trigger
img[x-int(m/2):x+m-int(m/2),y-int(n/2):y+n-int(n/2),:]=trigger # opaque trigger
if flag:
img=(img*255).astype('uint8')
return img
def generate_poisoned_data(X_train,Y_train,source,target, trigger):
ind=np.argwhere(Y_train==source)
Y_poisoned=target*np.ones((ind.shape[0])).astype(int)
# k=np.random.randint(6,11)
# trigger=imread('Data/Masks_Test_5/mask%1d.bmp'%(k))
# pdb.set_trace()
X_poisoned=np.stack([add_patch(X_train[i,...],trigger) for i in ind.squeeze()],0)
return X_poisoned,Y_poisoned,trigger,ind.squeeze()
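# Illustrative usage sketch (added; not part of the original module): poison all
# images of a source class with an opaque trigger patch. The array shapes,
# labels and trigger below are assumptions made purely for demonstration.
def _example_generate_poisoned_data():
    X_train = np.random.randint(0, 256, size=(10, 32, 32, 3)).astype('uint8')
    Y_train = np.array([0, 1] * 5)
    trigger = 255 * np.ones((5, 5, 3), dtype='uint8')
    X_p, Y_p, trig, idx = generate_poisoned_data(X_train, Y_train,
                                                 source=1, target=0,
                                                 trigger=trigger)
    # X_p holds patched copies of every source-class image; Y_p is the target
    # label repeated; idx indexes the poisoned rows in X_train.
    return X_p, Y_p, trig, idx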
|
1615902
|
import torch
import torch.nn as nn
from torch import optim
import numpy as np
import nltk
class TreeRecursiveEduNN(nn.Module):
def __init__(self, embed_dict, glove, embed_size, glove_size, hidden_size, use_relations=True):
super(TreeRecursiveEduNN, self).__init__()
self.glove = glove
self.embed_dict = embed_dict
self.embed_size = embed_size
self.hidden_size = hidden_size
self.glove_size = glove_size
self.embeddings = self.init_embeddings()
self.use_relations = use_relations
self.Wforget = nn.Linear(embed_size, hidden_size, bias=True)
self.Uforget_l_l = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uforget_l_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uforget_r_l = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uforget_r_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.Winput = nn.Linear(embed_size, hidden_size, bias=True)
self.Uinput_l = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uinput_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.Woutput = nn.Linear(embed_size, hidden_size, bias=True)
self.Uoutput_l = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uoutput_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.Wupdate = nn.Linear(embed_size, hidden_size, bias=True)
self.Uupdate_l = nn.Linear(hidden_size, hidden_size, bias=False)
self.Uupdate_r = nn.Linear(hidden_size, hidden_size, bias=False)
self.tree2scores = nn.Linear(hidden_size * 2, 3, bias=True)
self.edu_lstm = nn.LSTM(glove_size, hidden_size, num_layers=1, batch_first=True)
def forward(self, input_tree):
root_hidden_output = self.forward_recurse(input_tree)
return self.tree2scores(root_hidden_output)[0], root_hidden_output
def forward_recurse(self, input_tree):
if (input_tree.left_child is None):
return self.compute_edu_embeddings(input_tree)
else:
l_child_hidden_state, l_child_cell = self.forward_recurse(input_tree.left_child)
r_child_hidden_state, r_child_cell = self.forward_recurse(input_tree.right_child)
# Embedding for the current discourse role (node)
mononuclear = ["Joint", "Contrast", "TextualOrganization", "Same-Unit"]
if (input_tree.role == 'Root'):
return torch.cat((l_child_hidden_state, r_child_hidden_state), 2)
elif (input_tree.rel_type in mononuclear):
if self.use_relations:
root_embedding = self.embeddings(self.embed_dict[input_tree.rel_type])
else:
root_embedding = self.embeddings(self.embed_dict['Nucleus'])
else:
if self.use_relations:
root_embedding = self.embeddings(self.embed_dict[input_tree.rel_type + "_" + input_tree.role])
else:
root_embedding = self.embeddings(self.embed_dict[input_tree.role])
# RNN gates
forget_gate_left = torch.sigmoid(self.Wforget(root_embedding) + self.Uforget_l_l(l_child_hidden_state) + self.Uforget_l_r(r_child_hidden_state))
forget_gate_right = torch.sigmoid(self.Wforget(root_embedding) + self.Uforget_r_l(l_child_hidden_state)
+ self.Uforget_r_r(r_child_hidden_state))
input_gate = torch.sigmoid(self.Winput(root_embedding) + self.Uinput_l(l_child_hidden_state)
+ self.Uinput_r(r_child_hidden_state))
output_gate = torch.sigmoid(self.Woutput(root_embedding) + self.Uoutput_l(l_child_hidden_state)
+ self.Uoutput_r(r_child_hidden_state))
update_gate = torch.tanh(self.Wupdate(root_embedding) + self.Uupdate_l(l_child_hidden_state)
+ self.Uupdate_r(r_child_hidden_state))
cell = input_gate * update_gate + forget_gate_left * l_child_cell + forget_gate_right * r_child_cell
hidden = output_gate * torch.tanh(cell)
return hidden, cell
def init_embeddings(self):
return nn.Embedding(len(self.embed_dict), self.embed_size)
def compute_edu_embeddings(self, tree):
_, edu_hid_cell_tuple = self.edu_lstm(self.construct_edu_embeddings(tree.edu_text))
return edu_hid_cell_tuple
def construct_edu_embeddings(self, edu_text):
edu_words = nltk.word_tokenize(edu_text)
matrix_len = len(edu_words)
weights_matrix = np.zeros((matrix_len, self.glove_size))
for i, word in enumerate(edu_words):
try:
weights_matrix[i] = self.glove[word.lower()]
except KeyError:
weights_matrix[i] = np.zeros(self.glove_size)
return torch.FloatTensor([weights_matrix])
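# Minimal smoke test (a sketch, not part of the original training pipeline).
# The toy Node structure, the stand-in GloVe vectors and the embed_dict below are
# assumptions made purely for illustration; real usage feeds RST parse trees and
# pretrained GloVe embeddings. Requires the NLTK 'punkt' tokenizer data
# (newer NLTK releases may additionally need 'punkt_tab').
if __name__ == "__main__":
    from collections import defaultdict, namedtuple
    nltk.download('punkt', quiet=True)  # tokenizer data used by construct_edu_embeddings
    Node = namedtuple("Node", ["left_child", "right_child", "role", "rel_type", "edu_text"])
    leaf1 = Node(None, None, "Nucleus", None, "the weather was nice")
    leaf2 = Node(None, None, "Satellite", None, "so we went outside")
    mid = Node(leaf1, leaf2, "Nucleus", "Elaboration", None)
    leaf3 = Node(None, None, "Satellite", None, "it rained later")
    root = Node(mid, leaf3, "Root", None, None)
    glove = defaultdict(lambda: np.zeros(50))  # stand-in for real GloVe vectors
    embed_dict = {"Nucleus": torch.tensor(0), "Satellite": torch.tensor(1)}
    model = TreeRecursiveEduNN(embed_dict, glove, embed_size=50, glove_size=50,
                               hidden_size=64, use_relations=False)
    scores, hidden = model(root)
    print(scores.shape, hidden.shape)  # expected: torch.Size([1, 3]) torch.Size([1, 1, 128])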
|
1615928
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
datapath = '../util/stock_dfs/'
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def ret(x, y):
return np.log(y/x)
def get_zscore(x):
return (x -x.mean())/x.std()
def make_inputs(filepath):
D = pd.read_csv(filepath).set_index('Date')
    #D.index = pd.to_datetime(D.index,format='%Y-%m-%d') # Set the index to a datetime
Res = pd.DataFrame()
ticker = get_ticker(filepath)
Res['c_2_o'] = get_zscore(ret(D.Open,D.Close))
Res['h_2_o'] = get_zscore(ret(D.Open,D.High))
Res['l_2_o'] = get_zscore(ret(D.Open,D.Low))
Res['c_2_h'] = get_zscore(ret(D.High,D.Close))
Res['h_2_l'] = get_zscore(ret(D.High,D.Low))
    Res['c1_c0'] = ret(D.Close,D.Close.shift(-1)).fillna(0) # Tomorrow's return
Res['vol'] = get_zscore(D.Volume)
Res['ticker'] = ticker
return Res
def merge_all_data(datapath):
    frames = []
    for f in os.listdir(datapath):
        filepath = os.path.join(datapath, f)
        if filepath.endswith('.csv'):
            print(filepath)
            frames.append(make_inputs(filepath))
    # DataFrame.append is deprecated in modern pandas; concatenate once instead
    return pd.concat(frames) if frames else pd.DataFrame()
def embed(df, target):
    """target: one of 'return', 'class' or 'multi_class'."""
pivot_columns = df.columns[:-1]
P = df.pivot_table(index=df.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
mi = P.columns.tolist()
new_ind = pd.Index(e[1] + '_' + e[0] for e in mi)
P.columns = new_ind
clean_and_flat = P.dropna(axis=1)
target_cols = list(filter(lambda x: 'c1_c0' in x, clean_and_flat.columns.values))
input_cols = list(filter(lambda x: 'c1_c0' not in x, clean_and_flat.columns.values))
inputDF = clean_and_flat[input_cols]
targetDF = clean_and_flat[target_cols]
TotalReturn = ((1 - np.exp(targetDF)).sum(axis=1)) / len(targetDF.columns) # If i put one dollar in each stock at the close, this is how much I'd get back
Labeled = pd.DataFrame()
Labeled['return'] = TotalReturn
Labeled['class'] = TotalReturn.apply(labeler, 1)
Labeled['multi_class'] = pd.qcut(TotalReturn, 11, labels=range(11))
pd.qcut(TotalReturn, 5).unique()
    return inputDF, Labeled[target]
def labeler(x):
if x>0.0029:
return 1
if x<-0.00462:
return -1
else:
return 0
'''
if __name__ == "__main__":
    all_data = merge_all_data(datapath)
    inputdf, labeled = embed(all_data, 'class')
    print(inputdf.head())
    print(labeled.head())
'''
|
1615940
|
import argparse
import os
import shlex
import unittest
from gooey.gui import formatters
class TestFormatters(unittest.TestCase):
def test_counter_formatter(self):
"""
Should return the first option repeated N times
None if N is unspecified
Issue #316 - using long-form argument caused formatter to produce incorrect output
"""
expected_outputs = [
(['-v', '--verbose'], '-v', 1),
(['-v', '--verbose'], '-v -v', 2),
(['-v', '--verbose'], '-v -v -v', 3),
(['-v', '--verbose'], '', 0),
            # ensuring that long-form flags are handled correctly
(['--verbose', '-v'], '--verbose', 1),
(['--verbose', '-v'], '--verbose --verbose', 2),
(['--verbose', '-v'], '--verbose --verbose --verbose', 3),
# single args
(['-v'], '-v', 1),
(['-v'], '-v -v', 2),
(['--verbose'], '--verbose', 1),
# bad inputs
(['-v'], None, None),
(['-v'], None, 'some-garbage'),
(['-v'], None, 'af3gd'),
]
        for commands, expected, verbosity_level in expected_outputs:
            result = formatters.counter({'commands': commands}, verbosity_level)
self.assertEqual(result, expected)
# make sure that argparse actually accepts it as valid.
if result:
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count')
parser.parse_args(result.split())
def test_multifilechooser_formatter(self):
"""
Should return files (quoted), separated by spaces if there is more
        than one, preceded by optional command if the argument is optional.
Assumes the argument has been created with some form of nargs, which
only makes sense for possibly choosing multiple values.
"""
# Helper function to generalize the variants we need to test
def multifilechooser_helper(names):
# Note that the MultiFileChooser widget produces a single string with
# paths separated by os.pathsep.
if names:
prefix = names[0] + ' '
else:
prefix = ''
expected_outputs = [
(names, None, ''),
(names, prefix + '"abc"', 'abc'),
(names, prefix + '"abc" "def"', os.pathsep.join(['abc', 'def'])),
# paths with spaces
(names, prefix + '"a b c"', 'a b c'),
(names, prefix + '"a b c" "d e f"', os.pathsep.join(['a b c', 'd e f'])),
]
for commands, expected, widget_result in expected_outputs:
result = formatters.multiFileChooser({'commands': commands}, widget_result)
self.assertEqual(result, expected)
# make sure that argparse actually accepts it as valid.
if result:
parser = argparse.ArgumentParser()
if not names:
names = ["file"]
parser.add_argument(names[0], nargs='+')
parser.parse_args(shlex.split(result))
# Positional argument, with nargs
multifilechooser_helper([])
# Optional argument, with nargs
multifilechooser_helper(["-f", "--file"])
|
1615966
|
import torch.nn as nn
from timm.models.layers import trunc_normal_
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
num_layers=2,
act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.layers = nn.ModuleList(
[nn.Linear(hidden_features if i != 0 else in_features,
hidden_features if i != num_layers - 1 else out_features
) for i in range(num_layers)]
)
self.act = act_layer()
self.drop = nn.Dropout(drop)
def forward(self, x):
for i, linear in enumerate(self.layers):
x = linear(x)
if i != len(self.layers) - 1:
x = self.act(x)
x = self.drop(x)
return x
class MlpHead(nn.Module):
def __init__(self, dim, W, H):
super(MlpHead, self).__init__()
self.cls_mlp = Mlp(dim, out_features=1, num_layers=3)
self.reg_mlp = Mlp(dim, out_features=4, num_layers=3)
self.W = W
self.H = H
self.reset_parameters()
def reset_parameters(self):
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
self.apply(_init_weights)
def forward(self, x):
'''
Args:
x (torch.Tensor): (B, H * W, C) input feature map
Returns:
Dict: {
                'class_score' (torch.Tensor): (B, 1, H, W)
'bbox' (torch.Tensor): (B, H, W, 4)
}
'''
cls = self.cls_mlp(x)
reg = self.reg_mlp(x).sigmoid()
class_score = cls
B, L, C = class_score.shape
class_score = class_score.view(B, self.H, self.W, C)
class_score = class_score.permute(0, 3, 1, 2)
class_score = class_score.sigmoid()
bbox = reg.view(B, self.H, self.W, 4)
return {'class_score': class_score, 'bbox': bbox}
def build_single_scale_mlp_head(head_parameters, shape):
return MlpHead(head_parameters['dim'], shape[0], shape[1])
def build_mlp_head(network_config, with_multi_scale_wrapper):
head_config = network_config['head']
assert head_config['type'] == 'Mlp'
head_parameters = head_config['parameters']
if 'scales' not in head_parameters:
shape = head_config['output_protocol']['parameters']['label']
return build_single_scale_mlp_head(head_parameters, shape['size'])
else:
shapes = head_config['output_protocol']['parameters']['label']['scales']
heads = [build_single_scale_mlp_head(single_scale_head_parameters, shape['size']) for single_scale_head_parameters, shape in zip(head_parameters['scales'], shapes)]
if with_multi_scale_wrapper:
from .multi_scale_head import MultiScaleHead
return MultiScaleHead(heads)
else:
return heads
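# Minimal usage sketch (illustrative shapes only; the dim/H/W values below are
# assumptions, not taken from any particular config).
if __name__ == "__main__":
    import torch
    head = MlpHead(dim=256, W=20, H=20)
    feats = torch.randn(2, 20 * 20, 256)  # (B, H * W, C) flattened feature map
    out = head(feats)
    print(out['class_score'].shape, out['bbox'].shape)  # (2, 1, 20, 20) and (2, 20, 20, 4)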
|
1615974
|
import argparse
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import cifar10
import numpy as np
import os
parser = argparse.ArgumentParser(description='DeepJudge Seed Selection Process')
parser.add_argument('--model', required=True, type=str, help='victim model path')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset for the seed selection')
parser.add_argument('--num', default=1000, type=int, help='number of selected seeds')
parser.add_argument('--order', default='max', type=str, help='select seeds with the largest or smallest certainty. choice: max/min')
parser.add_argument('--output', default='./seeds', type=str, help='seeds saved dir')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def seedSelection(model, x, y, num=1000, order='max'):
true_idx = np.where(np.argmax(model(x), axis=1) == np.argmax(y, axis=1))[0]
x, y = x[true_idx], y[true_idx]
ginis = np.sum(np.square(model(x).numpy()), axis=1)
if order == 'max':
ranks = np.argsort(-ginis)
else:
ranks = np.argsort(ginis)
return x[ranks[:num]], y[ranks[:num]]
if __name__ == '__main__':
opt = parser.parse_args()
if opt.dataset == 'cifar10':
cifar10 = tf.keras.datasets.cifar10
(training_images, training_labels), (test_images, test_labels) = cifar10.load_data()
elif opt.dataset == 'mnist':
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
test_images = test_images.reshape(10000, 28, 28, 1)
else:
raise NotImplementedError()
# select seeds from the testing dataset
x_test = test_images / 255.0
y_test = tf.keras.utils.to_categorical(test_labels, 10)
victim_model = load_model(opt.model)
seeds_x, seeds_y = seedSelection(victim_model, x_test, y_test, num=opt.num, order=opt.order)
log_dir = opt.output
if not os.path.exists(log_dir):
os.makedirs(log_dir)
save_path = f"{log_dir}/{opt.dataset}_{opt.order}_{opt.num}seeds.npz"
np.savez(save_path, seeds_x=seeds_x, seeds_y=seeds_y)
print('Selected seeds saved at ' + save_path)
|
1616007
|
from __future__ import annotations
import asyncio
import typing
from ctc import spec
from ... import management
from ... import connect_utils
from ... import intake_utils
from . import blocks_statements
from ..block_timestamps import block_timestamps_statements
async def async_intake_block(
block: spec.Block,
network: spec.NetworkReference,
) -> None:
"""intake block and extract relevant information to db tables
under normal operation should store raw block or block timestamp, noth both
"""
block_coroutine = async_intake_raw_block(
block=block,
network=network,
)
timestamp_coroutine = async_intake_block_timestamp(
block=block,
network=network,
)
await asyncio.gather(
block_coroutine,
timestamp_coroutine,
)
async def async_intake_raw_block(
block: spec.Block,
network: spec.NetworkReference,
) -> None:
# check whether to intake
if not management.get_active_schemas().get('blocks'):
return
if not await intake_utils.async_is_block_fully_confirmed(
block=block, network=network
):
return
# store in db
engine = connect_utils.create_engine(
        schema_name='blocks',
network=network,
)
if engine is None:
return
with engine.begin() as conn:
await blocks_statements.async_upsert_block(
conn=conn,
block=block,
network=network,
)
async def async_intake_block_timestamp(
block: spec.Block | None,
*,
block_number: int | None = None,
timestamp: int | None = None,
network: spec.NetworkReference,
) -> None:
if block_number is None or timestamp is None:
if block is None:
raise Exception('must specify block or block_number and timestamp')
block_number = block['number']
timestamp = block['timestamp']
# check whether to intake
if not management.get_active_schemas().get('block_timestamps'):
return
if not await intake_utils.async_is_block_fully_confirmed(
block=block_number, network=network
):
return
# store in db
engine = connect_utils.create_engine(
schema_name='block_timestamps',
network=network,
)
if engine is None:
return
with engine.begin() as conn:
await block_timestamps_statements.async_upsert_block_timestamp(
conn=conn,
block_number=block_number,
timestamp=timestamp,
)
async def async_intake_blocks(
blocks: typing.Sequence[spec.Block],
network: spec.NetworkReference,
) -> None:
blocks_coroutine = async_intake_raw_blocks(blocks=blocks, network=network)
timestamps_coroutine = async_intake_block_timestamps(
blocks=blocks, network=network
)
await asyncio.gather(blocks_coroutine, timestamps_coroutine)
async def async_intake_raw_blocks(
blocks: typing.Sequence[spec.Block],
network: spec.NetworkReference,
) -> None:
if not management.get_active_schemas().get('blocks'):
return
confirmed = await intake_utils.async_filter_fully_confirmed_blocks(
blocks=blocks,
network=network,
)
if len(confirmed) > 0:
engine = connect_utils.create_engine(schema_name='blocks', network=network)
if engine is None:
return
with engine.begin() as conn:
await blocks_statements.async_upsert_blocks(
conn=conn,
blocks=confirmed,
network=network,
)
async def async_intake_block_timestamps(
blocks: typing.Sequence[spec.Block] | None = None,
*,
block_timestamps: typing.Mapping[int, int] | None = None,
network: spec.NetworkReference,
) -> None:
if blocks is not None and block_timestamps is not None:
raise Exception('cannot specify both blocks and block_timestamps')
if not management.get_active_schemas().get('block_timestamps'):
return
# determine which blocks have enough confirmations
if blocks is not None:
confirmed_blocks = (
await intake_utils.async_filter_fully_confirmed_blocks(
blocks=blocks,
network=network,
)
)
if len(confirmed_blocks) == 0:
return
confirmed_block_timestamps = None
elif block_timestamps is not None:
confirmed_numbers = (
await intake_utils.async_filter_fully_confirmed_blocks(
blocks=list(block_timestamps.keys()),
network=network,
)
)
if len(confirmed_numbers) == 0:
return
confirmed_block_timestamps = {
number: block_timestamps[number] for number in confirmed_numbers
}
confirmed_blocks = None
else:
raise Exception('specify either blocks or block_timestamps')
# store in database
engine = connect_utils.create_engine(
schema_name='block_timestamps',
network=network,
)
if engine is None:
return
with engine.begin() as conn:
await block_timestamps_statements.async_upsert_block_timestamps(
conn=conn,
blocks=confirmed_blocks,
block_timestamps=confirmed_block_timestamps,
)
#
# # second draft
#
# async def async_intake_blocks(
# blocks: typing.Sequence[spec.Block],
# provider: spec.ProviderSpec = None,
# ) -> None:
# """intake block and extract relevant information to db tables"""
# active_block_schemas = get_active_schemas('block')
# if len(active_block_schemas) == 0:
# return
# intake_blocks = await async_should_intake_blocks(
# blocks=blocks, provider=provider
# )
# if len(intake_blocks) > 0:
# with engine.begin() as conn:
# coroutines = []
# for schema in active_block_schemas:
# if schema == 'blocks':
# coroutine = db_statements.async_upsert_blocks(
# conn=conn,
# blocks=intake_blocks,
# )
# coroutines.append(coroutine)
# elif schema == 'block_timestamps':
# coroutine = db_statements.async_upsert_blocks_timestamp(
# conn=conn,
# blocks=should_upsert_block_timestamps,
# )
# coroutines.append(coroutine)
# elif schema == 'block_gas_stats':
# coroutine = db_statements.async_upsert_blocks_gas_stats(
# conn=conn,
# blocks=should_upsert_blocks_gas_stats,
# )
# coroutines.append(coroutine)
# else:
# raise Exception('unknown schema: ' + str(schema))
# await asyncio.gather(*coroutines)
# async def async_should_intake_raw_block(block, network):
# # check whether already stored
# db_statements.does_block_exist()
# async def async_should_intake_blocks(blocks, provider):
# required_confirmations = management.get_required_confirmations(
# provider=provider
# )
# latest_block = None
# latest_block = await rpc.async_eth_block_number(provider=provider)
# intake_blocks = []
# for block in blocks:
# pass
# if block['number'] <= max_block - required_confirmations:
# return True
# else:
# latest_block = await rpc.async_eth_block_number(provider=provider)
# return block['number'] <= latest_block - min_confirmations
##
## # old
##
# async def async_intake_block(
# block: spec.Block,
# provider: spec.ProviderSpec = None,
# ) -> None:
# """intake block and extract relevant information to db tables"""
# # determine whether to store block
# network = rpc.get_provider_network(provider=provider)
# min_confirmations = management.get_min_confirmations(
# schema='block_timestamps',
# network=network,
# )
# engine = connect_utils.create_engine(
# schema='block_timestamps',
# network=network,
# )
# if engine is None:
# return
# check_if_exists = False
# with engine.connect() as conn:
# if (
# check_if_exists
# and db_statements.get_block_timestamp(
# conn=conn, block_number=block['number']
# )
# is not None
# ):
# store = False
# else:
# max_block = db_statements.get_max_block_number(conn=conn, network=network)
# if block['number'] <= max_block - min_confirmations:
# store = True
# else:
# latest_block = await rpc.async_eth_block_number(
# provider=provider,
# )
# store = block['number'] <= latest_block - min_confirmations
# # store data in db
# if store:
# with engine.begin() as conn:
# db_statements.set_block_timestamp(
# conn=conn,
# block_number=block['number'],
# timestamp=block['timestamp'],
# )
# async def async_intake_blocks(
# blocks: typing.Sequence[spec.Block],
# provider: spec.ProviderSpec = None,
# ) -> None:
# """
# TODO: database should store a max_complete_block number
# - indicates that ALL blocks below this height are stored
# - enables not re-storing anything below this height upon intake
# """
# # determine whether to store block
# network = rpc.get_provider_network(provider=provider)
# min_confirmations = management.get_min_confirmations(
# schema='block_timestamps',
# network=network,
# )
# engine = connect_utils.create_engine(
# schema='block_timestamps',
# network=network,
# )
# if engine is None:
# return
# with engine.connect() as conn:
# max_intake_block = max(block['number'] for block in blocks)
# max_stored_block = db_statements.get_max_block_number(
# conn=conn, network=network
# )
# if max_intake_block <= max_stored_block - min_confirmations:
# store_blocks = blocks
# else:
# latest_block = await rpc.async_eth_block_number(
# provider=provider,
# )
# store_blocks = [
# block
# for block in blocks
# if block['number'] <= latest_block - min_confirmations
# ]
# # store data in db
# if len(store_blocks) > 0:
# block_timestamps = {
# block['number']: block['timestamp'] for block in store_blocks
# }
# with engine.begin() as conn:
# db_statements.set_block_timestamps(
# conn=conn,
# block_timestamps=block_timestamps,
# )
|
1616044
|
import os.path
from datetime import datetime
from unittest import mock
from unittest.mock import MagicMock
import chardet
import tablib
from core.admin import (
AuthorAdmin,
BookAdmin,
BookResource,
CustomBookAdmin,
ImportMixin,
)
from core.models import Author, Book, Category, EBook, Parent
from django.contrib.admin.models import DELETION, LogEntry
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest
from django.test.testcases import TestCase
from django.test.utils import override_settings
from django.utils.translation import gettext_lazy as _
from tablib import Dataset
from import_export import formats
from import_export.admin import (
ExportActionMixin,
ExportActionModelAdmin,
ExportMixin,
ImportExportActionModelAdmin,
)
from import_export.formats.base_formats import DEFAULT_FORMATS
from import_export.tmp_storages import TempFolderStorage
class ImportExportAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '<EMAIL>',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.client.login(username='admin', password='password')
def test_import_export_template(self):
response = self.client.get('/admin/core/book/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'admin/import_export/change_list_import_export.html')
self.assertContains(response, _('Import'))
self.assertContains(response, _('Export'))
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
def test_delete_from_admin(self):
# test delete from admin site (see #432)
# create a book which can be deleted
b = Book.objects.create(id=1)
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-for-delete.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
# check the LogEntry was created as expected
deleted_entry = LogEntry.objects.latest('id')
self.assertEqual("delete through import_export", deleted_entry.change_message)
self.assertEqual(DELETION, deleted_entry.action_flag)
self.assertEqual(b.id, int(deleted_entry.object_id))
self.assertEqual("", deleted_entry.object_repr)
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import_mac(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-mac.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books-mac.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
def test_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {
'file_format': '0',
}
date_str = datetime.now().strftime('%Y-%m-%d')
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Book-{}.csv"'.format(date_str)
)
def test_returns_xlsx_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
for i, f in enumerate(DEFAULT_FORMATS):
if f().get_title() == 'xlsx':
xlsx_index = i
break
else:
self.fail('Unable to find xlsx format. DEFAULT_FORMATS: %r' % DEFAULT_FORMATS)
data = {'file_format': str(xlsx_index)}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'],
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
def test_import_export_buttons_visible_without_add_permission(self):
# issue 38 - Export button not visible when no add permission
original = BookAdmin.has_add_permission
BookAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/book/')
BookAdmin.has_add_permission = original
self.assertContains(response, _('Export'))
self.assertContains(response, _('Import'))
def test_import_buttons_visible_without_add_permission(self):
# When using ImportMixin, users should be able to see the import button
# without add permission (to be consistent with ImportExportMixin)
original = AuthorAdmin.has_add_permission
AuthorAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/author/')
AuthorAdmin.has_add_permission = original
self.assertContains(response, _('Import'))
self.assertTemplateUsed(response, 'admin/import_export/change_list.html')
def test_import_file_name_in_tempdir(self):
# 65 - import_file_name form field can be use to access the filesystem
import_file_name = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
data = {
'input_format': "0",
'import_file_name': import_file_name,
'original_file_name': 'books.csv'
}
with self.assertRaises(FileNotFoundError):
self.client.post('/admin/core/book/process_import/', data)
def test_csrf(self):
response = self.client.get('/admin/core/book/process_import/')
self.assertEqual(response.status_code, 405)
def test_import_log_entry(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
book = LogEntry.objects.latest('id')
self.assertEqual(book.object_repr, "Some book")
self.assertEqual(book.object_id, str(1))
def test_import_log_entry_with_fk(self):
Parent.objects.create(id=1234, name='Some Parent')
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'child.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/child/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/child/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
child = LogEntry.objects.latest('id')
self.assertEqual(child.object_repr, 'Some - child of Some Parent')
self.assertEqual(child.object_id, str(1))
def test_logentry_creation_with_import_obj_exception(self):
# from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
# Cause an exception in import_row, but only after import is confirmed,
# so a failure only occurs when ImportMixin.process_import is called.
class R(BookResource):
def import_obj(self, obj, data, dry_run, **kwargs):
if dry_run:
super().import_obj(obj, data, dry_run, **kwargs)
else:
raise Exception
@monkeypatch_method(BookAdmin)
def get_resource_class(self):
return R
# Verify that when an exception occurs in import_row, when raise_errors is False,
# the returned row result has a correct import_type value,
# so generating log entries does not fail.
@monkeypatch_method(BookAdmin)
def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
resource = self.get_import_resource_class()(**self.get_import_resource_kwargs(request, *args, **kwargs))
return resource.import_data(dataset,
dry_run=False,
raise_errors=False,
file_name=confirm_form.cleaned_data['original_file_name'],
user=request.user,
**kwargs)
dataset = Dataset(headers=["id","name","author_email"])
dataset.append([1, "Test 1", "<EMAIL>"])
input_format = '0'
content = dataset.csv
f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
data = {
"input_format": input_format,
"import_file": f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
def test_import_with_customized_forms(self):
"""Test if admin import works if forms are customized"""
# We reuse import scheme from `test_import` to import books.csv.
# We use customized BookAdmin (CustomBookAdmin) with modified import
# form, which requires Author to be selected (from available authors).
# Note that url is /admin/core/ebook/import (and not: ...book/import)!
# We need at least a single author in the db to select from in the
# admin import custom forms
Author.objects.create(id=11, name='<NAME>')
# GET the import form
response = self.client.get('/admin/core/ebook/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as fobj:
data = {'author': 11,
'input_format': input_format,
'import_file': fobj}
response = self.client.post('/admin/core/ebook/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
self.assertIsInstance(confirm_form,
CustomBookAdmin(EBook, 'ebook/import')
.get_confirm_import_form())
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/ebook/process_import/',
data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, EBook._meta.verbose_name_plural)
)
def test_get_skip_admin_log_attribute(self):
m = ImportMixin()
m.skip_admin_log = True
self.assertTrue(m.get_skip_admin_log())
def test_get_tmp_storage_class_attribute(self):
"""Mock dynamically loading a class defined by an attribute"""
target = "SomeClass"
m = ImportMixin()
m.tmp_storage_class = "tmpClass"
with mock.patch("import_export.admin.import_string") as mock_import_string:
mock_import_string.return_value = target
self.assertEqual(target, m.get_tmp_storage_class())
def test_get_import_data_kwargs_with_form_kwarg(self):
"""
        Test that if the method is called with a 'form' kwarg,
then it is removed and the updated dict is returned
"""
request = MagicMock(spec=HttpRequest)
m = ImportMixin()
kw = {
"a": 1,
"form": "some_form"
}
target = {
"a": 1
}
self.assertEqual(target, m.get_import_data_kwargs(request, **kw))
def test_get_import_data_kwargs_with_no_form_kwarg_returns_empty_dict(self):
"""
        Test that if the method is called with no 'form' kwarg,
then an empty dict is returned
"""
request = MagicMock(spec=HttpRequest)
m = ImportMixin()
kw = {
"a": 1,
}
target = {}
self.assertEqual(target, m.get_import_data_kwargs(request, **kw))
def test_get_context_data_returns_empty_dict(self):
m = ExportMixin()
self.assertEqual(dict(), m.get_context_data())
def test_media_attribute(self):
"""
Test that the 'media' attribute of the ModelAdmin class is overridden to include
the project-specific js file.
"""
mock_model = mock.MagicMock()
mock_site = mock.MagicMock()
class TestExportActionModelAdmin(ExportActionModelAdmin):
def __init__(self):
super().__init__(mock_model, mock_site)
m = TestExportActionModelAdmin()
target_media = m.media
self.assertEqual('import_export/action_formats.js', target_media._js[-1])
class TestImportExportActionModelAdmin(ImportExportActionModelAdmin):
def __init__(self, mock_model, mock_site, error_instance):
self.error_instance = error_instance
super().__init__(mock_model, mock_site)
def write_to_tmp_storage(self, import_file, input_format):
mock_storage = MagicMock(spec=TempFolderStorage)
mock_storage.read.side_effect = self.error_instance
return mock_storage
class ImportActionDecodeErrorTest(TestCase):
mock_model = mock.Mock(spec=Book)
mock_model.__name__ = "mockModel"
mock_site = mock.MagicMock()
mock_request = MagicMock(spec=HttpRequest)
mock_request.POST = {'a': 1}
mock_request.FILES = {}
@mock.patch("import_export.admin.ImportForm")
def test_import_action_handles_UnicodeDecodeError(self, mock_form):
mock_form.is_valid.return_value = True
b_arr = b'\x00\x00'
m = TestImportExportActionModelAdmin(self.mock_model, self.mock_site,
UnicodeDecodeError('codec', b_arr, 1, 2, 'fail!'))
res = m.import_action(self.mock_request)
self.assertEqual(
"<h1>Imported file has a wrong encoding: \'codec\' codec can\'t decode byte 0x00 in position 1: fail!</h1>",
res.content.decode())
@mock.patch("import_export.admin.ImportForm")
def test_import_action_handles_error(self, mock_form):
mock_form.is_valid.return_value = True
m = TestImportExportActionModelAdmin(self.mock_model, self.mock_site,
ValueError("fail"))
res = m.import_action(self.mock_request)
self.assertRegex(
res.content.decode(),
r"<h1>ValueError encountered while trying to read file: .*</h1>")
class ExportActionAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '<EMAIL>',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.cat1 = Category.objects.create(name='Cat 1')
self.cat2 = Category.objects.create(name='Cat 2')
self.client.login(username='admin', password='password')
def test_export(self):
data = {
'action': ['export_admin_action'],
'file_format': '0',
'_selected_action': [str(self.cat1.id)],
}
response = self.client.post('/admin/core/category/', data)
self.assertContains(response, self.cat1.name, status_code=200)
self.assertNotContains(response, self.cat2.name, status_code=200)
self.assertTrue(response.has_header("Content-Disposition"))
date_str = datetime.now().strftime('%Y-%m-%d')
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Category-{}.csv"'.format(date_str)
)
def test_export_no_format_selected(self):
data = {
'action': ['export_admin_action'],
'_selected_action': [str(self.cat1.id)],
}
response = self.client.post('/admin/core/category/', data)
self.assertEqual(response.status_code, 302)
def test_get_export_data_raises_PermissionDenied_when_no_export_permission_assigned(self):
request = MagicMock(spec=HttpRequest)
class TestMixin(ExportMixin):
model = Book
def has_export_permission(self, request):
return False
m = TestMixin()
with self.assertRaises(PermissionDenied):
m.get_export_data('0', Book.objects.none(), request=request)
class TestExportEncoding(TestCase):
mock_request = MagicMock(spec=HttpRequest)
mock_request.POST = {'file_format': 0}
class TestMixin(ExportMixin):
def __init__(self, test_str=None):
self.test_str = test_str
def get_data_for_export(self, request, queryset, *args, **kwargs):
dataset = Dataset(headers=["id", "name"])
dataset.append([1, self.test_str])
return dataset
def get_export_queryset(self, request):
return list()
def get_export_filename(self, request, queryset, file_format):
return "f"
def setUp(self):
self.file_format = formats.base_formats.CSV()
self.export_mixin = self.TestMixin(test_str="teststr")
def test_to_encoding_not_set_default_encoding_is_utf8(self):
self.export_mixin = self.TestMixin(test_str="teststr")
data = self.export_mixin.get_export_data(self.file_format, list(), request=self.mock_request)
csv_dataset = tablib.import_set(data)
self.assertEqual("teststr", csv_dataset.dict[0]["name"])
def test_to_encoding_set(self):
self.export_mixin = self.TestMixin(test_str="ハローワールド")
data = self.export_mixin.get_export_data(self.file_format, list(), request=self.mock_request, encoding="shift-jis")
encoding = chardet.detect(bytes(data))["encoding"]
self.assertEqual("SHIFT_JIS", encoding)
def test_to_encoding_set_incorrect(self):
self.export_mixin = self.TestMixin()
with self.assertRaises(LookupError):
self.export_mixin.get_export_data(self.file_format, list(), request=self.mock_request, encoding="bad-encoding")
def test_to_encoding_not_set_for_binary_file(self):
self.export_mixin = self.TestMixin(test_str="teststr")
self.file_format = formats.base_formats.XLSX()
data = self.export_mixin.get_export_data(self.file_format, list(), request=self.mock_request)
binary_dataset = tablib.import_set(data)
self.assertEqual("teststr", binary_dataset.dict[0]["name"])
@mock.patch("import_export.admin.ImportForm")
def test_export_action_to_encoding(self, mock_form):
mock_form.is_valid.return_value = True
self.export_mixin.to_encoding = "utf-8"
with mock.patch("import_export.admin.ExportMixin.get_export_data") as mock_get_export_data:
self.export_mixin.export_action(self.mock_request)
encoding_kwarg = mock_get_export_data.call_args_list[0][1]["encoding"]
self.assertEqual("utf-8", encoding_kwarg)
@mock.patch("import_export.admin.ImportForm")
def test_export_admin_action_to_encoding(self, mock_form):
class TestExportActionMixin(ExportActionMixin):
def get_export_filename(self, request, queryset, file_format):
return "f"
self.mock_request.POST = {'file_format': '1'}
self.export_mixin = TestExportActionMixin()
self.export_mixin.to_encoding = "utf-8"
mock_form.is_valid.return_value = True
with mock.patch("import_export.admin.ExportMixin.get_export_data") as mock_get_export_data:
self.export_mixin.export_admin_action(self.mock_request, list())
encoding_kwarg = mock_get_export_data.call_args_list[0][1]["encoding"]
self.assertEqual("utf-8", encoding_kwarg)
|
1616066
|
class DocumentType(Enum,IComparable,IFormattable,IConvertible):
"""
Types of Revit documents.
enum DocumentType,values: BuildingComponent (4),Family (1),IFC (3),Other (100),Project (0),Template (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
BuildingComponent=None
Family=None
IFC=None
Other=None
Project=None
Template=None
value__=None
|
1616069
|
A = set(input().split())
N = int(input())
is_strict_superset = True
for x in range(N):
S = set(input().split())
    # strict superset test: A must contain every element of S and be larger than S (A > S)
    is_strict_superset &= (A > S)
print (is_strict_superset)
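# Example (stdin sketch): with A read as "1 2 3 4 5 6" and N = 2 subsets
# "1 2 3" and "4 5", every subset is properly contained in A, so True is printed.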
|
1616108
|
from __future__ import absolute_import, print_function
import glob
import json
import os
import pickle
from typing import Dict
import numpy as np
from loguru import logger
from tqdm import tqdm
_VALID_SUBSETS = ['train', 'test']
class LaSOT(object):
r"""`LaSOT <https://cis.temple.edu/lasot/>`_ Datasets.
Publication:
``LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking``,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, and <NAME>., CVPR 2019.
Args:
root_dir (string): Root directory of dataset where sequence
folders exist.
subset (string, optional): Specify ``train`` or ``test``
subset of LaSOT.
"""
data_dict = {subset: dict() for subset in _VALID_SUBSETS}
def __init__(self,
root_dir,
subset='test',
return_meta=False,
check_integrity=True,
cache_path=None,
ignore_cache=False):
super(LaSOT, self).__init__()
subset = subset.split('_')
assert set(subset).issubset({'train', 'test'}), 'Unknown subset.'
self.root_dir = root_dir
self.subset = subset
self.return_meta = return_meta
# check seems useless, disabled
# if check_integrity:
# self._check_integrity(root_dir)
self.cache_path = cache_path
self.ignore_cache = ignore_cache
self.anno_files = sorted(
glob.glob(os.path.join(root_dir, '*/*/groundtruth.txt')))
self.seq_dirs = [
os.path.join(os.path.dirname(f), 'img') for f in self.anno_files
]
self.seq_names = [
os.path.basename(os.path.dirname(f)) for f in self.anno_files
]
# load subset sequence names
split_file = os.path.join(os.path.dirname(__file__), 'lasot.json')
with open(split_file, 'r') as f:
splits = json.load(f)
self.splits = splits
self.seq_names = []
for s in subset:
self.seq_names.extend(splits[s])
# Former seq_dirs/anno_files have been replaced by caching mechanism.
# See _ensure_cache for detail.
# image and annotation paths
# self.seq_dirs = [os.path.join(
# root_dir, n[:n.rfind('-')], n, 'img')
# for n in self.seq_names]
# self.anno_files = [os.path.join(
# os.path.dirname(d), 'groundtruth.txt')
# for d in self.seq_dirs]
self._ensure_cache()
self.seq_names = [
k for subset in self.subset
for k, _ in LaSOT.data_dict[subset].items()
]
self.seq_names = sorted(self.seq_names)
self.seq_datas = {
k: v
for subset in self.subset
for k, v in LaSOT.data_dict[subset].items()
}
def __getitem__(self, index):
r"""
Args:
index (integer or string): Index or name of a sequence.
Returns:
tuple: (img_files, anno) if ``return_meta`` is False, otherwise
(img_files, anno, meta), where ``img_files`` is a list of
file names, ``anno`` is a N x 4 (rectangles) numpy array, while
``meta`` is a dict contains meta information about the sequence.
"""
# if isinstance(index, six.string_types):
# if not index in self.seq_names:
# raise Exception('Sequence {} not found.'.format(index))
# index = self.seq_names.index(index)
if isinstance(index, int):
index = self.seq_names[index]
seq_data = self.seq_datas[index]
img_files = seq_data["img_files"]
anno = seq_data["anno"]
meta = seq_data["meta"]
# img_files = sorted(glob.glob(os.path.join(
# self.seq_dirs[index], '*.jpg')))
# anno = np.loadtxt(self.anno_files[index], delimiter=',')
        if self.return_meta:
            # meta was already loaded from the per-sequence cache above
            return img_files, anno, meta
else:
return img_files, anno
def __len__(self):
return len(self.seq_names)
def _check_integrity(self, root_dir):
seq_names = os.listdir(root_dir)
seq_names = [n for n in seq_names if not n[0] == '.']
if os.path.isdir(root_dir) and len(seq_names) > 0:
# check each sequence folder
for seq_name in seq_names:
seq_dir = os.path.join(root_dir, seq_name)
if not os.path.isdir(seq_dir):
print('Warning: sequence %s not exists.' % seq_name)
else:
# dataset not exists
raise Exception('Dataset not found or corrupted.')
def _fetch_meta(self, seq_dir):
seq_dir = os.path.dirname(seq_dir)
meta = {}
# attributes
for att in ['full_occlusion', 'out_of_view']:
att_file = os.path.join(seq_dir, att + '.txt')
meta[att] = np.loadtxt(att_file, delimiter=',')
# nlp
nlp_file = os.path.join(seq_dir, 'nlp.txt')
with open(nlp_file, 'r') as f:
meta['nlp'] = f.read().strip()
return meta
def _ensure_cache(self):
"""Perform all overheads related to cache (building/loading/check)
"""
# check if subset cache already exists in LaSOT.data_dict and is valid w.r.t. list.txt
if self._check_cache_for_current_subset():
return
# load subset cache into LaSOT.data_dict
cache_path = self._get_cache_path(cache_path=self.cache_path)
self.cache_path = cache_path
if all([os.path.isfile(p)
for p in self.cache_path.values()]) and not self.ignore_cache:
logger.info("{}: cache file exists: {} ".format(
LaSOT.__name__, cache_path))
self._load_cache_for_current_subset(cache_path)
if self._check_cache_for_current_subset():
logger.info(
"{}: record check has been processed and validity is confirmed for cache file: {} "
.format(LaSOT.__name__, cache_path))
return
else:
logger.info(
"{}: cache file {} not valid, rebuilding cache...".format(
LaSOT.__name__, cache_path))
# build subset cache in LaSOT.data_dict and cache to storage
self._build_cache_for_current_subset()
logger.info("{}: current cache file: {} ".format(
LaSOT.__name__, self.cache_path))
logger.info(
"{}: need to clean this cache file if you move dataset directory".
format(LaSOT.__name__))
logger.info(
"{}: consider cleaning this cache file in case of erros such as FileNotFoundError or IOError"
.format(LaSOT.__name__))
def _get_cache_path(self, cache_path: Dict[str, str] = None):
r"""Ensure cache_path.
If cache_path does not exist, turn to default set: root_dir/subset.pkl.
"""
        if (cache_path is None) or any(
                [not os.path.isfile(p) for p in cache_path.values()]):
logger.info(
"{}: passed cache file {} invalid, change to default cache path"
.format(LaSOT.__name__, cache_path))
cache_path = {
subset: os.path.join(self.root_dir, subset + ".pkl")
for subset in self.subset
}
return cache_path
def _check_cache_for_current_subset(self) -> bool:
r""" check if LaSOT.data_dict[subset] exists and contains all record in seq_names
"""
is_valid_data_dict = all([subset in LaSOT.data_dict for subset in self.subset]) and \
(set([seq_name for subset in self.subset for seq_name in LaSOT.data_dict[subset].keys()]) == set(self.seq_names))
return is_valid_data_dict
def _build_cache_for_current_subset(self):
r"""Build cache for current subset (self.subset)
"""
root_dir = self.root_dir
subset = self.subset
for s in subset:
logger.info("{}: start loading {}".format(LaSOT.__name__, s))
seq_names = self.splits[s]
for seq_name in tqdm(seq_names):
seq_dir = os.path.join(root_dir, seq_name[:seq_name.rfind('-')],
seq_name)
img_files, anno, meta = self.load_single_sequence(seq_dir)
LaSOT.data_dict[s][seq_name] = dict(img_files=img_files,
anno=anno,
meta=meta)
with open(self.cache_path[s], "wb") as f:
pickle.dump(LaSOT.data_dict[s], f)
logger.info("{}: dump cache file to {}".format(
LaSOT.__name__, self.cache_path[s]))
def _load_cache_for_current_subset(self, cache_path: Dict[str, str]):
for subset in self.subset:
assert os.path.exists(
cache_path[subset]
), "cache_path does not exist: %s " % cache_path[subset]
with open(cache_path[subset], "rb") as f:
LaSOT.data_dict[subset] = pickle.load(f)
logger.info("{}: loaded cache file {}".format(
LaSOT.__name__, cache_path[subset]))
def load_single_sequence(self, seq_dir):
img_files = sorted(glob.glob(os.path.join(seq_dir, 'img/*.jpg')))
anno = np.loadtxt(os.path.join(seq_dir, "groundtruth.txt"),
delimiter=',')
assert len(img_files) == len(anno)
if self.return_meta:
meta = self._fetch_meta(seq_dir)
return img_files, anno, meta
else:
return img_files, anno, None
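# Minimal usage sketch (a hedged example, not part of the original module; the
# dataset path below is an assumption and must point at a local LaSOT copy).
if __name__ == "__main__":
    dataset = LaSOT(root_dir="/data/LaSOT", subset="test", return_meta=False)
    print("sequences:", len(dataset))
    img_files, anno = dataset[0]       # or dataset["airplane-1"] by sequence name
    print(len(img_files), anno.shape)  # N image paths and an (N, 4) box array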
|
1616113
|
import itertools
import os
from typing import Dict, List
import dask
import fsspec
import pandas as pd
import xarray as xr
import zarr
awc_fill = 50 # mm
hist_time = slice("1950", "2014")
future_time = slice("2015", "2120")
chunks = {"time": -1, "x": 50, "y": 50}
skip_unmatched = True
# xy_region = {'x': slice(0, 100), 'y': slice(0, 100)}
xy_region = None
def get_cmip_runs(comp=True, unique=True, has_match=True):
with fsspec.open(
"az://carbonplan-downscaling/cmip6/ssps_with_matching_historical_members.csv",
mode="r",
account_name="carbonplan",
) as f:
df = pd.read_csv(f).drop(columns=["Unnamed: 0", "path"])
if has_match:
df = df[df.has_match]
df["comp"] = [
len(set(df[(df["model"] == d[1]["model"]) & (df["member"] == d[1]["member"])]["scenario"]))
== 4
for d in df.iterrows()
]
df["unique"] = [
d[1]["member"] == df[(df["model"] == d[1]["model"])]["member"].values[0]
for d in df.iterrows()
]
if comp and unique:
df = df[df.comp & df.unique]
elif comp and not unique:
df = df[df.comp]
elif unique and not comp:
df = df[df.unique]
return df[["model", "scenario", "member"]]
@dask.delayed(pure=True, traverse=False)
def finish_store(store, regions):
zarr.consolidate_metadata(store)
return store
@dask.delayed(pure=True, traverse=False)
def dummy_store(store):
print(store)
return store
def preprocess(ds: xr.Dataset) -> xr.Dataset:
"""preprocess datasets after loading them"""
if "month" in ds:
ds = ds.drop("month")
return ds
def load_coords(ds: xr.Dataset) -> xr.Dataset:
"""helper function to pre-load coordinates"""
return ds.update(ds[list(ds.coords)].load())
def maybe_slice_region(ds: xr.Dataset, region: Dict) -> xr.Dataset:
"""helper function to pull out region of dataset"""
if region:
return ds.isel(**region)
return ds
def get_slices(length: int, chunk_size: int) -> List:
"""helper function to create a list of slices along one axis"""
xi = range(0, length, chunk_size)
slices = [slice(left, right) for left, right in zip(xi, xi[1:])] + [slice(xi[-1], length + 1)]
return slices
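# For example (using the helper above): get_slices(10, 4) yields
# [slice(0, 4), slice(4, 8), slice(8, 11)], i.e. contiguous chunks covering
# indices 0..9, with the last slice absorbing the remainder.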
def get_regions(ds: xr.Dataset) -> xr.Dataset:
"""create a list of regions (dict of slices)"""
x_slices = get_slices(ds.dims["x"], chunks["x"])
y_slices = get_slices(ds.dims["y"], chunks["y"])
t_slices = [slice(None)]
keys = ["x", "y", "time"]
return [dict(zip(keys, s)) for s in itertools.product(x_slices, y_slices, t_slices)]
def get_store(bucket, prefix, account_key=None):
"""helper function to create a zarr store"""
if account_key is None:
account_key = os.environ.get("AccountKey", None)
store = zarr.storage.ABSStore(
bucket, prefix=prefix, account_name="cmip6downscaling", account_key=account_key
)
return store
|
1616140
|
import re
import sys
import warnings
from typing import Any, Callable, Generic, TypeVar, Union
import wrapt
from ..utils import misc
from ..utils.translations import trans
_T = TypeVar("_T")
class ReadOnlyWrapper(wrapt.ObjectProxy):
"""
Disable item and attribute setting with the exception of ``__wrapped__``.
"""
def __setattr__(self, name, val):
if name != '__wrapped__':
raise TypeError(
trans._(
'cannot set attribute {name}',
deferred=True,
name=name,
)
)
super().__setattr__(name, val)
def __setitem__(self, name, val):
raise TypeError(
trans._('cannot set item {name}', deferred=True, name=name)
)
_SUNDER = re.compile('^_[^_]')
class PublicOnlyProxy(wrapt.ObjectProxy, Generic[_T]):
"""Proxy to prevent private attribute and item access, recursively."""
__wrapped__: _T
def __getattr__(self, name: str):
if name.startswith("_"):
            # allow napari to access private attributes and get a non-proxy
            frame = sys._getframe(1) if hasattr(sys, "_getframe") else None
            if frame is not None and frame.f_code.co_filename.startswith(misc.ROOT_DIR):
return super().__getattr__(name)
typ = type(self.__wrapped__).__name__
warnings.warn(
trans._(
"Private attribute access ('{typ}.{name}') in this context (e.g. inside a plugin widget or dock widget) is deprecated and will be unavailable in version 0.5.0",
deferred=True,
name=name,
typ=typ,
),
category=FutureWarning,
stacklevel=2,
)
# name = f'{type(self.__wrapped__).__name__}.{name}'
# raise AttributeError(
# trans._(
# "Private attribute access ('{typ}.{name}') not allowed in this context.",
# deferred=True,
# name=name,
# typ=typ,
# )
# )
return self.create(super().__getattr__(name))
def __getitem__(self, key):
return self.create(super().__getitem__(key))
def __repr__(self):
return repr(self.__wrapped__)
def __dir__(self):
return [x for x in dir(self.__wrapped__) if not _SUNDER.match(x)]
@classmethod
def create(cls, obj: Any) -> Union['PublicOnlyProxy', Any]:
# restrict the scope of this proxy to napari objects
mod = getattr(type(obj), '__module__', None) or ''
if not mod.startswith('napari'):
return obj
if isinstance(obj, PublicOnlyProxy):
return obj # don't double-wrap
if callable(obj):
return CallablePublicOnlyProxy(obj)
return PublicOnlyProxy(obj)
class CallablePublicOnlyProxy(PublicOnlyProxy[Callable]):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
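# Minimal illustration (hypothetical, for reference only; this module uses relative
# imports so it cannot be run directly as a script). Wrapping an object directly
# with PublicOnlyProxy hides sunder names from dir() and warns on private access:
#
#     >>> class Thing:
#     ...     def __init__(self):
#     ...         self.public = 1
#     ...         self._private = 2
#     >>> proxy = PublicOnlyProxy(Thing())
#     >>> proxy.public
#     1
#     >>> '_private' in dir(proxy)   # sunder names are filtered from dir()
#     False
#     >>> proxy._private             # emits a FutureWarning outside napari code
#     2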
|
1616149
|
import os
import sys
import json
import torch
import logging
from tqdm import tqdm
from . import loader_utils
from ..constant import BOS_WORD, EOS_WORD, Tag2Idx
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label
# ------------------------------------------------------------------------------------------
def get_tag_label(start_end_pos, doc_length):
# flatten, rank, filter overlap for answer positions
sorted_positions = loader_utils.flat_rank_pos(start_end_pos)
filter_positions = loader_utils.strict_filter_overlap(sorted_positions)
if len(filter_positions) != len(sorted_positions):
overlap_flag = True
else:
overlap_flag = False
label = [Tag2Idx['O']] * doc_length
for s, e in filter_positions:
if s == e:
label[s] = Tag2Idx['U']
elif (e-s) == 1:
label[s] = Tag2Idx['B']
label[e] = Tag2Idx['E']
elif (e-s) >=2:
label[s] = Tag2Idx['B']
label[e] = Tag2Idx['E']
for i in range(s+1, e):
label[i] = Tag2Idx['I']
else:
logger.info('ERROR')
break
return {'label':label, 'overlap_flag':overlap_flag}
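# Worked example (illustrative; assumes start_end_pos flattens to the spans (1, 3) and (5, 5)):
# with doc_length=6 the BIOES-style labels become [O, B, I, E, O, U] (encoded via Tag2Idx),
# and overlap_flag is False because the spans do not overlap.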
def bert2tag_preprocessor(examples, tokenizer, max_token, pretrain_model, mode, max_phrase_words, stem_flag=False):
logger.info('start preparing (%s) features for bert2tag (%s) ...' % (mode, pretrain_model))
overlap_num = 0
new_examples = []
for idx, ex in enumerate(tqdm(examples)):
# tokenize
tokenize_output = loader_utils.tokenize_for_bert(doc_words=ex['doc_words'], tokenizer=tokenizer)
if len(tokenize_output['tokens']) < max_token:
max_word = max_token
else:
max_word = tokenize_output['tok_to_orig_index'][max_token-1] + 1
new_ex = {}
new_ex['url'] = ex['url']
new_ex['tokens'] = tokenize_output['tokens'][:max_token]
new_ex['valid_mask'] = tokenize_output['valid_mask'][:max_token]
new_ex['doc_words'] = ex['doc_words'][:max_word]
assert len(new_ex['tokens']) == len(new_ex['valid_mask'])
assert sum(new_ex['valid_mask']) == len(new_ex['doc_words'])
if mode == 'train':
parameter = {'start_end_pos': ex['start_end_pos'],
'doc_length': len(ex['doc_words'])}
# ------------------------------------------------
label_dict = get_tag_label(**parameter)
new_ex['label'] = label_dict['label'][:max_word]
assert sum(new_ex['valid_mask']) == len(new_ex['label'])
if label_dict['overlap_flag']:
overlap_num += 1
new_examples.append(new_ex)
    logger.info('Deleted overlapping keyphrases : %d (overlap / total = %.2f'
                % (overlap_num, float(overlap_num / len(examples) * 100)) + '%)')
return new_examples
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# batch batchfy
def bert2tag_converter(index, ex, tokenizer, mode, max_phrase_words):
''' convert each batch data to tensor ; add [CLS] [SEP] tokens ;'''
src_tokens = [BOS_WORD] + ex['tokens'] + [EOS_WORD]
valid_ids = [0] + ex['valid_mask'] + [0]
src_tensor = torch.LongTensor(tokenizer.convert_tokens_to_ids(src_tokens))
valid_mask = torch.LongTensor(valid_ids)
orig_doc_len = sum(valid_ids)
if mode == 'train':
label_tensor = torch.LongTensor(ex['label'])
return index, src_tensor, valid_mask, orig_doc_len, label_tensor
else:
return index, src_tensor, valid_mask, orig_doc_len
def batchify_bert2tag_features_for_train(batch):
''' train dataloader & eval dataloader .'''
ids = [ex[0] for ex in batch]
docs = [ex[1] for ex in batch]
valid_mask = [ex[2] for ex in batch]
doc_word_lens = [ex[3] for ex in batch]
label_list = [ex[4] for ex in batch]
bert_output_dim = 768
max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
# ---------------------------------------------------------------
# [1][2]src tokens tensor
doc_max_length = max([d.size(0) for d in docs])
input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
for i, d in enumerate(docs):
input_ids[i, :d.size(0)].copy_(d)
input_mask[i, :d.size(0)].fill_(1)
# ---------------------------------------------------------------
# valid mask tensor
valid_max_length = max([v.size(0) for v in valid_mask])
valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
for i, v in enumerate(valid_mask):
valid_ids[i, :v.size(0)].copy_(v)
# ---------------------------------------------------------------
# label tensor
labels = torch.LongTensor(len(label_list), max_word_len).zero_()
active_mask = torch.LongTensor(len(label_list), max_word_len).zero_()
for i, t in enumerate(label_list):
labels[i, :t.size(0)].copy_(t)
active_mask[i, :t.size(0)].fill_(1)
# -------------------------------------------------------------------
# [6] Empty Tensor : word-level max_len
valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
return input_ids, input_mask, valid_ids, active_mask, valid_output, labels, ids
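# Shape notes (a sketch of the intended contract, not authoritative): input_ids and
# input_mask are [batch, max_subtoken_len]; valid_ids marks the first sub-token of each
# word; labels and active_mask are [batch, max_word_len]; valid_output is a zero buffer
# of shape [batch, max_word_len, 768] to be filled with word-level BERT states downstream.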
def batchify_bert2tag_features_for_test(batch):
''' test dataloader for Dev & Public_Valid.'''
ids = [ex[0] for ex in batch]
docs = [ex[1] for ex in batch]
valid_mask = [ex[2] for ex in batch]
doc_word_lens = [ex[3] for ex in batch]
bert_output_dim = 768
    max_word_len = max(doc_word_lens)  # word-level
# ---------------------------------------------------------------
# [1][2]src tokens tensor
doc_max_length = max([d.size(0) for d in docs])
input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
for i, d in enumerate(docs):
input_ids[i, :d.size(0)].copy_(d)
input_mask[i, :d.size(0)].fill_(1)
# ---------------------------------------------------------------
# [3] valid mask tensor
valid_max_length = max([v.size(0) for v in valid_mask])
valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
for i, v in enumerate(valid_mask):
valid_ids[i, :v.size(0)].copy_(v)
# ---------------------------------------------------------------
# valid length tensor
active_mask = torch.LongTensor(len(doc_word_lens), max_word_len).zero_()
for i, l in enumerate(doc_word_lens):
active_mask[i, :l].fill_(1)
# -------------------------------------------------------------------
# [4] Empty Tensor : word-level max_len
valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
return input_ids, input_mask, valid_ids, active_mask, valid_output, doc_word_lens, ids
|
1616162
|
import json
import httpretty
import pytest
from instagram_api.client import Client
from instagram_api.request.request import ApiRequest
from instagram_api.constants import Constants
from instagram_api.utils.http import ClientCookieJar
@httpretty.activate
def test_send_request(instagram):
url = f'{Constants.API_URLS[1]}feed/timeline/'.replace('https', 'http')
httpretty.register_uri(
httpretty.GET,
url,
json.dumps({
'message': 'challenge_required',
'challenge': {
'url': 'https://i.instagram.com/challenge/16184608445/UD3hGZC5Fx/',
'api_path': '/challenge/16184608445/UD3hGZC5Fx/',
'hide_webview_header': True,
'lock': True,
'logout': False,
'native_flow': True
},
'status': 'fail',
'error_type': 'checkpoint_challenge_required'
})
)
c = instagram.client
# rb = ApiRequest()
# c = Client(None)
jar = ClientCookieJar()
jar.set('test', 'value')
# c2 = Client(None, cookies=jar)
response = c(url)
print(httpretty.last_request())
# response2 = c2(url, headers=rb.force_headers)
print(httpretty.last_request())
d = httpretty
print(response.status_code)
|
1616173
|
from data import DoomImage
import numpy as np
import time
from torch.utils.data import DataLoader
import tensorflow as tf
from tqdm import tqdm
from imageio import imwrite as imsave
from data import Places, PlacesRoom, PlacesOutdoor
from habitat_baselines.rl.models.resnet import ResNetEncoder
import habitat_baselines.rl.models.resnet as resnet
import torch
from gym.spaces import Box
from gym import spaces
import torch.nn.functional as F
torch.manual_seed(1)
from util import adjust_learning_rate, AverageMeter, accuracy
from tensorflow.python.platform import flags
import torch.nn as nn
import sys
import torch.optim as optim
import tensorboard_logger as tb_logger
import torchvision.models as models
FLAGS = flags.FLAGS
flags.DEFINE_bool('places_full', False, 'use all of places')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate')
flags.DEFINE_list('lr_decay_epochs', [30, 40, 50], 'epochs to decay learning rate')
flags.DEFINE_string('mode', 'crl', 'type of model to load')
flags.DEFINE_bool('policy', False, 'whether to use model or policy')
class PlacesLinear(nn.Module):
def __init__(self, classes):
super(PlacesLinear, self).__init__()
self.fc = nn.Linear(2048, classes)
def forward(self, inp):
logits = self.fc(inp)
return logits
class TorchVisionResNet50(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self, pretrained=True, spatial_output: bool = False
):
super().__init__()
self.device = torch.device('cuda')
self.resnet_layer_size = 2048
linear_layer_input_size = 0
self.cnn = models.resnet50(pretrained=pretrained)
self.layer_extract = self.cnn._modules.get("avgpool")
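        # A forward hook on this avgpool module (registered inside forward below) is used
        # to capture the 2048-d pooled features without modifying the torchvision model.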
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
on ImageNet. Sends through fully connected layer, activates, and
returns final embedding.
"""
def resnet_forward(observation):
resnet_output = torch.zeros(1, dtype=torch.float32, device=self.device)
def hook(m, i, o):
resnet_output.set_(o)
# output: [BATCH x RESNET_DIM]
h = self.layer_extract.register_forward_hook(hook)
self.cnn(observation)
h.remove()
return resnet_output
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]
rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
rgb_observations = rgb_observations / 255.0 # normalize RGB
resnet_output = resnet_forward(rgb_observations.contiguous())
return resnet_output
def set_optimizer(args, classifier):
# optimizer = optim.SGD(classifier.parameters(),
# lr=args.learning_rate,
# momentum=0.9,
# weight_decay=0.0)
# if args.policy:
# optimizer = optim.Adam(classifier.parameters(),
# lr=1e-4)
# else:
optimizer = optim.Adam(classifier.parameters(),
lr=1e-3)
return optimizer
def train(epoch, train_loader, model, classifier, criterion, optimizer):
"""
one epoch training
"""
model.eval()
classifier.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for idx, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
input = input.float()
input = input.cuda()
target = target.cuda().long()
# ===================forward=====================
with torch.no_grad():
im = input * 255
im = im.float()
im = im.cuda()
feat = model({'rgb': im})
feat = feat.mean(dim=2).mean(dim=2)
output = classifier(feat)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# ===================backward=====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ===================meters=====================
batch_time.update(time.time() - end)
end = time.time()
# print info
if idx % 10 == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, idx, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
return top1.avg, top5.avg, losses.avg
def validate(val_loader, model, classifier, criterion):
"""
evaluation
"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
classifier.eval()
with torch.no_grad():
end = time.time()
for idx, (input, target) in enumerate(val_loader):
input = input.float()
input = input.cuda()
target = target.cuda().long()
im = input * 255
im = im.float()
im = im.cuda()
feat = model({'rgb': im})
feat = feat.mean(dim=2).mean(dim=2)
output = classifier(feat)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % 10 == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
idx, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg, losses.avg
if __name__ == "__main__":
resnet_baseplanes = 32
backbone = "resnet50"
if FLAGS.places_full:
classes = 205
data_fn = Places
else:
classes = 59
data_fn = PlacesRoom
rgb_box = Box(0, 255, (256, 256, 3))
model = ResNetEncoder(
spaces.Dict({"rgb": rgb_box}),
baseplanes=resnet_baseplanes*2,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=True,
obs_transform=None,
backbone_only=True,
dense=True
)
# ckpt = torch.load("/private/home/yilundu/sandbox/habitat/habitat-lab/checkpoints/curiosity_pointnav_pretrain_resnet50_301_resume/curiosity_pointnav_pretrain/curiosity_pointnav_pretrain.16.pth")
if FLAGS.mode != "random" and FLAGS.mode != "imagenet":
if FLAGS.mode == "crl":
ckpt = torch.load("/private/home/yilundu/sandbox/habitat/habitat-lab/checkpoints/curiosity_pointnav_pretrain_resnet50_301_resume/curiosity_pointnav_pretrain/curiosity_pointnav_pretrain.16.pth")
elif FLAGS.mode == "rnd":
ckpt = torch.load("/private/home/yilundu/sandbox/habitat/habitat-lab/checkpoints/rnd_pointnav_pretrain_resnet50_301/rnd_pointnav_pretrain/rnd_pointnav_pretrain.16.pth")
elif FLAGS.mode == "atc":
ckpt = torch.load("/private/home/yilundu/sandbox/habitat/habitat-lab/checkpoints/atc_real_pointnav_pretrain_resnet50_301/atc_pointnav_pretrain/atc_pointnav_pretrain.16.pth")
elif FLAGS.mode == "pointnav":
ckpt = torch.load("/private/home/yilundu/sandbox/habitat/habitat-lab/checkpoints/baseline_pointnav_resnet50_301/baseline_pointnav_pretrain/baseline_pointnav_pretrain.16.pth")
state_dict = ckpt['state_dict']
weights_new = {}
for k, v in state_dict.items():
split_layer_name = k.split(".")[2:]
if len(split_layer_name) == 0:
continue
if FLAGS.policy:
if "visual_resnet" == split_layer_name[0]:
layer_name = ".".join(split_layer_name[1:])
weights_new[layer_name] = v
else:
if "model_encoder" == split_layer_name[0]:
layer_name = ".".join(split_layer_name[1:])
weights_new[layer_name] = v
model.load_state_dict(weights_new, strict=False)
else:
model = TorchVisionResNet50()
model = model.cuda()
model = model.eval()
classifier = PlacesLinear(classes).cuda()
optimizer = set_optimizer(FLAGS, classifier)
    # use the dataset selected by --places_full (assumes Places and PlacesRoom share the same constructor signature)
    train_data = data_fn(train=True)
    test_data = data_fn(train=False)
train_dataloader = DataLoader(train_data, batch_size=256, num_workers=6, shuffle=True, drop_last=True)
val_dataloader = DataLoader(test_data, batch_size=256, num_workers=6, shuffle=True, drop_last=False)
criterion = nn.CrossEntropyLoss().cuda()
logger = tb_logger.Logger(logdir="linear_finetune", flush_secs=2)
for epoch in range(60):
# adjust_learning_rate(epoch, FLAGS, optimizer)
print("==> training...")
train_acc, train_acc5, train_loss = train(epoch, train_dataloader, model, classifier, criterion, optimizer)
logger.log_value('train_acc', train_acc, epoch)
logger.log_value('train_acc5', train_acc5, epoch)
logger.log_value('train_loss', train_loss, epoch)
print("==> testing...")
test_acc, test_acc5, test_loss = validate(val_dataloader, model, classifier, criterion)
logger.log_value('test_acc', test_acc, epoch)
logger.log_value('test_acc5', test_acc5, epoch)
logger.log_value('test_loss', test_loss, epoch)
|
1616273
|
import cadquery
from copy import copy
import logging
from cqparts.utils import CoordSystem
from cqparts.utils.misc import property_buffered
from . import _casting
log = logging.getLogger(__name__)
# --------------------- Effect ----------------------
class Effect(object):
pass
class VectorEffect(Effect):
"""
An evaluator effect is the conclusion to an evaluation with regard to
a single solid.
Effects are sortable (based on proximity to evaluation origin)
"""
def __init__(self, location, part, result):
"""
:param location: where the fastener is to be applied (eg: for a screw
application will be along the -Z axis)
:type location: :class:`CoordSystem`
        :param part: affected solid
:type part: cadquery.Workplane
:param result: result of evaluation
:type result: cadquery.Workplane
"""
self.location = location
self.part = part
self.result = result
@property
def start_point(self):
"""
Start vertex of effect
:return: vertex (as vector)
:rtype: :class:`cadquery.Vector`
"""
edge = self.result.wire().val().Edges()[0]
return edge.Vertices()[0].Center()
@property
def start_coordsys(self):
"""
Coordinate system at start of effect.
All axes are parallel to the original vector evaluation location, with
the origin moved to this effect's start point.
:return: coordinate system at start of effect
        :rtype: :class:`CoordSystem`
"""
coordsys = copy(self.location)
coordsys.origin = self.start_point
return coordsys
@property
def end_point(self):
"""
End vertex of effect
:return: vertex (as vector)
:rtype: :class:`cadquery.Vector`
"""
edge = self.result.wire().val().Edges()[-1]
return edge.Vertices()[-1].Center()
@property
def end_coordsys(self):
"""
Coordinate system at end of effect.
All axes are parallel to the original vector evaluation location, with
the origin moved to this effect's end point.
:return: coordinate system at end of effect
        :rtype: :class:`CoordSystem`
"""
coordsys = copy(self.location)
coordsys.origin = self.end_point
return coordsys
@property
def origin_displacement(self):
"""
planar distance of start point from self.location along :math:`-Z` axis
"""
return self.start_point.sub(self.location.origin).dot(-self.location.zDir)
@property
def wire(self):
edge = cadquery.Edge.makeLine(self.start_point, self.end_point)
return cadquery.Wire.assembleEdges([edge])
@property
def _wire_wp(self):
"""Put self.wire in it's own workplane for display purposes"""
return cadquery.Workplane('XY').newObject([self.wire])
# bool
def __bool__(self):
if self.result.edges().objects:
return True
return False
__nonzero__ = __bool__
# Comparisons
def __lt__(self, other):
return self.origin_displacement < other.origin_displacement
def __le__(self, other):
return self.origin_displacement <= other.origin_displacement
def __gt__(self, other):
return self.origin_displacement > other.origin_displacement
def __ge__(self, other):
return self.origin_displacement >= other.origin_displacement
# --------------------- Evaluator ----------------------
class Evaluator(object):
"""
    An evaluator determines which parts may be affected by a fastener, and how.
"""
# Constructor
def __init__(self, parts, parent=None):
"""
:param parts: parts involved in fastening
:type parts: list of :class:`cqparts.Part`
:param parent: parent object
:type parent: :class:`Fastener <cqparts_fasteners.fasteners.base.Fastener>`
"""
# All evaluators will take a list of parts
self.parts = parts
self.parent = parent
def perform_evaluation(self):
"""
Evaluate the given parts using any additional parameters passed
to this instance.
.. note::
Override this function in your *evaluator* class to assess what
            parts are affected, and how.
Default behaviour: do nothing, return nothing
:return: ``None``
"""
return None
@property_buffered
def eval(self):
"""
Return the result of :meth:`perform_evaluation`, and buffer it so it's
only run once per :class:`Evaluator` instance.
:return: result from :meth:`perform_evaluation`
"""
return self.perform_evaluation()
class VectorEvaluator(Evaluator):
effect_class = VectorEffect
def __init__(self, parts, location, parent=None):
"""
:param parts: parts involved in fastening
:type parts: list of :class:`cqparts.Part`
:param location: where the fastener is to be applied (eg: for a screw
application will be along the -Z axis)
:type location: :class:`CoordSystem`
:param parent: parent object
:type parent: :class:`Fastener <cqparts_fasteners.fasteners.base.Fastener>`
**Location**
The orientation of ``location`` may not be important; it may be for a
basic application of a screw, in which case the :math:`-Z` axis will be
        used to perform the evaluation, and the :math:`X` and :math:`Y` axes are
of no consequence.
For *some* fasteners, the orientation of ``location`` will be
important.
"""
super(VectorEvaluator, self).__init__(
parts=parts,
parent=parent,
)
self.location = location
@property_buffered
def max_effect_length(self):
"""
:return: The longest possible effect vector length.
:rtype: float
In other words, the *radius* of a sphere:
        - whose center is at ``start``.
- all ``parts`` are contained within the sphere.
"""
# Method: using each solid's bounding box:
# - get vector from start to bounding box center
# - get vector from bounding box center to any corner
# - add the length of both vectors
# - return the maximum of these from all solids
def max_length_iter():
for part in self.parts:
if part.local_obj.findSolid():
bb = part.local_obj.findSolid().BoundingBox()
yield abs(bb.center - self.location.origin) + (bb.DiagonalLength / 2)
try:
return max(max_length_iter())
        except ValueError:
# if iter returns before yielding anything
return 0
def perform_evaluation(self):
"""
        Determine which parts lie along the given vector, and over what length.
:return: effects on the given parts (in order of the distance from
the start point)
:rtype: list(:class:`VectorEffect`)
"""
# Create effect vector (with max length)
if not self.max_effect_length:
# no effect is possible, return an empty list
return []
edge = cadquery.Edge.makeLine(
self.location.origin,
self.location.origin + (self.location.zDir * -(self.max_effect_length + 1)) # +1 to avoid rounding errors
)
wire = cadquery.Wire.assembleEdges([edge])
wp = cadquery.Workplane('XY').newObject([wire])
effect_list = [] # list of self.effect_class instances
for part in self.parts:
solid = part.world_obj.translate((0, 0, 0))
intersection = solid.intersect(copy(wp))
effect = self.effect_class(
location=self.location,
part=part,
result=intersection,
)
if effect:
effect_list.append(effect)
return sorted(effect_list)
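# Illustrative usage (hedged): VectorEvaluator(parts, location).eval returns VectorEffect
# instances sorted nearest-first along the -Z axis of `location`.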
class CylinderEvaluator(Evaluator):
effect_class = VectorEffect
|
1616329
|
import configparser
import os
from warnings import warn
from .constants import globalconfigfile, localconfigfile
from . import backends
defaults = {
'DEFAULT': {
'header': '',
'footer': '',
},
'slurm': {},
'gridengine': {},
}
def ensure_initialized():
settings = configparser.ConfigParser()
settings.read_dict(defaults)
if not os.path.exists(localconfigfile):
with open(localconfigfile, 'w') as config_file:
settings.write(config_file)
def print_config(config):
for section in config.sections():
print('[{}]'.format(section))
for key, value in dict(config[section]).items():
quote = '"""' if '\n' in value else ''
print('{} = {}{}{}'.format(key, quote, value, quote))
print()
def get_active_config():
ensure_initialized()
settings = configparser.ConfigParser()
settings.read(globalconfigfile)
settings.read(localconfigfile)
return settings
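# Illustrative usage (hedged): settings = get_active_config();
# settings['slurm'].get('header', '') reads a key seeded from the `defaults` dict above.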
def config(args):
if args.read and args.write:
raise RuntimeError('Cannot read and write at the same time.')
if args.read and args.global_ and args.local:
args.global_ = False
args.local = False
if args.write and args.global_ and args.local:
raise RuntimeError('Cannot write to global and local config files simultaneously.')
settings = configparser.ConfigParser()
ensure_initialized()
settings.read_dict(defaults)
if args.global_ or not args.local:
settings.read(globalconfigfile)
if args.local or not args.global_:
settings.read(localconfigfile)
if args.read:
print_config(settings)
elif args.write:
for (backend, key, value) in args.write:
if backend not in backends.__all__:
warn('Unable to set {}.{}={} (invalid backend: {}).'.format(backend, key, value, backend))
continue
# if key not in settings[backend].keys():
# warn('Unable to set {}.{}={} (invalid key: {}).'.format(backend, key, value, key))
# continue
settings[backend][key] = value
if args.global_:
with open(globalconfigfile, 'w') as config_file:
settings.write(config_file)
else:
with open(localconfigfile, 'w') as config_file:
settings.write(config_file)
|
1616342
|
import threading
import time
import httplib
import json
import os
import datetime
# TODO put this in a properties file
mem_url = "/cat?href=/device/mem/"
cpu_url = "/cat?href=/device/cpu/"
meta_url = "/cat?href=/device/meta/"
ip_url = "/cat?href=/device/ip/"
# Till here
class resource_updater:
registry_url = ""
registry_port = None
device_uuid = 0
update_frequency = 60
usage_file = '/app/resource_usage.log'
def __init__(self, registry_url, registry_port, device_uuid, update_frequency, usage_file):
self.registry_port = registry_port
self.registry_url = registry_url
self.device_uuid = device_uuid
self.update_frequency = update_frequency
self.usage_file = usage_file
def get_cpu_usage(self):
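        # Parses /proc/stat: usage = (user + system) * 100 / (user + system + idle),
        # an approximate figure that ignores the nice/iowait/irq columns.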
return str(round(float(os.popen('''grep 'cpu ' /proc/stat | awk '{usage=($2+$4)*100/($2+$4+$5)} END {print usage }' ''').readline()),2))
def get_mem_usage(self):
tot_m, used_m, free_m = map(int, os.popen('free -t -m').readlines()[-1].split()[1:])
return str((used_m * 1.0) / tot_m * 100)
def get_cpu_payload(self, id):
cpu_data = dict()
cpu_data['item-metadata'] = [
{
"val": "CPU Meta Data",
"rel": "urn:X-hypercat:rels:hasDescription:en"
},{
"val": self.get_cpu_usage(),
"rel": "CPUUtil"
}
]
cpu_data['href'] = '/device/cpu/' + id
return json.dumps(cpu_data)
def get_mem_payload(self, id):
mem_data = dict()
mem_data['href'] = '/device/mem/' + id
mem_data['item-metadata'] = [
{
"val": "Memory Meta Data",
"rel": "urn:X-hypercat:rels:hasDescription:en"
},{
"val": self.get_mem_usage(),
"rel": "MemUtil"
}
]
return json.dumps(mem_data)
def update_registry(self, url, id, payload):
conn = httplib.HTTPConnection(self.registry_url, self.registry_port)
#header = {'Content-type': 'text/plain', 'Accept-Language': 'en-US,en;q=0.5'}
conn.request('POST', url + id, payload )
response = conn.getresponse()
print self.registry_url
print self.registry_port
print url
print id
print payload
print response.status
def update_loop_trigger(self):
while True:
self.update_registry(mem_url, self.device_uuid, self.get_mem_payload(self.device_uuid))
self.update_registry(cpu_url, self.device_uuid, self.get_cpu_payload(self.device_uuid))
print "should be updated right?"
#do the update
with open(self.usage_file, 'a') as myfile:
for i in range(4):
timestamp = str(datetime.datetime.now()).split('.')[0]
myfile.write('{} CPU usage = {} MEM usage = {}\n'.format(timestamp, self.get_cpu_usage(), self.get_mem_usage()))
time.sleep(self.update_frequency/4)
myfile.flush()
def start_updater(self):
thread = threading.Thread(target=self.update_loop_trigger)
return thread.start()
def register_device(self, address):
meta_data = dict()
meta_data['item-metadata'] = [
{
"val":"Device Meta Data",
"rel": "urx:X-hypercat:rels:hasDescription:en"
},{
"val": self.device_uuid,
"rel": "DeviceUUID"
},{
"val": "400",
"rel": "Total CPU Available"
},{
"val": "1000",
"rel": "Total Memory Available"
},{
"val": "1",
"rel": "isAccelerated"
}
]
meta_data['href'] = '/device/meta/' + self.device_uuid
ip_data = dict()
ip_data['item-metadata'] = [
{
"val": "IP",
"rel": "urn:X-hypercat:rels:hasDescription:en"
},{
"val": address,
"rel": "IP"
}
]
ip_data['href'] = '/device/ip/' + self.device_uuid
conn = httplib.HTTPConnection(self.registry_url, self.registry_port)
#header = {'Content-type': 'text/plain', 'Accept-Language': 'en-US,en;q=0.5'}
conn.request('POST', meta_url + self.device_uuid, json.dumps(meta_data))
conn = httplib.HTTPConnection(self.registry_url, self.registry_port)
conn.request('POST', ip_url + self.device_uuid, json.dumps(ip_data))
|
1616354
|
from django import template
register = template.Library()
@register.filter
def diff(value, arg):
"""subtract arg from value
:param value: origin value
:type value: int
:param arg: value to be subtracted
:type arg: int
:return: result of subtraction
:rtype: int
"""
return value - arg
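# Illustrative template usage (hedged): {{ total|diff:consumed }} renders total - consumed.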
|
1616357
|
import re
import bs4
import requests
class Basic:
def parse(self, link):
if link['type'] == 'stickers':
return self.parse_stickers(link['link'])
if link['type'] == 'user':
return self.parse_user(link['link'])
if link['type'] == 'bot':
return self.parse_bot(link['link'])
raise NotImplementedError(f'Type {link["type"]} not implemented')
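    # Expected input (illustrative): {'type': 'user', 'link': 'durov'}; the parse_* methods
    # return a dict with the resolved https://t.me/ URL and detected type, or None.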
def parse_user(self, link):
url = 'https://t.me/' + link # getting data from link
r = requests.get(url, stream=True)
soup = bs4.BeautifulSoup(r.text, "lxml", )
type_link = str(soup.find_all('a', class_="tgme_action_button_new"))
members_str = str(soup.find_all('div', class_="tgme_page_extra"))
if 'Preview channel' in type_link:
return {
'type': 'channel',
'link': url
}
if 'Preview channel' not in type_link and 'members' in members_str: # check for group
return {
'type': 'group',
'link': url
}
if 'tgme_action_button_new' in type_link and 'member' not in members_str and 'Send Message' in type_link:
return {
'type': 'user',
'link': url
}
return None
def parse_stickers(self, link):
url_stickers = 'https://t.me/addstickers/' + link # getting data from link
r_stickers = requests.get(url_stickers, stream=True)
soup_stickers = bs4.BeautifulSoup(r_stickers.text, "lxml", )
type_link = str(soup_stickers.find_all('div', class_="tgme_page_description")).replace(u'\xa0', ' ').replace(';',
':')
if re.search('Sticker Set', type_link): # check for channel
return None
return {
'type': 'stickers',
'link': url_stickers
}
def parse_bot(self, link):
url_bot = 'https://t.me/' + link
r_bot = requests.get(url_bot, stream=True)
soup_bot = bs4.BeautifulSoup(r_bot.text, "lxml", )
type_link = soup_bot.find_all('div', class_="tgme_page_extra")
        if type_link:
return {
'type': 'bot',
'link': url_bot
}
return None
class FullInfo:
def parse(self, link):
if link['type'] == 'stickers':
return self.parse_stickers(link['link'])
if link['type'] == 'user':
return self.parse_user(link['link'])
if link['type'] == 'bot':
return self.parse_bot(link['link'])
raise NotImplementedError(f'Type {link["type"]} not implemented')
def parse_user(self, link):
title = None
description = None
members = None
url = 'https://t.me/' + link # getting data from link
s = requests.Session()
r = s.get(url, stream=True)
soup = bs4.BeautifulSoup(r.text, "lxml", )
type_link = str(soup.find_all('a', class_="tgme_action_button_new"))
members_str = str(soup.find_all('div', class_="tgme_page_extra"))
try:
title = str(soup.find('div', class_="tgme_page_title").text)[
1:-1].replace(';', ':')
try:
description = str(
soup.find('div', class_="tgme_page_description").text).replace(';', ':')
            except AttributeError:
                pass
except AttributeError:
return None
if re.search('Preview channel', type_link): # check for channel
            members_int = re.findall(r'\d+', members_str)
            members = ''.join(members_int) or '0'
return {
'type': 'channel',
'link': url,
'title': title,
'description': description,
'members': members
}
if 'Preview channel' not in type_link and 'members' in members_str: # check for group
            members_str = members_str.split(',')[0]
            members_int = re.findall(r'\d+', members_str)
            members = ''.join(members_int) or '0'
return {
'type': 'group',
'link': url,
'title': title,
'description': description,
'members': members
}
if 'tgme_action_button_new' in type_link and 'member' not in members_str and 'Send Message' in type_link:
return {
'type': 'user',
'link': url,
'title': title,
'description': description
}
return None
def parse_stickers(self, link):
url_stickers = 'https://t.me/addstickers/' + link # getting data from link
r_stickers = requests.get(url_stickers, stream=True)
soup_stickers = bs4.BeautifulSoup(r_stickers.text, "lxml", )
type_link = str(soup_stickers.find_all('div', class_="tgme_page_description")).replace(u'\xa0', ' ').replace(';',
':')
if re.search('Sticker Set', type_link):
return None
start_name = [(m.start(0), m.end(0))
for m in re.finditer("<strong>", type_link)][1][1]
end_name = [(m.start(0), m.end(0))
for m in re.finditer("</strong>", type_link)][1][0]
title_stickers = type_link[start_name:end_name]
return {
'type': 'stickers',
'link': url_stickers,
'title': title_stickers
}
def parse_bot(self, link):
url_bot = 'https://t.me/' + link
r_bot = requests.get(url_bot, stream=True)
soup_bot = bs4.BeautifulSoup(r_bot.text, "lxml", )
type_link = soup_bot.find_all('div', class_="tgme_page_extra")
        if type_link:
title_bot = soup_bot.find('div', class_='tgme_page_title').text
try:
description_bot = soup_bot.find(
'div', class_='tgme_page_description').text
            except AttributeError:
description_bot = None
return {
'type': 'bot',
'link': url_bot,
'title': title_bot,
'description': description_bot
}
return None
|
1616409
|
import json
import os
import shutil
import subprocess
from conftest import edl
def test_write_chapters_to_file(intro_file, tmpdir, monkeypatch):
def check_chapter(cmd):
text = subprocess.check_output(cmd)
chapter_info = json.loads(text)
if chapter_info.get('chapters'):
return True
return False
if os.name == 'nt':
fn = '"%s"' % str(intro_file)
else:
fn = str(intro_file)
    # Confirm that no chapters exist in that file.
base_cmd = ['ffprobe', '-i', fn, '-print_format', 'json', '-show_chapters', '-loglevel', 'error']
if os.name == 'nt':
cmd = '%s' % ' '.join(base_cmd)
else:
cmd = base_cmd
assert check_chapter(cmd) is False
f = tmpdir.join('hello.mkv')
shutil.copyfile(intro_file, f)
modified_file = edl.write_chapters_to_file(str(f), input_edl={"intro": [1, 2, 3]})
new_cmd = list(base_cmd)
if os.name == 'nt':
fn_mod = '"%s"' % str(modified_file)
else:
fn_mod = str(modified_file)
new_cmd[2] = fn_mod
if os.name == 'nt':
new_cmd = '%s' % ' '.join(new_cmd)
assert check_chapter(new_cmd)
|
1616411
|
import sys
import argparse
import os
from video_classification.generator.attention_cnn_lstm_classifer import BidirectionalLSTMVideoClassifier
def check_args(args):
if not os.path.exists(args.model_path):
        print('Model path {} does not exist, please check.'.format(args.model_path))
exit(1)
if not os.path.exists(args.video_path):
        print('Video path {} does not exist, please check.'.format(args.video_path))
exit(1)
if not os.path.exists(args.config_path):
        print('Config file {} does not exist, please check.'.format(args.config_path))
exit(1)
return args
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple script for attention integrated CNN + LSTM video classification')
    parser.add_argument('cnn_model', help='Specify which CNN model is used (VGG16/VGG19/InceptionV3/Resnet50/Xception)')
parser.add_argument('model_path', help='Specify the model path')
parser.add_argument('video_path', help='Specify the input video path')
parser.add_argument('config_path', help='Specify the config file path')
return parser.parse_args(args)
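# Example invocation (script and file names are illustrative):
#   python predict.py VGG16 models/bilstm.h5 videos/sample.mp4 config.json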
def main(args=None):
if args is None:
args = sys.argv[1:]
    args = check_args(parse_args(args))
classifier = BidirectionalLSTMVideoClassifier(args.cnn_model, args.model_path)
predicted_label = classifier.predict(args.video_path, args.config_path)
print('{} belongs to {}'.format(args.video_path, predicted_label))
if __name__ == '__main__':
main()
|
1616427
|
from tbparser.events_reader import EventsFileReader, EventReadingError
from tbparser.summary_reader import SummaryReader
from tbparser.version import __version__
__all__ = [
'EventsFileReader',
'EventReadingError',
'SummaryReader',
'__version__',
]
|
1616433
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScLagunaSpider(FecamGazetteSpider):
name = "sc_laguna"
FECAM_QUERY = "cod_entidade:146"
TERRITORY_ID = "4209409"
|
1616453
|
import json
import logging
import os
import re
import requests
import retrying
import shakedown
from dcos import marathon
log = logging.getLogger(__name__)
logging.basicConfig(format='[%(levelname)s] %(message)s', level='INFO')
def get_json(file_name):
""" Retrieves json app definitions for Docker and UCR backends.
"""
with open(file_name) as f:
return json.load(f)
def find_app_port(config, app_name):
""" Finds the port associated with the app in haproxy_getconfig.
This is done through regex pattern matching.
"""
pattern = re.search(r'{0}(.+?)\n bind .+:\d+'.format(app_name), config)
return pattern.group()[-5:]
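# Note: the regex captures the app's frontend block up to its `bind` line; taking the
# last five characters assumes the HAProxy service port is always five digits.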
@retrying.retry(stop_max_delay=10000)
def get_app_port(app_name, ip):
""" Returns the port that the app is configured on.
"""
get_config = requests.get('http://{}:9090/_haproxy_getconfig'.format(ip))
port = find_app_port(get_config.content.decode("utf-8"), app_name)
return port
@retrying.retry(stop_max_delay=10000)
def get_app_content(app_port, ip):
""" Returns the content of the app.
"""
get_port = requests.get('http://{}:{}'.format(ip, app_port))
return (get_port.content.decode("utf-8").rstrip(), get_port.status_code)
def test_backends():
""" Tests Marathon-lb against a number of Docker and UCR backends.
All backends are defined in backends/ & backends_1.9/.
The test retrieves the port to which each app is bound on.
This is done through retrieving the port from _haproxy_getconfig.
Each app is configured to display its id as content if launched healthy.
The test asserts whether the text response matches the expected response.
"""
public_ip = os.environ['PUBLIC_AGENT_IP']
if os.environ['DCOS_VERSION'] == '1.9':
app_defs = [get_json('backends_1.9/' + filename)
for filename in os.listdir('backends_1.9/')]
else:
app_defs = [get_json('backends/' + filename)
for filename in os.listdir('backends/')]
for app_def in app_defs:
app_id = app_def['id']
app_name = app_id[1:] if app_id[0] == '/' else app_id
print(app_name)
log.info('{} is being tested.'.format(app_name))
client = marathon.create_client()
client.add_app(app_def)
shakedown.deployment_wait(app_id=app_id)
app = client.get_app(app_id)
tasks = app['tasksRunning']
instances = app_def['instances']
assert tasks == instances, ("Number of tasks is {}, {} were expected."
.format(tasks, instances))
log.info('Number of tasks for {} is {}'.format(app_name, tasks))
port = get_app_port(app_name, public_ip)
expected_port = app_def["labels"]["HAPROXY_0_PORT"]
msg = "{} bound to {}, not {}.".format(app_name, port, expected_port)
assert port == expected_port, msg
log.info('{} is bound to port {}.'.format(app_name, port))
text_response, status_code = get_app_content(port, public_ip)
expected_response = app_name
msg = "Response is {}, not {}".format(text_response, expected_response)
if status_code == 200:
assert text_response == expected_response, msg
log.info('Text response is {}.'.format(text_response))
|