id | content
|---|---|
11558052
|
import pytest
import numpy as np
from astropy.time import Time
from ..astropy import _checkTime
def test__checkTime():
# Create an array of epochs
times = np.linspace(59580, 59590, 100)
# Test that an error is raised when times are not
# an astropy time object
with pytest.raises(TypeError):
_checkTime(times, "test")
    # Test that _checkTime passes when an astropy time object is
    # given as intended
    times_astropy = Time(times, format="mjd", scale="utc")
    _checkTime(times_astropy, "test")
|
11558074
|
import os
import time
import glob
import pytest
import logging
import nbformat
import subprocess
from nbconvert.preprocessors import ExecutePreprocessor
logging.basicConfig(level=logging.INFO)
EXECUTE_NOTEBOOKS = []
AVOID_NOTEBOOKS = [
'docs/examples/advanced_use_cases/building_a_dashboard.ipynb',
'docs/examples/advanced_use_cases/combining_two_datasets.ipynb',
'docs/examples/advanced_use_cases/revenue_prediction.ipynb',
'docs/examples/advanced_use_cases/territory_management_1layer.ipynb',
'docs/examples/advanced_use_cases/territory_management_2layers.ipynb',
'docs/examples/data_management/change_carto_table_privacy.ipynb',
'docs/examples/data_observatory/do_access_premium_data.ipynb',
'docs/examples/data_observatory/do_data_enrichment.ipynb',
'docs/examples/data_observatory/do_dataset_notebook_template.ipynb',
'docs/examples/data_observatory/do_geography_notebook_template.ipynb',
'docs/examples/data_visualization/publish_and_share/publish_visualization_gdf.ipynb',
'docs/examples/data_visualization/publish_and_share/publish_visualization_layout.ipynb',
'docs/examples/data_visualization/publish_and_share/publish_visualization_private_table.ipynb',
'docs/examples/data_visualization/publish_and_share/publish_visualization_public_table.ipynb',
]
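# Execution is configured via environment variables; an illustrative invocation
# (the exact pytest path is an assumption, not from this file):
#   OVERWRITE=false TIMEOUT=1200 SCOPE=guides pytest tests/notebooks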
OVERWRITE = os.environ.get('OVERWRITE', 'true').lower() == 'true'
TIMEOUT = int(os.environ.get('TIMEOUT', 600))
KERNEL = os.environ.get('KERNEL', 'python3').lower()
SCOPE = os.environ.get('SCOPE', 'all').lower()
with open('tests/notebooks/creds.json', 'r') as creds_file:
CREDS_FILE = creds_file.read()
def find_notebooks():
notebooks = []
if EXECUTE_NOTEBOOKS:
notebooks = list(set(EXECUTE_NOTEBOOKS) - set(AVOID_NOTEBOOKS))
else:
if SCOPE in ['all', 'guides']:
notebooks += glob.glob('docs/guides/**/*.ipynb', recursive=True)
if SCOPE in ['all', 'examples']:
notebooks += glob.glob('docs/examples/**/*.ipynb', recursive=True)
notebooks = list(set(notebooks) - set(AVOID_NOTEBOOKS))
notebooks.sort()
return notebooks
class TestNotebooks:
def teardown(self):
time.sleep(0.1)
def custom_setup(self, path):
with open('{}/creds.json'.format(path), 'w') as creds_file:
creds_file.write(CREDS_FILE)
def custom_teardown(self, path):
os.remove('{}/creds.json'.format(path))
@pytest.mark.parametrize('notebook_filename', find_notebooks())
def test_docs(self, notebook_filename):
try:
path = os.path.dirname(notebook_filename)
self.custom_setup(path)
self.execute_notebook(notebook_filename, path)
finally:
self.custom_teardown(path)
def execute_notebook(self, notebook_filename, path):
with open(notebook_filename) as f:
logging.info('\nExecuting notebook: %s', notebook_filename)
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=TIMEOUT, kernel_name=KERNEL, allow_errors=OVERWRITE,
store_widget_state=OVERWRITE, record_timing=False)
ep.preprocess(nb, {'metadata': {'path': path}})
if OVERWRITE:
logging.info('Overwriting notebook: %s', notebook_filename)
with open(notebook_filename, 'w') as fwrite:
nbformat.write(nb, fwrite)
logging.info('Trusting notebook: %s', notebook_filename)
p_jupyter = subprocess.Popen('jupyter trust {}'.format(notebook_filename), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr_jupyter = p_jupyter.communicate()
if len(stderr_jupyter) > 0:
raise RuntimeError('Error trusting the notebook ({}): {}'.format(notebook_filename, stderr_jupyter))
|
11558077
|
from enum import Enum
class LanguageId(Enum):
Unknown = -1
Vernac = 1
Gallina = 2
Ltac = 3
Comment = 4
# code-mixed lids are only assigned on sentences but not tokens
LtacMixedWithGallina = 11
VernacMixedWithGallina = 12
def debug_repr(self) -> str:
return self.__repr__()
def __repr__(self):
return {
LanguageId.Unknown: "UNK",
LanguageId.Vernac: "V",
LanguageId.Gallina: "G",
LanguageId.Ltac: "L",
LanguageId.Comment: "C",
LanguageId.LtacMixedWithGallina: "LG",
LanguageId.VernacMixedWithGallina: "VG",
}[self]
def __str__(self):
return self.__repr__()
@property
def base_lid(self) -> "LanguageId":
"""
        :return: the base lid of a code-mixed lid; if self is not a code-mixed lid, return self.
"""
if self == LanguageId.LtacMixedWithGallina:
return LanguageId.Ltac
elif self == LanguageId.VernacMixedWithGallina:
return LanguageId.Vernac
else:
return self
# end if
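# Illustrative checks (not part of the original module): base_lid strips the
# Gallina mixin from code-mixed lids, and __str__ uses the short repr.
assert LanguageId.LtacMixedWithGallina.base_lid == LanguageId.Ltac
assert str(LanguageId.VernacMixedWithGallina) == "VG"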
|
11558082
|
import pathlib
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass
from os import path
from jinja2 import Environment, PackageLoader
jinja = Environment(loader=PackageLoader("splashgen"), autoescape=False)
_assigned_component = None
class Component(ABC):
jinja = jinja
build_dir = "build"
@abstractmethod
def render(self) -> str:
pass
def __str__(self) -> str:
return self.render()
def write_asset_to_build(self, src: str) -> str:
self._mkbuild()
dest = shutil.copy(src, path.join(self.build_dir, path.basename(src)))
        uri = path.basename(dest)
return uri
def into_template(self, template: str, extras: dict = None):
tmpl = self.jinja.get_template(template)
data = self.__dict__
if extras is None:
extras = {}
context = {**data, **extras}
return tmpl.render(**context)
def _mkbuild(self):
pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True)
@dataclass
class MetaTags:
title: str
description: str
image: str
canonical_url: str
def launch(root: Component) -> None:
global _assigned_component
_assigned_component = root
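# Minimal sketch of a concrete Component (the template name "hero.html" is
# hypothetical; it would need to exist in the splashgen package templates):
# class Hero(Component):
#     def render(self) -> str:
#         return self.into_template("hero.html", extras={"headline": "Hello"})
# launch(Hero())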
|
11558116
|
from scrapy.http import Request, TextResponse
def mock_response(file_name=None, url=None):
"""
Create a fake Scrapy HTTP response
file_name can be a relative file path or the desired contents of the mock
"""
if not url:
url = 'http://www.ubc.ca'
request = Request(url=url)
if file_name:
        try:
            file_path = "sleuth_crawler/tests" + file_name
            with open(file_path, 'r') as mock_file:
                file_content = mock_file.read()
        except OSError:
            # Allow mocker to directly input desired mock content
            file_content = file_name
else:
file_content = ""
return TextResponse(
url=url,
request=request,
body=file_content,
encoding='utf-8'
)
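# Illustrative usage: pass inline markup when no fixture file exists.
# response = mock_response(file_name='<html><body>hello</body></html>')
# response.css('body::text')  # behaves like a real crawled page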
|
11558181
|
from __future__ import print_function
import os, time
from weeutil.weeutil import startOfInterval
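# Scratch script: round a timestamp down to the start of a 195-second archive
# interval and compare with weeutil's startOfInterval (commented out below).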
interval = 195
os.environ['TZ'] = 'America/Los_Angeles'
time.tzset()
start_ts = time.mktime(time.strptime("2013-07-04 01:57:35", "%Y-%m-%d %H:%M:%S"))
print(start_ts)
rev = int(start_ts/interval)
start_ts = rev * interval
print(start_ts)
print(time.ctime(start_ts))
# begin = startOfInterval(start_ts, interval)
# print(time.ctime(begin), time.ctime(begin+interval))
alt = int(start_ts / interval)* interval
print(time.ctime(alt), time.ctime(alt+interval))
|
11558183
|
import torch.nn as nn
import torchvision.models as models
from .helper import init, make_standard_block
class VGG(nn.Module):
def __init__(self, use_bn=True): # Original implementation doesn't use BN
super(VGG, self).__init__()
        if use_bn:
            vgg = models.vgg19_bn(pretrained=True)
            layers_to_use = list(list(vgg.children())[0].children())[:33]
        else:
            vgg = models.vgg19(pretrained=True)
            layers_to_use = list(list(vgg.children())[0].children())[:23]
self.vgg = nn.Sequential(*layers_to_use)
self.feature_extractor = nn.Sequential(make_standard_block(512, 256, 3),
make_standard_block(256, 128, 3))
init(self.feature_extractor)
def forward(self, x):
x = self.vgg(x)
x = self.feature_extractor(x)
return x
|
11558218
|
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import pandas as pd
import cv2
from lib.utils.vis import visualize
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--name', default=None)
parser.add_argument('--uncropped', action='store_true')
parser.add_argument('--write', action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
    df = pd.read_csv('outputs/submissions/test/%s.csv' % args.name).fillna('')
img_ids = df['ImageId'].values
img_paths = np.array('inputs/test_images/' + df['ImageId'].values + '.jpg')
if args.uncropped:
cropped_img_ids = pd.read_csv('inputs/testset_cropped_imageids.csv')['ImageId'].values
for i, img_id in enumerate(img_ids):
if img_id in cropped_img_ids:
img_paths[i] = 'inputs/test_images_uncropped/' + img_id + '.jpg'
os.makedirs(os.path.join('tmp', args.name), exist_ok=True)
for i in tqdm(range(len(df))):
dets = np.array(df.loc[i, 'PredictionString'].split()).reshape([-1, 7]).astype('float')
img = cv2.imread(img_paths[i])
img_pred = visualize(img, dets)
if not args.write:
plt.imshow(img_pred[..., ::-1])
plt.show()
else:
cv2.imwrite(os.path.join('tmp', args.name, os.path.basename(img_paths[i])), img_pred)
if __name__ == '__main__':
main()
|
11558219
|
from builtins import object
class MockUtils(object):
class Placeholder(object):
def __init__(self):
self.args = None
self.kwargs = None
self.times = 0
@staticmethod
def raise_(exception):
raise exception
@staticmethod
def called_with(target, method, monkeypatch):
def method_mock(*args, **kwargs):
called_with.args = args
called_with.kwargs = kwargs
called_with = MockUtils.Placeholder()
monkeypatch.setattr(target, method, method_mock)
return called_with
@staticmethod
def count_calls(target, method, monkeypatch):
        def method_mock(*args, **kwargs):
            called.times += 1
called = MockUtils.Placeholder()
monkeypatch.setattr(target, method, method_mock)
return called
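# Illustrative use with pytest's monkeypatch fixture (target names are hypothetical):
# def test_example(monkeypatch):
#     placeholder = MockUtils.called_with(some_module, 'some_method', monkeypatch)
#     some_module.some_method(1, key='value')
#     assert placeholder.args == (1,) and placeholder.kwargs == {'key': 'value'}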
|
11558237
|
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from meddlr.ops.categorical import categorical_to_one_hot, logits_to_prob, one_hot_to_categorical
@pytest.mark.parametrize("use_numpy", [False, True])
def test_categorical_to_one_hot(use_numpy):
labels = torch.tensor([3, 0, 1, 0, 2, 0, 0, 3])
expected = torch.tensor(
[
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
]
)
if use_numpy:
labels = labels.numpy()
expected = expected.numpy()
all_func = np.all
else:
all_func = torch.all
dtype = torch.int64
out = categorical_to_one_hot(labels, channel_dim=-1, background=None)
assert all_func(out == expected)
out = categorical_to_one_hot(labels, channel_dim=-1, background=0)
assert all_func(out == expected[:, 1:])
out = categorical_to_one_hot(labels, channel_dim=-1, background=None, num_categories=3)
assert all_func(out == expected)
if not use_numpy:
out = categorical_to_one_hot(labels.T, channel_dim=0, background=0, dtype=dtype)
assert out.dtype == dtype
assert all_func(out.T == expected[:, 1:])
@pytest.mark.parametrize("use_numpy", [False, True])
def test_one_hot_to_categorical(use_numpy):
one_hot = torch.tensor(
[
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
]
)
expected = torch.tensor([3, 0, 1, 0, 2, 0, 0, 3])
if use_numpy:
one_hot = one_hot.numpy()
expected = expected.numpy()
all_func = np.all
else:
all_func = torch.all
out = one_hot_to_categorical(one_hot, channel_dim=-1, background=None)
assert all_func(out == expected + 1)
out = one_hot_to_categorical(one_hot, channel_dim=-1, background=0)
assert all_func(out == expected)
@pytest.mark.parametrize("use_numpy", [False, True])
def test_logits_to_prob(use_numpy):
logits = torch.randn(10, 4)
sigmoid = torch.sigmoid(logits)
softmax = F.softmax(logits, dim=1)
if use_numpy:
logits = logits.numpy()
sigmoid = sigmoid.numpy()
softmax = softmax.numpy()
all_func = np.all
else:
all_func = torch.all
assert all_func(logits_to_prob(logits, "sigmoid") == sigmoid)
assert all_func(logits_to_prob(logits, "softmax") == softmax)
with pytest.raises(ValueError):
logits_to_prob(logits, "invalid")
|
11558244
|
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from skimage.transform import rotate, AffineTransform, warp
from skimage.util import random_noise
from tensorflow.keras.utils import Sequence
from augmentation import Automold as am
from augmentation import Helpers as hp
# The dataset is sampled at a high frame rate, which makes consecutive frames redundant.
# Use `rate` to choose a sampling stride: frames that are too similar add little,
# while frames that are too dissimilar break temporal continuity.
fig = plt.figure(figsize=(17, 17))
columns = 4
rows = 8
start = 24520
rate = 4
HEIGHT = 160
WIDTH = 320
for j, i in enumerate(range(start, start + rows * columns * rate, rate)):
    img = cv2.imread(f'all/{i}.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    fig.add_subplot(rows, columns, j + 1)
    plt.imshow(img)
def load_data(rate=4, action_file='actions.csv', angle_file='angles.csv', actions=None, val_size=0.2, test_size=0.1,
scale=False, shuffle=True):
df_angles = pd.read_csv(angle_file)
df_angles['angle'] = df_angles['angle'].astype('float')
if actions:
df_actions = pd.read_csv(action_file)
df_actions['action'] = df_actions['action'].astype('str')
df = df_angles.merge(df_actions, on='filename', how='inner')
else:
df = df_angles
df = df[::rate]
scaler = StandardScaler()
if scale:
df[['angle']] = scaler.fit_transform(df[['angle']])
if actions:
if not (len(actions) == 1 and actions[0] == 'all'):
df = df.loc[df['action'].isin(actions)]
train_df, val_df = train_test_split(df, test_size=(val_size + test_size), random_state=42, shuffle=shuffle)
val_df, test_df = train_test_split(val_df, test_size=test_size, random_state=42, shuffle=shuffle)
df.reset_index(inplace=True)
train_df.reset_index(inplace=True)
val_df.reset_index(inplace=True)
test_df.reset_index(inplace=True)
return df, train_df, val_df, test_df, scaler
def load_data_3D_CNN(rate=3):
df_angles = pd.read_csv('angles.csv')
df_angles['angle'] = df_angles['angle'].astype('float')
df_actions = pd.read_csv('actions.csv')
df_actions['action'] = df_actions['action'].astype('category')
df = df_angles.merge(df_actions, on='filename', how='inner')
df = df[::rate]
total = len(df)
train_size = int(total * 0.8)
test_size = val_size = int(total * 0.1)
train_df = df[:train_size]
val_df = df[train_size: train_size + val_size]
test_df = df[train_size + val_size: train_size + val_size + test_size]
    if not (len(df) == len(df_actions) == len(df_angles)):
        print('WARNING: dataframe lengths do not match!')
df.reset_index(inplace=True)
train_df.reset_index(inplace=True)
val_df.reset_index(inplace=True)
test_df.reset_index(inplace=True)
return df, train_df, val_df, test_df
def anticlockwise_rotation(image):
angle = random.randint(0, 15)
return rotate(image, angle)
def clockwise_rotation(image):
angle = random.randint(0, 15)
return rotate(image, -angle)
def add_noise(image):
return random_noise(image)
def shift_left(image):
x_shift = random.randint(0, 30)
y_shift = random.randint(0, 30)
transform = AffineTransform(translation=(x_shift, y_shift))
shifted = warp(image, transform, preserve_range=True)
return shifted
def shift_right(image):
x_shift = random.randint(0, 30)
y_shift = random.randint(0, 30)
transform = AffineTransform(translation=(-x_shift, -y_shift))
shifted = warp(image, transform, preserve_range=True)
return shifted
def augment(image, functions=[anticlockwise_rotation, clockwise_rotation, shift_left, shift_right, add_noise]):
function = random.choice(functions)
aug_img = function(image)
return aug_img
aug_img = augment(img / 255)
class DataGenerator2D(Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, img_paths, angles, actions, base_path, augmentation_rate=0.4,
to_fit=True, return_actions=False, batch_size=32, dim=(320, 160), shuffle=True, scale_image=True,
lower_augmentation_angle=-999, upper_augmentation_angle=999):
self.img_paths = img_paths.copy()
if actions:
self.actions = actions.copy()
self.angles = angles.copy()
self.base_path = base_path
self.to_fit = to_fit
self.batch_size = batch_size
self.dim = dim
self.shuffle = shuffle
self.on_epoch_end()
self.return_actions = return_actions
self.augmentation_rate = augmentation_rate
self.scale_image = scale_image
self.upper_augmentation_angle = upper_augmentation_angle
self.lower_augmentation_angle = lower_augmentation_angle
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return int(len(self.img_paths) // self.batch_size)
def __getitem__(self, index):
"""Generate one batch of data
:param index: index of the batch
:return: X and y when fitting. X only when predicting
"""
# Generate indexes of the batch
current_indexes = list(range(index * self.batch_size, (index + 1) * self.batch_size))
img_paths_temp = self.img_paths[current_indexes]
# Generate data
X, flipped_indexes, augmented_indexes = self._generate_X(img_paths_temp)
if self.to_fit:
y = self._generate_y(current_indexes, flipped_indexes, augmented_indexes)
return X, y
else:
return X
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
if self.shuffle == True:
indices = np.arange(len(self.img_paths))
np.random.shuffle(indices)
self.img_paths, self.angles = self.img_paths[indices], self.angles[indices]
self.img_paths.reset_index(drop=True, inplace=True)
self.angles.reset_index(drop=True, inplace=True)
def _generate_X(self, img_paths_temp):
"""Generates data containing batch_size images
:param img_paths_temp: list of label ids to load
:return: batch of images
"""
# Initialization
X = []
augmented_indexes = []
flipped_indexes = []
# Generate data
for idx, path in zip(img_paths_temp.index, img_paths_temp):
# Store sample
img, is_flipped, is_augmented = self._load_image(path, self.angles[idx])
if is_flipped:
flipped_indexes.append(idx)
if is_augmented:
augmented_indexes.append(idx)
if self.return_actions:
X.append(np.array([img, self.actions[idx]]))
else:
X.append(img)
return np.array(X), flipped_indexes, augmented_indexes
def _generate_y(self, current_indexes, flipped_indexes, augmented_indexes):
        # :return: batch of target angles (negated for flipped images)
y = self.angles.iloc[current_indexes].copy()
for idx in flipped_indexes:
y[idx] *= -1
return y.values
def _load_image(self, image_path, angle):
is_augmented = False
is_flipped = False
img = cv2.imread(self.base_path + '/' + image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Image augmentation using automould
if (np.random.random() < self.augmentation_rate):
img = cv2.resize(img, (1280, 720))
img = am.augment_random(img, volume='same',
aug_types=["add_shadow", "add_snow", "add_rain", "add_fog", "add_gravel",
"add_sun_flare", "add_speed"])
if (np.random.random() < (self.augmentation_rate + 0.1)) and (
(angle >= self.lower_augmentation_angle) & (angle <= self.upper_augmentation_angle)):
is_flipped = True
img = np.flip(img, 1)
img = cv2.resize(img, self.dim)
if (np.random.random() < (self.augmentation_rate + 0.1)) and (
(angle >= self.lower_augmentation_angle) & (angle <= self.upper_augmentation_angle)):
is_augmented = True
img = augment(img)
if self.scale_image:
img = img / 255.0
return (img, is_flipped, is_augmented)
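# Illustrative usage (column names, base path and frame size are assumptions):
# train_gen = DataGenerator2D(train_df['filename'], train_df['angle'], None,
#                             base_path='all', batch_size=32, dim=(320, 160))
# model.fit(train_gen, epochs=10)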
class DataGeneratorModified2D(Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, img_paths, angles, actions, base_path, augmentation_rate=0.4,
to_fit=True, return_actions=False, depth=8, dim=(303, 170), shuffle=True, overlap=5):
self.img_paths = img_paths.copy()
self.actions = actions.copy()
self.angles = angles.copy()
self.base_path = base_path
self.to_fit = to_fit
self.depth = depth
self.dim = dim
self.shuffle = shuffle
self.on_epoch_end()
self.return_actions = return_actions
self.augmentation_rate = augmentation_rate
self.overlap = overlap
self.delta = self.depth - self.overlap
self.sequences = []
temp_indexes = list(range(len(self.img_paths)))
for i in range(0, len(self.img_paths) - self.depth, self.depth - self.overlap):
self.sequences.append(temp_indexes[i:i + depth])
self.sequences = np.array(self.sequences)
np.random.shuffle(self.sequences)
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return int(np.ceil((len(self.img_paths) - self.depth) / (self.depth - self.overlap)))
def __getitem__(self, index):
"""Generate one batch of data
:param index: index of the batch
:return: X and y when fitting. X only when predicting
"""
# Generate indexes of the batch
current_indexes = self.sequences[index]
img_paths_temp = self.img_paths[current_indexes]
# Generate data
X, flipped_indexes, augmented_indexes = self._generate_X(img_paths_temp)
if self.to_fit:
y = self._generate_y(current_indexes, flipped_indexes, augmented_indexes)
return X, y
else:
return X
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
        if self.shuffle == True and hasattr(self, 'sequences'):
            # shuffle window order between epochs; shuffling img_paths alone
            # would desync paths from their angles
            np.random.shuffle(self.sequences)
def _generate_X(self, img_paths_temp):
"""Generates data containing batch_size images
:param img_paths_temp: list of label ids to load
:return: batch of images
"""
# Initialization
X = []
augmented_indexes = []
flipped_indexes = []
# Generate data
for idx, path in zip(img_paths_temp.index, img_paths_temp):
# Store sample
img, is_flipped, is_augmented = self._load_image(path, self.angles[idx])
if is_flipped:
flipped_indexes.append(idx)
if is_augmented:
augmented_indexes.append(idx)
if self.return_actions:
X.append(np.array([img, self.actions[idx]]))
else:
X.append(img)
return np.array(X), flipped_indexes, augmented_indexes
def _generate_y(self, current_indexes, flipped_indexes, augmented_indexes):
y = self.angles.iloc[current_indexes].copy()
for idx in flipped_indexes:
y[idx] *= -1
return y
def _load_image(self, image_path, angle):
is_augmented = False
is_flipped = False
img = cv2.imread(self.base_path + '/' + image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (1280, 720))
if np.random.random() < self.augmentation_rate:
is_augmented = True
img = am.augment_random(img, volume='same',
aug_types=["add_shadow", "add_snow", "add_rain", "add_fog", "add_gravel",
"add_sun_flare", "add_speed"])
if np.random.random() < 0 and int(angle) != 0:
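            # NOTE: probability < 0 is never true, so this flip branch is effectively disabled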
is_flipped = True
img = np.flip(img, 1)
img = cv2.resize(img, self.dim)
img = img / 255
return (img, is_flipped, is_augmented)
class DataGenerator3D(Sequence):
def __init__(self, img_paths, angles, actions, base_path, augmentation_rate=0,
to_fit=True, return_actions=False, batch_size=32, depth=32,
dim=(303, 170), shuffle=False, return_all_ys=False, overlap=4):
self.generator2D = DataGeneratorModified2D(img_paths=img_paths, angles=angles, actions=actions,
base_path=base_path, augmentation_rate=augmentation_rate,
to_fit=to_fit, return_actions=return_actions,
dim=dim, shuffle=shuffle, overlap=overlap, depth=depth)
self.to_fit = to_fit
self.depth = depth
self.batch_size = batch_size
self.return_all_ys = return_all_ys
self.overlap = overlap
self.delta = self.depth - self.overlap
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return self.generator2D.__len__() // self.batch_size
def __getitem__(self, index):
current_indexes = list(range(index * self.batch_size, (index + 1) * self.batch_size))
X = []
y = []
for i in current_indexes:
if self.to_fit:
X_temp, y_temp = self.generator2D.__getitem__(i)
X.append(X_temp)
if self.return_all_ys:
y.append(y_temp)
else:
y.append(y_temp.iloc[-1])
            else:
                X_temp = self.generator2D.__getitem__(i)
                X.append(X_temp)
if self.to_fit:
return np.array(X), np.array(y)
else:
return np.array(X)
|
11558292
|
from config.Config import Config
from engine.component.TemplateModuleComponent import TemplateModuleComponent
from enums.Language import Language
class DefineComponent(TemplateModuleComponent):
def __init__(self, code=None, language=Language.CPP):
placeholder = Config().get("PLACEHOLDERS", "DEFINE")
super().__init__(code, placeholder)
self.__code = code
self.language = language
self.prefix = ""
self.suffix = ""
@property
def code(self):
if self.language == Language.CSHARP:
return f""
elif self.language == Language.CPP:
            if "#define" not in self.__code:
msg = self.prefix + "\n".join([f"#define {c.strip()}" for c in self.__code.split("\n") if len(c.strip()) > 0]) + self.suffix
else:
msg = f"{self.prefix}{self.__code}{self.suffix}"
return msg
elif self.language == Language.POWERSHELL:
return f""
else:
return self.__code
def wrap_if_ndef(self):
self.prefix = "#ifndef\n"
self.suffix = "#endif\n"
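# Illustrative usage (the Config placeholder value depends on the project config):
# component = DefineComponent(code="DEBUG 1", language=Language.CPP)
# component.code  # -> "#define DEBUG 1"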
|
11558297
|
import pytest
from mach import exceptions, templates
@pytest.mark.parametrize(
"url, zone",
(
("https://api.labd.io", "labd.io"),
("https://api.test.mach-examples.net", "test.mach-examples.net"),
("api.test.mach-examples.net", "test.mach-examples.net"),
("http://api.test.mach-examples.net", "test.mach-examples.net"),
),
)
def test_zone_name_filter(url, zone):
assert templates.zone_name(url) == zone
def test_render_tfvalue():
assert templates.render_tfvalue(True) == "true"
assert templates.render_tfvalue(False) == "false"
assert templates.render_tfvalue(12) == 12
assert (
templates.render_tfvalue(["value1", False, "value2"])
== """["value1",false,"value2"]"""
)
assert (
templates.render_tfvalue(r"${component.infra.db_password}")
== "module.infra.db_password"
)
def test_render_config_variable():
assert (
templates.parse_config_variable(r"${component.infra.db_password}")
== "module.infra.db_password"
)
assert templates.parse_config_variable(r"${component.infra.db_password") is None
with pytest.raises(exceptions.MachError):
assert templates.parse_config_variable(r"${component.infra}")
|
11558328
|
from flask import Flask, render_template, redirect, url_for, request
from werkzeug.utils import secure_filename
import os
from aqg.app1 import Application
from aqg.utils.summarizer import TextSummarizer
from aqg.utils.pdfgenration import pdfgeneration
from aqg.utils.mail_agent import mail_agent as ma
from aqg.utils.pdfgenration import pdfgeneration as pdf
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template("welcome.html")
@app.route('/index.html')
def hello_world1():
return render_template("index.html")
@app.route('/procedure.html' )
def hello_world2():
return render_template("procedure.html")
@app.route('/question.html',methods = ['POST', 'GET'])
def hello_world3():
Name = request.form['Name']
Number = request.form['Number']
Email = request.form['Email']
outputformat = request.form['outputformat']
optionsRadios = request.form['optionsRadios']
if(optionsRadios == "text"):
Text1 = request.form['Text1']
file_object = open("InputText.txt",'w')
file_object.write(Text1)
file_object.close()
apps = Application()
question_ans_dataframe = apps.ques_application("InputText.txt",outputformat,Email)
pdf2 = pdf()
pdf2.generate_pdf_quesans(question_ans_dataframe)
mail_age = ma()
mail_age.mail_pdf(Email)
return render_template("question.html", qad = question_ans_dataframe)
elif(optionsRadios == "file"):
File1 = request.files['File1']
File1.save(secure_filename(File1.filename))
apps = Application()
question_ans_dataframe = apps.ques_application(File1.filename,outputformat,Email)
pdf2 = pdf()
pdf2.generate_pdf_quesans(question_ans_dataframe)
mail_age = ma()
mail_age.mail_pdf(Email)
return render_template("question.html", qad = question_ans_dataframe)
elif(optionsRadios == "link"):
Link1 = request.form['Link1']
t = TextSummarizer(25)
t.summarize_from_url(Link1)
apps = Application()
question_ans_dataframe = apps.ques_application("summarizer_output2.txt",outputformat,Email)
pdf2 = pdf()
pdf2.generate_pdf_quesans(question_ans_dataframe)
mail_age = ma()
mail_age.mail_pdf(Email)
return render_template("question.html", qad = question_ans_dataframe)
return render_template("question.html")
@app.route('/summarization.html')
def hello_world4():
return render_template("summarization.html")
@app.route('/summarized.html',methods = ['POST', 'GET'])
def hello_world5():
Name = request.form['Name']
Number = request.form['Number']
Email = request.form['Email']
Nolines = request.form['Nolines']
optionsRadios = request.form['optionsRadios']
    t = TextSummarizer(int(Nolines))
if(optionsRadios == "text"):
Text1 = request.form['Text1']
t.summarize_from_text(Text1)
pdf = pdfgeneration()
pdf.generate_pdf_summarizer("summarizer_output2.txt","summarized.pdf")
mail_age = ma()
mail_age.mail_pdf(Email,"summarized.pdf",1)
        with open("summarizer_output.txt") as f:
            summarized_text = f.read()
return render_template("summarized.html", summarized_text = summarized_text)
elif(optionsRadios == "file"):
File1 = request.files['File1']
File1.save(secure_filename(File1.filename))
t.summarize_from_file(File1.filename)
pdf = pdfgeneration()
pdf.generate_pdf_summarizer("summarizer_output2.txt","summarized.pdf")
mail_age = ma()
mail_age.mail_pdf(Email,"summarized.pdf",1)
        with open("summarizer_output.txt") as f:
            summarized_text = f.read()
return render_template("summarized.html", summarized_text = summarized_text)
elif(optionsRadios == "link"):
Link1 = request.form['Link1']
t.summarize_from_url(Link1)
pdf = pdfgeneration()
pdf.generate_pdf_summarizer("summarizer_output2.txt","summarized.pdf")
mail_age = ma()
mail_age.mail_pdf(Email,"summarized.pdf",1)
        with open("summarizer_output.txt") as f:
            summarized_text = f.read()
return render_template("summarized.html", summarized_text = summarized_text)
return render_template("summarization.html")
if __name__ == '__main__':
app.run()
|
11558348
|
import itertools
from string import Template
import numpy as np
from PuzzleLib.Compiler.Codegen.Types import half_t, float_t
from PuzzleLib.Cuda.Utils import roundUpDiv
transformTmpl = Template("""
extern "C"
__global__ void transform2d$ext($T * __restrict__ dst, const $T * __restrict__ src, int dstOffset, int srcOffset,
int dstStride0, int dstStride1, int srcStride0, int len0, int len1)
{
for (int i = threadIdx.x + $NT * blockIdx.x; i < len0 * len1; i += $NT * blockDim.x)
{
int i0 = i / len1, i1 = i % len1;
dst[dstOffset + i0 * dstStride0 + i1 * dstStride1] = src[srcOffset + i0 * srcStride0 + i1];
}
}
extern "C"
__global__ void transform3d$ext($T * __restrict__ dst, const $T * __restrict__ src, int dstOffset, int srcOffset,
int dstStride0, int dstStride1, int dstStride2, int srcStride0, int srcStride1,
int len0, int len1, int len2)
{
for (int i = threadIdx.x + $NT * blockIdx.x; i < len0 * len1 * len2; i += $NT * blockDim.x)
{
int i0 = i / (len1 * len2), i1 = (i / len2) % len1, i2 = i % len2;
int outoffset = dstOffset + i0 * dstStride0 + i1 * dstStride1 + i2 * dstStride2;
int inoffset = srcOffset + i0 * srcStride0 + i1 * srcStride1 + i2;
dst[outoffset] = src[inoffset];
}
}
extern "C"
__global__ void transform4d$ext($T * __restrict__ dst, const $T * __restrict__ src, int dstOffset, int srcOffset,
int dstStride0, int dstStride1, int dstStride2, int dstStride3,
int srcStride0, int srcStride1, int srcStride2, int len0, int len1, int len2, int len3)
{
for (int i = threadIdx.x + $NT * blockIdx.x; i < len0 * len1 * len2 * len3; i += $NT * blockDim.x)
{
int i0 = i / (len1 * len2 * len3), i1 = (i / (len2 * len3)) % len1;
int i2 = (i / len3) % len2, i3 = i % len3;
int outoffset = dstOffset + i0 * dstStride0 + i1 * dstStride1 + i2 * dstStride2 + i3 * dstStride3;
int inoffset = srcOffset + i0 * srcStride0 + i1 * srcStride1 + i2 * srcStride2 + i3;
dst[outoffset] = src[inoffset];
}
}
extern "C"
__global__ void transform5d$ext($T * __restrict__ dst, const $T * __restrict__ src, int dstOffset, int srcOffset,
int dstStride0, int dstStride1, int dstStride2, int dstStride3, int dstStride4,
int srcStride0, int srcStride1, int srcStride2, int srcStride3,
int len0, int len1, int len2, int len3, int len4)
{
for (int i = threadIdx.x + $NT * blockIdx.x; i < len0 * len1 * len2 * len3 * len4; i += $NT * blockDim.x)
{
int i0 = i / (len1 * len2 * len3 * len4), i1 = (i / (len2 * len3 * len4)) % len1;
int i2 = (i / (len3 * len4)) % len2, i3 = (i / len4) % len3, i4 = i % len4;
int offs = dstOffset + i0 * dstStride0 + i1 * dstStride1 + i2 * dstStride2 + i3 * dstStride3 + i4 * dstStride4;
int inoffset = srcOffset + i0 * srcStride0 + i1 * srcStride1 + i2 * srcStride2 + i3 * srcStride3 + i4;
dst[offs] = src[inoffset];
}
}
""")
class MemoryModule:
def __init__(self, backend):
self.backend = backend
self.GPUArray, self.NT = backend.GPUArray, backend.nthreads
self.mod = backend.SourceModule("#include <cuda_fp16.h>\n\n%s%s" % (
transformTmpl.substitute(NT=self.NT, T=half_t, ext="FP16"),
transformTmpl.substitute(NT=self.NT, T=float_t, ext="")
))
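    # transform() scatters `tensor` into `out` using the destination strides:
    # 1D tensors are a plain device copy, 2D-5D shapes go through the CUDA
    # kernels compiled above (FP32 and FP16 variants).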
def transform(self, tensor, shape, strides, out, inoffset=0, outoffset=0):
assert tensor.dtype == np.float32 or tensor.dtype == np.float16
assert tensor.ndim <= 5 and tensor.ndim == len(strides) and tensor.ndim == len(shape)
ndim = tensor.ndim
if ndim == 1:
out.set(tensor)
return out
if ndim == 2:
transform = self.mod.transform2d if tensor.dtype == np.float32 else self.mod.transform2dFP16
elif ndim == 3:
transform = self.mod.transform3d if tensor.dtype == np.float32 else self.mod.transform3dFP16
elif ndim == 4:
transform = self.mod.transform4d if tensor.dtype == np.float32 else self.mod.transform4dFP16
elif ndim == 5:
transform = self.mod.transform5d if tensor.dtype == np.float32 else self.mod.transform5dFP16
else:
assert False
transform(
out, tensor, np.int32(outoffset), np.int32(inoffset),
*(np.int32(s // tensor.dtype.itemsize) for s in strides),
*(np.int32(s // tensor.dtype.itemsize) for s in tensor.strides[:-1]),
*(np.int32(dim) for dim in shape),
block=(self.NT, 1, 1), grid=(roundUpDiv(tensor.size, self.NT), 1, 1)
)
return out
def transpose(self, tensor, axes=None, out=None, allocator=None):
assert axes is None or len(axes) == tensor.ndim
axes = tuple(reversed(range(tensor.ndim))) if axes is None else axes
shape = tuple(tensor.dimAt(axis) for axis in axes)
if out is None:
out = self.GPUArray.empty(shape, dtype=tensor.dtype, allocator=allocator)
else:
assert out.shape == shape
outstrides = [0] * len(axes)
for i, axis in enumerate(axes):
outstrides[axis] = out.strideAt(i)
return self.transform(tensor, tensor.shape, outstrides, out)
def moveaxis(self, data, src, dst, out=None, allocator=None):
if src < dst:
axes = tuple(range(src)) + tuple(range(src + 1, dst + 1)) + (src, ) + tuple(range(dst + 1, data.ndim))
else:
axes = tuple(range(dst)) + (src, ) + tuple(range(dst, src)) + tuple(range(src + 1, data.ndim))
return self.transpose(data, axes, out=out, allocator=allocator)
def swapaxes(self, data, axis1, axis2, out=None, allocator=None):
if axis1 == axis2:
axes = tuple(range(data.ndim))
else:
axis1, axis2 = (axis1, axis2) if axis1 < axis2 else (axis2, axis1)
axes = tuple(range(axis1)) + (axis2, ) + tuple(range(axis1 + 1, axis2)) + \
(axis1, ) + tuple(range(axis2 + 1, data.ndim))
return self.transpose(data, axes, out=out, allocator=allocator)
def depthConcat(self, tensors, out=None, allocator=None):
assert all(tn.ndim == 4 and tn.dtype == tensors[0].dtype for tn in tensors)
assert all(tn.dimAt(0) == tensors[0].dimAt(0) for tn in tensors)
h, w, depth = 0, 0, 0
for tn in tensors:
depth += tn.dimAt(1)
h, w = max(h, tn.dimAt(2)), max(w, tn.dimAt(3))
if out is None:
out = self.GPUArray.zeros(
shape=(tensors[0].dimAt(0), depth, h, w), dtype=tensors[0].dtype, allocator=allocator
)
else:
assert out.shape == (tensors[0].dimAt(0), depth, h, w)
stride = 0
for i, tn in enumerate(tensors):
center = (h - tn.dimAt(2)) // 2 * out.strideAt(2) + (w - tn.dimAt(3)) // 2 * out.strideAt(3)
self.transform(tn, tn.shape, out.strides, out=out, outoffset=stride + center // tn.dtype.itemsize)
stride += out.strideAt(1) * tn.dimAt(1) // tn.dtype.itemsize
return out
def depthSplit(self, grad, tensors, allocator=None):
assert all(tn.ndim == 4 and tn.dtype == tensors[0].dtype for tn in tensors)
assert all(tn.dimAt(0) == tensors[0].dimAt(0) for tn in tensors)
ingrads = [self.GPUArray.empty(shape=tn.shape, dtype=tn.dtype, allocator=allocator) for tn in tensors]
stride = 0
for i, gr in enumerate(ingrads):
center = (grad.dimAt(2) - gr.dimAt(2)) // 2 * grad.strideAt(2) + (grad.dimAt(3) - gr.dimAt(3)) // 2 * \
grad.strideAt(3)
self.transform(grad, gr.shape, gr.strides, gr, inoffset=stride + center // gr.dtype.itemsize)
stride += grad.strideAt(1) * gr.dimAt(1) // gr.dtype.itemsize
return ingrads
def unittest():
from PuzzleLib.Cuda import Backend
backendTest(Backend)
def backendTest(Backend):
for deviceIdx in range(Backend.getDeviceCount()):
module = MemoryModule(Backend.getBackend(deviceIdx, initmode=2))
for dtype, _ in module.backend.dtypesSupported():
transposeTest(module.backend, module, dtype)
moveAxisTest(module.backend, module, dtype)
swapAxesTest(module.backend, module, dtype)
depthConcatTest(module.backend, module, dtype)
def transposeTest(bnd, module, dtype):
shapes = [(10, ), (10, 3), (10, 3, 5, 4, 2)]
for shape in shapes:
for axes in itertools.permutations(range(len(shape))):
hostData = np.random.randn(*shape).astype(dtype)
data = bnd.GPUArray.toGpu(hostData)
outdata = module.transpose(data, axes=axes)
hostOutData = np.transpose(hostData, axes=axes)
assert np.allclose(hostOutData, outdata.get())
def moveAxisTest(bnd, module, dtype):
shapes = [(10, ), (10, 3), (10, 3, 5, 4, 2)]
for shape in shapes:
for src, dst in itertools.product(range(len(shape)), range(len(shape))):
hostData = np.random.randn(*shape).astype(dtype)
data = bnd.GPUArray.toGpu(hostData)
outdata = module.moveaxis(data, src=src, dst=dst)
hostOutData = np.moveaxis(hostData, source=src, destination=dst)
assert np.allclose(hostOutData, outdata.get())
def swapAxesTest(bnd, module, dtype):
shapes = [(10, ), (10, 3), (10, 3, 5, 4, 2)]
for shape in shapes:
for axis1, axis2 in itertools.product(range(len(shape)), range(len(shape))):
hostData = np.random.randn(*shape).astype(dtype)
data = bnd.GPUArray.toGpu(hostData)
outdata = module.swapaxes(data, axis1=axis1, axis2=axis2)
hostOutData = np.swapaxes(hostData, axis1=axis1, axis2=axis2)
assert np.allclose(hostOutData, outdata.get())
def depthConcatTest(bnd, module, dtype):
hostData1 = np.random.randn(3, 4, 3, 3).astype(dtype)
hostData2 = np.random.randn(3, 2, 6, 6).astype(dtype)
hostData3 = np.random.randn(3, 5, 4, 4).astype(dtype)
allHostData = [hostData1, hostData2, hostData3]
allData = [bnd.GPUArray.toGpu(data) for data in allHostData]
outdata = module.depthConcat(allData)
depth, h, w = 0, 0, 0
for data in allHostData:
depth += data.shape[1]
h, w = max(h, data.shape[2]), max(w, data.shape[3])
hostOutData = np.zeros(shape=(allHostData[0].shape[0], depth, h, w), dtype=dtype)
hostOutData[:, :4, 1:4, 1:4] = hostData1
hostOutData[:, 4:6, :, :] = hostData2
hostOutData[:, 6:, 1:5, 1:5] = hostData3
assert np.allclose(hostOutData, outdata.get())
hostGrad = np.random.randn(*hostOutData.shape).astype(dtype)
grad = bnd.GPUArray.toGpu(hostGrad)
ingrads = module.depthSplit(grad, allData)
hostInGrads = [
hostGrad[:, :4, 1:4, 1:4],
hostGrad[:, 4:6, :, :],
hostGrad[:, 6:, 1:5, 1:5]
]
assert all(np.allclose(hostInGrad, ingrads[i].get()) for i, hostInGrad in enumerate(hostInGrads))
if __name__ == "__main__":
unittest()
|
11558384
|
class Solution:
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
size = len(nums)
if size < 3:
return max(nums)
first = float('-inf')
second = float('-inf')
third = float('-inf')
for n in nums:
if first < n:
third = second
second = first
first = n
elif first == n:
continue
elif second < n:
third = second
second = n
elif second == n:
continue
elif third < n:
third = n
return third if third != float('-inf') else first
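# Examples: Solution().thirdMax([3, 2, 1]) -> 1 (third distinct maximum);
# Solution().thirdMax([1, 2]) -> 2 (fewer than three values, return the max).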
|
11558387
|
import numpy as np
import pickle
import tensorflow as tf
def vgg_block(outputs, params, name, data_format, num_conv):
for i in range(num_conv):
layer_name = name + "_" + str(i + 1)
        w = np.transpose(params[layer_name][0], (2, 3, 1, 0))  # axes (0,1,2,3) -> (2,3,1,0), e.g. (out, in, H, W) -> (H, W, in, out)
b = params[layer_name][1]
outputs = tf.layers.conv2d(
outputs,
filters=w.shape[3],
kernel_size=(w.shape[0], w.shape[1]),
strides=(1, 1),
padding=("SAME"),
data_format=data_format,
kernel_initializer=tf.constant_initializer(w),
bias_initializer=tf.constant_initializer(b),
activation=tf.nn.relu,
name=layer_name)
return outputs
def vgg_pool(outputs, params, name, data_format, pool_sz=2, pool_stride=2):
layer_name = "pool" + name[-1]
outputs = tf.layers.max_pooling2d(
outputs,
pool_size=(pool_sz, pool_sz),
strides=(pool_stride, pool_stride),
padding="SAME",
data_format=data_format,
name=layer_name)
return outputs
def vgg_mod(outputs, params, name, data_format, dilation=1):
    w = np.transpose(params[name][0], (2, 3, 1, 0))  # axes (0,1,2,3) -> (2,3,1,0)
b = params[name][1]
outputs = tf.layers.conv2d(
outputs,
filters=w.shape[3],
kernel_size=(w.shape[0], w.shape[1]),
strides=(1, 1),
padding=("SAME"),
data_format=data_format,
dilation_rate=(dilation, dilation),
kernel_initializer=tf.constant_initializer(w),
bias_initializer=tf.constant_initializer(b),
activation=tf.nn.relu,
name=name)
return outputs
def vgg(outputs, params, data_format):
outputs = vgg_block(outputs, params, "conv1", data_format, num_conv=2)
outputs = vgg_pool(outputs, params, "pool1", data_format)
outputs = vgg_block(outputs, params, "conv2", data_format, num_conv=2)
    outputs = vgg_pool(outputs, params, "pool2", data_format)
outputs = vgg_block(outputs, params, "conv3", data_format, num_conv=3)
outputs = vgg_pool(outputs, params, "pool3", data_format)
outputs_conv4_3 = vgg_block(outputs, params, "conv4", data_format, num_conv=3)
outputs = vgg_pool(outputs_conv4_3, params, "pool4", data_format)
outputs = vgg_block(outputs, params, "conv5", data_format, num_conv=3)
outputs = vgg_pool(outputs, params, "pool5", data_format, pool_sz=3, pool_stride=1)
outputs = vgg_mod(outputs, params, "fc6", data_format, dilation=6)
outputs_fc7 = vgg_mod(outputs, params, "fc7", data_format)
return [outputs_conv4_3, outputs_fc7]
def net(inputs, data_format, VGG_PARAMS_FILE):
params = pickle.load(open(VGG_PARAMS_FILE, "rb"))
with tf.variable_scope(name_or_scope='VGG',
values=[inputs],
reuse=tf.AUTO_REUSE):
outputs = vgg(inputs, params, data_format)
return outputs
|
11558430
|
import argparse
import logging
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
logger = logging.getLogger(__name__)
def check_nans(df):
""" Auxiliary function for data wrangling logs """
n_rows = len(df)
check_df = {'col': [], 'dtype': [], 'nan_prc': []}
for col in df.columns:
check_df['col'].append(col)
check_df['dtype'].append(df[col].dtype)
check_df['nan_prc'].append(df[col].isna().sum()/n_rows)
check_df = pd.DataFrame(check_df)
print("\n")
print(f"dataframe n_rows {n_rows}")
print(check_df)
print("\n")
def one_hot_encoding(df, unique_id):
"""
Creates a new dataframe with one hot encoded variables
Arguments
---------
df: Pandas DataFrame
Original data with categorical or object columns to encode
unique_id: str
String index that identifies each unique observation in df
Returns
-------
one_hot_concat_df: Pandas DataFrame
Processed data with one hot encoded variables.
The column names identify each category and its levels.
i.e. category_[level1], category_[level2] ...
"""
encoder = OneHotEncoder()
columns = list(df.columns)
columns.remove(unique_id)
one_hot_concat_df = pd.DataFrame(df[unique_id].values, columns=[unique_id])
    for col in columns:
        dummy_values = encoder.fit_transform(df[col].values.reshape(-1, 1)).toarray()
        # label columns in the encoder's (sorted) category order so names match values
        dummy_columns = [f'{col}_[{x}]' for x in encoder.categories_[0]]
        one_hot_df = pd.DataFrame(dummy_values, columns=dummy_columns)
        one_hot_concat_df = pd.concat([one_hot_concat_df, one_hot_df], axis=1)
return one_hot_concat_df
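# Illustrative example (hypothetical column names):
# df = pd.DataFrame({'id': [1, 2], 'color': ['red', 'blue']})
# one_hot_encoding(df, 'id')  # -> columns: id, color_[blue], color_[red]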
def numpy_balance(*arrs):
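    """Return the cartesian product of the input arrays as an (n_combinations, n_arrays) array."""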
N = len(arrs)
out = np.transpose(np.meshgrid(*arrs, indexing='ij'),
np.roll(np.arange(N + 1), -1)).reshape(-1, N)
return out
def numpy_ffill(arr):
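    """Forward-fill NaNs along axis 1 with the last valid value to the left."""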
mask = np.isnan(arr)
idx = np.where(~mask, np.arange(mask.shape[1]), 0)
np.maximum.accumulate(idx, axis=1, out=idx)
out = arr[np.arange(idx.shape[0])[:,None], idx]
return out
def numpy_bfill(arr):
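    """Backward-fill NaNs along axis 1 with the next valid value to the right."""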
mask = np.isnan(arr)
idx = np.where(~mask, np.arange(mask.shape[1]), mask.shape[1] - 1)
idx = np.minimum.accumulate(idx[:, ::-1], axis=1)[:, ::-1]
out = arr[np.arange(idx.shape[0])[:,None], idx]
return out
def temporal_preprocessing(temporal, unique_id, ds,
zfill_cols=[], ffill_cols=[],
original_date_range=True):
"""
Creates a new panel dataframe with balanced temporal data.
Arguments
---------
temporal: Pandas DataFrame
Original data with temporal observations to process
unique_id: str
String index that identifies each unique trajectory in df
ds: str
String index that identifies dates for each trajectory.
zfill_cols: str list
String list with columns to be filled with zeros after balance.
ffill_cols: str list
String list with columns to be filled with forward fill ('ffill')
after balance.
original_date_range: bool
Boolean with option to filter the trajectories to original date range.
If false completely balanced panel is returned.
Returns
-------
balanced_df: Pandas DataFrame
        Preprocessed data, balanced and with NaNs filled.
"""
df = temporal.copy()
UIDS = temporal[unique_id].unique()
DATES = temporal[ds].unique()
# Balance panel (fixed UIDS)
balanced_prod = numpy_balance(UIDS, DATES)
balanced_df = pd.DataFrame(balanced_prod, columns=[unique_id, ds])
balanced_df[ds] = balanced_df[ds].astype(DATES.dtype)
df.set_index([unique_id, ds], inplace=True)
balanced_df.set_index([unique_id, ds], inplace=True)
balanced_df = balanced_df.merge(df, how='left',
left_on=[unique_id, ds],
right_index=True).reset_index()
# FFill and ZFill
for col in zfill_cols:
balanced_df[col] = balanced_df[col].fillna(0)
for col in ffill_cols:
col_values = balanced_df[col].astype('float32').values
col_values = col_values.reshape(len(UIDS), len(DATES))
col_values = numpy_ffill(col_values)
#col_values = numpy_bfill(col_values)
balanced_df[col] = col_values.flatten()
#balanced_df[col] = balanced_df[col].fillna(0)
# Match original date interval through filter
if original_date_range:
date_range_df = temporal.groupby(unique_id).agg({ds: ['min', 'max']})
date_range_df = date_range_df.droplevel(1, axis=1).reset_index()
date_range_df.columns = [unique_id, 'min_ds', 'max_ds']
date_range_df.set_index([unique_id], inplace=True)
balanced_df.set_index([unique_id], inplace=True)
balanced_df = balanced_df.merge(date_range_df, how='left',
left_on=[unique_id],
right_index=True).reset_index()
balanced_df['ds_in_range'] = (balanced_df[ds] >= balanced_df['min_ds']) & \
(balanced_df[ds] <= balanced_df['max_ds'])
balanced_df = balanced_df[balanced_df['ds_in_range']]
balanced_df.drop(['ds_in_range', 'min_ds', 'max_ds'], axis=1, inplace=True)
return balanced_df
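# Illustrative call (hypothetical frame): balance per-series dates and zero-fill gaps.
# temporal = pd.DataFrame({'unique_id': ['a', 'a', 'b'],
#                          'ds': pd.to_datetime(['2020-01-01', '2020-01-03', '2020-01-02']),
#                          'y': [1.0, 2.0, 3.0]})
# balanced = temporal_preprocessing(temporal, 'unique_id', 'ds', zfill_cols=['y'])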
class TSPreprocess:
"""Preprocess time series."""
def __init__(self, filename: str,
filename_output: str,
kind: str,
unique_id_column: str,
                 ds_column: str, y_column: str) -> None:
self.filename = filename
if filename_output is None:
self.filename_output = f'{kind}-preprocessed.csv'
else:
self.filename_output = filename_output
self.kind = kind
self.unique_id_column = unique_id_column
self.ds_column = ds_column
self.y_column = y_column
self.df: pd.DataFrame
self.df = self._read_file()
def _read_file(self) -> pd.DataFrame:
logger.info('Reading file...')
df = pd.read_csv(f'/opt/ml/processing/input/{self.filename}')
logger.info('File read.')
renamer = {self.unique_id_column: 'unique_id',
self.ds_column: 'ds',
self.y_column: 'y'}
df.rename(columns=renamer, inplace=True)
if self.kind == 'balance':
df['ds'] = pd.to_datetime(df['ds'])
return df
def get_one_hot_encoded(self) -> None:
"""One-hot encodes."""
logger.info('One hot encoding...')
df_ohe = one_hot_encoding(self.df, 'unique_id')
logger.info('OHE finished.')
logger.info('Writing file...')
df_ohe.to_csv(f'/opt/ml/processing/output/{self.filename_output}',
index=False)
logger.info('File written...')
def get_balanced_data(self) -> None:
"""Balance data."""
logger.info('Balancing data...')
balanced_data = temporal_preprocessing(self.df, 'unique_id', 'ds',
['y'])
logger.info('Finished.')
logger.info('Writing file...')
balanced_data.to_csv(f'/opt/ml/processing/output/{self.filename_output}',
index=False)
logger.info('File written...')
def preprocess(self) -> None:
if self.kind == 'onehot':
self.get_one_hot_encoded()
elif self.kind == 'balance':
self.get_balanced_data()
else:
raise ValueError(
'`kind` only accepts "onehot" or "balance"'
' as input'
)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, required=True)
parser.add_argument('--kind', type=str, required=True,
help='onehot or balance')
parser.add_argument('--filename-output', type=str, default=None)
parser.add_argument('--unique-id-column', type=str, default='unique_id')
parser.add_argument('--ds-column', type=str, default='ds')
parser.add_argument('--y-column', type=str, default='y')
args = parser.parse_args()
tspreprocess = TSPreprocess(**vars(args))
tspreprocess.preprocess()
|
11558445
|
import json
import os
import platform
import sys
TESTING_BOX = os.path.exists('/etc/canvas/testing')
TESTING = 'test' in sys.argv
STAGING = os.path.exists('/etc/canvas/staging')
DRAWQUEST_ADMIN = os.path.exists('/etc/canvas/drawquest_admin')
DRAWQUEST_SEARCH = os.path.exists('/etc/canvas/drawquest_search')
PRODUCTION = (not STAGING and not TESTING and not TESTING_BOX and
(os.path.exists('/etc/canvas') or DRAWQUEST_ADMIN))
AWS_CREDENTIALS_PATH = '/etc/canvas/aws.json'
AWS_ALT_CREDENTIALS_PATH = os.path.expanduser('~/aws.json')
LOCAL_SANDBOX = platform.system() == 'Darwin'
Config = {
# AWS credentials are filled in by puppet based on the role.
'aws': {'access_key': None, 'secret_key': None},
'facebook': {'app_id': '176245562391022', 'secret': '<KEY>'},
'image_bucket': 'canvas_public_ugc',
'image_fs': ('s3', 'canvas_public_ugc') if PRODUCTION else ('local', '/var/canvas/website/ugc'),
'chunk_fs': ('s3', 'canvas-upload-pieces') if PRODUCTION else ('local', '/var/canvas/website/ugc/upload_pieces'),
'script_bucket': 'canvas-public-plugins',
'script_base_url': 'http://canvas-public-plugins.s3-website-us-east-1.amazonaws.com/' if PRODUCTION else 'http://savnac.com:9000/ugc/local_script_drop/',
'compress_bucket': 'canvas-dynamic-assets',
'default_following_groups': ['funny'],
'featured_groups': ['abstract', 'canvas', 'cute', 'drawing', 'funny', 'gif_bin', 'photography', 'pop_culture', 'video_games'],
'remixable_comments_daves_local': ["q6w", "q7f", "q8y", "q9h", "qa0", "qbj"],
'remixable_comments': ["nep4u", "ne708", "nvtun", "nx8v5", "nc5ae", "nsbyo"],
'additional_whitelisted_groups': ['drawcast', 'cats'],
# All comparisons done against lowercase version of username.
'blocked_usernames': ['moot', 'm00t', 'mootykins', 'm00tykins', 'chrispoole', 'christopherpoole', 'public', 'private', 'admin'],
'blocked_username_fragments': ['canvas', 'administrator', 'drawquest', 'bitch', 'bltch', 'b1tch', 'nigg', 'n1gg', 'sucksdick', 'blowjob', 'penis', 'asshole', 'fuck', 'hitler'],
'autoflag_words': ['fag', 'faggot', 'nigger', 'nigga', 'whore', 'jew', 'chink'],
# Top replies settings in threads view.
'minimum_top_replies': 3,
'maximum_top_replies': 20,
'posts_per_top_reply': 20,
# Feature switches.
# DEPRECATED, use apps.features instead.
'show_login': True,
# Reply boost.
'reply_boost': 1.1,
# If you change this, be sure to update the Jenkins node.
# Fact recording is currently disabled - see canvas/fact.py to reenable
'fact_host': '%s:9999' % ('ip-10-12-99-188.ec2.internal' if PRODUCTION else '127.0.0.1'),
'drawquest_fact_host': '%s:9999' % ('ip-10-34-102-100.ec2.internal' if PRODUCTION else '127.0.0.1'),
# Master.
'redis_host': 'ip-10-84-89-110.ec2.internal' if PRODUCTION else 'localhost',
'redis_slave': 'ip-10-218-7-231.ec2.internal' if PRODUCTION else None,
'memcache_hosts': ['cache_0.example.com:11211', 'cache_1.example.com:11211'] if PRODUCTION else ['127.0.0.1:11211'],
'autoscale_group': 'web-sg-bd3a',
'elb': 'canvas-load-balancer',
'test_bgwork_path': '/var/canvas/website/run/test_bgwork' if not PRODUCTION else '',
}
if PRODUCTION:
Config.update({
'drawquest_redis_host': 'ip-10-185-45-177.ec2.internal', # Massive Blow of Virture # old: Colossal Ring of Lucifer.
'drawquest_redis_slave': 'ip-10-185-195-229.ec2.internal', # Famous Sling of Belar #old: 'ip-10-166-20-27.ec2.internal', # Fierce Whip of Fire.
# old one that ran out of memory: 'drawquest_redis_host': 'ip-10-78-26-92.ec2.internal',
'drawquest_memcache_hosts': [
'cache_0.example.com:11211',
#'cache_1.example.com:11211',
#'cache_2.example.com:11211',
#'cache_3.example.com:11211',
],
'drawquest_image_fs': ('s3', 'drawquest_public_ugc'),
'drawquest_playback_fs': ('s3', 'drawquest-playbacks'),
})
elif STAGING:
Config.update({
'drawquest_redis_host': 'ip-10-32-151-250.ec2.internal',
'drawquest_redis_slave': 'ip-10-80-227-217.ec2.internal',
'drawquest_memcache_hosts': ['drawquest-staging.uifpjv.cfg.use1.cache.amazonaws.com:11211'],
'drawquest_image_fs': ('s3', 'drawquest_staging_ugc'),
'drawquest_playback_fs': ('s3', 'drawquest-staging-playbacks'),
})
else:
Config.update({
'drawquest_redis_host': 'localhost',
'drawquest_redis_slave': None,
'drawquest_memcache_hosts': ['127.0.0.1:11212'],
'drawquest_image_fs': ('local', '/var/canvas/website/drawquest/ugc'),
'drawquest_playback_fs': ('local', '/var/canvas/website/drawquest/ugc/playbacks'),
})
def _load_config(path):
    with open(path) as f:
        Config.update(json.load(f))
assert Config['redis_host'] != Config['redis_slave'], 'sanity check, you probably forgot to update the standby!'
# Load the AWS credentials for the box.
if os.path.exists(AWS_CREDENTIALS_PATH):
_load_config(AWS_CREDENTIALS_PATH)
elif os.path.exists(AWS_ALT_CREDENTIALS_PATH):
_load_config(AWS_ALT_CREDENTIALS_PATH)
# For convenience
aws = (Config['aws']['access_key'], Config['aws']['secret_key'])
|
11558457
|
from django.conf.urls.defaults import *
urlpatterns = patterns('examples.hello.views',
(r'^html/$', 'hello_html'),
(r'^text/$', 'hello_text'),
(r'^write/$', 'hello_write'),
(r'^metadata/$', 'metadata'),
(r'^getdata/$', 'get_data'),
(r'^postdata/$', 'post_data'),
)
|
11558476
|
import numpy as np
import gym
from copy import deepcopy as copy
import tensorflow as tf
from abc import ABC, abstractmethod
import os
def single_elem_support(func):
    """Decorator: unwrap single-element lists/tuples/arrays returned by `func`."""
    container_types = (list, tuple, np.ndarray)
    def wrapper(*args, **kwargs):
        res = func(*args, **kwargs)
        if isinstance(res, container_types) and len(res) == 1:
            return res[0]
        elif isinstance(res[0], container_types) and len(res[0]) == 1:
            return [x[0] for x in res]
        else:
            return res
    return wrapper
class RecState(ABC):
def __init__(self, config, records):
self.config = config
self.records = records
self._init_state = self.records_to_state(records)
self._state = copy(self._init_state)
@staticmethod
def records_to_state(records):
pass
@property
def state(self):
return self._state
@property
@abstractmethod
def user(self):
pass
@property
@abstractmethod
def info(self):
pass
@abstractmethod
def act(self, actions):
pass
@abstractmethod
def to_string(self):
pass
class RecDataBase(object):
    '''
    File-based implementation of a recommendation env's data source.
    Pulls data from a file, preps it for use by the env, and then
    acts as the data provider for each new episode.
    '''
def __init__(self, config, state_cls):
self.config = config
self.sample_list = []
self.state_cls = state_cls
self.is_eval = config.get('is_eval', False)
self.cache_size = config.get('cache_size', 2048)
# sample file cache
self.fp = open(config['sample_file'], 'r')
# self.fp.readline()
@staticmethod
def seed(seed):
np.random.seed(seed)
def sample_cache(self, f, num):
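        """Read `num` lines into the cache, wrapping to the file start (and skipping one line) on EOF."""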
for i in range(num):
tmp = f.readline().rstrip()
if len(tmp) < 1:
f.seek(0, 0)
f.readline()
self.sample_list.append(f.readline().rstrip())
else:
self.sample_list.append(tmp)
def sample(self, batch_size):
if self.is_eval:
assert self.cache_size == batch_size
assert len(self.sample_list) == batch_size
records = self.sample_list[:batch_size]
else:
records = np.random.choice(self.sample_list, batch_size)
samples = self.state_cls(self.config, records)
return samples
def reset(self, reset_file=False):
# self.state_list = []
self.sample_list = []
# self.rawstate_cache(self.fs, 10000)
if reset_file:
self.fp.seek(0, 0)
self.sample_cache(self.fp, self.cache_size)
class RecSimBase(ABC):
""" Implemention of core recommendation simulator"""
def __init__(self, config, state_cls):
self.config = config
self.max_steps = config['max_steps']
self.batch_size = config['batch_size']
model_file = config['model_file']
self.graph = tf.Graph()
with self.graph.as_default():
self.model = self.get_model(config)
            if self.config.get('gpu', False):
                os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
                self.sess = tf.Session(graph=self.graph)
            else:
                # no GPU requested: hide CUDA devices and run on CPU
                os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
                self.sess = tf.Session(graph=self.graph,
                                       config=tf.ConfigProto(device_count={"CPU": 4}))
self.saver = tf.train.Saver()
self.reload_model(model_file)
self._recData = RecDataBase(config, state_cls)
def reset(self, reset_file=False):
self._recData.reset(reset_file)
@abstractmethod
def get_model(self, config):
pass
@abstractmethod
def obs_fn(self, state):
pass
@abstractmethod
def forward(self, model, samples):
pass
def reload_model(self, model_file):
with self.sess.as_default():
with self.graph.as_default():
self.saver.restore(self.sess, model_file)
def seed(self, sd=0):
self._recData.seed(sd)
np.random.seed(sd)
def _step(self, samples, action, **kwargs):
step = kwargs['step']
samples.act(action)
next_state = samples.state
next_obs = self.obs_fn(next_state)
reward = self.forward(self.model, samples)
next_info = samples.info
if step < self.max_steps - 1:
done = [0] * self.batch_size
else:
done = [1] * self.batch_size
return next_obs, reward, done, next_info
def sample(self, batch_size):
samples = self._recData.sample(batch_size)
obs = self.obs_fn(samples.state)
return samples, obs
class RecEnvBase(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, recsim: RecSimBase):
self.config = recsim.config
self.batch_size = self.config['batch_size']
self.cur_step = 0
self.sim = recsim
self.sim.reset()
self.samples, self.obs = self.sim.sample(self.batch_size)
if self.config.get("rawstate_as_obs", False):
category_size = len(self.obs[0]['category_feature'])
dense_size = len(self.obs[0]['dense_feature'])
sequence_size = np.array(self.obs[0]['sequence_feature']).shape
features = {
"category_feature": gym.spaces.Box(-1000000.0, 1000000.0, shape=(category_size,)),
"dense_feature": gym.spaces.Box(-1000000.0, 1000000.0, shape=(dense_size,)),
"sequence_feature": gym.spaces.Box(-1000000.0, 1000000.0, shape=sequence_size),
}
if self.config.get("support_rllib_mask", False):
action_feature_size = len(self.obs[0]['action_mask'])
self.observation_space = gym.spaces.Dict({
"action_mask": gym.spaces.Box(0, 1, shape=(action_feature_size,)),
**features
})
else:
self.observation_space = gym.spaces.Dict(features)
else:
if self.config.get("support_rllib_mask", False):
action_feature_size = len(self.obs[0]['action_mask'])
self.observation_space = gym.spaces.Dict({
"action_mask": gym.spaces.Box(0, 1, shape=(action_feature_size,)),
"obs": gym.spaces.Box(-1000.0, 1000.0, shape=(len(self.obs[0]["obs"]),))
})
else:
self.observation_space = gym.spaces.Box(-1000.0, 1000.0, shape=(len(self.obs[0]),))
if self.config.get("support_conti_env", False):
self.action_space = gym.spaces.Box(-1, 1, shape=(self.config['action_emb_size'],))
else:
self.action_space = gym.spaces.Discrete(self.config['action_size'])
# if self.config.get("support_rllib_mask", False):
# action_feature_size = len(self.obs[0]['action_mask'])
# # avail_actions_size = len(self.obs[0]['avail_actions'][0])
# # self.action_space = gym.spaces.Discrete(self.config['action_size'])
# self.observation_space = gym.spaces.Dict({
# "action_mask": gym.spaces.Box(0, 1, shape=(action_feature_size,)),
# "obs": self.observation_space,
# })
# elif self.config.get("support_d3rl_mask", False):
# self.action_space = gym.spaces.Discrete(self.config['action_size'])
# else:
# self.action_space = gym.spaces.Discrete(self.config['action_size'])
self.reset()
def seed(self, sd=0):
self.sim.seed(sd)
np.random.seed(sd)
@property
@single_elem_support
def state(self):
return self.obs
@property
@single_elem_support
def user_id(self):
return self.samples.user
@property
@single_elem_support
def offline_action(self):
return self.samples.offline_action
@property
@single_elem_support
def offline_reward(self):
return self.samples.offline_reward
@single_elem_support
def step(self, action):
if not isinstance(action, (list, np.ndarray)):
action = [action]
obs, reward, done, info = \
self.sim._step(self.samples, action, step=self.cur_step)
self.cur_step += 1
return obs, reward, done, info
def reset(self, reset_file=False):
self.cur_step = 0
self.sim.reset(reset_file)
self.samples, self.obs = self.sim.sample(self.batch_size)
return self.state
def render(self, mode='human', close=False):
print('Current State:', '\n')
print(self.samples.to_string())
|
11558528
|
from .PackagerBase import PackagerBase
class PluginPackager(PackagerBase):
'''
Provides functionality for packaging an Unreal plugin.
'''
    def __init__(self, root, version, archive='{name}-{version}-{platform}', strip_debug=False, strip_manifests=False, stage=None, verbose=True):
'''
Creates a new PluginPackager with the specified configuration.
See `PackagerBase.__init__()` for details on the input parameters.
'''
        super().__init__(root, version, archive, strip_debug, strip_manifests, stage if stage is not None else [], verbose)
# "Private" methods
def _extension(self):
'''
Returns the file extension for the descriptor files supported by this packager type
'''
return '.uplugin'
|
11558536
|
from django.urls import path, include
from . import views
urlpatterns = [
path('login/', views.LoginView.as_view(), name='login'),
path('logout/', views.logout_view, name='logout'),
path('', include('django.contrib.auth.urls')),
]
|
11558600
|
import os
import vim
import time
import json
import ghost_log
import single_server
import vim_websocket_handler
_is_updating_from_remote = False
def start_server():
if not single_server.start_server():
return
for _ in range(3):
time.sleep(.1)
vim.command("let g:channel = ch_open('localhost:4002')")
if vim.eval('ch_status(g:channel)') == "open":
ghost_log.p('GhostText for vim started')
return
        ghost_log.p('could not open channel to localhost:4002, retrying...')
    ghost_log.p('failed to start GhostText for vim')
def stop_server():
if (int(vim.eval('exists("g:channel")')) == 1 and
vim.eval('ch_status(g:channel)') == "open"):
ghost_log.p('closing channel')
vim.command('call ch_close(g:channel)')
ghost_log.p('stopping server')
single_server.stop_server()
def text_changed_from_vim():
name = os.path.basename(vim.current.buffer.name)
if not name.startswith("GhostText"):
return
if _is_updating_from_remote:
return
ghost_log.p('text changed from vim')
text = '\n'.join(vim.current.buffer)
# vim.command()
selections = [{'start': 1, 'end': 1}]
json_dict = json.dumps(
{
'buf_name': name,
'text': text,
'selections': selections
}
)
if (int(vim.eval('exists("g:channel")')) == 1 and
vim.eval('ch_status(g:channel)') == "open"):
cmd = ':call ch_sendraw(g:channel,{})'.format(
json.dumps(json_dict))
vim.command(cmd)
def update_text(name, lines, selections):
    global _is_updating_from_remote
    _is_updating_from_remote = True
if int(vim.eval('buffer_exists("{}")'.format(name))) == 1:
vim.command('buf ' + name)
else:
vim.command('enew')
vim.command('file ' + name)
# todo : if current buffer is not the `name`d buffer, switch it
vim.command(':b ' + name)
mode = vim.eval('mode()')
if not mode == 'n':
ghost_log.p('mode', mode)
vim.command('call feedkeys("\<esc>")')
vim.command(":redraw")
vim.current.buffer[:] = lines.split('\n')
vim.command(":redraw")
vim.command(":call cursor({})".format(selections))
_is_updating_from_remote = False
|
11558609
|
import emcee
import numpy as np
from robo.acquisition_functions.information_gain import InformationGain
class InformationGainPerUnitCost(InformationGain):
def __init__(self, model, cost_model,
lower, upper,
is_env_variable,
sampling_acquisition=None,
n_representer=50):
"""
Information gain per unit cost as described in Swersky et al. [1] which
        computes the information gain of a configuration divided by its cost.
This implementation slightly differs from the implementation of
Swersky et al. as it additionally adds the optimization overhead to
the cost. You can simply set the optimization overhead to 0 to obtain
the original formulation.
[1] <NAME>., <NAME>., and <NAME>.
Multi-task Bayesian optimization.
In Proc. of NIPS 13, 2013.
Parameters
----------
model : Model object
Models the objective function. The model has to be a
Gaussian process.
cost_model : model
Models the cost function. The model has to be a Gaussian Process.
        lower : (D) numpy array
            Specifies the lower bound of the input space. Each entry
            corresponds to one dimension.
        upper : (D) numpy array
            Specifies the upper bound of the input space. Each entry
            corresponds to one dimension.
        is_env_variable : (D) numpy array
            Specifies which input dimension is an environmental variable. If
            the i-th input is an environmental variable, then the i-th entry
            has to be 1 and 0 otherwise.
n_representer : int, optional
The number of representer points to discretize the input space and
to compute pmin.
"""
self.cost_model = cost_model
self.n_dims = lower.shape[0]
self.is_env = is_env_variable
super(InformationGainPerUnitCost, self).__init__(model,
lower,
upper,
sampling_acquisition=sampling_acquisition,
Nb=n_representer)
def update(self, model, cost_model, overhead=None):
self.cost_model = cost_model
if overhead is None:
self.overhead = 0
else:
self.overhead = overhead
super(InformationGainPerUnitCost, self).update(model)
def compute(self, X, derivative=False):
"""
Computes the acquisition_functions value for a single point.
Parameters
----------
X : (1, D) numpy array
The input point for which the acquisition_functions functions is computed.
derivative : bool, optional
            If True, the derivatives with respect to X are computed as
            well.
Returns
-------
acquisition_value: numpy array
The acquisition_functions value computed for X.
grad : numpy array
The computed gradient of the acquisition_functions function at X. Only
returned if derivative==True
"""
if len(X.shape) == 1:
X = X[np.newaxis, :]
# Predict the log costs for this configuration
log_cost = self.cost_model.predict(X)[0]
if derivative:
raise "Not implemented"
else:
dh = super(InformationGainPerUnitCost, self).compute(X,
derivative=derivative)
# We model the log cost, but we compute
# the information gain per unit cost
# Add the cost it took to pick the last configuration
cost = np.exp(log_cost)
acquisition_value = dh / (cost + self.overhead)
return acquisition_value
def sampling_acquisition_wrapper(self, x):
# Check if sample point is inside the configuration space
lower = self.lower[np.where(self.is_env == 0)]
upper = self.upper[np.where(self.is_env == 0)]
if np.any(x < lower) or np.any(x > upper):
return -np.inf
# Project point to subspace
proj_x = np.concatenate((x, self.upper[self.is_env == 1]))
return self.sampling_acquisition(np.array([proj_x]))[0]
def sample_representer_points(self):
# Sample representer points only in the
# configuration space by setting all environmental
# variables to 1
D = np.where(self.is_env == 0)[0].shape[0]
lower = self.lower[np.where(self.is_env == 0)]
upper = self.upper[np.where(self.is_env == 0)]
self.sampling_acquisition.update(self.model)
for i in range(5):
restarts = np.random.uniform(low=lower,
high=upper,
size=(self.Nb, D))
sampler = emcee.EnsembleSampler(self.Nb, D,
self.sampling_acquisition_wrapper)
self.zb, self.lmb, _ = sampler.run_mcmc(restarts, 50)
if not np.any(np.isinf(self.lmb)):
break
else:
print("Infinity")
if np.any(np.isinf(self.lmb)):
raise ValueError("Could not sample valid representer points! LogEI is -infinity")
if len(self.zb.shape) == 1:
self.zb = self.zb[:, None]
if len(self.lmb.shape) == 1:
self.lmb = self.lmb[:, None]
        # Project representer points back to the full space by appending the
        # environmental dimensions fixed at their upper bounds
        proj = np.ones([self.zb.shape[0],
                        self.upper[self.is_env == 1].shape[0]])
        proj *= self.upper[self.is_env == 1]
self.zb = np.concatenate((self.zb, proj), axis=1)
|
11558623
|
import argparse
import asyncio
import json
import logging
import os
from .bot import Bot
from .entity_manager import EntityManager
from .db import MongoDBConnector
from .discord import Client
from .sites import AtCoder, CodeChef, Codeforces, SiteContainer
logger = logging.getLogger(__name__)
DISCORD_TOKEN = os.environ['DISCORD_TOKEN']
MONGODB_SRV = os.environ['MONGODB_SRV']
with open('./bot/config.json') as file:
CONFIG = json.load(file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--log', default='WARNING')
args = parser.parse_args()
numeric_level = getattr(logging, args.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError(f'Invalid log level: {args.log}')
logging.basicConfig(format='{levelname}:{name}:{message}', style='{', level=numeric_level)
discord_client = Client(DISCORD_TOKEN, name=CONFIG['name'], activity_name=CONFIG['activity'])
mongodb_connector = MongoDBConnector(MONGODB_SRV, CONFIG['db_name'])
entity_manager = EntityManager(mongodb_connector)
sites = [
AtCoder(**CONFIG['at_config']),
CodeChef(**CONFIG['cc_config']),
Codeforces(**CONFIG['cf_config']),
]
site_container = SiteContainer(sites=sites)
bot = Bot(CONFIG['name'], discord_client, site_container, entity_manager,
triggers=CONFIG['triggers'], allowed_channels=CONFIG['channels'])
try:
asyncio.run(bot.run())
except Exception:
logger.exception('Grinding halt')
if __name__ == '__main__':
main()
|
11558711
|
import requests
import time
import re
from jinja2 import Environment, select_autoescape
from tqdm import trange
from bs4 import BeautifulSoup
DOMAIN = "https://www.resetera.com"
BASE_URL = DOMAIN + "/forums/video-games.7/page-{}"
HEADERS = {
'User-Agent': 'ResetERA user minimaxir',
}
MAX_PAGES = 1000
# https://stackoverflow.com/a/12825283
def regex_replace(s, find, replace):
return re.sub(find, replace, s)
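# e.g. regex_replace('see https://example.com now', r'https?://\S+', '') -> 'see  now'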
env = Environment()
env.filters['regex_replace'] = regex_replace
template_str = """~!~{{ title }}
{% for post in posts[:10] -%}
{% if post.div.article.text | regex_replace('\n\n+', '\n') | length > 4 %}{{ post['data-author'] }}: {{ post.div.article.text[:2000] | regex_replace('https?://\S+', '') | regex_replace('\n\n+', '\n') | trim }}
-----{% endif %}
{% endfor %}<|endoftext|>
"""
template = env.from_string(template_str)
def process_thread(thread_url, template):
req = requests.get(thread_url, headers=HEADERS)
soup = BeautifulSoup(req.text, features="html5lib")
# Remove special embeds
bbcodes = soup.find_all("div", {"class": "bbCodeBlock"})
for bbcode in bbcodes:
bbcode.decompose()
try:
title = soup.find("h1", {"class": "p-title-value"}).text
if '|' in title:
return None
posts = soup.find_all("article", {"class": "message"})
text = template.render(title=title, posts=posts)
    except Exception:
        return None
return text
with open('resetera_videogames_1000.txt', 'w') as f:
for page in trange(1, MAX_PAGES+1):
url = BASE_URL.format(page)
req = requests.get(url, headers=HEADERS)
soup = BeautifulSoup(req.text, features="html5lib")
thread_urls = [DOMAIN + x.a['href'] for x in soup.find_all(
"div", {"class": "structItem-title"})]
for thread_url in thread_urls:
thread = process_thread(thread_url, template)
if thread is not None:
f.write(thread + "\n")
time.sleep(1) # to avoid overloading server
|
11558737
|
import copy
import logging
from bson.objectid import ObjectId
from flask import url_for
from flask_wtf import FlaskForm
from wtforms import SelectField, StringField
from scout.constants import CHROMOSOMES_38, EXPORT_HEADER
from scout.server.blueprints.variants.controllers import (
compounds_need_updating,
gene_panel_choices,
match_gene_txs_variant_txs,
populate_chrom_choices,
sv_variants,
update_form_hgnc_symbols,
variant_export_lines,
variants,
variants_export_header,
)
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
def test_compounds_need_updating():
"""Test function that checks if variant compound objects need updating"""
# GIVEN a list of compounds for a variant missing the "not loaded key"
compounds = [{"variant": "aaa"}, {"variant": "bbb"}]
# THEN the function that checks if the compounds need updating should return True
assert compounds_need_updating(compounds, []) is True
# GIVEN a list of dismissed variants for a case
dismissed_variants = ["aaa", "ddd"]
# GIVEN a list of compounds with a compound missing dismissed status
# THEN the function that checks if the compounds need updating should return True
assert compounds_need_updating(compounds, dismissed_variants) is True
# GIVEN a list of compounds with a dismissed one that is not up-to-date (not in list of case dismissed variants)
compounds = [{"variant": "ccc", "is_dismissed": True}, {"variant": "bbb"}]
# THEN the function that checks if the compounds need updating should return True
assert compounds_need_updating(compounds, dismissed_variants) is True
# GIVEN an up-to-date list of compounds
compounds = [
{"variant": "aaa", "is_dismissed": True, "not_loaded": False},
{"variant": "bbb", "not_loaded": True},
]
# THEN the function that checks if the compounds need updating should return False
assert compounds_need_updating(compounds, dismissed_variants) is False
def test_populate_chrom_choices(app):
"""Test the function that populates the choices of the chromosome select in variants filters"""
# GIVEN a variants filter form
with app.test_client() as client:
class TestForm(FlaskForm):
chrom = SelectField("Chromosome", choices=[], default="")
form = TestForm()
# IF the case build is 38
case = {"genome_build": "38"}
# THEN chromosome choices should correspond to genome build 38 chromosomes
populate_chrom_choices(form, case)
choices = form.chrom.choices
for nr, choice in enumerate(choices[1:]): # first choice is not a chromosome, but all chroms
assert choice[0] == CHROMOSOMES_38[nr]
def test_gene_panel_choices(app, institute_obj, case_obj):
"""test controller function that populates gene panel filter select"""
# GIVEN a case with a gene panel
case_panel = {
"panel_name": "case_panel",
"version": 1,
"display_name": "Case panel",
"nr_genes": 3,
}
case_obj["panels"] = [case_panel]
# AND an institute with a custom gene panel:
institute_obj["gene_panels"] = {"institute_panel_name": "Institute Panel display name"}
# WHEN the functions creates the option for the filters select
panel_options = gene_panel_choices(store, institute_obj, case_obj)
# THEN case-specific panel should be represented
case_panel_option = (case_panel["panel_name"], case_panel["display_name"])
assert case_panel_option in panel_options
# HPO panel should also be represented
assert ("hpo", "HPO") in panel_options
# And institute-specific panel should be in the choices as well
assert ("institute_panel_name", "Institute Panel display name") in panel_options
def test_variants_assessment_shared_with_group(
mocker, real_variant_database, institute_obj, case_obj
):
mocker.patch(
"scout.server.blueprints.variants.controllers.user_institutes",
return_value=[{"_id": "cust000"}],
)
# GIVEN a db with variants,
adapter = real_variant_database
case_id = case_obj["_id"]
other_case_id = "other_" + case_id
other_case_obj = copy.deepcopy(case_obj)
other_case_obj["_id"] = other_case_id
## WHEN inserting an object with a group id
group_id = ObjectId("101010101010101010101010")
other_case_obj["group"] = [group_id]
adapter.case_collection.insert_one(other_case_obj)
# WHEN setting the same group id for the original case
adapter.case_collection.find_one_and_update({"_id": case_id}, {"$set": {"group": [group_id]}})
# GIVEN a clinical variant from one case
variant = adapter.variant_collection.find_one({"case_id": case_id, "variant_type": "clinical"})
# GIVEN a copy of the variant for the other case
other_variant_obj = copy.deepcopy(variant)
other_variant_obj["case_id"] = other_case_id
other_variant_obj["_id"] = "another_variant"
adapter.variant_collection.insert_one(other_variant_obj)
# WHEN updating an assessment on the same first case variant
adapter.variant_collection.find_one_and_update(
{"_id": variant["_id"]}, {"$set": {"acmg_classification": 4}}
)
# WHEN retrieving assessments for the variant from the other case
variants_query = {"variant_type": "clinical"}
variants_query_res = adapter.variants(
other_case_id, query=variants_query, category=variant["category"]
)
res = variants(adapter, institute_obj, other_case_obj, variants_query_res, 1000)
res_variants = res["variants"]
# THEN a group assessment is recalled on the other case,
# since the variant in the first case had an annotation
assert any(variant.get("group_assessments") for variant in res_variants)
def test_variants_research_no_shadow_clinical_assessments(
mocker, real_variant_database, institute_obj, case_obj
):
mocker.patch(
"scout.server.blueprints.variants.controllers.user_institutes",
return_value=[{"_id": "cust000"}],
)
# GIVEN a db with variants,
adapter = real_variant_database
case_id = case_obj["_id"]
# GIVEN a clinical variant from one case
variant_clinical = adapter.variant_collection.find_one(
{"case_id": case_id, "variant_type": "clinical"}
)
# GIVEN a copy of that variant marked research
variant_research = copy.deepcopy(variant_clinical)
variant_research["_id"] = "research_version"
variant_research["variant_type"] = "research"
adapter.variant_collection.insert_one(variant_research)
# WHEN filtering for that variant in research
variants_query = {"variant_type": "research"}
variants_query_res = adapter.variants(
case_obj["_id"], query=variants_query, category=variant_clinical["category"]
)
    # NOTE: in tests the list length is used; in live code count_documents(query)
    # is called.
number_variants = len(list(variants_query_res.clone()))
res = variants(adapter, institute_obj, case_obj, variants_query_res, number_variants)
res_variants = res["variants"]
LOG.debug("Variants: {}".format(res_variants))
# THEN it is returned
assert any([variant["_id"] == variant_research["_id"] for variant in res_variants])
    # THEN no previous annotations are reported back for the research case.
assert not any([variant.get("clinical_assessments") for variant in res_variants])
def test_variants_research_shadow_clinical_assessments(
mocker, real_variant_database, institute_obj, case_obj
):
mocker.patch(
"scout.server.blueprints.variants.controllers.user_institutes",
return_value=[{"_id": "cust000"}],
)
# GIVEN a db with variants,
adapter = real_variant_database
case_id = case_obj["_id"]
# GIVEN a clinical variant from one case
variant_clinical = adapter.variant_collection.find_one(
{"case_id": case_id, "variant_type": "clinical"}
)
# GIVEN a copy of that variant marked research
variant_research = copy.deepcopy(variant_clinical)
variant_research["_id"] = "research_version"
variant_research["variant_type"] = "research"
adapter.variant_collection.insert_one(variant_research)
# WHEN updating the manual assessments of the clinical variant
adapter.variant_collection.update_one(
{"_id": variant_clinical["_id"]},
{
"$set": {
"manual_rank": 2,
"mosaic_tags": ["1"],
"dismiss_variant": ["2", "3"],
"acmg_classification": 0,
}
},
)
# WHEN filtering for that variant in research
variants_query = {"variant_type": "research"}
variants_query_res = adapter.variants(
case_obj["_id"], query=variants_query, category=variant_clinical["category"]
)
    # NOTE: in tests the list length is used; in live code count_documents(query)
    # is called.
number_variants = len(list(variants_query_res.clone()))
res = variants(adapter, institute_obj, case_obj, variants_query_res, number_variants)
res_variants = res["variants"]
# THEN it is returned
assert any([variant["_id"] == variant_research["_id"] for variant in res_variants])
    # THEN previous annotations are reported back for the research case.
assert any([variant.get("clinical_assessments") for variant in res_variants])
def test_sv_variants_research_shadow_clinical_assessments(
mocker, real_variant_database, institute_obj, case_obj
):
mocker.patch(
"scout.server.blueprints.variants.controllers.user_institutes",
return_value=[{"_id": "cust000"}],
)
# GIVEN a db with variants,
adapter = real_variant_database
case_id = case_obj["_id"]
# GIVEN a clinical variant from one case
variant_clinical = adapter.variant_collection.find_one(
{"case_id": case_id, "variant_type": "clinical"}
)
# GIVEN the variant is an SV
adapter.variant_collection.update_one(
{"_id": variant_clinical["_id"]},
{"$set": {"category": "sv", "sub_category": "dup"}},
)
# GIVEN a copy of that variant marked research
variant_research = copy.deepcopy(variant_clinical)
variant_research["_id"] = "research_version"
variant_research["variant_type"] = "research"
variant_research["category"] = "sv"
variant_research["sub_category"]: "dup"
adapter.variant_collection.insert_one(variant_research)
# WHEN updating the manual assessments of the clinical variant
adapter.variant_collection.update_one(
{"_id": variant_clinical["_id"]},
{
"$set": {
"manual_rank": 2,
"mosaic_tags": ["1"],
"dismiss_variant": ["2", "3"],
}
},
)
# WHEN filtering for that variant in research
variants_query = {"variant_type": "research"}
variants_query_res = adapter.variants(case_obj["_id"], query=variants_query, category="sv")
assert variants_query_res
    # NOTE: in tests the list length is used; in live code count_documents(query)
    # is called.
number_variants = len(list(variants_query_res.clone()))
res = sv_variants(adapter, institute_obj, case_obj, variants_query_res, number_variants)
res_variants = res["variants"]
LOG.debug("Variants: {}".format(res_variants))
# THEN it is returned
assert any([variant["_id"] == variant_research["_id"] for variant in res_variants])
    # THEN previous annotations are reported back for the research case.
assert any([variant.get("clinical_assessments") for variant in res_variants])
def test_match_gene_txs_variant_txs():
"""Test function matching gene and variant transcripts to export variants to file"""
variant_gene = {
"hgnc_id": 17284,
"transcripts": [
{
"transcript_id": "ENST00000357628", # canonical
"coding_sequence_name": "c.903G>T",
"protein_sequence_name": "p.Gln301His",
"is_canonical": True,
},
{
"transcript_id": "ENST00000393329", # primary
"coding_sequence_name": "c.510G>T",
"protein_sequence_name": "p.Gln170His",
},
],
}
hgnc_gene = {
"hgnc_id": 17284,
"primary_transcripts": ["NM_001042594"],
"transcripts": [
{
"ensembl_transcript_id": "ENST00000357628", # canonical
"refseq_identifiers": ["NM_015450"],
"refseq_id": "NM_015450",
},
{
"ensembl_transcript_id": "ENST00000393329", # primary
"is_primary": True,
"refseq_identifiers": ["NM_001042594"],
"refseq_id": "NM_001042594",
},
],
}
canonical_txs, primary_txs = match_gene_txs_variant_txs(variant_gene, hgnc_gene)
assert canonical_txs == ["NM_015450/c.903G>T/p.Gln301His"]
assert primary_txs == ["NM_001042594/c.510G>T/p.Gln170His"]
def test_variant_csv_export(real_variant_database, case_obj):
adapter = real_variant_database
case_id = case_obj["_id"]
# Given a database with variants from a case
snv_variants = adapter.variant_collection.find({"case_id": case_id, "category": "snv"})
# Given 5 variants to be exported
variants_to_export = []
for variant in snv_variants.limit(5):
assert type(variant) is dict
variants_to_export.append(variant)
n_vars = len(variants_to_export)
assert n_vars == 5
# Collect export header from variants controller
export_header = variants_export_header(case_obj)
# Assert that exported document has n fields:
# n = (EXPORT_HEADER items in variants_export.py) + (3 * number of individuals analysed for the case)
assert len(export_header) == len(EXPORT_HEADER) + 3 * len(case_obj["individuals"])
# Given the lines of the document to be exported
export_lines = variant_export_lines(adapter, case_obj, variants_to_export)
# Assert that all five variants are going to be exported to CSV
assert len(export_lines) == 5
# Assert that all of 5 variants contain the fields specified by the document header
for export_line in export_lines:
export_cols = export_line.split(",")
assert len(export_cols) == len(export_header)
def test_update_form_hgnc_symbols_valid_gene_symbol(app, case_obj):
"""Test controller that populates HGNC symbols form filter in variants page. Provide valid gene symbol"""
# GIVEN a case analysed with a gene panel
test_panel = store.panel_collection.find_one()
case_obj["panels"] = [{"panel_id": test_panel["_id"]}]
# GIVEN a variants filter form
class TestForm(FlaskForm):
hgnc_symbols = StringField()
data = StringField()
form = TestForm()
# GIVEN a user trying to filter research variants using a valid gene symbol
form.hgnc_symbols.data = ["POT1"]
form.data = {"gene_panels": [], "variant_type": "research"}
updated_form = update_form_hgnc_symbols(store, case_obj, form)
# Form should be updated correctly
assert form.hgnc_symbols.data == ["POT1"]
def test_update_form_hgnc_symbols_valid_gene_id(app, case_obj):
"""Test controller that populates HGNC symbols form filter in variants page. Provide HGNC ID"""
# GIVEN a case analysed with a gene panel
test_panel = store.panel_collection.find_one()
case_obj["panels"] = [{"panel_id": test_panel["_id"]}]
# GIVEN a variants filter form
class TestForm(FlaskForm):
hgnc_symbols = StringField()
data = StringField()
form = TestForm()
# GIVEN a user trying to filter clinical variants using a valid gene ID
form.hgnc_symbols.data = ["17284"]
form.data = {"gene_panels": [], "variant_type": "clinical"}
updated_form = update_form_hgnc_symbols(store, case_obj, form)
# Form should be updated correctly
assert form.hgnc_symbols.data == ["POT1"]
|
11558757
|
d = [{"a":2, "b":3}, {"c":2, "d":3}, {"e":2, "f":3}]
i = 2
vct = [10, 20, 30, 40]
vct[13] = 40
vct[1:i+1] = 40
i = 20
def test(k, v):
for u in range(0,k + 2,1):
if u+3==5:
u += 2*v
print("U==2:", u)
else:
u -= 1-v
print("U:", u)
while (v > 10):
print(v)
v -= 2
println(vct)
k = 0
for i in d[k+1]:
print(i)
test(i, 100)
|
11558789
|
import gym
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Dense, Flatten, Lambda
class DQNNetwork(Model):
"""
Class for DQN model architecture.
"""
def __init__(self, num_actions: int, agent_history_length: int):
super(DQNNetwork, self).__init__()
self.normalize = Lambda(lambda x: x / 255.0)
        self.conv1 = Conv2D(filters=32, kernel_size=8, strides=4, kernel_initializer=tf.keras.initializers.VarianceScaling(scale=2.0), activation="relu", input_shape=(84, 84, agent_history_length))
self.conv2 = Conv2D(filters=64, kernel_size=4, strides=2, kernel_initializer=tf.keras.initializers.VarianceScaling(scale=2.0), activation="relu")
self.conv3 = Conv2D(filters=64, kernel_size=3, strides=1, kernel_initializer=tf.keras.initializers.VarianceScaling(scale=2.0), activation="relu")
self.flatten = Flatten()
self.dense1 = Dense(512, kernel_initializer=tf.keras.initializers.VarianceScaling(scale=2.0), activation='relu')
self.dense2 = Dense(num_actions, kernel_initializer=tf.keras.initializers.VarianceScaling(scale=2.0), activation="linear")
@tf.function
def call(self, x):
normalized = self.normalize(x)
h1 = self.conv1(normalized)
h2 = self.conv2(h1)
h3 = self.conv3(h2)
h4 = self.flatten(h3)
h5 = self.dense1(h4)
out = self.dense2(h5)
return out
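# Minimal smoke test (a sketch, assuming TF 2.x eager execution): build the
# network and run one forward pass on a random batch of 84x84 frame stacks.
if __name__ == "__main__":
    net = DQNNetwork(num_actions=4, agent_history_length=4)
    dummy = tf.random.uniform((2, 84, 84, 4), maxval=255.0)
    q_values = net(dummy)
    print(q_values.shape)  # expected: (2, 4)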
|
11558791
|
import os
import shutil
from datetime import datetime
from tempfile import TemporaryDirectory
from unittest import TestCase
from hypothesis.strategies import (
dates, datetimes, dictionaries, fixed_dictionaries, lists, text
)
from hypothesis import assume, given
from stl.db import ARCHIVE_DT_FORMAT
from stl.db import Database
class DatabaseTestCase(TestCase):
def setUp(self):
self.temp_dir = TemporaryDirectory()
self.db = Database(self.temp_dir.name)
def tearDown(self):
self.temp_dir.cleanup()
def _check_dt_equal(self, dt1, dt2, seconds=False):
self.assertEqual(dt1.year, dt2.year)
self.assertEqual(dt1.month, dt2.month)
self.assertEqual(dt1.day, dt2.day)
self.assertEqual(dt1.hour, dt2.hour)
self.assertEqual(dt1.minute, dt2.minute)
if seconds:
self.assertEqual(dt1.second, dt2.second)
@given(datetimes())
def test_get_path_with_create(self, dt):
file_path = self.db.get_path(dt.year, dt.month, create=True)
dir_path = os.path.dirname(file_path)
self.assertTrue(os.path.exists(dir_path))
@given(datetimes(), text())
def test_add_and_get_current(self, dt, t):
self.db.add_current(dt, t)
entry = self.db.get_current()
self._check_dt_equal(entry['stamp'], dt, seconds=True)
self.assertEqual(entry['task'], self.db._sanitise_text(t))
path = os.path.join(self.temp_dir.name, 'current')
self.assertTrue(os.path.exists(path))
@given(datetimes(), text())
def test_get_current_when_none(self, dt, t):
self.assertIsNone(self.db.get_current())
self.db.add_current(dt, t)
res1 = self.db.get_current()
res2 = self.db.get_current(delete=True)
self.assertEqual(res1, res2)
self.assertIsNone(self.db.get_current())
@given(datetimes(), datetimes(), text())
def test_add_complete(self, dt1, dt2, t):
self.db.add_complete(dt1, dt2, t)
logs = self.db.get_month(dt1.year, dt1.month)
self.assertEqual(len(logs), 1)
self._check_dt_equal(logs[0]['start'], dt1)
self._check_dt_equal(logs[0]['stop'], dt2)
self.assertEqual(logs[0]['task'], self.db._sanitise_text(t))
path = os.path.join(self.temp_dir.name,
str(dt1.year).zfill(4),
str(dt1.month).zfill(2))
self.assertTrue(os.path.exists(path))
os.remove(path)
@given(lists(fixed_dictionaries({
'start': datetimes().map(lambda d: d.replace(year=2000)),
'stop': datetimes().map(lambda d: d.replace(year=2000)),
'task': text()})))
def test_add_complete_with_sort(self, li):
assume(len(li) == len(set([ # avoid dts that can be sorted either way
d['start'].strftime(ARCHIVE_DT_FORMAT) for d in li])))
for d in li:
self.db.add_complete(d['start'],
d['stop'],
d['task'],
append=False)
for month in range(1, 13):
month_li = list(filter(lambda d: d['start'].month == month, li))
month_li = list(sorted(month_li, key=lambda d: d['start']))
logs = self.db.get_month(2000, month)
self.assertEqual(len(logs), len(month_li))
for i in range(0, len(logs)):
self._check_dt_equal(logs[i]['start'], month_li[i]['start'])
self._check_dt_equal(logs[i]['stop'], month_li[i]['stop'])
self.assertEqual(logs[i]['task'],
self.db._sanitise_text(month_li[i]['task']))
year_dir = os.path.join(self.temp_dir.name, '2000')
if os.path.exists(year_dir):
shutil.rmtree(year_dir)
@given(lists(fixed_dictionaries({
'start': datetimes().map(lambda d: d.replace(year=2000)),
'stop': datetimes().map(lambda d: d.replace(year=2000)),
'task': text()})))
def test_get_month(self, li):
li = list(sorted(li, key=lambda d: d['start']))
for month in range(1, 13):
self.assertEqual(self.db.get_month(2000, month), [])
for d in li:
self.db.add_complete(d['start'], d['stop'], d['task'])
for month in range(1, 13):
li_ = list(filter(lambda d: d['start'].month == month, li))
entries = self.db.get_month(2000, month)
self.assertEqual(len(entries), len(li_))
for i, entry in enumerate(entries):
self._check_dt_equal(entry['start'], li_[i]['start'])
self._check_dt_equal(entry['stop'], li_[i]['stop'])
self.assertEqual(entry['task'],
self.db._sanitise_text(li_[i]['task']))
year_dir = os.path.join(self.temp_dir.name, '2000')
if os.path.exists(year_dir):
shutil.rmtree(year_dir)
@given(lists(fixed_dictionaries({
'start': datetimes().map(lambda d: d.replace(year=2000)),
'stop': datetimes().map(lambda d: d.replace(year=2000)),
'task': text()})))
def test_get_year(self, li):
li = list(sorted(li, key=lambda d: d['start']))
self.assertEqual(self.db.get_year(2000), [])
for d in li:
self.db.add_complete(d['start'], d['stop'], d['task'])
entries = self.db.get_year(2000)
self.assertEqual(len(entries), len(li))
for i, entry in enumerate(entries):
self._check_dt_equal(entry['start'], li[i]['start'])
self._check_dt_equal(entry['stop'], li[i]['stop'])
self.assertEqual(entry['task'],
self.db._sanitise_text(li[i]['task']))
year_dir = os.path.join(self.temp_dir.name, '2000')
if os.path.exists(year_dir):
shutil.rmtree(year_dir)
@given(lists(fixed_dictionaries({ # min and max year set for gen speed
'start': datetimes(min_value=datetime(2000, 1, 1),
max_value=datetime(2100, 1, 1)),
'stop': datetimes(min_value=datetime(2000, 1, 1),
max_value=datetime(2100, 1, 1)),
'task': text()}), min_size=1))
def test_get_span(self, li):
li = list(sorted(li, key=lambda d: d['start']))
first = li[0]['start'].date()
last = li[-1]['start'].date()
for d in li:
self.db.add_complete(d['start'], d['stop'], d['task'])
span = self.db.get_span(first, last)
self.assertEqual(len(span), len(li))
for subdir in os.listdir(self.temp_dir.name):
shutil.rmtree(os.path.join(self.temp_dir.name, subdir))
@given(dictionaries(
keys=text(),
values=lists(dates(), min_size=1)))
def test_add_and_get_task(self, d):
d = {task: dts for task, dts in d.items()
if self.db._sanitise_text(task)}
for task, dts in d.items():
for dt in dts:
self.db.add_task(task, dt.year, dt.month)
for task, dts in d.items():
li = list(set([(dt.year, dt.month) for dt in dts]))
res = self.db.get_task(task)
self.assertEqual(list(sorted(li)), list(sorted(res)))
path = os.path.join(self.temp_dir.name, 'tasks')
if os.path.exists(path):
os.remove(path)
|
11558809
|
import itertools
from typing import List
from bridgebots.deal import Card
from bridgebots.deal_enums import Direction, Rank, Suit
class RotationPermutation:
def __init__(self, suits: List[Suit]):
self.suits = suits
self.sorted_deck = [Card(suit, rank) for suit in suits for rank in Rank]
self.suit_ranks = {suit: suits.index(suit) for suit in suits}
def sort_hand(self, cards: List[Card]):
return sorted(cards, key=lambda card: (self.suit_ranks[card.suit], card.rank))
def build_deck_mask(self, cards: List[Card]):
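        # Walk the full sorted deck once, emitting 1 where the card is present
        # in the (sorted) hand and 0 otherwise: a deck-length 0/1 mask.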
deck_data = []
sorted_hand = self.sort_hand(cards)
sorted_hand_index = 0
for card in self.sorted_deck:
if sorted_hand_index < len(sorted_hand) and card == sorted_hand[sorted_hand_index]:
sorted_hand_index += 1
deck_data.append(1)
else:
deck_data.append(0)
return deck_data
all_suit_permutations = [
RotationPermutation(list(suit_permutation)) for suit_permutation in itertools.permutations([suit for suit in Suit])
]
clockwise_directions = [Direction.NORTH, Direction.EAST, Direction.SOUTH, Direction.WEST]
all_rotations = [[clockwise_directions[(i + j) % 4] for j in range(0, 4)] for i in range(0, 4)]
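# all_rotations enumerates the four clockwise seat orderings, e.g. the rotation
# starting from EAST is [EAST, SOUTH, WEST, NORTH].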
RotationPermutation.all_suit_permutations = all_suit_permutations
RotationPermutation.all_rotations = all_rotations
|
11558814
|
import logging
class BaseConverter(object):
"""
BaseConverter to inherit new converters from. This is the preferred method to create support for new file-formats.
    Please note that you will need to implement the convert-method, which has to return a dictionary as specified.
You can pass additional (arbitrary) python objects to CAVE by simply placing them in the returned dictionary.
All custom key-value pairs in the dictionary will be available in CAVE's
`RunsContainer <apidoc/cave.reader.runs_container>`_ as a dictionary `RunsContainer.share_information`.
"""
def __init__(self):
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
def convert(self, folders, ta_exec_dirs=None, output_dir=None, converted_dest='converted_input_data'):
"""Convert specific format results into SMAC3-format.
Parameters
----------
folders: List[str]
list of parallel configurator-runs (folder paths!)
ta_exec_dirs: List[str]
            only needed if instances have to be loaded; the path(s) relative to which the paths in the scenario are valid
output_dir: str
path to CAVE's output-directory
converted_dest: str
            optional; the parent folder in CAVE's output in which the converted runs (in SMAC-format) are
            saved. If not specified, temporary folders are used
Returns
-------
result: dictionary
.. code-block:: python
dict{
original_folder : dict{
'new_path' : converted_folder_path,
'config_space' : config_space,
'runhistory' : runhistory,
'validated_runhistory' : validated_runhistory,
'scenario' : scenario,
'trajectory' : trajectory,
}
}
in addition, the result-dictionary can contain any number of arbitrary key-value pairs, that will be
available in CAVE's `RunsContainer`
"""
raise NotImplementedError()
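# A minimal sketch of a concrete converter (the class name and the parsing step
# are hypothetical; only the shape of the returned dictionary follows the
# contract documented in BaseConverter.convert above).
class ExampleConverter(BaseConverter):
    def convert(self, folders, ta_exec_dirs=None, output_dir=None, converted_dest='converted_input_data'):
        result = {}
        for folder in folders:
            # ... parse `folder` into SMAC3-style objects here ...
            result[folder] = {
                'new_path': folder,            # path to the converted folder
                'config_space': None,          # ConfigurationSpace of the run
                'runhistory': None,            # RunHistory of the run
                'validated_runhistory': None,  # optional validated RunHistory
                'scenario': None,              # Scenario of the run
                'trajectory': None,            # trajectory of incumbents
            }
        return result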
|
11558839
|
from fire.core import FireError
class CliError(FireError):
"""Exception used by the CLI when command cannot be executed."""
|
11558860
|
import sys
IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
from http.client import NO_CONTENT
from email import encoders as Encoders
from urllib.parse import quote, urlencode
unicode = str
bytes = bytes
else:
from email import Encoders
from httplib import NO_CONTENT
from urllib import quote, urlencode
unicode = unicode
_orig_bytes = bytes
bytes = lambda s, *a: _orig_bytes(s)
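# With these aliases a single code path works on both Python majors, e.g.
# quote('a b') == 'a%20b' and urlencode({'q': 'a b'}) == 'q=a+b'.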
|
11558896
|
import toolbox
import numpy as np
import pylab
#extract shot record
data, params = toolbox.initialise("prepro.su")
mask = data['fldr'] == 221
shot = data[mask].copy()
#agc
toolbox.agc(shot, None, **params)
params['primary'] = 'fldr'
params['secondary'] = 'tracf'
params['wiggle'] = True
toolbox.display(shot, **params)
#fk plot
params['dx'] = 33.5 #m
#~ toolbox.fk_view(shot, **params)
#~ #fk filter design
params['fkVelocity'] = 2000
params['fkSmooth'] = 20
params['fkFilter'] = toolbox.fk_design(shot, **params)
shot = toolbox.fk_filter(shot, None, **params)
toolbox.display(shot, **params)
##############end of testing
#~ data, nparams = toolbox.initialise("prepro.su")
#~ toolbox.agc(data, None, **params)
#~ data = toolbox.fk_filter(data, None, **params)
#~ #nmo
#~ params['vels'] = np.fromfile('vels_full.bin').reshape(-1, params['ns'])
#~ params['smute'] = 150
#~ toolbox.nmo(data, None, **params)
#~ data.tofile("fk_nmo_gathers.su")
#~ toolbox.agc(data, None, **params)
#~ #stack
#~ stack = toolbox.stack(data, None, **params)
#~ params['gamma'] = -1
#~ toolbox.tar(stack, None, **params)
#~ stack.tofile("fk_stack.su")
#~ #display
#~ params['primary'] = None
#~ params['secondary'] = 'cdp'
#~ toolbox.display(stack, **params)
pylab.show()
|
11558911
|
from io import StringIO
import pytest
from django.core.management import CommandError
from django.core.management import call_command
from pytest_mock import MockFixture
def test_should_call_start_processing(mocker: MockFixture):
mock_start_processing = mocker.patch("django_stomp.management.commands.pubsub.start_processing")
fake_queue = "/queue/your-queue"
fake_function = "your_python_module.your_function"
out = StringIO()
call_command("pubsub", fake_queue, fake_function, stdout=out)
assert "Calling internal service to consume messages" in out.getvalue()
mock_start_processing.assert_called_with(fake_queue, fake_function)
def test_should_essential_parameters_are_required(mocker: MockFixture):
mock_start_processing = mocker.patch("django_stomp.management.commands.pubsub.start_processing")
fake_queue = "/queue/your-queue"
with pytest.raises(CommandError) as e:
call_command("pubsub")
assert e.value.args[0] == "Error: the following arguments are required: source_destination, callback_function"
with pytest.raises(CommandError) as e:
call_command("pubsub", fake_queue)
assert e.value.args[0] == "Error: the following arguments are required: callback_function"
mock_start_processing.assert_not_called()
|
11558933
|
import click
settings = dict(help_option_names=['-h', '--help'])
from .commands import init, install, ls, uninstall
@click.group(options_metavar='', subcommand_metavar='<command>', context_settings=settings)
def cli():
"""
Hi! This is a small command line tool called `pim` for making it easy to publish Python packages.
If you're just getting started with a new project, you'll want to call `pim init`
to initialize a project from inside a new folder.
If you already have a project you want to publish, you'll want to call `pim publish`
from inside the folder with your project (not yet implemented).
"""
pass
cli.add_command(init)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(ls)
|
11558937
|
import os
import unittest
from unittest import mock
import tablib
from django.test import TestCase
from django.utils.encoding import force_str
from tablib.core import UnsupportedFormat
from import_export.formats import base_formats
class FormatTest(TestCase):
def setUp(self):
self.format = base_formats.Format()
@mock.patch('import_export.formats.base_formats.HTML.get_format', side_effect=ImportError)
def test_format_non_available1(self, mocked):
self.assertFalse(base_formats.HTML.is_available())
@mock.patch('import_export.formats.base_formats.HTML.get_format', side_effect=UnsupportedFormat)
def test_format_non_available2(self, mocked):
self.assertFalse(base_formats.HTML.is_available())
def test_format_available(self):
self.assertTrue(base_formats.CSV.is_available())
def test_get_title(self):
self.assertEqual("<class 'import_export.formats.base_formats.Format'>", str(self.format.get_title()))
def test_create_dataset_NotImplementedError(self):
with self.assertRaises(NotImplementedError):
self.format.create_dataset(None)
def test_export_data_NotImplementedError(self):
with self.assertRaises(NotImplementedError):
self.format.export_data(None)
def test_get_extension(self):
self.assertEqual("", self.format.get_extension())
def test_get_content_type(self):
self.assertEqual("application/octet-stream", self.format.get_content_type())
def test_is_available_default(self):
self.assertTrue(self.format.is_available())
def test_can_import_default(self):
self.assertFalse(self.format.can_import())
def test_can_export_default(self):
self.assertFalse(self.format.can_export())
class XLSTest(TestCase):
def setUp(self):
self.format = base_formats.XLS()
def test_binary_format(self):
self.assertTrue(self.format.is_binary())
def test_import(self):
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.xls')
with open(filename, self.format.get_read_mode()) as in_stream:
self.format.create_dataset(in_stream.read())
class XLSXTest(TestCase):
def setUp(self):
self.format = base_formats.XLSX()
self.filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.xlsx')
def test_binary_format(self):
self.assertTrue(self.format.is_binary())
def test_import(self):
with open(self.filename, self.format.get_read_mode()) as in_stream:
dataset = self.format.create_dataset(in_stream.read())
result = dataset.dict
self.assertEqual(1, len(result))
row = result.pop()
self.assertEqual(1, row["id"])
self.assertEqual("Some book", row["name"])
self.assertEqual("<EMAIL>", row["author_email"])
self.assertEqual(4, row["price"])
@mock.patch("openpyxl.load_workbook")
def test_that_load_workbook_called_with_required_args(self, mock_load_workbook):
self.format.create_dataset(b"abc")
mock_load_workbook.assert_called_with(unittest.mock.ANY, read_only=True, data_only=True)
class CSVTest(TestCase):
def setUp(self):
self.format = base_formats.CSV()
self.dataset = tablib.Dataset(headers=['id', 'username'])
self.dataset.append(('1', 'x'))
def test_import_dos(self):
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-dos.csv')
with open(filename, self.format.get_read_mode()) as in_stream:
actual = in_stream.read()
expected = 'id,name,author_email\n1,Some book,<EMAIL>\n'
self.assertEqual(actual, expected)
def test_import_mac(self):
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-mac.csv')
with open(filename, self.format.get_read_mode()) as in_stream:
actual = in_stream.read()
expected = 'id,name,author_email\n1,Some book,<EMAIL>\n'
self.assertEqual(actual, expected)
def test_import_unicode(self):
# importing csv UnicodeEncodeError 347
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-unicode.csv')
with open(filename, self.format.get_read_mode()) as in_stream:
data = force_str(in_stream.read())
base_formats.CSV().create_dataset(data)
def test_export_data(self):
res = self.format.export_data(self.dataset)
self.assertEqual("id,username\r\n1,x\r\n", res)
def test_get_extension(self):
self.assertEqual("csv", self.format.get_extension())
def test_content_type(self):
self.assertEqual("text/csv", self.format.get_content_type())
def test_can_import(self):
self.assertTrue(self.format.can_import())
def test_can_export(self):
self.assertTrue(self.format.can_export())
class TSVTest(TestCase):
def setUp(self):
self.format = base_formats.TSV()
def test_import_mac(self):
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-mac.tsv')
with open(filename, self.format.get_read_mode()) as in_stream:
actual = in_stream.read()
expected = 'id\tname\tauthor_email\n1\tSome book\t<EMAIL>\n'
self.assertEqual(actual, expected)
def test_import_unicode(self):
# importing tsv UnicodeEncodeError
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-unicode.tsv')
with open(filename, self.format.get_read_mode()) as in_stream:
data = force_str(in_stream.read())
base_formats.TSV().create_dataset(data)
class TextFormatTest(TestCase):
def setUp(self):
self.format = base_formats.TextFormat()
def test_get_read_mode(self):
self.assertEqual('r', self.format.get_read_mode())
def test_is_binary(self):
self.assertFalse(self.format.is_binary())
|
11558957
|
from abc import ABC
import numpy as np
import airsim
import gym
# from tasks import Shaping
from jsbsim_simulator import Simulation
from jsbsim_aircraft import Aircraft, cessna172P, ball, x8
from debug_utils import *
import jsbsim_properties as prp
from simple_pid import PID
from autopilot import X8Autopilot
from navigation import WindEstimation
from report_diagrams import ReportGraphs
from image_processing import AirSimImages, SemanticImageSegmentation
from typing import Type, Tuple, Dict
class ClosedLoop:
"""
A class to run airsim, JSBSim and join the other classes together
...
Attributes:
----------
sim_time : float
how many seconds to run the simulation for
display_graphics : bool
decides whether to run the airsim graphic update in unreal, required for image_processing input
airspeed : float
fixed airspeed used to fly the aircraft if airspeed_hold_w_throttle a/p used
agent_interaction_frequency_hz : float
        how often the agent selects a new action; should be equal to or lower than the simulation frequency
airsim_frequency_hz : float
how often to update the airsim graphic simulation
sim_frequency_hz : float
how often to update the JSBSim input, should not be less than 120Hz to avoid unexpected behaviour
aircraft : Aircraft
the aircraft type used, x8 by default, changing this will likely require a change in the autopilot used
init_conditions : Dict[prp.Property, float] = None
        the simulation's initial conditions, None by default (as defined in basic_ic.xml)
debug_level : int
the level of debugging sent to the terminal by JSBSim
- 0 is limited
- 1 is core values
- 2 gives all calls within the C++ source code
Methods:
------
simulation_loop(profile : tuple(tuple))
updates airsim and JSBSim in the loop
get_graph_data()
gets the information required to produce debug type graphics
generate_figures()
produce required graphics
"""
def __init__(self, sim_time: float,
display_graphics: bool = True,
airspeed: float = 30.0,
agent_interaction_frequency: float = 12.0,
airsim_frequency_hz: float = 392.0,
sim_frequency_hz: float = 240.0,
aircraft: Aircraft = x8,
                 init_conditions: Dict[prp.Property, float] = None,
debug_level: int = 0):
self.sim_time = sim_time
self.display_graphics = display_graphics
self.airspeed = airspeed
self.aircraft = aircraft
self.sim: Simulation = Simulation(sim_frequency_hz, aircraft, init_conditions, debug_level)
self.agent_interaction_frequency = agent_interaction_frequency
self.sim_frequency_hz = sim_frequency_hz
self.airsim_frequency_hz = airsim_frequency_hz
self.ap: X8Autopilot = X8Autopilot(self.sim)
self.graph: DebugGraphs = DebugGraphs(self.sim)
self.report: ReportGraphs = ReportGraphs(self.sim)
self.debug_aero: DebugFDM = DebugFDM(self.sim)
self.wind_estimate: WindEstimation = WindEstimation(self.sim)
self.over: bool = False
def simulation_loop(self, profile: tuple) -> None:
"""
Runs the closed loop simulation and updates to airsim simulation based on the class level definitions
:param profile: a tuple of tuples of the aircraft's profile in (lat [m], long [m], alt [feet])
:return: None
"""
update_num = int(self.sim_time * self.sim_frequency_hz) # how many simulation steps to update the simulation
relative_update = self.airsim_frequency_hz / self.sim_frequency_hz # rate between airsim and JSBSim
graphic_update = 0
image = AirSimImages(self.sim)
image.get_np_image(image_type=airsim.ImageType.Scene)
for i in range(update_num):
graphic_i = relative_update * i
graphic_update_old = graphic_update
graphic_update = graphic_i // 1.0
# print(graphic_i, graphic_update_old, graphic_update)
# print(self.display_graphics)
if self.display_graphics and graphic_update > graphic_update_old:
self.sim.update_airsim()
# print('update_airsim')
self.ap.airspeed_hold_w_throttle(self.airspeed)
self.get_graph_data()
if not self.over:
self.over = self.ap.arc_path(profile, 400)
if self.over:
print('over and out!')
break
self.sim.run()
def test_loop(self) -> None:
"""
A loop to test the aircraft's flight dynamic model
:return: None
"""
update_num = int(self.sim_time * self.sim_frequency_hz) # how many simulation steps to update the simulation
relative_update = self.airsim_frequency_hz / self.sim_frequency_hz # rate between airsim and JSBSim
graphic_update = 0
for i in range(update_num):
graphic_i = relative_update * i
graphic_update_old = graphic_update
graphic_update = graphic_i // 1.0
# print(graphic_i, graphic_update_old, graphic_update)
# print(self.display_graphics)
if self.display_graphics and graphic_update > graphic_update_old:
self.sim.update_airsim()
# print('update_airsim')
# elevator = 0.0
# aileron = 0.0
# tla = 0.0
# self.ap.test_controls(elevator, aileron, tla)
# self.ap.altitude_hold(1000)
# self.ap.heading_hold(0)
# self.ap.roll_hold(5 * math.pi / 180)
# self.ap.pitch_hold(5.0 * math.pi / 180.0)
if self.sim[prp.sim_time_s] >= 5.0:
# self.ap.heading_hold(120.0)
# self.ap.roll_hold(0.0 * math.pi / 180.0)
self.ap.airspeed_hold_w_throttle(self.airspeed)
# self.ap.pitch_hold(10.0 * (math.pi / 180.0))
self.ap.altitude_hold(800)
self.get_graph_data()
self.sim.run()
def get_graph_data(self) -> None:
"""
Gets the information required to produce debug type graphics
:return:
"""
self.graph.get_abs_pos_data()
self.graph.get_airspeed()
self.graph.get_alpha()
self.graph.get_control_data()
self.graph.get_time_data()
self.graph.get_pos_data()
self.graph.get_angle_data()
self.graph.get_rate_data()
self.report.get_graph_info()
def generate_figures(self) -> None:
"""
Produce required graphics, outputs them in the desired graphic environment
:return: None
"""
self.graph.control_plot()
self.graph.trace_plot_abs()
self.graph.three_d_scene()
self.graph.pitch_rate_plot()
self.graph.roll_rate_plot()
# self.graph.roll_rate_plot()
# self.debug_aero.get_pitch_values()
def run_simulator() -> None:
"""
Runs the JSBSim and Airsim in the loop when executed as a script
:return: None
"""
env = ClosedLoop(750, True)
circuit_profile = ((0, 0, 1000), (4000, 0, 1000), (4000, 4000, 1000), (0, 4000, 1000), (0, 0, 20), (4000, 0, 20),
(4000, 4000, 20))
ice_profile = ((0, 0, 0), (1200, 0, 0), (1300, 150, 0), (540, 530, -80), (0, 0, -150), (100, 100, -100))
square = ((0, 0, 0), (2000, 0, 0), (2000, 2000, 0), (0, 2000, 0), (0, 0, 0), (2000, 0, 0), (2000, 2000, 0))
approach = ((0, 0, 0), (2000, 0, 800), (2000, 2000, 600), (0, 2000, 400), (0, 0, 200), (2000, 0, 100),
(2000, 2000, 100), (0, 2000, 100), (0, 0, 100))
rectangle = ((0, 0, 0), (2000, 0, 1000), (2000, 2000, 500), (-2000, 2000, 300), (-2000, 0, 100), (2000, 0, 20),
(2000, 2000, 20), (-2000, 2000, 20))
env.simulation_loop(rectangle)
env.generate_figures()
env.report.trace_plot(rectangle)
env.report.control_response(0, 750, 240)
env.report.three_d_plot(300, 750, 240)
print('Simulation ended')
def run_simulator_test() -> None:
"""
Runs JSBSim in the test loop when executed as a script to test the FDM
:return: None
"""
sim_frequency = 240
env = ClosedLoop(65.0, True, 30, 12, 24, sim_frequency)
env.test_loop()
env.generate_figures()
print('Simulation ended')
if __name__ == '__main__':
run_simulator()
# run_simulator_test()
|
11558965
|
import os, sys, subprocess, json, datetime
from shutil import copyfile
DEBUG = False
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " input_dir output_dir")
    sys.exit(1)
input_dir = sys.argv[1]
output_dir = sys.argv[2]
json_data = []
input_files = []
for file in os.listdir(input_dir):
if file.endswith(".json"):
with open(input_dir + "/" + file) as json_desc:
json_data.append(json.load(json_desc))
print("Imported input descriptor: " + file)
#if os.path.exists(video_filename):
# print("Found input video: " + video_filename)
# filenames_no_ext.append(filename_no_ext)
#else:
# print("No matching input video.")
|
11558996
|
import os
import argparse
from current_models import CURRENT_MODELS, MODEL_CAT_TO_GOOGLE_DIR
# 512 always performs best for our models.
max_sents = {128: 11, 256: 5, 384: 3, 512: 3}
# max_sents = {64: 23, 128:11, 256: 5, 384: 3, 512: 3}
bert_lrs = [1e-5, 2e-5]
task_lrs = [1e-4, 2e-4, 3e-4] #, 5e-4, 1e-3]
def get_conf_name(model, seg_len, bert_lr, task_lr, task_optimizer=None, eps=None):
if task_optimizer is None and eps is None:
return '{}_sl{}_blr{}_tlr{}'.format(model, seg_len, bert_lr, task_lr)
else:
return '{}_sl{}_blr{}_tlr{}_to{}_eps{}'.format(model, seg_len, bert_lr, task_lr, task_optimizer, eps)
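# For example (hypothetical model name), get_conf_name('spanbert_base', 512, 1e-05, 0.0002)
# returns 'spanbert_base_sl512_blr1e-05_tlr0.0002' via Python's default float formatting.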
def get_conf_lines(model, seg_len, bert_lr, task_lr, bert_model_dir, checkpoint, task_optimizer=None, eps=None):
lines = []
casing = 'uncased' if 'uncased' in bert_model_dir else 'cased'
lines += [get_conf_name(model, seg_len, bert_lr, task_lr, task_optimizer, eps) + ' = ${best} {']
lines += [' num_docs = {}'.format(args.num_docs)]
lines += [' bert_learning_rate = {}'.format(bert_lr)]
lines += [' task_learning_rate = {}'.format(task_lr)]
lines += [' max_segment_len = {}'.format(seg_len)]
lines += [' ffnn_size = {}'.format(args.ffnn_size)]
lines += [' train_path = {}/{}/train.{}.{}.jsonlines'.format(args.data_dir, casing, args.lang, seg_len)]
lines += [' eval_path = {}/{}/dev.{}.{}.jsonlines'.format(args.data_dir, casing, args.lang, seg_len)]
lines += [' conll_eval_path = {}/gold_conll/dev.{}.v4_gold_conll'.format(args.data_dir, args.lang)]
lines += [' max_training_sentences = {}'.format(max_sents[seg_len])]
lines += [' bert_config_file = {}/bert_config.json'.format(bert_model_dir)]
lines += [' vocab_file = {}/vocab.txt'.format(bert_model_dir)]
lines += [' tf_checkpoint = {}/bert_model.ckpt'.format(bert_model_dir)]
lines += [' init_checkpoint = {}'.format(checkpoint)]
if task_optimizer is not None:
lines += [' task_optimizer = {}'.format(task_optimizer)]
if eps is not None:
lines += [' adam_eps = {}'.format(eps)]
lines += ['}\n']
return lines
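# A sketch of one emitted block (the model name and values are hypothetical),
# in the HOCON style produced by get_conf_lines above:
#
#   some_model_sl512_blr2e-05_tlr0.0002 = ${best} {
#     num_docs = 2802
#     bert_learning_rate = 2e-05
#     task_learning_rate = 0.0002
#     max_segment_len = 512
#     ...
#   }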
def generate(args):
num_confs = 0
with open(args.conf_file, 'a') as f:
for (model, (model_cat, ckpt_file)) in CURRENT_MODELS.items():
bert_model_dir = os.path.join(args.data_dir, 'bert_models', MODEL_CAT_TO_GOOGLE_DIR[model_cat])
for sl in max_sents.keys():
for bert_lr in bert_lrs:
for task_lr in task_lrs:
lines = get_conf_lines(model, sl, bert_lr, task_lr, bert_model_dir, ckpt_file)
if args.trial:
print('\n'.join(lines) + '\n')
else:
f.write('\n'.join(lines) + '\n')
num_confs += 1
print('{} configs written to {}'.format(num_confs, args.conf_file))
def run_slrm(args):
with open(args.jobs_file) as f:
for i, line in enumerate(f):
job = line.strip()
os.system('sbatch -J {} {} {}'.format('coref_' + job, args.slrm_file, job))
print('starting job {}'.format(job))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, required=True, help='High level coref data dir')
parser.add_argument("--generate_configs", action='store_true', help='appends configs to --conf_file')
parser.add_argument("--run_jobs", action='store_true', help='send jobs from --jobs_file to the cluster')
# you mostly don't need to touch these below
parser.add_argument("--trial", action='store_true', help='Print config to stdout if true')
parser.add_argument("--conf_file", default='experiments.conf', type=str, help='Output config file')
parser.add_argument("--jobs_file", default='torun.txt', type=str, help='file contraining list of jobs')
parser.add_argument("--slrm_file", default='slurm_coref.slrm', type=str, help='Slrm file')
parser.add_argument("--num_docs", default=2802, type=int)
parser.add_argument("--ffnn_size", default=3000, type=int)
parser.add_argument("--lang", default='english', type=str)
args = parser.parse_args()
    if not args.generate_configs and not args.run_jobs:
        print('One of --generate_configs or --run_jobs must be set')
    elif args.generate_configs and args.run_jobs:
        print('Only one of --generate_configs and --run_jobs should be set. First generate the configs with --generate_configs only. Make sure you have the right list in the jobs_file. Then run with --run_jobs.')
elif args.generate_configs:
generate(args)
else:
run_slrm(args)
|
11559007
|
import logging
import gluoncv as gcv
gcv.utils.check_version('0.8.0')
from gluoncv.auto.estimators import ImageClassificationEstimator
from gluoncv.auto.tasks.utils import config_to_nested
from d8.image_classification import Dataset
if __name__ == '__main__':
# specify hyperparameters
config = {
'dataset': 'boat',
'gpus': [0, 1, 2, 3, 4, 5, 6, 7],
'estimator': 'img_cls',
'model': 'resnet50_v1b',
'batch_size': 128, # range [16, 32, 64, 128]
'epochs': 3
}
config = config_to_nested(config)
config.pop('estimator')
# specify dataset
dataset = Dataset.get('boat')
train_data, valid_data = dataset.split(0.8)
# specify estimator
estimator = ImageClassificationEstimator(config)
# fit estimator
estimator.fit(train_data, valid_data)
# evaluate auto estimator
top1, top5 = estimator.evaluate(valid_data)
logging.info('evaluation: top1={}, top5={}'.format(top1, top5))
|
11559023
|
from alchemtest.gmx import load_expanded_ensemble_case_1
from alchemlyb.parsing.util import anyopen
def test_gzip():
"""Test that gzip reads .gz files in the correct (text) mode.
"""
dataset = load_expanded_ensemble_case_1()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
with anyopen(filename, 'r') as f:
assert type(f.readline()) is str
|
11559039
|
import argparse
from bitstring import BitArray
from PIL import Image
argparser = argparse.ArgumentParser(description="Converts 24-bit BMP image to 8-bit RRRGGGBB pixels in COE format")
argparser.add_argument("INPUT", help="Input file path")
argparser.add_argument("OUTPUT", help="Output file path")
args = argparser.parse_args()
# Create image object from BMP file
print(f"Opening \"{args.INPUT}\"...")
img = Image.open(args.INPUT)
print(f" Width: {img.size[0]}px\n Height: {img.size[1]}px")
# Loop through image pixels
data = b''
for y in range(img.size[1]):
for x in range(img.size[0]):
# Get current pixel
pix = img.getpixel((x, y))
# Decimate pixel values
r = int((pix[0] / 255) * 7)
g = int((pix[1] / 255) * 7)
b = int((pix[2] / 255) * 3)
        # Pack bits into a fixed-width RRRGGGBB byte; bin() yields a variable
        # number of bits (e.g. bin(1) is a single bit), so use explicit
        # 3/3/2-bit fields to keep each pixel exactly one byte
        bit = BitArray()
        bit += BitArray(uint=r, length=3)
        bit += BitArray(uint=g, length=3)
        bit += BitArray(uint=b, length=2)
        data += bit.tobytes()
# Format bytes as COE
fstr = "memory_initialization_radix=16;\nmemory_initialization_vector=\n"
for b in data:
fstr += f"{b:0{2}x},\n"
# Remove last comma and new line
fstr = fstr[:-2]
# Print ROM info
print("\nROM Info:")
print(" Width: 8 bits")
print(f" Depth: {len(data)} bytes")
# Write COE to disk
fout = open(args.OUTPUT, "wb")
fout.write(fstr.encode("utf-8"))
fout.close()
print(f"\nCOE saved to \"{args.OUTPUT}\"")
print(f" Size: ~{int(len(fstr) / 1024)} KB")
|
11559088
|
from nox.lib import core, openflow, packet
from nox.lib.core import Component
from nox.lib.packet import ethernet, ipv4, dns
from nox.coreapps.pyrt.pycomponent import CONTINUE
from twisted.python import log
from collections import defaultdict
import curses
class DnsSpy(Component):
def __init__(self, ctxt):
Component.__init__(self, ctxt)
self.ip_records = defaultdict(list)
def install(self):
self.register_for_datapath_join(self.dp_join)
match_src = { core.DL_TYPE: ethernet.ethernet.IP_TYPE,
core.NW_PROTO : ipv4.ipv4.UDP_PROTOCOL,
core.TP_SRC : 53}
self.register_for_packet_match(self.handle_dns, 0xffff, match_src)
def dp_join(self, dpid, stats):
# Make sure we get the full DNS packet at the Controller
self.install_datapath_flow(dpid,
{ core.DL_TYPE : ethernet.ethernet.IP_TYPE,
core.NW_PROTO : ipv4.ipv4.UDP_PROTOCOL,
core.TP_SRC : 53 },
openflow.OFP_FLOW_PERMANENT, openflow.OFP_FLOW_PERMANENT,
[[openflow.OFPAT_OUTPUT, [0, openflow.OFPP_CONTROLLER]]])
return CONTINUE
def handle_dns(self, dpid, inport, ofp_reason, total_frame_len, buffer_id, packet):
dnsh = packet.find('dns')
if not dnsh:
log.err('received invalid DNS packet',system='dnsspy')
return CONTINUE
log.msg(str(dnsh),system='dnsspy')
for answer in dnsh.answers:
if answer.qtype == dns.dns.rr.A_TYPE:
val = self.ip_records[answer.rddata]
if answer.name not in val:
val.insert(0, answer.name)
log.msg("add dns entry: %s %s" % (answer.rddata, answer.name), system='dnsspy')
for addition in dnsh.additional:
            # Debugging aid (disabled): dump the raw layer bytes when a DNS name fails to parse as ASCII
# for char in addition.name:
# # some debugging magic in case we have a bad parse in DNS
# if not curses.ascii.isascii(char):
# for byte in dnsh.get_layer():
# print '%x' % byte,
# print ''
# continue
if addition.qtype == dns.dns.rr.A_TYPE:
val = self.ip_records[addition.rddata]
if addition.name not in val:
val.insert(0, addition.name)
log.msg("additional dns entry: %s %s" % (addition.rddata, addition.name), system='dnsspy')
return CONTINUE
def getInterface(self):
return str(DnsSpy)
def getFactory():
class Factory:
def instance(self, ctxt):
return DnsSpy(ctxt)
return Factory()
|
11559097
|
import json
import requests
import urllib.parse
class RequestHandler(object):
def __init__(self, endpoint, api_key):
self.endpoint = endpoint
self.api_key = api_key
def __to_canonical_querystring(self, params):
canonical_querystring = ""
# parameters have to be sorted alphabetically for the signing part
for param_key, param_value in sorted(params.items()):
if canonical_querystring != "":
canonical_querystring += "&"
canonical_querystring += param_key + "=" + urllib.parse.quote(param_value)
return canonical_querystring
    def request(self, path, method="GET", data=None, params=None):
        # avoid a shared mutable default argument
        params = params if params is not None else {}
        canonical_querystring = self.__to_canonical_querystring(params)
data = json.dumps(data) if data else None
headers = {}
headers["Accept"] = "application/json"
headers["Content-type"] = "application/json"
headers["x-api-key"] = self.api_key
request_url = self.endpoint + path + "?" + canonical_querystring
if method == "GET":
return requests.get(request_url, headers=headers)
if method == "POST":
return requests.post(request_url, headers=headers, data=data)
if method == "PUT":
return requests.put(request_url, headers=headers, data=data)
if method == "DELETE":
return requests.delete(request_url, headers=headers, data=data)
raise RuntimeError("Unknown method: " + method)
|
11559126
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = [
'VGG', 'vgg16',
]
model_urls = {
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
# print('feature.size()', x.size())
feature = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, feature
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
return model
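if __name__ == "__main__":
    # Smoke-test sketch: run a dummy batch through the untrained network.
    # As defined in VGG.forward above, the model returns both the logits
    # and the pooled feature map.
    import torch
    model = vgg16(pretrained=False)
    logits, feature = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)   # torch.Size([1, 1000])
    print(feature.shape)  # torch.Size([1, 512, 1, 1])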
|
11559132
|
from nltk.corpus.reader import CategorizedCorpusReader, ChunkedCorpusReader
from nltk.corpus.reader import ConllCorpusReader, ConllChunkCorpusReader
class CategorizedChunkedCorpusReader(CategorizedCorpusReader, ChunkedCorpusReader):
"""
A reader for chunked corpora whose documents are divided into categories
based on their file identifiers.
"""
# code adapted from CategorizedTaggedCorpusReader
def __init__(self, *args, **kwargs):
CategorizedCorpusReader.__init__(self, kwargs)
ChunkedCorpusReader.__init__(self, *args, **kwargs)
def _resolve(self, fileids, categories):
if fileids is not None and categories is not None:
raise ValueError('Specify fileids or categories, not both')
if categories is not None:
return self.fileids(categories)
else:
return fileids
def raw(self, fileids=None, categories=None):
return ChunkedCorpusReader.raw(self, self._resolve(fileids, categories))
def words(self, fileids=None, categories=None):
return ChunkedCorpusReader.words(self, self._resolve(fileids, categories))
def sents(self, fileids=None, categories=None):
return ChunkedCorpusReader.sents(self, self._resolve(fileids, categories))
def paras(self, fileids=None, categories=None):
return ChunkedCorpusReader.paras(self, self._resolve(fileids, categories))
def tagged_words(self, fileids=None, categories=None):
return ChunkedCorpusReader.tagged_words(self, self._resolve(fileids, categories))
def tagged_sents(self, fileids=None, categories=None):
return ChunkedCorpusReader.tagged_sents(self, self._resolve(fileids, categories))
def tagged_paras(self, fileids=None, categories=None):
return ChunkedCorpusReader.tagged_paras(self, self._resolve(fileids, categories))
def chunked_words(self, fileids=None, categories=None):
return ChunkedCorpusReader.chunked_words(
self, self._resolve(fileids, categories))
def chunked_sents(self, fileids=None, categories=None):
return ChunkedCorpusReader.chunked_sents(
self, self._resolve(fileids, categories))
def chunked_paras(self, fileids=None, categories=None):
return ChunkedCorpusReader.chunked_paras(
self, self._resolve(fileids, categories))
class CategorizedConllChunkCorpusReader(CategorizedCorpusReader, ConllChunkCorpusReader):
"""
A reader for conll chunked corpora whose documents are divided into
categories based on their file identifiers.
"""
def __init__(self, *args, **kwargs):
# NOTE: in addition to cat_pattern, ConllChunkCorpusReader also requires
# chunk_types as third argument, which defaults to ('NP','VP','PP')
CategorizedCorpusReader.__init__(self, kwargs)
ConllChunkCorpusReader.__init__(self, *args, **kwargs)
def _resolve(self, fileids, categories):
if fileids is not None and categories is not None:
raise ValueError('Specify fileids or categories, not both')
if categories is not None:
return self.fileids(categories)
else:
return fileids
def raw(self, fileids=None, categories=None):
return ConllCorpusReader.raw(self, self._resolve(fileids, categories))
def words(self, fileids=None, categories=None):
return ConllCorpusReader.words(self, self._resolve(fileids, categories))
def sents(self, fileids=None, categories=None):
return ConllCorpusReader.sents(self, self._resolve(fileids, categories))
def tagged_words(self, fileids=None, categories=None):
return ConllCorpusReader.tagged_words(self, self._resolve(fileids, categories))
def tagged_sents(self, fileids=None, categories=None):
return ConllCorpusReader.tagged_sents(self, self._resolve(fileids, categories))
def chunked_words(self, fileids=None, categories=None, chunk_types=None):
return ConllCorpusReader.chunked_words(
self, self._resolve(fileids, categories), chunk_types)
def chunked_sents(self, fileids=None, categories=None, chunk_types=None):
return ConllCorpusReader.chunked_sents(
self, self._resolve(fileids, categories), chunk_types)
def parsed_sents(self, fileids=None, categories=None, pos_in_tree=None):
return ConllCorpusReader.parsed_sents(
self, self._resolve(fileids, categories), pos_in_tree)
def srl_spans(self, fileids=None, categories=None):
return ConllCorpusReader.srl_spans(self, self._resolve(fileids, categories))
def srl_instances(self, fileids=None, categories=None, pos_in_tree=None, flatten=True):
return ConllCorpusReader.srl_instances(
self, self._resolve(fileids, categories), pos_in_tree, flatten)
def iob_words(self, fileids=None, categories=None):
return ConllCorpusReader.iob_words(self, self._resolve(fileids, categories))
def iob_sents(self, fileids=None, categories=None):
return ConllCorpusReader.iob_sents(self, self._resolve(fileids, categories))
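# Usage sketch (the corpus root and cat_pattern below are hypothetical):
# categories are derived from file identifiers via cat_pattern, as in
# NLTK's CategorizedTaggedCorpusReader.
#
#   reader = CategorizedChunkedCorpusReader(
#       'corpora/chunked', r'.*\.pos', cat_pattern=r'(\w+)/.*')
#   print(reader.categories())
#   print(reader.chunked_sents(categories=['news'])[:1])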
|
11559142
|
import os
# the token of the @CryptoCoinsInfoBot
TOKEN_BOT = 'put_here'
YOUR_TELEGRAM_ALIAS = 'put_here'
# pause between API requests, in seconds
TIME_INTERVAL = 3600
# old CoinMarketCap public API
# COINMARKET_API_URL_COINLIST = 'https://api.coinmarketcap.com/v1/ticker/?limit=0'
# new pro API
CMC_API_KEY = "put_here"
COINMARKET_API_URL_COINLIST = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?start=1&limit=5000' \
'&CMC_PRO_API_KEY={}'
# CryptoCompare API
# used to check whether the requested coin is on the CryptoCompare coin list
CRYPTOCOMPARE_API_URL_COINLIST = 'https://min-api.cryptocompare.com/data/all/coinlist'
# used to fetch the prices of the requested coin
CRYPTOCOMPARE_API_URL_PRICEMULTIFULL = 'https://min-api.cryptocompare.com/data/pricemultifull?fsyms={}&tsyms=BTC,USD'
FILE_JSON_COINMARKET = os.path.dirname(os.path.realpath(__file__)) + '/coinmarketcoins.json'
FILE_JSON_CRYPTOCOMPARE = os.path.dirname(os.path.realpath(__file__)) + '/cryptocomparecoins.json'
class JSONFiles:
def __init__(self):
self.coinmarketcapjson = {}
self.cryptocomparejson = {}
def update_cmc_json(self, json1):
assert isinstance(json1, dict)
self.coinmarketcapjson = json1
return json1
def update_cc_json(self, json2):
assert isinstance(json2, dict)
self.cryptocomparejson = json2
return json2
# module-level JSONFiles instance used to cache the coin lists fetched from the APIs
jsonfiles = JSONFiles()
|
11559143
|
from .tensordict import TensorDict
from .tensorlist import TensorList
__all__ = ['TensorDict', 'TensorList']  # __all__ entries must be strings
|
11559149
|
from huobi.client.algo import AlgoClient
from huobi.constant import *
from huobi.utils import *
symbol_test = "adausdt"
account_id = g_account_id
algo_client = AlgoClient(api_key=g_api_key, secret_key=g_secret_key)
order_id = algo_client.create_order(symbol=symbol_test, account_id=account_id, order_side=OrderSide.BUY,
order_type=AlgoOrderType.LIMIT, order_size=65, order_price=0.08, stop_price=0.085,
client_order_id="test004")
LogInfo.output("created order id : {id}".format(id=order_id))
|
11559163
|
from __future__ import absolute_import, division, print_function
from .version import version as __version__
from .api import glue, read_notebook, read_notebooks
|
11559186
|
import json
import sys
from tqdm import tqdm
from my.corenlp_interface import CoreNLPInterface
in_path = sys.argv[1]
out_path = sys.argv[2]
url = sys.argv[3]
port = int(sys.argv[4])
data = json.load(open(in_path, 'r'))
h = CoreNLPInterface(url, port)
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += len(sub) # use start += 1 to find overlapping matches
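# Note: because start advances by len(sub), find_all yields only
# non-overlapping matches, e.g. list(find_all("ababab", "abab")) == [0];
# stepping by 1 would also report the overlapping match at index 2.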
def to_hex(s):
return " ".join(map(hex, map(ord, s)))
def handle_nobreak(cand, text):
if cand == text:
return cand
if cand.replace(u'\u00A0', ' ') == text:
return cand
elif cand == text.replace(u'\u00A0', ' '):
return text
raise Exception("{} '{}' {} '{}'".format(cand, to_hex(cand), text, to_hex(text)))
# resolving unicode complication
wrong_loc_count = 0
loc_diffs = []
for article in data['data']:
for para in article['paragraphs']:
para['context'] = para['context'].replace(u'\u000A', '')
para['context'] = para['context'].replace(u'\u00A0', ' ')
context = para['context']
for qa in para['qas']:
for answer in qa['answers']:
answer['text'] = answer['text'].replace(u'\u00A0', ' ')
text = answer['text']
answer_start = answer['answer_start']
if context[answer_start:answer_start + len(text)] == text:
if text.lstrip() == text:
pass
else:
answer_start += len(text) - len(text.lstrip())
answer['answer_start'] = answer_start
text = text.lstrip()
answer['text'] = text
else:
wrong_loc_count += 1
text = text.lstrip()
answer['text'] = text
starts = list(find_all(context, text))
if len(starts) == 1:
answer_start = starts[0]
elif len(starts) > 1:
new_answer_start = min(starts, key=lambda s: abs(s - answer_start))
loc_diffs.append(abs(new_answer_start - answer_start))
answer_start = new_answer_start
else:
                        raise Exception("answer text '{}' not found in context".format(text))
answer['answer_start'] = answer_start
answer_stop = answer_start + len(text)
answer['answer_stop'] = answer_stop
assert para['context'][answer_start:answer_stop] == answer['text'], "{} {}".format(
para['context'][answer_start:answer_stop], answer['text'])
print(wrong_loc_count, loc_diffs)
mismatch_count = 0
dep_fail_count = 0
no_answer_count = 0
size = sum(len(article['paragraphs']) for article in data['data'])
pbar = tqdm(range(size))
for ai, article in enumerate(data['data']):
for pi, para in enumerate(article['paragraphs']):
context = para['context']
sents = h.split_doc(context)
words = h.split_sent(context)
sent_starts = []
ref_idx = 0
for sent in sents:
new_idx = context.find(sent, ref_idx)
sent_starts.append(new_idx)
ref_idx = new_idx + len(sent)
para['sents'] = sents
para['words'] = words
para['sent_starts'] = sent_starts
consts = list(map(h.get_const, sents))
para['consts'] = consts
deps = list(map(h.get_dep, sents))
para['deps'] = deps
for qa in para['qas']:
question = qa['question']
question_const = h.get_const(question)
qa['const'] = question_const
question_dep = h.get_dep(question)
qa['dep'] = question_dep
qa['words'] = h.split_sent(question)
for answer in qa['answers']:
answer_start = answer['answer_start']
text = answer['text']
answer_stop = answer_start + len(text)
# answer_words = h.split_sent(text)
word_idxs = []
answer_words = []
for sent_idx, (sent, sent_start, dep) in enumerate(zip(sents, sent_starts, deps)):
if dep is None:
print("dep parse failed at {} {} {}".format(ai, pi, sent_idx))
dep_fail_count += 1
continue
nodes, edges = dep
words = [node[0] for node in nodes]
for word_idx, (word, _, _, start, _) in enumerate(nodes):
global_start = sent_start + start
global_stop = global_start + len(word)
if answer_start <= global_start < answer_stop or answer_start < global_stop <= answer_stop:
word_idxs.append((sent_idx, word_idx))
answer_words.append(word)
if len(word_idxs) > 0:
answer['answer_word_start'] = word_idxs[0]
answer['answer_word_stop'] = word_idxs[-1][0], word_idxs[-1][1] + 1
if not text.startswith(answer_words[0]):
print("'{}' '{}'".format(text, ' '.join(answer_words)))
mismatch_count += 1
else:
answer['answer_word_start'] = None
answer['answer_word_stop'] = None
no_answer_count += 1
pbar.update(1)
pbar.close()
print(mismatch_count, dep_fail_count, no_answer_count)
print("saving...")
json.dump(data, open(out_path, 'w'))
|
11559223
|
from __future__ import division
from math import cos, sin, fabs, pi
from random import randint
from sfml import sf
# define some constants
game_size = sf.Vector2(800, 600)
paddle_size = sf.Vector2(25, 100)
ball_radius = 10.
# create the window of the application
w, h = game_size
window = sf.RenderWindow(sf.VideoMode(w, h), "pySFML - Pong")
window.vertical_synchronization = True
# load the sounds used in the game
ball_sound_buffer = sf.SoundBuffer.from_file("data/ball.wav")
ball_sound = sf.Sound(ball_sound_buffer)
# create the left paddle
left_paddle = sf.RectangleShape()
left_paddle.size = paddle_size - (3, 3)
left_paddle.outline_thickness = 3
left_paddle.outline_color = sf.Color.BLACK
left_paddle.fill_color = sf.Color(100, 100, 200)
left_paddle.origin = paddle_size / 2
# create the right paddle
right_paddle = sf.RectangleShape()
right_paddle.size = paddle_size - (3, 3)
right_paddle.outline_thickness = 3
right_paddle.outline_color = sf.Color.BLACK
right_paddle.fill_color = sf.Color(200, 100, 100)
right_paddle.origin = paddle_size / 2
# create the ball
ball = sf.CircleShape()
ball.radius = ball_radius - 3
ball.outline_thickness = 3
ball.outline_color = sf.Color.BLACK
ball.fill_color = sf.Color.WHITE
ball.origin = (ball_radius / 2, ball_radius / 2)
# load the font
font = sf.Font.from_file("data/sansation.ttf")
# initialize the pause message
pause_message = sf.Text()
pause_message.font = font
pause_message.character_size = 40
pause_message.position = (170, 150)
pause_message.color = sf.Color.WHITE
pause_message.string = "Welcome to pySFML pong!\nPress space to start the game"
# define the paddles properties
ai_timer = sf.Clock()
ai_time = sf.seconds(0.1)
paddle_speed = 400.
right_paddle_speed = 0.
ball_speed = 400.
ball_angle = 0. # to be changed later
clock = sf.Clock()
is_playing = False
while window.is_open:
# handle events
for event in window.events:
        # window closed or escape key pressed: exit
        if event == sf.Event.CLOSED:
            window.close()
        if event == sf.Event.KEY_PRESSED and event['code'] == sf.Keyboard.ESCAPE:
            window.close()
# space key pressed: play
if event == sf.Event.KEY_PRESSED and event['code'] == sf.Keyboard.SPACE:
if not is_playing:
# (re)start the game
is_playing = True
clock.restart()
# reset the position of the paddles and ball
left_paddle.position = (10 + paddle_size.x / 2, game_size.y / 2)
right_paddle.position = (game_size.x - 10 - paddle_size.x / 2, game_size.y / 2)
ball.position = game_size / 2
# reset the ball angle
while True:
# make sure the ball initial angle is not too much vertical
ball_angle = (randint(0, 32767) % 360) * 2 * pi / 360
if not fabs(cos(ball_angle)) < 0.7: break
if is_playing:
delta_time = clock.restart().seconds
# move the player's paddle
if sf.Keyboard.is_key_pressed(sf.Keyboard.UP) and left_paddle.position.y - paddle_size.y / 2 > 5:
left_paddle.move((0, -paddle_speed * delta_time))
elif sf.Keyboard.is_key_pressed(sf.Keyboard.DOWN) and left_paddle.position.y + paddle_size.y / 2 < game_size.y - 5:
left_paddle.position += (0, paddle_speed * delta_time)
        # move the computer's paddle
        if (right_paddle_speed < 0 and right_paddle.position.y - paddle_size.y / 2 > 5) or (right_paddle_speed > 0 and right_paddle.position.y + paddle_size.y / 2 < game_size.y - 5):
right_paddle.position += (0, right_paddle_speed * delta_time)
# update the computer's paddle direction according to the ball position
if ai_timer.elapsed_time > ai_time:
ai_timer.restart()
if ball.position.y + ball_radius > right_paddle.position.y + paddle_size.y / 2:
right_paddle_speed = paddle_speed
elif ball.position.y - ball_radius < right_paddle.position.y - paddle_size.y / 2:
right_paddle_speed = -paddle_speed
else:
right_paddle_speed = 0
# move the ball
factor = ball_speed * delta_time
ball.move((cos(ball_angle) * factor, sin(ball_angle) * factor))
# check collisions between the ball and the screen
if ball.position.x - ball_radius < 0:
is_playing = False
pause_message.string = "You lost!\nPress space to restart or\nescape to exit"
if ball.position.x + ball_radius > game_size.x:
is_playing = False
pause_message.string = "You won !\nPress space to restart or\nescape to exit"
if ball.position.y - ball_radius < 0:
ball_sound.play()
ball_angle = - ball_angle
            ball.position = (ball.position.x, ball_radius + 0.1)
if ball.position.y + ball_radius > game_size.y:
ball_sound.play()
ball_angle = - ball_angle
            ball.position = (ball.position.x, game_size.y - ball_radius - 0.1)
# check the collisions between the ball and the paddles
# left paddle
if ball.position.x - ball_radius < left_paddle.position.x + paddle_size.x / 2 and ball.position.x - ball_radius > left_paddle.position.x and ball.position.y + ball_radius >= left_paddle.position.y - paddle_size.y / 2 and ball.position.y - ball_radius <= left_paddle.position.y + paddle_size.y / 2:
if ball.position.y > left_paddle.position.y:
ball_angle = pi - ball_angle + (randint(0, 32767) % 20) * pi / 180
else:
ball_angle = pi - ball_angle - (randint(0, 32767) % 20) * pi / 180
ball_sound.play()
ball.position = (left_paddle.position.x + ball_radius + paddle_size.x / 2 + 0.1, ball.position.y)
# right paddle
if ball.position.x + ball_radius > right_paddle.position.x - paddle_size.x / 2 and ball.position.x + ball_radius < right_paddle.position.x and ball.position.y + ball_radius >= right_paddle.position.y - paddle_size.y / 2 and ball.position.y - ball_radius <= right_paddle.position.y + paddle_size.y / 2:
if ball.position.y > right_paddle.position.y:
ball_angle = pi - ball_angle + (randint(0, 32767) % 20) * pi / 180
else:
ball_angle = pi - ball_angle - (randint(0, 32767) % 20) * pi / 180
ball_sound.play()
ball.position = (right_paddle.position.x - ball_radius - paddle_size.x / 2 - 0.1, ball.position.y)
window.clear(sf.Color(50, 200, 50))
if is_playing:
# draw the paddles and the ball
window.draw(left_paddle)
window.draw(right_paddle)
window.draw(ball)
else:
# draw the pause message
window.draw(pause_message)
# display things on screen
window.display()
|
11559252
|
import os
import pickle
from fuzzywuzzy import fuzz
from play import play_wav
from listen import recognize_speech_from_mic
def keyword_match(keyword):
results = []
chapter_texts = os.listdir("data/texts")
chapter_texts.sort()
flag = False
for chapter_name in chapter_texts:
pickle_path = os.path.join("data/texts", chapter_name)
chapter = chapter_name.split('_')[1]
with open(pickle_path, 'rb') as file:
            # use a distinct name so the chapter list being iterated is not shadowed
            chapter_sentences = pickle.load(file)
        for (interval, sentence) in chapter_sentences:
score = fuzz.token_set_ratio(keyword['transcription'], sentence)
if score > 95:
chapter_audio = chapter_name.split(".")[0]+".wav"
audio_path = os.path.join("data/audio_file_wav", chapter_audio)
results.append((audio_path, interval, sentence))
                # TODO: these are decimal minutes (e.g. 1.5 = 1 min 30 s),
                # not mm:ss; acceptable for now
min_start = round((interval[0]/1000)/60, 2)
min_end = round((interval[1]/1000)/60, 2)
print('Result is found in chapter {} between [{}, {}]'.format(chapter, min_start, min_end))
print("""
{}
""".format(sentence))
play_wav(audio_path, interval)
print("""
Say 'Next!' to find next match with {} in the text
Say 'Stop!' to play the audio_book from current match
""".format(keyword['transcription']))
user = recognize_speech_from_mic()
if user['transcription'] == 'next':
pass
elif user['transcription'] == 'stop':
new_interval = (interval[0], interval[1] + 5000)
play_wav(audio_path, new_interval)
flag = True
break
else:
print("Couldn't Understand the command")
break
if flag:
break
print("No match has been found in chapter {}".format(chapter))
print('We are done here!')
|
11559268
|
import numpy as np
from deap.helpers import getOutputShape
from deap.mappers import PhotonicConvolverMapper
from deap.mappers import ModulatorArrayMapper
from deap.mappers import PWBArrayMapper
def convDEAP(image, kernel, stride, bias=0, normval=255):
"""
    Image is a 3D matrix indexed by (row, col, depth).
    Kernel is a 4D matrix indexed by (row, col, depth, index).
The depth of the kernel must be equal to the depth of the input.
"""
assert image.shape[2] == kernel.shape[2]
# Allocate memory for storing result of convolution
outputShape = getOutputShape(image.shape, kernel.shape, stride=stride)
output = np.zeros(outputShape)
# Build the photonic circuit
weightBanks = []
inputShape = (kernel.shape[0], kernel.shape[1])
for k in range(image.shape[2]):
pc = PhotonicConvolverMapper.build(
imageShape=inputShape,
kernelShape=inputShape,
power=normval)
weightBanks.append(pc)
for k in range(kernel.shape[3]):
# Load weights
weights = kernel[:, :, :, k]
for c in range(weights.shape[2]):
PWBArrayMapper.updateKernel(
weightBanks[c].pwbArray,
weights[:, :, c])
for h in range(0, outputShape[0], stride):
for w in range(0, outputShape[1], stride):
                # Load inputs (kernel.shape[0] rows by kernel.shape[1] cols)
                inputs = image[h:min(h + kernel.shape[0], image.shape[0]),
                               w:min(w + kernel.shape[1], image.shape[1]), :]
for c in range(kernel.shape[2]):
ModulatorArrayMapper.updateInputs(
weightBanks[c].modulatorArray,
inputs[:, :, c],
normval=normval)
# Perform convolution:
for c in range(kernel.shape[2]):
output[h, w, k] += weightBanks[c].step()
output[h, w, k] += bias
return output
def convDEAP_GIP(image, kernel, stride, convolverShape=None):
"""
    Image is a 3D matrix indexed by (row, col, depth).
    Kernel is a 4D matrix indexed by (row, col, depth, index).
The depth of the kernel must be equal to the depth of the input.
"""
assert image.shape[2] == kernel.shape[2]
assert kernel.shape[2] == 1 and kernel.shape[3] == 1
if convolverShape is None:
convolverShape = image.shape
# Define convolutional parameters
Hm, Wm = convolverShape[0], convolverShape[1]
H, W = image.shape[0], image.shape[1]
R = kernel.shape[0]
# Allocate memory for storing result of convolution
outputShape = getOutputShape(image.shape, kernel.shape, stride=stride)
output = np.zeros(outputShape)
# Load weights
pc = PhotonicConvolverMapper.build(
imageShape=convolverShape,
kernel=kernel[:, :, 0, 0], power=255)
input_buffer = np.zeros(convolverShape)
for h in range(0, H - R + 1, Hm - R + 1):
for w in range(0, W - R + 1, Wm - R + 1):
inputs = image[h:min(h + Hm, H), w:min(w + Wm, W), 0]
# Load inputs into a buffer if convolution shape doesn't tile
# nicely.
input_buffer[:inputs.shape[0], :inputs.shape[1]] = inputs
input_buffer[inputs.shape[0]:, inputs.shape[1]:] = 0
# Update the inputs to the system.
ModulatorArrayMapper.updateInputs(
pc.modulatorArray,
input_buffer,
normval=255)
# Perform the convolution and store to memory
result = pc.step()[:min(h + Hm, H) - h - R + 1,
:min(w + Wm, W) - w - R + 1]
            output[h:min(h + Hm, H) - R + 1,
                   w:min(w + Wm, W) - R + 1,
                   0] = result
return output
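# Usage sketch (shapes only; assumes the deap package imported above is
# installed): a 3-channel 8x8 image convolved with two 3x3 kernels.
#
#   image = np.random.randint(0, 256, size=(8, 8, 3)).astype(float)
#   kernel = np.random.rand(3, 3, 3, 2)
#   out = convDEAP(image, kernel, stride=1)
#   print(out.shape)  # (6, 6, 2), per getOutputShape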
|
11559276
|
import numpy as np
import torch
#diabetes prevalence for china
#https://jamanetwork.com/journals/jama/fullarticle/1734701
male = np.array([5.2755905511811, 8.346456692913385, 13.543307086614174, 17.95275590551181, 20.708661417322837, 21.653543307086615])
female = np.array([4.015748031496061, 5.1181102362204705, 9.05511811023622, 17.401574803149607, 24.488188976377955, 25.196850393700785])
male = male/100
female = female/100
#https://www.statista.com/statistics/282119/china-sex-ratio-by-age-group/
sex_ratio = np.zeros(101)
sex_ratio[0:5] = 113.91
sex_ratio[5:10] = 118.03
sex_ratio[10:15] = 118.62
sex_ratio[15:20] = 118.14
sex_ratio[20:25] = 112.89
sex_ratio[25:30] = 105.39
sex_ratio[30:35] = 101.05
sex_ratio[35:40] = 102.84
sex_ratio[40:45] = 103.75
sex_ratio[45:50] = 103.64
sex_ratio[50:55] = 102.15
sex_ratio[55:60] = 101.65
sex_ratio[60:65] = 100.5
sex_ratio[65:70] = 96.94
sex_ratio[70:75] = 94.42
sex_ratio[75:80] = 89.15
sex_ratio[80:85] = 76.97
sex_ratio[85:90] = 71.16
sex_ratio[90:95] = 48.74
sex_ratio[95:] = 40.07
#calculate male to female ratio within each age bucket, and use this to combine the male vs female
#prevalence numbers
sex_ratio = sex_ratio/(sex_ratio + 100)
age_distribution = [16113.281,16543.361,16875.302,17118.429,17282.064,17375.527,17408.145,17389.238,17328.13,17234.143,17117.175,16987.122,16850.435,16715.289,16592.73,16484.473,16388.786,16370.261,16460.9,16637.439,16866.861,17182.465,17477.132,17702.896,17928.813,18144.994,18201.129,18841.832,20387.657,22413.391,24308.028,26355.485,27269.657,26400.295,24405.505,22597.72,20719.355,19296.916,18726.536,18750.928,18640.938,18451.511,18716.505,19599.644,20865.548,22101.75,23374.699,24376.638,24907.095,25077.435,25250.357,25414.362,25172.526,24383.003,23225.134,22043.117,20795.729,19608.86,18589.082,17703.703,16743.545,15666.543,14988.213,14917.427,15198.411,15425.124,15749.105,15550.741,14503.063,12921.733,11444.972,9939.85,8651.521,7764.623,7148.723,6478.704,5807.535,5222.027,4729.055,4307.295,3931.038,3608.42,3272.336,2887.659,2481.964,2118.152,1783.88,1480.587,1215.358,983.8,739.561,551.765,453.96,342.463,217.275,145.809,122.178,96.793,69.654,40.759,74.692]
age_distribution = np.array(age_distribution)
intervals = [(18, 30), (30, 40), (40, 50), (50, 60), (60, 70), (70, 100)]
percent_male_intervals = np.zeros(len(intervals))
for i in range(len(intervals)):
age_frequency_within_interval = age_distribution[intervals[i][0]:intervals[i][1]]/age_distribution[intervals[i][0]:intervals[i][1]].sum()
percent_male_intervals[i] = np.dot(age_frequency_within_interval, sex_ratio[intervals[i][0]:intervals[i][1]])
p_diabetes = male*percent_male_intervals + female*(1-percent_male_intervals)
#split 70+ into 70-80 and 80-100, and assume the same distribution within each
intervals = [(18, 30), (30, 40), (40, 50), (50, 60), (60, 70), (70, 80), (80, 100)]
p_diabetes_expanded = np.zeros(len(intervals))
p_diabetes_expanded[:-1] = p_diabetes
p_diabetes_expanded[-1] = p_diabetes_expanded[-2]
p_diabetes = p_diabetes_expanded
#hypertension by age for china
#https://www.ahajournals.org/doi/full/10.1161/CIRCULATIONAHA.117.032380
p_hyp_data = np.zeros(101)
p_hyp_data[:18] = 0
p_hyp_data[18:25] = 4.0
p_hyp_data[25:35] = 6.1
p_hyp_data[35:45] = 15.0
p_hyp_data[45:55] = 29.6
p_hyp_data[55:65] = 44.6
p_hyp_data[65:75] = 55.7
p_hyp_data[75:] = 60.2
p_hyp_data = p_hyp_data/100
#convert this into the same age intervals as the diabetes data by assuming constant
#rate within each bucket of the hypertension data
p_hyp = np.zeros(len(intervals))
for i in range(len(intervals)):
age_frequency_within_interval = age_distribution[intervals[i][0]:intervals[i][1]]/age_distribution[intervals[i][0]:intervals[i][1]].sum()
p_hyp[i] = np.dot(age_frequency_within_interval, p_hyp_data[intervals[i][0]:intervals[i][1]])
#age distribution of adult covid patients from china CDC
p_age = torch.zeros(len(intervals)).double()
p_age[0] = 3619
p_age[1] = 7600
p_age[2] = 8571
p_age[3] = 10008
p_age[4] = 8583
p_age[5] = 3918
p_age[6] = 1408
p_age = p_age/p_age.sum()
#probability of having hypertension for each age group
p_hyp = torch.tensor(p_hyp)
#probability of having diabetes for each age group
p_diabetes = torch.tensor(p_diabetes)
#mortality rate for hypertensive patients, diabetic patients, and for each age group
#http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
target_diabetes = torch.tensor(0.073).double()
target_hyp = torch.tensor(0.06).double()
#n_70_80 = 3918.
#n_over_80 = 1408.
target_age = torch.tensor([0.2, 0.2, 0.4, 1.3, 3.6, 8.0, 14.8]).double()
target_age = target_age/100
#https://www-nature-com.ezp-prod1.hul.harvard.edu/articles/hr201767
p_hyp_given_diabetes = 0.5
#calculate conditional probabilities of various events for each age group
p_hyp_given_not_diabetes = (p_hyp - p_hyp_given_diabetes*p_diabetes)/(1 - p_diabetes)
p_diabetes_total = (p_age*p_diabetes).sum()
p_hyp_total = (p_age*p_hyp).sum()
p_diabetes_given_hyp = p_hyp_given_diabetes*p_diabetes/p_hyp
#probability of being in each age group given a comorbidity
p_age_given_diabetes = p_diabetes*p_age/p_diabetes_total
p_age_given_hyp = p_hyp*p_age/p_hyp_total
def model_age(c_age, c_hyp, c_diabetes, c_intercept):
return p_diabetes*p_hyp_given_diabetes*(torch.sigmoid(c_age + c_hyp + c_diabetes + c_intercept)) + \
(1 - p_diabetes)*p_hyp_given_not_diabetes*(torch.sigmoid(c_age + c_hyp + c_intercept)) + \
p_diabetes*(1 - p_hyp_given_diabetes)*(torch.sigmoid(c_age + c_diabetes + c_intercept)) + \
(1 - p_diabetes) * (1 - p_hyp_given_not_diabetes)*torch.sigmoid(c_age + c_intercept)
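# model_age marginalizes mortality over the four comorbidity combinations
# within each age group:
#   P(death | age) = sum_{d,h in {0,1}} P(d, h | age) * sigmoid(c_age + d*c_diabetes + h*c_hyp + c_intercept)
# with P(d, h | age) factored via p_diabetes, p_hyp_given_diabetes and
# p_hyp_given_not_diabetes defined above.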
def model_diabetes(c_age, c_hyp, c_diabetes, c_intercept):
preds_by_age = p_age_given_diabetes*p_hyp_given_diabetes*torch.sigmoid(c_age + c_hyp + c_diabetes + c_intercept) + \
p_age_given_diabetes*(1 - p_hyp_given_diabetes)*torch.sigmoid(c_age + c_diabetes + c_intercept)
return preds_by_age.sum()
def model_hyp(c_age, c_hyp, c_diabetes, c_intercept):
preds_by_age = p_age_given_hyp*p_diabetes_given_hyp*torch.sigmoid(c_age + c_hyp + c_diabetes + c_intercept) + \
p_age_given_hyp*(1 - p_diabetes_given_hyp)*torch.sigmoid(c_age + c_hyp + c_intercept)
return preds_by_age.sum()
def loss(c_age, c_hyp, c_diabetes, c_intercept):
preds_age = model_age(c_age, c_hyp, c_diabetes, c_intercept)
preds_diabetes = model_diabetes(c_age, c_hyp, c_diabetes, c_intercept)
preds_hyp = model_hyp(c_age, c_hyp, c_diabetes, c_intercept)
return torch.nn.MSELoss()(preds_age, target_age) + torch.nn.MSELoss()(preds_diabetes, target_diabetes) + torch.nn.MSELoss()(preds_hyp, target_hyp)
num_restarts = 10
c_age_store = torch.zeros(num_restarts, len(intervals)).double()
c_diabetes_store = torch.zeros(num_restarts).double()
c_hyp_store = torch.zeros(num_restarts).double()
for restart in range(num_restarts):
print(restart)
num_iter = 10000
c_age = torch.rand(len(intervals), requires_grad=True, dtype=torch.double)
c_diabetes = torch.rand(1, requires_grad=True, dtype=torch.double)
c_hyp = torch.rand(1, requires_grad=True, dtype=torch.double)
c_intercept = torch.tensor(0., requires_grad=False, dtype=torch.double)
optimizer = torch.optim.Adam([c_age, c_diabetes, c_hyp], lr=1e-1)
for t in range(num_iter):
loss_itr = loss(c_age, c_hyp, c_diabetes, c_intercept)
# print(loss_itr.item())
optimizer.zero_grad()
loss_itr.backward()
optimizer.step()
print(loss_itr)
print(model_age(c_age, c_hyp, c_diabetes, c_intercept), model_diabetes(c_age, c_hyp, c_diabetes, c_intercept).item(), model_hyp(c_age, c_hyp, c_diabetes, c_intercept).item())
c_age_store[restart] = c_age
c_diabetes_store[restart] = c_diabetes
c_hyp_store[restart] = c_hyp
# print(c_age, c_diabetes.item(), c_hyp.item(), c_intercept.item())
np.savetxt('c_age.txt', c_age_store.detach().numpy(), delimiter = ',')
np.savetxt('c_diabetes.txt', c_diabetes_store.detach().numpy(), delimiter = ',')
np.savetxt('c_hypertension.txt', c_hyp_store.detach().numpy(), delimiter = ',')
np.savetxt('comorbidity_age_intervals.txt', intervals, delimiter = ',', fmt='%d')
|
11559277
|
import ctypes
from ctypes import wintypes
_GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
_GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
_GetShortPathNameW.restype = wintypes.DWORD
def get_short_path_name(long_name):
"""
Gets the short path name of a given long path.
http://stackoverflow.com/a/23598461/200291
"""
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
if output_buf_size >= needed:
return output_buf.value
else:
output_buf_size = needed
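if __name__ == "__main__":
    # Demo sketch (Windows only): print the 8.3 short form of this
    # interpreter's path; any existing long path would do.
    import sys
    print(get_short_path_name(sys.executable))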
|
11559285
|
import numpy as np
from .fast import reduce_sum, reduce_prod, reduce_mean, reduce_std
from .table import Table
def reduce_by_key(table, column_name, func, check_sorted=True):
"""
Func is a string
:param table:
:param column_name:
:param func:
:param check_sorted:
:return:
"""
key_data = table[column_name]
key_index = table._index_column(column_name)
if func not in reduce_funcs:
raise ValueError('Reduction not available')
if check_sorted:
if not np.all(key_data == np.sort(key_data)):
raise ValueError('You can only reduce from a sorted column')
unique_keys = np.unique(key_data)
other_cols = set(table.keys) - {column_name}
new_data = [unique_keys]
new_index = [np.ones(len(unique_keys), dtype=np.uint8)]
new_keys = [column_name]
# Check if key data is a string, and substitute with an equivalent integer
if key_data.dtype.kind in {'S', 'U'}:
key_data = np.searchsorted(key_data, key_data)
for col in other_cols:
col_data = table[col]
if col_data.dtype.kind in {'S', 'U'}:
# Strings cannot be arithmetically reduced.
continue
col_index = table._index_column(col)
reduced_col_data, reduced_col_index = reduce_funcs[func](
key_data, key_index, col_data, col_index, len(unique_keys)
)
new_data.append(reduced_col_data)
new_index.append(reduced_col_index)
new_keys.append(col)
t = Table()
t.data = new_data
t.index = np.vstack(new_index)
t.keys = new_keys
return t
# Remember to update this dict when adding a new reduction
reduce_funcs = {'sum': reduce_sum,
'prod': reduce_prod,
'mean': reduce_mean,
'std': reduce_std}
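# Usage sketch (assumes a Table whose key column is already sorted):
#
#   summed = reduce_by_key(t, 'key', 'sum')
#
# Each numeric column is reduced once per unique key; string columns other
# than the key column are skipped, as noted in the loop above.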
|
11559287
|
def test_simple_comment(check_output):
check_output('''
main() {
extrn putchar;
/* a comment */
putchar('a');
}
''', 'a')
def test_comment_stops_at_first_terminator(check_output):
check_output('''
main() {
extrn putchar;
/* a comment */
putchar('a');
/* another comment */
}
''', 'a')
def test_comment_accepts_initial_asterisk(check_output):
check_output('''
main() {
extrn putchar;
/** a comment */
putchar('a');
}
''', 'a')
def test_comment_accepts_final_asterisk(check_output):
check_output('''
main() {
extrn putchar;
/* a comment **/
putchar('a');
}
''', 'a')
def test_comment_accepts_medial_asterisk(check_output):
check_output('''
main() {
extrn putchar;
/* a * comment */
putchar('a');
}
''', 'a')
def test_comment_accepts_newline(check_output):
check_output('''
main() {
extrn putchar;
/* a
multi
line
comment */
putchar('a');
}
''', 'a')
|
11559330
|
from __future__ import absolute_import
import unet_collection.keras_vision_transformer
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, concatenate
from unet_collection.keras_vision_transformer import swin_layers
from unet_collection.keras_vision_transformer import transformer_layers
from unet_collection.keras_vision_transformer import utils
def swin_transformer_stack(X, stack_num, embed_dim,
num_patch, num_heads,
window_size, num_mlp,
shift_window=True,
name=''):
'''
Stacked Swin Transformers that share the same token size.
Alternated Window-MSA and Swin-MSA will be configured if `shift_window=True`, Window-MSA only otherwise.
*Dropout is turned off.
'''
# Turn-off dropouts
    mlp_drop_rate = 0 # Dropout after each MLP layer
attn_drop_rate = 0 # Dropout after Swin-Attention
proj_drop_rate = 0 # Dropout at the end of each Swin-Attention block, i.e., after linear projections
drop_path_rate = 0 # Drop-path within skip-connections
qkv_bias = True # Convert embedded patches to query, key, and values with a learnable additive value
qk_scale = None # None: Re-scale query based on embed dimensions per attention head # Float for user specified scaling factor
if shift_window:
shift_size = window_size // 2
else:
shift_size = 0
for i in range(stack_num):
if i % 2 == 0:
shift_size_temp = 0
else:
shift_size_temp = shift_size
X = swin_layers.SwinTransformerBlock(dim=embed_dim, num_patch=num_patch, num_heads=num_heads,
window_size=window_size, shift_size=shift_size_temp,
num_mlp=num_mlp, qkv_bias=qkv_bias, qk_scale=qk_scale,
mlp_drop=mlp_drop_rate,
attn_drop=attn_drop_rate,
proj_drop=proj_drop_rate,
drop_path_prob=drop_path_rate,
                                             name='{}_{}'.format(name, i))(X)
return X
def swin_unet_2d_base(input_tensor, filter_num_begin, depth,
stack_num_down, stack_num_up,
patch_size, num_heads, window_size,
num_mlp, shift_window=True,
name='swin_unet'):
'''
The base of SwinUNET.
'''
    # Compute the number of patches to be embedded
input_size = input_tensor.shape.as_list()[1:]
num_patch_x = input_size[0]//patch_size[0]
num_patch_y = input_size[1]//patch_size[1]
# Number of Embedded dimensions
embed_dim = filter_num_begin
depth_ = depth
X_skip = []
X = input_tensor
# Patch extraction
X = transformer_layers.patch_extract(patch_size)(X)
# Embed patches to tokens
X = transformer_layers.patch_embedding(num_patch_x*num_patch_y, embed_dim)(X)
# The first Swin Transformer stack
X = swin_transformer_stack(X, stack_num=stack_num_down,
embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads[0], window_size=window_size[0], num_mlp=num_mlp,
shift_window=shift_window, name='{}_swin_down0'.format(name))
X_skip.append(X)
# Downsampling blocks
for i in range(depth_-1):
# Patch merging
X = transformer_layers.patch_merging((num_patch_x, num_patch_y),
embed_dim=embed_dim, name='down{}'.format(i))(X)
# update token shape info
embed_dim = embed_dim*2
num_patch_x = num_patch_x//2
num_patch_y = num_patch_y//2
# Swin Transformer stacks
X = swin_transformer_stack(X, stack_num=stack_num_down,
embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads[i+1], window_size=window_size[i+1], num_mlp=num_mlp,
shift_window=shift_window, name='{}_swin_down{}'.format(name, i+1))
# Store tensors for concat
X_skip.append(X)
# reverse indexing encoded tensors and hyperparams
X_skip = X_skip[::-1]
num_heads = num_heads[::-1]
window_size = window_size[::-1]
# upsampling begins at the deepest available tensor
X = X_skip[0]
# other tensors are preserved for concatenation
X_decode = X_skip[1:]
depth_decode = len(X_decode)
for i in range(depth_decode):
# Patch expanding
X = transformer_layers.patch_expanding(num_patch=(num_patch_x, num_patch_y),
embed_dim=embed_dim, upsample_rate=2,
return_vector=True)(X)
# update token shape info
embed_dim = embed_dim//2
num_patch_x = num_patch_x*2
num_patch_y = num_patch_y*2
# Concatenation and linear projection
X = concatenate([X, X_decode[i]], axis=-1, name='{}_concat_{}'.format(name, i))
X = Dense(embed_dim, use_bias=False, name='{}_concat_linear_proj_{}'.format(name, i))(X)
# Swin Transformer stacks
X = swin_transformer_stack(X, stack_num=stack_num_up,
embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads[i], window_size=window_size[i], num_mlp=num_mlp,
shift_window=shift_window, name='{}_swin_up{}'.format(name, i))
# The last expanding layer; it produces full-size feature maps based on the patch size
# !!! <--- "patch_size[0]" is used; it assumes patch_size = (size, size)
X = transformer_layers.patch_expanding(num_patch=(num_patch_x, num_patch_y),
embed_dim=embed_dim, upsample_rate=patch_size[0],
return_vector=False)(X)
return X
def swin_unet_2d(input_size,
n_labels,
filter_num_begin,
depth,
stack_num_down, stack_num_up,
patch_size, num_heads,
window_size, num_mlp,
shift_window,
output_activation='sigmoid'):
'''
filter_num_begin = 128 # number of channels in the first downsampling block;
it is also the number of embedded dimensions
depth = 4 # the depth of SwinUNET; depth=4 means three down/upsampling levels and a bottom level
stack_num_down = 2 # number of Swin Transformers per downsampling level
stack_num_up = 2 # number of Swin Transformers per upsampling level
    patch_size = (4, 4) # Extract 4-by-4 patches from the input image. Height and width of the patch must be equal.
num_heads = [4, 8, 8, 8] # number of attention heads per down/upsampling level
window_size = [4, 2, 2, 2] # the size of attention window per down/upsampling level
num_mlp = 512 # number of MLP nodes within the Transformer
shift_window=True # Apply window shifting, i.e., Swin-MSA
'''
inputs = Input(input_size)
X = inputs
X = swin_unet_2d_base(X, filter_num_begin, depth,
stack_num_down, stack_num_up,
patch_size, num_heads,
window_size, num_mlp,
shift_window=shift_window,
name='swin_unet')
output = Conv2D(filters=n_labels,
kernel_size=1,
padding='same',
activation=output_activation)(X)
model = Model(inputs, output)
return model
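# Usage sketch, wiring up the hyperparameters documented in the docstring
# above (the input size and label count are illustrative):
#
#   model = swin_unet_2d((128, 128, 3), n_labels=2, filter_num_begin=128,
#                        depth=4, stack_num_down=2, stack_num_up=2,
#                        patch_size=(4, 4), num_heads=[4, 8, 8, 8],
#                        window_size=[4, 2, 2, 2], num_mlp=512,
#                        shift_window=True)
#   model.summary()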
|
11559383
|
class Ladder:
"""
Ladder object. It starts at some lower position and ends at a higher position.
"""
def __init__(self, start: int, end: int):
self.start = start
self.end = end
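# Example: on a snakes-and-ladders style board, Ladder(3, 17) would carry a
# player landing on square 3 up to square 17.
if __name__ == "__main__":
    ladder = Ladder(3, 17)
    print(ladder.start, ladder.end)  # 3 17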
|
11559398
|
import warnings
warnings.warn(
"pyschema.contrib.avro is deprecated and will be removed.\n"
"Please use the pyschema_extensions.avro package instead.",
DeprecationWarning,
stacklevel=2
)
import pyschema_extensions.avro
from pyschema_extensions.avro import *
|
11559439
|
import logging
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, HTML, Submit, Column
from django import forms
from django.contrib.auth.models import User
from django.urls import reverse_lazy
from YtManagerApp.views.forms.auth import ExtendedUserCreationForm, ExtendedAuthenticationForm
logger = logging.getLogger("FirstTimeWizard")
class WelcomeForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Submit('submit', value='Continue')
)
class ApiKeyForm(forms.Form):
api_key = forms.CharField(label="YouTube API Key:")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'api_key',
Column(
Submit('submit', value='Continue'),
HTML('<a href="{% url \'first_time_2\' %}" class="btn btn-secondary">Skip</a>')
)
)
class UserCreationForm(ExtendedUserCreationForm):
form_action = reverse_lazy('first_time_2')
class LoginForm(ExtendedAuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'username',
'password',
'remember_me',
Column(
Submit('submit', value='Continue'),
HTML('<a href="{% url \'first_time_2\' %}?register=1" class="btn">Register new admin account</a>')
)
)
class PickAdminUserForm(forms.Form):
admin_user = forms.ModelChoiceField(
User.objects.order_by('username'),
label='User to promote to admin',
required=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'admin_user',
Column(
Submit('submit', value='Continue'),
HTML('<a href="{% url \'first_time_2\' %}®ister=1" class="btn">Register a new admin user</a>')
)
)
class ServerConfigForm(forms.Form):
allow_registrations = forms.BooleanField(
label="Allow user registrations",
help_text="Disabling this option will prevent anyone from registering to the site.",
initial=True,
required=False
)
sync_schedule = forms.CharField(
label="Synchronization schedule",
help_text="How often should the application look for new videos.",
initial="5 * * * *",
required=True
)
auto_download = forms.BooleanField(
label="Download videos automatically",
required=False
)
download_location = forms.CharField(
label="Download location",
help_text="Location on the server where videos are downloaded.",
required=True
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
HTML('<h3>Server settings</h3>'),
'sync_schedule',
'allow_registrations',
HTML('<h3>User settings</h3>'),
'auto_download',
'download_location',
Submit('submit', value='Continue'),
)
class DoneForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Submit('submit', value='Finish')
)
|
11559446
|
import numpy as np
import mmcv
from mmdet.models import DETECTORS, TwoStageDetector
from monorun.core import draw_box_3d_pred, show_bev
@DETECTORS.register_module()
class MonoRUnDetector(TwoStageDetector):
def simple_test(self, img, img_metas, proposals=None, rescale=False,
**kwargs):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, img_metas, rescale=rescale, **kwargs)
def show_result(self,
img,
cam_intrinsic,
result,
score_thr=0.3,
cov_scale=5.0,
bev_scale=25,
thickness=2,
win_name='',
show=False,
wait_time=0,
out_file=None,
views=['camera', 'bev']):
if self.roi_head.new_version:
result = result[0]
img = mmcv.imread(img)
img_show = []
if 'camera' in views:
img_pred_3d = img.copy()
draw_box_3d_pred(
img_pred_3d,
result['bbox_3d_results'],
cam_intrinsic,
score_thr=score_thr)
img_show.append(img_pred_3d)
if 'bev' in views:
viz_bev = show_bev(
img, None, result['bbox_results'], result['bbox_3d_results'],
cam_intrinsic, width=img.shape[1], height=img.shape[0], scale=bev_scale,
oc_maps=result.get('oc_maps'),
std_maps=result.get('std_maps'),
pose_covs=result.get('pose_covs'),
cov_scale=cov_scale,
                score_thr=score_thr, thickness=thickness)
img_show.append(viz_bev)
if len(img_show) == 1:
img_show = img_show[0]
elif len(img_show) == 2:
img_show = np.concatenate(img_show, axis=0)
else:
raise ValueError('no view to show')
if show:
mmcv.imshow(img_show, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img_show, out_file)
if not (show or out_file):
return img_show
|
11559475
|
import os
import unittest
from typing import Tuple
from unittest.case import TestCase
from eth_typing.evm import ChecksumAddress
from web3 import Web3
from moonworm.contracts import ERC1155
from moonworm.web3_util import build_transaction, submit_transaction
from ..manage import deploy_ERC1155
def read_testnet_env_variables() -> Tuple[Web3, ChecksumAddress, str]:
provider_path = os.environ.get("MOONWORM_TESTNET_PATH")
if provider_path is None:
raise ValueError("MOONWORM_TESTNET_PATH env variable is not set")
raw_address = os.environ.get("MOONWORM_TEST_ETHEREUM_ADDRESS")
if raw_address is None:
raise ValueError("MOONWORM_TEST_ETHEREUM_ADDRESS env variable is not set")
private_key = os.environ.get("MOONWORM_TEST_ETHEREUM_ADDRESS_PRIVATE_KEY")
    if private_key is None:
raise ValueError(
"MOONWORM_TEST_ETHEREUM_ADDRESS_PRIVATE_KEY env variable is not set"
)
return (
Web3(Web3.HTTPProvider(provider_path)),
Web3.toChecksumAddress(raw_address),
private_key,
)
class MoonwormTestnetTestCase(TestCase):
def setUp(self) -> None:
self.basedir = os.path.dirname(os.path.dirname(__file__))
try:
(
self.web3,
self.test_address,
self.test_address_pk,
) = read_testnet_env_variables()
except Exception as e:
raise unittest.SkipTest(f"Skipping test because of : {str(e)}")
def _deploy_contract(self) -> ChecksumAddress:
TOKEN_NAME = "MO<PASSWORD>"
TOKEN_SYMBOL = "CNTPD"
TOKEN_URI = "moonstream.to/moonworm/"
_, contract_address = deploy_ERC1155(
self.web3,
TOKEN_NAME,
TOKEN_SYMBOL,
TOKEN_URI,
self.test_address,
self.test_address,
self.test_address_pk,
)
return contract_address
def test_deployment(self) -> None:
contract_address = self._deploy_contract()
contract = self.web3.eth.contract(contract_address, abi=ERC1155.abi())
tx = build_transaction(
self.web3, contract.functions["create"]("1", b""), self.test_address
)
submit_transaction(self.web3, tx, self.test_address_pk)
if __name__ == "__main__":
unittest.main()
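# Example invocation sketch (hypothetical module path; the env vars are the
# ones read by read_testnet_env_variables above):
#
#   MOONWORM_TESTNET_PATH=https://... \
#   MOONWORM_TEST_ETHEREUM_ADDRESS=0x... \
#   MOONWORM_TEST_ETHEREUM_ADDRESS_PRIVATE_KEY=... \
#   python -m moonworm.tests.test_manage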
|
11559495
|
import os
import sys
import yaml
# To update the package version number, edit CITATION.cff
citationfile = os.path.join(sys.exec_prefix, 'citation/grlc', 'CITATION.cff')
with open(citationfile, 'r') as f:
data = yaml.safe_load(f)
__version__ = data['version']
|
11559514
|
import os
import pathlib
import yaml
import panel as pn
import param
import requests
from panel.reactive import ReactiveHTML
from lumen.sources import Source
from .base import WizardItem
from .fast import FastComponent
from .gallery import Gallery, GalleryItem
from .state import state
ASSETS_DIR = pathlib.Path(__file__).parent / 'assets'
class SourceEditor(FastComponent):
cache_dir = param.String(label="Cache directory (optional)", precedence=1, doc="""
Enter a relative cache directory.""")
preview = param.Parameter(default=pn.Row(sizing_mode='stretch_width'))
source_type = param.String(default="")
shared = param.Boolean(default=False, precedence=1, doc="""
Whether the Source can be shared.""")
sizing_mode = param.String(default='stretch_width')
source = param.Parameter(precedence=-1)
spec = param.Dict(precedence=-1)
resize = param.Integer(default=0)
margin = param.Integer(default=5)
_scripts = {
'resize': ['window.dispatchEvent(new Event("resize"))']
}
_template = """
<span style="font-size: 2em"><b>{{ name }} - {{ source_type }}</b></span>
<div id="preview">${preview}</div>
"""
thumbnail = ASSETS_DIR / 'source.png'
def __new__(cls, **params):
if cls is not SourceEditor:
return super().__new__(cls)
editors = param.concrete_descendents(cls)
source_type = params.get('spec', {}).get('type')
for editor in editors.values():
if editor.source_type == source_type:
return super().__new__(editor)
return super().__new__(cls)
def __init__(self, **params):
super().__init__(**{k: v for k, v in params.items() if k in self.param})
def _update_spec(self, *events):
for event in events:
self.spec[event.name] = event.new
def _preview(self, event):
source = Source.from_spec(self.spec)
tables = source.get_tables()
table = pn.widgets.Select(name='Select table', options=tables, width=200, margin=0)
table_view = pn.widgets.Tabulator(
sizing_mode='stretch_width', pagination='remote', page_size=12, theme='midnight',
height=400
)
load = pn.widgets.Button(name='Load table', width=200, margin=(15, 0))
def load_table(event):
table_view.value = source.get(table.value)
load.on_click(load_table)
self.preview[:] = [
pn.Column(table, load, margin=(0, 20, 0, 0)),
table_view
]
self.resize += 1
def _save(self):
pass
class SourceGalleryItem(GalleryItem):
editor = param.ClassSelector(class_=SourceEditor, precedence=-1)
thumbnail = param.Filename()
_template = """
    <span style="font-size: 1.2em; font-weight: bold;">{{ name }}</span>
<fast-switch id="selected" checked=${selected} style="float: right;"></fast-switch>
<div id="details" style="margin: 1em 0; max-width: 320px;">
${view}
</div>
<p style="height: 4em; max-width: 320px;">{{ description }}</p>
<fast-button id="edit-button" style="width: 320px;" onclick="${_open_modal}">Edit</fast-button>
"""
def __init__(self, **params):
if 'description' not in params:
params['description'] = ''
super().__init__(**params)
self.view = pn.pane.PNG(self.thumbnail, height=200, max_width=300, align='center')
self._modal_content = [self.editor]
@param.depends('selected', watch=True)
def _add_spec(self):
sources = state.spec['sources']
if self.selected:
sources[self.name] = self.spec
elif self.name in sources:
del sources[self.name]
class SourceGallery(WizardItem, Gallery):
"""
Select the data sources to add to your dashboard specification.
"""
path = param.Foldername()
sources = param.Dict(default={}, doc="The list of sources added to the dashboard.", precedence=-1)
_template = """
<span style="font-size: 1.5em">Sources</span>
<fast-divider></fast-divider>
    <span style="font-size: 1.2em; font-weight: bold;">{{ __doc__ }}</span>
<div id="items" style="margin: 1em 0; display: flex; flex-wrap: wrap; gap: 1em;">
{% for item in items.values() %}
<fast-card id="source-container" style="width: 350px; height: 380px;">
${item}
</fast-card>
{% endfor %}
<fast-card id="sources-container-new" style="height: 380px; width: 350px; padding: 1em;">
<div style="display: grid;">
<span style="font-size: 1.25em; font-weight: bold;">Add new source</span>
<i id="add-button" onclick="${_open_modal}" class="fa fa-plus" style="font-size: 14em; margin: 0.2em auto;" aria-hidden="true"></i>
</div>
</fast-card>
</div>
"""
_gallery_item = SourceGalleryItem
_editor_type = SourceEditor
def __init__(self, **params):
super().__init__(**params)
for name, item in self.items.items():
self.sources[name] = item.editor
self._editor = SourcesEditor(spec={}, margin=10)
self._save_button = pn.widgets.Button(name='Save sources')
self._save_button.on_click(self._save_sources)
self._modal_content = [self._editor, self._save_button]
@param.depends('spec', watch=True)
def _update_params(self):
for name, source in self.spec.items():
self.sources[name] = editor = SourceEditor(name=name, spec=source)
self.items[name] = SourceGalleryItem(
name=name, spec=source, selected=True, editor=editor,
thumbnail=editor.thumbnail
)
self.param.trigger('sources')
self.param.trigger('items')
def _add_source(self, event):
state.modal[:] = self._modal_content
state.template.open_modal()
def _save_sources(self, event):
for name, source in self._editor.sources.items():
path = pathlib.Path(self.path) / f'{name}.yaml'
with open(path, 'w', encoding='utf-8') as f:
f.write(yaml.dump(source.spec))
self.spec[name] = source.spec
item = SourceGalleryItem(
name=name, spec=source.spec, margin=0, selected=True,
editor=source, thumbnail=source.thumbnail
)
self.items[name] = item
self.sources[name] = source
self.param.trigger('items')
self.param.trigger('sources')
self._editor.sources = {}
state.template.close_modal()
class IntakeSourceEditor(SourceEditor):
"""
Declare the Intake catalog either by entering the URI of the catalog file, uploading a file or manually entering the catalog file.
"""
uri = param.String(precedence=1)
source_type = param.String(default='intake', readonly=True)
editor = param.Parameter(precedence=-1)
upload = param.Parameter(precedence=-1)
_template = """
<span style="font-size: 1.5em">{{ name }} - Intake Source</span>
<p>{{ __doc__ }}</p>
<fast-divider></fast-divider>
<form>
<div style="display: flex; flex-wrap: wrap;">
<div style="flex: 25%; min-width: 200px; margin-right: 1em;">
<div style="display: grid;">
<label for="URI"><b>URI</b></label>
<fast-text-field id="uri" placeholder="Enter a URI" value="${uri}">
</fast-text-field>
</div>
<div id="upload" style="display: grid; margin-top: 1em;">${upload}</div>
</div>
<div style="flex: 70%; min-width: 600px; display: block">
<label for="catalog"><b>Catalog</b></label>
<div id="catalog">${editor}</div>
</div>
</div>
<div style="display: flex; margin-top: 1em;">
<div style="display: grid; margin-right: 1em;">
<label for="cache_dir"><b>{{ param.cache_dir.label }}</b></label>
<fast-text-field id="cache_dir" placeholder="{{ param.cache_dir.doc }}" value="${cache_dir}" style="min-width: 300px;">
</fast-text-field>
</div>
<div style="display: grid;">
<label for="shared"><b>{{ param.shared.label }}</b></label>
<fast-checkbox id="shared" value="${shared}"></fast-checkbox>
</div>
</div>
</form>
<fast-button id="preview-button" onclick="${_preview}" style="position: absolute; right: 5px; margin-top: 1.5em; z-index: 100;">
Preview
</fast-button>
<fast-divider></fast-divider>
<div id="preview" style="margin-top: 1.8em;">${preview}</div>
"""
_dom_events = {'cache_dir': ['keyup'], 'uri': ['keyup']}
def __init__(self, **params):
import lumen.sources.intake # noqa
params.pop('source_type', None)
self.editor = pn.widgets.Ace(language='yaml', theme='dracula', margin=0, sizing_mode='stretch_width')
self.upload = pn.widgets.FileInput(sizing_mode='stretch_width', margin=0)
super().__init__(**params)
@property
def thumbnail(self):
return pathlib.Path(__file__).parent / 'assets' / 'intake.png'
@param.depends('upload.value', watch=True)
def _upload_catalog(self):
self.editor.value = self.upload.value.decode('utf-8')
@param.depends('editor.value', watch=True)
def _update_catalog(self):
self.spec['catalog'] = yaml.safe_load(self.editor.value)
@param.depends('uri', watch=True)
def _load_file(self):
uri = os.path.expanduser(self.uri)
if os.path.isfile(uri):
with open(uri) as f:
self.editor.value = f.read()
else:
            self.editor.value = requests.get(self.uri).text
class IntakeDremioSourceEditor(SourceEditor):
"""
Provide a Dremio URI.
"""
cert = param.String(default=None)
load_schema = param.Boolean(default=False)
tls = param.Boolean(doc="Enable TLS")
uri = param.String(doc="Enter a URI")
username = param.String(doc="Enter a username")
password = param.String(doc="Enter a password")
source_type = param.String(default='intake_dremio', readonly=True)
_template = """
<span style="font-size: 1.5em">{{ name }} - Intake Dremio Source</span>
<p>{{ __doc__ }}</p>
<fast-divider></fast-divider>
<form>
<div style="display: flex; flex-wrap: wrap;">
<div style="flex: 25%; min-width: 200px; margin-right: 1em;">
<div style="display: grid;">
<label for="URI"><b>URI</b></label>
<fast-text-field id="uri" placeholder="Enter a URI" value="${uri}">
</fast-text-field>
</div>
<div style="display: grid;">
<label for="username"><b>Username</b></label>
<fast-text-field id="username" placeholder="Enter your username" value="${username}">
</fast-text-field>
</div>
<div style="display: grid;">
<label for="password"><b>Password</b></label>
<fast-text-field id="password" type="password" placeholder="Enter password" value="${password}">
</fast-text-field>
</div>
<fast-checkbox id="load_schema" checked="${load_schema}">Load schema</fast-checkbox>
<fast-checkbox id="tls" checked="${tls}">Enable TLS</fast-checkbox>
<div style="display: grid;">
<label for="cert"><b>Certificate</b></label>
<fast-text-field id="cert" disabled=${tls} placeholder="Enter path to a certificate" value="${cert}">
</fast-text-field>
</div>
</div>
</div>
<div style="display: flex; margin-top: 1em;">
<div style="display: grid; margin-right: 1em;">
<label for="cache_dir"><b>{{ param.cache_dir.label }}</b></label>
<fast-text-field id="cache_dir" placeholder="{{ param.cache_dir.doc }}" value="${cache_dir}" style="min-width: 300px;">
</fast-text-field>
</div>
<div style="display: grid;">
<label for="shared"><b>{{ param.shared.label }}</b></label>
<fast-checkbox id="shared" value="${shared}"></fast-checkbox>
</div>
</div>
</form>
<fast-button id="preview-button" onclick="${_preview}" style="position: absolute; right: 5px; margin-top: 1.5em; z-index: 100;">
Preview
</fast-button>
<fast-divider></fast-divider>
<div id="preview">${preview}</div>
"""
    _dom_events = {'uri': ['keyup'], 'username': ['keyup'], 'password': ['keyup']}
def __init__(self, **params):
import lumen.sources.intake # noqa
super().__init__(**params)
@param.depends('cert', 'load_schema', 'tls', 'uri', 'password', 'username', watch=True)
def _update_spec(self):
for p in ('cert', 'load_schema', 'tls', 'uri', 'password', 'username'):
self.spec[p] = getattr(self, p)
@property
def thumbnail(self):
return pathlib.Path(__file__).parent / 'assets' / 'intake.png'
class FileSourceTable(ReactiveHTML):
uri = param.String(doc="Enter a URI")
margin = param.Integer(default=0)
remove = param.Boolean(default=False, precedence=-1)
sizing_mode = param.String(default='stretch_width')
_template = """
<form style="display: flex; flex-wrap: wrap; margin-top: 0.5em;">
<div style="flex: 25%; min-width: 150px; display: grid; margin-right: 1em;">
<label for="name"><b>Table Name</b></label>
<fast-text-field id="name" placeholder="Enter a name" value="${name}"></fast-text-field>
</div>
<div style="flex: 60%; min-width: 300px; display: grid; margin-right: 1em;">
<label for="uri"><b>URI</b></label>
<fast-text-field id="uri" placeholder="{{ param.uri.doc }}" value="${uri}"></fast-text-field>
</div>
<fast-button style="width: 20px; margin-top: auto;" id="remove-source" onclick="${_remove}" appearance="accent">
<b>-</b>
</fast-button>
</form>
"""
_dom_events = {'uri': ['keyup'], 'name': ['keyup']}
def _remove(self, event):
self.remove = True
class FileSourceEditor(SourceEditor):
"""
Declare a list of tables by providing a name and a URI to a local or remote file for each.
"""
source_type = param.String(default='file', readonly=True)
tables = param.List()
kwargs = param.Dict(default={})
_template = """
<span style="font-size: 1.5em">{{ name }} - File Source</span>
<p>{{ __doc__ }}</p>
<fast-divider></fast-divider>
<div id="tables">
<fast-button id="add-table" onclick="${_add_table}" appearance="outline" style="float: right">
<b>+</b>
</fast-button>
<span style="font-size: 1.2em; margin: 1em 0;"><b>Tables</b></span>
${tables}
</div>
<div style="display: flex; margin-top: 1em;">
<div style="display: grid; margin-right: 1em;">
<label for="cache_dir"><b>{{ param.cache_dir.label }}</b></label>
<fast-text-field id="cache_dir" placeholder="{{ param.cache_dir.doc }}" value="${cache_dir}" style="min-width: 300px;">
</fast-text-field>
</div>
<div style="display: grid;">
<label for="shared"><b>{{ param.shared.label }}</b></label>
<fast-checkbox id="shared" value="${shared}"></fast-checkbox>
</div>
</div>
<fast-button id="preview-button" onclick="${_preview}" style="position: absolute; right: 5px; margin-top: 1.5em; z-index: 100;">
Preview
</fast-button>
<fast-divider></fast-divider>
<div id="preview" style="margin-top: 4em">${preview}</div>
"""
_dom_events = {'cache_dir': ['keyup']}
def __init__(self, **params):
if 'tables' in params:
tables = []
for name, table in params['tables'].items():
tables.append(FileSourceTable(name=name, uri=table))
params['tables'] = tables
params.pop('source_type', None)
super().__init__(**params)
@property
def thumbnail(self):
assets = pathlib.Path(__file__).parent / 'assets'
exts = {table.uri.split('.')[-1] for table in self.tables}
if len(exts) == 1:
            filename = assets / f'{list(exts)[0]}.png'
if os.path.isfile(filename):
return filename
def _add_table(self, event=None):
table = FileSourceTable()
table.param.watch(self._remove_table, 'remove')
self.tables += [table]
def _remove_table(self, event):
self.tables.remove(event.obj)
self.param.trigger('tables')
class SourcesEditor(WizardItem):
"""
Declare the data sources for your dashboard by giving your source
a name, selecting the source type and adding the source.
"""
disabled = param.Boolean(default=True)
sources = param.Dict(default={}, doc="The list of sources added to the dashboard.")
source_name = param.String(doc="Enter a name for the source")
source_type = param.Selector(doc="Select the type of source")
_template = """
<span style="font-size: 2em">Source Editor</span>
<p>{{ __doc__ }}</p>
<fast-divider></fast-divider>
<div style="display: flex;">
<form role="form" style="flex: 20%; max-width: 250px; line-height: 2em;">
<div style="display: grid;">
<label for="source-name-${id}"><b>{{ param.source_name.label }}</b></label>
<fast-text-field id="source-name" placeholder="{{ param.source_name.doc }}" value="${source_name}">
</fast-text-field>
</div>
<div style="display: flex;">
<div style="display: grid; flex: auto;">
<label for="type-${id}">
<b>{{ param.source_type.label }}</b>
</label>
<fast-select id="source-select" style="min-width: 150px;" value="${source_type}">
{% for stype in param.source_type.objects %}
<fast-option id="source-option-{{ loop.index0 }}" value="{{ stype }}">{{ stype.title() }}</fast-option>
{% endfor %}
</fast-select>
<fast-tooltip anchor="type-${id}">{{ param.source_type.doc }}</fast-tooltip>
</div>
<fast-button id="submit" appearance="accent" style="margin-top: auto; margin-left: 1em; width: 20px;" onclick="${_add_source}" disabled="${disabled}">
<b style="font-size: 2em;">+</b>
</fast-button>
</div>
</form>
<div id="sources" style="flex: 75%; margin-left: 1em;">
{% for source in sources.values() %}
<div id="source-container">${source}</div>
        <fast-divider></fast-divider>
{% endfor %}
</div>
</div>
"""
_dom_events = {'source-name': ['keyup']}
def __init__(self, **params):
super().__init__(**params)
sources = param.concrete_descendents(Source)
self.param.source_type.objects = types = [
source.source_type for source in sources.values()
]+['intake', 'intake_dremio']
if self.source_type is None and types:
self.source_type = types[0]
@param.depends('source_name', watch=True)
def _enable_add(self):
self.disabled = not bool(self.source_name)
def _add_source(self, event):
self.spec[self.source_name] = spec = {'type': self.source_type}
editor = SourceEditor(
type=self.source_type, name=self.source_name, spec=spec,
sizing_mode='stretch_width'
)
self.sources[self.source_name] = editor
self.param.trigger('sources')
self.source_name = ''
self.ready = True
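# A minimal sketch (not part of the original module) of the `__new__`
# dispatch defined on `SourceEditor`: instantiating the base class with a
# spec whose `type` matches a concrete subclass's `source_type` returns that
# subclass.
#
#   editor = SourceEditor(name='demo', spec={'type': 'file'})
#   assert isinstance(editor, FileSourceEditor)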
|
11559525
|
import os
import sys
from insights.client.constants import InsightsConstants as constants
from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml, PlaybookVerificationError
skipVerify = False
def read_playbook():
"""
Read in the stringified playbook yaml from stdin
"""
unverified_playbook = ''
for line in sys.stdin:
unverified_playbook += line
return unverified_playbook
if os.environ.get('SKIP_VERIFY'):
skipVerify = True
try:
playbook = read_playbook()
playbook_yaml = loadPlaybookYaml(playbook)
verified_playbook = verify(playbook_yaml, skipVerify)
except PlaybookVerificationError as err:
sys.stderr.write(err.message)
sys.exit(constants.sig_kill_bad)
print(playbook)
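# Example invocation sketch (hypothetical script name; the packaged entry
# point may differ): the playbook is piped in on stdin and echoed back once
# verification succeeds.
#
#   cat playbook.yml | python playbook_verifier_wrapper.py
#   cat playbook.yml | SKIP_VERIFY=1 python playbook_verifier_wrapper.py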
|
11559539
|
import pytest
from numpy import allclose, arange, array, asarray, dot, cov, corrcoef, float64
from thunder.series.readers import fromlist, fromarray
from thunder.images.readers import fromlist as img_fromlist
pytestmark = pytest.mark.usefixtures("eng")
def test_map(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
assert allclose(data.map(lambda x: x + 1).toarray(), [[2, 3], [4, 5]])
assert data.map(lambda x: 1.0*x, dtype=float64).dtype == float64
assert data.map(lambda x: 1.0*x).dtype == float64
def test_map_singletons(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
mapped = data.map(lambda x: x.mean())
assert mapped.shape == (2, 1)
def test_filter(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
assert allclose(data.filter(lambda x: x.sum() > 3).toarray(), [3, 4])
def test_flatten(eng):
arr = arange(2*2*5).reshape(2, 2, 5)
data = fromarray(arr, engine=eng)
assert data.flatten().shape == (4, 5)
assert allclose(data.flatten().toarray(), arr.reshape(2*2, 5))
def test_sample(eng):
data = fromlist([array([1, 5]), array([1, 10]), array([1, 15])], engine=eng)
assert allclose(data.sample(3).shape, (3, 2))
assert allclose(data.filter(lambda x: x.max() > 10).sample(1).toarray(), [1, 15])
def test_between(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
val = data.between(0, 2)
assert allclose(val.index, array([0, 1]))
assert allclose(val.toarray(), array([[4, 5], [8, 9]]))
def test_first(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
assert allclose(data.first(), [4, 5, 6, 7])
def test_select(eng):
index = ['label1', 'label2', 'label3', 'label4']
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng, index=index)
assert data.select('label1').shape == (2, 1)
assert allclose(data.select('label1').toarray(), [4, 8])
assert allclose(data.select(['label1']).toarray(), [4, 8])
assert allclose(data.select(['label1', 'label2']).toarray(), array([[4, 5], [8, 9]]))
assert data.select('label1').index == ['label1']
assert data.select(['label1']).index == ['label1']
def test_standardize_axis1(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
centered = data.center(1)
standardized = data.standardize(1)
zscored = data.zscore(1)
assert allclose(centered.toarray(), array([-2, -1, 0, 1, 2]), atol=1e-3)
assert allclose(standardized.toarray(),
array([0.70710, 1.41421, 2.12132, 2.82842, 3.53553]), atol=1e-3)
assert allclose(zscored.toarray(),
array([-1.41421, -0.70710, 0, 0.70710, 1.41421]), atol=1e-3)
def test_standardize_axis0(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
centered = data.center(0)
standardized = data.standardize(0)
zscored = data.zscore(0)
assert allclose(centered.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3)
assert allclose(standardized.toarray(), array([[1, 2], [3, 4]]), atol=1e-3)
assert allclose(zscored.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3)
def test_squelch(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
squelched = data.squelch(5)
assert allclose(squelched.toarray(), [[0, 0], [0, 0]])
squelched = data.squelch(3)
assert allclose(squelched.toarray(), [[0, 0], [3, 4]])
squelched = data.squelch(1)
assert allclose(squelched.toarray(), [[1, 2], [3, 4]])
def test_correlate(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
sig = [4, 5, 6, 7, 8]
corr = data.correlate(sig).toarray()
assert allclose(corr, 1)
sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]]
corrs = data.correlate(sigs).toarray()
assert allclose(corrs, [1, -1])
def test_correlate_multiindex(eng):
index = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
data = fromlist([array([1, 2, 3, 4, 5])], index=asarray(index).T, engine=eng)
sig = [4, 5, 6, 7, 8]
corr = data.correlate(sig).toarray()
assert allclose(corr, 1)
sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]]
corrs = data.correlate(sigs).toarray()
assert allclose(corrs, [1, -1])
def test_clip(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
assert allclose(data.clip(2).toarray(), [2, 2, 3, 4, 5])
assert allclose(data.clip(2, 3).toarray(), [2, 2, 3, 3, 3])
def test_mean(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.mean().toarray()
expected = data.toarray().mean(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_sum(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.sum().toarray()
expected = data.toarray().sum(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'int64'
def test_var(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.var().toarray()
expected = data.toarray().var(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_std(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.std().toarray()
expected = data.toarray().std(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_max(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.max().toarray()
expected = data.toarray().max(axis=0)
assert allclose(val, expected)
def test_min(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.min().toarray()
expected = data.toarray().min(axis=0)
assert allclose(val, expected)
def test_labels(eng):
x = [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])]
data = fromlist(x, labels=[0, 1, 2, 3], engine=eng)
assert allclose(data.filter(lambda x: x[0]>2).labels, array([2, 3]))
assert allclose(data[2:].labels, array([2, 3]))
assert allclose(data[1].labels, array([1]))
assert allclose(data[1, :].labels, array([1]))
assert allclose(data[[0, 2]].labels, array([0, 2]))
assert allclose(data.flatten().labels, array([0, 1, 2, 3]))
x = [array([[0, 1],[2, 3]]), array([[4, 5], [6, 7]])]
data = img_fromlist(x, engine=eng).toseries()
data.labels = [[0, 1], [2, 3]]
assert allclose(data.filter(lambda x: x[0]>1).labels, array([2, 3]))
assert allclose(data[0].labels, array([[0, 1]]))
assert allclose(data[:, 0].labels, array([[0], [2]]))
assert allclose(data.flatten().labels, array([0, 1, 2, 3]))
def test_labels_setting(eng):
x = [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])]
data = fromlist(x, engine=eng)
with pytest.raises(ValueError):
data.labels = [0, 1, 2]
def test_index_setting(eng):
data = fromlist([array([1, 2, 3]), array([2, 2, 4]), array([4, 2, 1])], engine=eng)
assert allclose(data.index, array([0, 1, 2]))
data.index = [3, 2, 1]
assert allclose(data.index, [3, 2, 1])
with pytest.raises(ValueError):
data.index = 5
with pytest.raises(ValueError):
data.index = [1, 2]
def test_select_by_index(eng):
data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng)
result = data.select_by_index(1)
assert allclose(result.toarray(), array([4, 5, 6, 7]))
assert allclose(result.index, array([1, 1, 1, 1]))
result = data.select_by_index(1, squeeze=True)
assert allclose(result.index, array([0, 1, 2, 3]))
index = [
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3]
]
data.index = array(index).T
result, mask = data.select_by_index(0, level=2, return_mask=True)
assert allclose(result.toarray(), array([0, 2, 6, 8]))
assert allclose(result.index, array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]))
assert allclose(mask, array([1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0]))
result = data.select_by_index(0, level=2, squeeze=True)
assert allclose(result.toarray(), array([0, 2, 6, 8]))
assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]]))
result = data.select_by_index([1, 0], level=[0, 1])
assert allclose(result.toarray(), array([6, 7]))
assert allclose(result.index, array([[1, 0, 0], [1, 0, 1]]))
result = data.select_by_index(val=[0, [2,3]], level=[0, 2])
assert allclose(result.toarray(), array([4, 5]))
assert allclose(result.index, array([[0, 1, 2], [0, 1, 3]]))
result = data.select_by_index(1, level=1, filter=True)
assert allclose(result.toarray(), array([0, 1, 6, 7]))
assert allclose(result.index, array([[0, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1]]))
def test_aggregate_by_index(eng):
data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng)
result = data.aggregate_by_index(sum)
assert allclose(result.toarray(), array([6, 22, 38]))
assert allclose(result.index, array([0, 1, 2]))
index = [
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3]
]
data.index = array(index).T
result = data.aggregate_by_index(sum, level=[0, 1])
assert allclose(result.toarray(), array([1, 14, 13, 38]))
assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]]))
def test_stat_by_index(eng):
data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng)
assert allclose(data.stat_by_index('sum').toarray(), array([6, 22, 38]))
assert allclose(data.stat_by_index('mean').toarray(), array([1.5, 5.5, 9.5]))
assert allclose(data.stat_by_index('min').toarray(), array([0, 4, 8]))
assert allclose(data.stat_by_index('max').toarray(), array([3, 7, 11]))
assert allclose(data.stat_by_index('count').toarray(), array([4, 4, 4]))
assert allclose(data.stat_by_index('median').toarray(), array([1.5, 5.5, 9.5]))
assert allclose(data.sum_by_index().toarray(), array([6, 22, 38]))
assert allclose(data.mean_by_index().toarray(), array([1.5, 5.5, 9.5]))
assert allclose(data.min_by_index().toarray(), array([0, 4, 8]))
assert allclose(data.max_by_index().toarray(), array([3, 7, 11]))
assert allclose(data.count_by_index().toarray(), array([4, 4, 4]))
assert allclose(data.median_by_index().toarray(), array([1.5, 5.5, 9.5]))
def test_stat_by_index_multi(eng):
index = [
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3]
]
data = fromlist([arange(12)], index=array(index).T, engine=eng)
result = data.stat_by_index('sum', level=[0, 1])
assert allclose(result.toarray(), array([1, 14, 13, 38]))
assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]]))
result = data.sum_by_index(level=[0, 1])
assert allclose(result.toarray(), array([1, 14, 13, 38]))
assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]]))
def test_mean_by_panel(eng):
data = fromlist([arange(8)], engine=eng)
test1 = data.mean_by_panel(4)
assert allclose(test1.index, array([0, 1, 2, 3]))
assert allclose(test1.toarray(), [[2, 3, 4, 5]])
test2 = data.mean_by_panel(2)
assert allclose(test2.index, array([0, 1]))
assert allclose(test2.toarray(), [[3, 4]])
def test_times_array(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2 = asarray([[7, 8], [9, 10], [11, 12]])
mat1 = fromlist(mat1raw, engine=eng)
truth = dot(mat1raw, mat2)
result = mat1.times(mat2)
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(0, 2))
def test_times_array_alt(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2 = asarray([[7, 8, 7, 8], [9, 10, 9, 10], [11, 12, 11, 12]])
mat1 = fromlist(mat1raw, engine=eng)
truth = dot(mat1raw, mat2)
result = mat1.times(mat2)
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(0, 4))
def test_times_vector(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2 = [7, 8, 9]
mat1 = fromlist(mat1raw, engine=eng)
truth = dot(mat1raw, mat2)
result = mat1.times(mat2)
assert allclose(result.toarray(), truth)
assert allclose(result.index, [0])
def test_times_scalar(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2 = 5
mat1 = fromlist(mat1raw, engine=eng)
truth = mat1raw * mat2
result = mat1.times(mat2)
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(0, 3))
def test_gramian(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat1 = fromlist(mat1raw, engine=eng)
result = mat1.gramian()
truth = dot(mat1raw.T, mat1raw)
assert allclose(result.toarray(), truth)
def test_cov(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat1 = fromlist(mat1raw, engine=eng)
result = mat1.cov()
truth = cov(mat1raw.T)
assert allclose(result.toarray(), truth)
def test_fourier(eng):
data = fromlist([array([1.0, 2.0, -4.0, 5.0, 8.0, 3.0, 4.1, 0.9, 2.3])], engine=eng)
vals = data.fourier(freq=2)
assert allclose(vals.select('coherence').toarray(), 0.578664)
assert allclose(vals.select('phase').toarray(), 4.102501)
def test_convolve(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
sig = array([1, 2, 3])
betas = data.convolve(sig, mode='same')
assert allclose(betas.toarray(), array([4, 10, 16, 22, 22]))
def test_crosscorr(eng):
local = array([1.0, 2.0, -4.0, 5.0, 8.0, 3.0, 4.1, 0.9, 2.3])
data = fromlist([local], engine=eng)
sig = array([1.5, 2.1, -4.2, 5.6, 8.1, 3.9, 4.2, 0.3, 2.1])
betas = data.crosscorr(signal=sig, lag=0)
assert allclose(betas.toarray(), corrcoef(local, sig)[0, 1])
betas = data.crosscorr(signal=sig, lag=2)
truth = array([-0.18511, 0.03817, 0.99221, 0.06567, -0.25750])
assert allclose(betas.toarray(), truth, atol=1E-5)
def test_detrend(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
out = data.detrend('linear')
assert(allclose(out.toarray(), array([1, 1, 1, 1, 1])))
def test_normalize_percentile(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
out = data.normalize('percentile', perc=20)
vals = out.toarray()
assert str(vals.dtype) == 'float64'
assert allclose(vals, array([-0.42105, 0.10526, 0.63157, 1.15789, 1.68421]), atol=1e-3)
def test_normalize_window(eng):
y = array([1, 2, 3, 4, 5])
data = fromlist([y], engine=eng)
vals = data.normalize('window', window=2).toarray()
b = array([1, 1, 2, 3, 4])
result_true = (y - b) / (b + 0.1)
assert allclose(vals, result_true, atol=1e-3)
vals = data.normalize('window', window=5).toarray()
b = array([1, 1, 2, 3, 4])
result_true = (y - b) / (b + 0.1)
assert allclose(vals, result_true, atol=1e-3)
def test_normalize_window_exact(eng):
y = array([1, 2, 3, 4, 5])
data = fromlist([y], engine=eng)
vals = data.normalize('window-exact', window=2).toarray()
b = array([1.2, 1.4, 2.4, 3.4, 4.2])
result_true = (y - b) / (b + 0.1)
assert allclose(vals, result_true, atol=1e-3)
vals = data.normalize('window-exact', window=6).toarray()
b = array([1.6, 1.8, 1.8, 1.8, 2.6])
result_true = (y - b) / (b + 0.1)
assert allclose(vals, result_true, atol=1e-3)
def test_normalize_mean(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
vals = data.normalize('mean').toarray()
assert allclose(vals, array([-0.64516, -0.32258, 0.0, 0.32258, 0.64516]), atol=1e-3)
def test_mean_by_window(eng):
data = fromlist([array([0, 1, 2, 3, 4, 5, 6])], engine=eng)
test1 = data.mean_by_window(indices=[3, 5], window=2).toarray()
assert allclose(test1, [3, 4])
test2 = data.mean_by_window(indices=[3, 5], window=3).toarray()
assert allclose(test2, [3, 4, 5])
test3 = data.mean_by_window(indices=[3, 5], window=4).toarray()
assert allclose(test3, [2, 3, 4, 5])
test4 = data.mean_by_window(indices=[3], window=4).toarray()
assert allclose(test4, [1, 2, 3, 4])
def test_reshape(eng):
original = fromarray(arange(72).reshape(6, 6, 2), engine=eng)
arr = original.toarray()
assert allclose(arr.reshape(12, 3, 2), original.reshape(12, 3, 2).toarray())
assert allclose(arr.reshape(36, 2), original.reshape(36, 2).toarray())
assert allclose(arr.reshape(4, 3, 3, 2), original.reshape(4, 3, 3, 2).toarray())
# must conserve number of elements
with pytest.raises(ValueError):
original.reshape(6, 3, 2)
# cannot change length of series
with pytest.raises(ValueError):
original.reshape(6, 3, 4)
def test_downsample(eng):
data = fromlist([arange(8)], engine=eng)
vals = data.downsample(2).toarray()
assert allclose(vals, [0.5, 2.5, 4.5, 6.5])
vals = data.downsample(4).toarray()
assert allclose(vals, [1.5, 5.5])
def test_downsample_uneven(eng):
data = fromlist([arange(9)], engine=eng)
vals = data.downsample(2).toarray()
assert allclose(vals, [0.5, 2.5, 4.5, 6.5])
|
11559541
|
from .samplerate import AudioSampleRate, audio_sample_rate
from soundfile import SoundFile
from io import BytesIO
from zounds.core import IdentityDimension, ArrayWithUnits
from .timeseries import TimeDimension, TimeSlice
from .duration import Picoseconds, Seconds
from .samplerate import SampleRate
import numpy as np
class AudioSamples(ArrayWithUnits):
"""
`AudioSamples` represents constant-rate samples of a continuous audio signal
at common sampling rates.
It is a special case of an :class:`~zounds.core.ArrayWithUnits` whose first
dimension is a :class:`~zounds.timeseries.TimeDimension` that has a common
audio sampling rate (e.g. :class:`~zounds.timeseries.SR44100`).
Args:
array (np.ndarray): The raw sample data
samplerate (SampleRate): The rate at which data was sampled
Raises:
ValueError: When array has a second dimension with size greater than 2
TypeError: When samplerate is not a
:class:`~zounds.timeseries.AudioSampleRate`
(e.g. :class:`~zounds.timeseries.SR22050`)
Examples::
>>> from zounds import AudioSamples, SR44100, TimeSlice, Seconds
>>> import numpy as np
>>> raw = np.random.normal(0, 1, 44100*10)
>>> samples = AudioSamples(raw, SR44100())
>>> samples.samples_per_second
44100
>>> samples.channels
1
>>> sliced = samples[TimeSlice(Seconds(2))]
>>> sliced.shape
(88200,)
"""
def __new__(cls, array, samplerate):
if array.ndim == 1:
dimensions = [TimeDimension(*samplerate)]
elif array.ndim == 2:
dimensions = [TimeDimension(*samplerate), IdentityDimension()]
else:
raise ValueError(
'array must be one (mono) or two (multi-channel) dimensions')
if not isinstance(samplerate, AudioSampleRate):
raise TypeError('samplerate should be an AudioSampleRate instance')
return ArrayWithUnits.__new__(cls, array, dimensions)
def __add__(self, other):
try:
if self.samplerate != other.samplerate:
raise ValueError(
'Samplerates must match, but they were '
'{self.samplerate} and {other.samplerate}'
.format(**locals()))
except AttributeError:
pass
return super(AudioSamples, self).__add__(other)
def kwargs(self):
return {'samplerate': self.samplerate}
def sum(self, axis=None, dtype=None, **kwargs):
result = super(AudioSamples, self).sum(axis, dtype, **kwargs)
if self.ndim == 2 and axis == 1:
return AudioSamples(result, self.samplerate)
else:
return result
@classmethod
def from_file(cls, file_like_object):
with SoundFile(file_like_object, mode='r') as f:
samples = f.read(dtype=np.float32)
return AudioSamples(samples, audio_sample_rate(f.samplerate))
@classmethod
def silence(cls, samplerate, duration, dtype=np.float32, channels=1):
shape = (int(duration / samplerate.frequency), channels)
silence = np.zeros(shape, dtype=dtype).squeeze()
return cls(silence, samplerate)
    def silence_like(self, duration):
        return self.__class__.silence(
            self.samplerate, duration, self.dtype, self.channels)
def pad_with_silence(self, silence_duration=Seconds(1)):
silence = self.__class__.silence(
self.samplerate, silence_duration, self.dtype)
return AudioSamples(np.concatenate([self, silence]), self.samplerate)
@property
def samples_per_second(self):
return int(Picoseconds(int(1e12)) / self.frequency)
@property
def duration_in_seconds(self):
return self.duration / Picoseconds(int(1e12))
@property
def overlap(self):
return self.samplerate.overlap
@property
def span(self):
return self.dimensions[0].span
@property
def end(self):
return self.dimensions[0].end
@property
def frequency(self):
return self.dimensions[0].frequency
@property
def duration(self):
return self.dimensions[0].duration
@classmethod
def from_example(cls, arr, example):
return cls(arr, example.samplerate)
@property
def channels(self):
if len(self.shape) == 1:
return 1
return self.shape[1]
@property
def samplerate(self):
return audio_sample_rate(self.samples_per_second)
@property
def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self
        x = self.sum(axis=1) * 0.5
        return AudioSamples(x, self.samplerate)
@property
def stereo(self):
if self.channels == 2:
return self
return AudioSamples(np.vstack([self, self]).T, self.samplerate)
def __getitem__(self, item):
sliced = super(AudioSamples, self).__getitem__(item)
try:
if sliced.dimensions == self.dimensions:
return AudioSamples(sliced, self.samplerate)
except AttributeError:
pass
return sliced
def sliding_window(self, samplerate, padding=True):
ws = TimeSlice(duration=samplerate.duration)
ss = TimeSlice(duration=samplerate.frequency)
_, windowed = self.sliding_window_with_leftovers(ws, ss, dopad=padding)
return windowed
def encode(self, flo=None, fmt='WAV', subtype='PCM_16'):
"""
Return audio samples encoded as bytes given a particular audio format
Args:
flo (file-like): A file-like object to write the bytes to. If flo
is not supplied, a new :class:`io.BytesIO` instance will be
created and returned
fmt (str): A libsndfile-friendly identifier for an audio encoding
(detailed here: http://www.mega-nerd.com/libsndfile/api.html)
subtype (str): A libsndfile-friendly identifier for an audio
encoding subtype (detailed here:
http://www.mega-nerd.com/libsndfile/api.html)
Examples:
>>> from zounds import SR11025, AudioSamples
>>> import numpy as np
>>> silence = np.zeros(11025*10)
>>> samples = AudioSamples(silence, SR11025())
>>> bio = samples.encode()
>>> bio.read(10)
'RIFFx]\\x03\\x00WA'
"""
flo = flo or BytesIO()
with SoundFile(
flo,
mode='w',
channels=self.channels,
format=fmt,
subtype=subtype,
samplerate=self.samples_per_second) as f:
if fmt == 'OGG':
# KLUDGE: Trying to write too-large chunks to an ogg file seems
# to cause a segfault in libsndfile
# KLUDGE: This logic is very similar to logic in the OggVorbis
# processing node, and should probably be factored into a common
# location
factor = 20
chunksize = self.samples_per_second * factor
for i in range(0, len(self), chunksize):
chunk = self[i: i + chunksize]
f.write(chunk)
else:
# write everything in one chunk
f.write(self)
flo.seek(0)
return flo
def save(self, filename, fmt='WAV', subtype='PCM_16'):
with open(filename, 'wb') as f:
self.encode(f, fmt=fmt, subtype=subtype)
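if __name__ == '__main__':
    # A small smoke-test sketch (not part of the original module; run via
    # ``python -m`` since this module uses relative imports): two seconds of
    # noise at 44.1kHz, round-tripped through stereo and mono, then encoded
    # to an in-memory WAV.
    noise = np.random.normal(0, 1, 44100 * 2).astype(np.float32)
    samples = AudioSamples(noise, audio_sample_rate(44100))
    assert samples.channels == 1
    assert samples.stereo.channels == 2
    assert samples.stereo.mono.channels == 1
    encoded = samples.encode(fmt='WAV', subtype='PCM_16')
    print('encoded %d bytes' % len(encoded.read()))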
|
11559556
|
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
OutcomeTemplate,
ExperimentTemplate,
)
outcome_test_data = {}
outcome_tests = [
##----TEST 0----##
    # creates two experiment templates
    # creates an outcome template with the first experiment as a foreign key
    # gets the outcome
    # puts the outcome with a new random body
    # gets the updated outcome
    # deletes the updated outcome
    # gets the outcome (should return error)
[
*[{
'name': name,
'method': POST,
'endpoint': 'experimenttemplate-list',
'body': random_model_dict(ExperimentTemplate),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
} for name in ['experiment0', 'experiment1']],
{
'name': 'outcome0',
'method': POST,
'endpoint': 'outcometemplate-list',
'body': (request_body := random_model_dict(OutcomeTemplate, experiment='experiment0__url')),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body
}
}
},
{
'name': 'outcome0_get_0',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'outcome0_update_0',
'method': PUT,
'endpoint': 'outcometemplate-detail',
'body': (request_body := random_model_dict(OutcomeTemplate, experiment='experiment0__url')),
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'outcome0_get_1',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'outcome0_delete_0',
'method': DELETE,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE
}
}
},
{
'name': 'outcome0_get_2',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR
}
}
},
],
]
|
11559590
|
import requests
import sys
import exchange_client
from exchange_client import smart_open
import json
import os
import tempfile
'''Make a request to an eXchange non-paginated API.
@Param api_uri String -- API's URI (does not include the "base" URI)
@Param output_name (optional) -- name of file in which to place output; if None, output is written to stdout
@Return string -- name of output file
'''
def run(api_uri, output_name=None):
base_uri = "https://api.theexchange.fanniemae.com"
#use the exchange_client to get our access token
full_auth = exchange_client.get_auth_token()
user_token = full_auth["AuthenticationResult"]["IdToken"]
uri = base_uri + api_uri
if output_name is None:
output_file_name = '-'
else:
output_file_name = output_name
with smart_open(output_file_name) as output_file:
        response = do_get(uri, user_token)
        json.dump(response.json(), output_file, indent=2)
return output_file_name
'''---------------------------------------------------------------------------'''
'''Request content from uri, return as a response object.'''
def do_get(uri, user_token):
r = requests.get(uri,headers={"Authorization": user_token, "Accept": "application/json"})
if r.status_code != 200:
        raise Exception(uri + " resulted in an HTTP " + str(r.status_code))
return r
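if __name__ == '__main__':
    # Example invocation (hypothetical API path): fetch a data-set listing
    # and write the JSON response to a local file.
    print(run('/v1/data-sets', output_name='data-sets.json'))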
|
11559641
|
from quasimodo.assertion_validation.content_comparator import ContentComparator
from quasimodo.cache.mongodb_cache import MongoDBCache
from quasimodo.parameters_reader import ParametersReader
import logging
import wikipedia
parameters_reader = ParametersReader()
DEFAULT_MONGODB_LOCATION = parameters_reader.get_parameter("default-mongodb-location") or "mongodb://localhost:27017/"
class WikipediaCooccurrenceSubmodule(ContentComparator):
def __init__(self, module_reference, use_cache=True, cache_name="wikipedia-cache"):
super().__init__(module_reference)
self._name = "Wikipedia Cooccurrence"
self.use_cache = use_cache
self._lang = "en"
self.cache = MongoDBCache(cache_name, mongodb_location=DEFAULT_MONGODB_LOCATION)
def _get_wikipedia_page_content(self, name):
content = self.read_cache(name)
if content is not None:
return content
search = wikipedia.search(name)
# For now, we only consider the first result
if search:
try:
content = wikipedia.page(search[0]).content
except wikipedia.DisambiguationError as e:
# Not clear how often it happens
if e.options:
try:
content = wikipedia.page(e.options[0]).content
except wikipedia.DisambiguationError as e2:
if e2.options:
temp = e2.options[0].replace("(", "")\
.replace(")", "")
try:
content = wikipedia.page(temp).content
except wikipedia.DisambiguationError as e3:
pass
except wikipedia.exceptions.PageError:
logging.warning("Wikipedia page not found: " + name)
except wikipedia.exceptions.PageError:
logging.warning("Wikipedia page not found: " + name)
except wikipedia.exceptions.PageError:
logging.warning("Wikipedia page not found: " + name)
self.write_cache(name, content)
return content
def write_cache(self, wikipedia_page, content):
if self.use_cache:
filename = wikipedia_page.replace(" ", "_").replace("/", "_")
self.cache.write_cache(filename, content)
def read_cache(self, wikipedia_page):
if self.use_cache:
filename = wikipedia_page.replace(" ", "_").replace("/", "_")
cache_value = self.cache.read_cache(filename)
return cache_value
return None
def get_contents(self, subject):
return [self._get_wikipedia_page_content(subject)]
def setup_processing(self, input_interface):
wikipedia.set_lang(self._lang)
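# A minimal usage sketch (hypothetical module reference): page text is
# fetched once per subject, consulting the MongoDB-backed cache first.
#
#   submodule = WikipediaCooccurrenceSubmodule(module_reference=None)
#   submodule.setup_processing(None)
#   text = submodule.get_contents('elephant')[0]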
|
11559672
|
from rest_framework import serializers as rest_serializers
from geotrek.authent import models as authent_models
class StructureSerializer(rest_serializers.ModelSerializer):
class Meta:
model = authent_models.Structure
fields = ('id', 'name')
|
11559673
|
import defcon
from fontParts.base import BaseKerning
from fontParts.fontshell.base import RBaseObject
class RKerning(RBaseObject, BaseKerning):
wrapClass = defcon.Kerning
def _items(self):
return self.naked().items()
def _contains(self, key):
return key in self.naked()
def _setItem(self, key, value):
self.naked()[key] = value
def _getItem(self, key):
return self.naked()[key]
def _delItem(self, key):
del self.naked()[key]
def _find(self, pair, default=0):
return self.naked().find(pair, default)
|
11559692
|
import logging
import sys
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import autotvm
from tvm.contrib import nvcc, cc
# check whether the gpu has tensorcore
if not tvm.cuda(0).exist or not tvm.runtime.enabled("cuda"):
    raise Exception("skip building this tutorial because CUDA is not enabled")
ctx = tvm.cuda()
if not nvcc.have_tensorcore(ctx.compute_version):
raise Exception("the gpu has no tensorcore, skipping...")
M, N, L = 1024, 32, 4096
dtype = "int8"
layout = "TN"
if len(sys.argv) >= 4:
M, N, L = int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])
if len(sys.argv) >= 5:
dtype = sys.argv[4]
if len(sys.argv) >= 6:
layout = sys.argv[5]
# check whether the current gpu arch supports wmma codegen for the current dtype
cuda_compute_capability = tvm.runtime._ffi_api.GetDeviceAttr(2, 0, 4)
major, minor = nvcc.parse_compute_version(cuda_compute_capability)
if dtype == "int8":
assert major == 7 and minor >= 2
elif dtype == "int4" or dtype == "int1":
# int4/int1 only support layout TN
assert major == 7 and minor == 5 and layout == "TN"
def matmul_nn(A, B, L, dtype="float16", layout="NN"):
k = te.reduce_axis((0, L), name="k")
if dtype == "float16":
out_type = "float"
elif dtype == "int8":
out_type = "int"
elif dtype == "int4" or dtype == "int1":
out_type = "int"
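    # Operand shapes per layout (the compute below indexes accordingly):
    #   "NN": A is (N, L), B is (L, M)
    #   "NT": A is (L, N), B is (L, M)
    #   "TN": A is (N, L), B is (M, L)
    #   "TT": A is (L, N), B is (M, L)
    # The reduction always runs over the shared L axis.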
if layout == "NN":
return te.compute(
(N, M),
lambda i, j: te.sum(
A[i, k].astype(out_type) * B[k, j].astype(out_type), axis=k
),
)
if layout == "NT":
return te.compute(
(N, M),
lambda i, j: te.sum(
A[k, i].astype(out_type) * B[k, j].astype(out_type), axis=k
),
)
if layout == "TN":
return te.compute(
(N, M),
lambda i, j: te.sum(
A[i, k].astype(out_type) * B[j, k].astype(out_type), axis=k
),
)
if layout == "TT":
return te.compute(
(N, M),
lambda i, j: te.sum(
A[k, i].astype(out_type) * B[j, k].astype(out_type), axis=k
),
)
@autotvm.template("tutorial/auto_tensorcore/test_gemm")
def test_gemm(N, L, M, dtype, layout):
if layout == "NN":
shape_a = (N, L)
shape_b = (L, M)
elif layout == "NT":
shape_a = (L, N)
shape_b = (L, M)
elif layout == "TN":
shape_a = (N, L)
shape_b = (M, L)
elif layout == "TT":
shape_a = (L, N)
shape_b = (M, L)
else:
print("Unsupported layout:", layout)
sys.exit(1)
A = te.placeholder(shape_a, name="A", dtype=dtype)
B = te.placeholder(shape_b, name="B", dtype=dtype)
C = matmul_nn(A, B, L, dtype, layout)
s = te.create_schedule(C.op)
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
# storage_align params
factor = 16
offset = 8
if dtype == "int8":
factor = 32
offset = 16
elif dtype == "int4":
factor = 64
offset = 32
elif dtype == "int1":
factor = 256
offset = 128
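    # Note: for each dtype the factor above corresponds to 32 bytes' worth of
    # elements (and the offset to 16 bytes), padding the shared-memory row
    # stride to reduce bank conflicts.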
# create cache stages
AA = s.cache_read(A, "shared", [C])
if layout == "NN" or layout == "TN":
s[AA].storage_align(AA.op.axis[0], factor, offset)
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
if layout == "TT" or layout == "NT":
s[BB].storage_align(BB.op.axis[0], factor, offset)
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
# autotvm search space definition
cfg = autotvm.get_config()
cfg.define_knob("bx", [2, 4, 8])
cfg.define_knob("by", [8, 16, 32, 64])
cfg.define_knob("step_k", [1, 2, 4, 8, 16, 32])
cfg.define_knob("v", [4, 8, 16, 32])
by = cfg["by"].val
bx = cfg["bx"].val
step_k = cfg["step_k"].val
v = cfg["v"].val
# thread tile
TX = 8
TY = 1
if dtype == "int4" or dtype == "int1":
TX = 2
# warp tile
warp_tile_m = 16 # it could also be 8 or 32 on CUDA version >= 10.0
warp_tile_k = 16 # it must be 16 for fp16/int8 data type
if dtype == "int4":
warp_tile_m = 8
warp_tile_k = 32
elif dtype == "int1":
warp_tile_m = 8
warp_tile_k = 128
# block tile
tile_x = bx * TX
tile_y = by * TY
yo, ty = s[C].split(y, tile_y)
ty, yi = s[C].split(ty, TY)
# schedule for C stage
xo, xi = s[C].split(x, tile_x)
WX = min(warp_tile_m, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
# schedule for CL stage
ko, ki = s[CL].split(k, step_k * warp_tile_k)
kl, ki = s[CL].split(ki, warp_tile_k)
s[CL].compute_at(s[C], tx)
yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, yo, xo)
# schedule for AA stage
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[0], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
# vectorization is very important for float16/int8 inputs
s[AA].vectorize(vec)
# schedule for BB stage
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[0], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
# set the 'tensor_core' pragma for tensorcore codegen
s[CL].pragma(ko, "tensor_core")
return s, [A, B, C]
def tune_and_evaluate(M, N, L, dtype, layout):
task = autotvm.task.create(
"tutorial/auto_tensorcore/test_gemm",
args=(N, L, M, dtype, layout),
target="cuda",
)
print(task.config_space)
logging.getLogger("autotvm").setLevel(logging.DEBUG)
logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout))
measure_option = autotvm.measure_option(
builder="local", runner=autotvm.LocalRunner(number=5)
)
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(
n_trial=1000,
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file("matmul.log")],
)
dispatch_context = autotvm.apply_history_best("matmul.log")
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config:")
print(best_config)
with autotvm.apply_history_best("matmul.log"):
with tvm.target.Target("cuda"):
s, arg_bufs = test_gemm(N, L, M, dtype, layout)
print(tvm.lower(s, arg_bufs, simple_mode=True))
func = tvm.build(s, arg_bufs)
dev_module = func.imported_modules[0]
print(dev_module.get_source())
# check correctness
if layout == "NN":
shape_a = (N, L)
shape_b = (L, M)
elif layout == "NT":
shape_a = (L, N)
shape_b = (L, M)
elif layout == "TN":
shape_a = (N, L)
shape_b = (M, L)
elif layout == "TT":
shape_a = (L, N)
shape_b = (M, L)
a_np = None
b_np = None
c_np = None
c_np_type = None
if dtype == "float16":
c_np_type = np.float32
a_np = np.random.uniform(size=shape_a).astype(np.float16)
b_np = np.random.uniform(size=shape_b).astype(np.float16)
if layout == "NN":
c_np = np.dot(a_np, b_np)
elif layout == "NT":
c_np = np.dot(a_np.T, b_np)
elif layout == "TN":
c_np = np.dot(a_np, b_np.T)
elif layout == "TT":
c_np = np.dot(a_np.T, b_np.T)
elif dtype == "int8":
c_np_type = np.int32
a_np = np.random.randint(low=-128, high=127, size=shape_a).astype(np.int8)
b_np = np.random.randint(low=-128, high=127, size=shape_b).astype(np.int8)
if layout == "NN":
c_np = np.dot(a_np.astype(np.int32), b_np.astype(np.int32))
elif layout == "NT":
c_np = np.dot(a_np.astype(np.int32).T, b_np.astype(np.int32))
elif layout == "TN":
c_np = np.dot(a_np.astype(np.int32), b_np.astype(np.int32).T)
elif layout == "TT":
c_np = np.dot(a_np.astype(np.int32).T, b_np.astype(np.int32).T)
elif dtype == "int4":
c_np_type = np.int32
a_np_int = np.random.randint(low=-8, high=7, size=shape_a).astype(np.int32)
b_np_int = np.random.randint(low=-8, high=7, size=shape_b).astype(np.int32)
# "TN"
c_np = np.dot(a_np_int.astype(np.int32), b_np_int.astype(np.int32).T)
a_np = np.zeros(shape=(N, int(L / 8)), dtype=np.int32)
b_np = np.zeros(shape=(M, int(L / 8)), dtype=np.int32)
# a_np --> col_major
for i in range(N):
for j in range(int(L / 8)):
for k in range(8):
a_np[i, j] = a_np[i, j] | (
(a_np_int[i, j * 8 + k] & 0xF) << ((7 - k) * 4)
)
# b_np --> row_major
for i in range(M):
for j in range(int(L / 8)):
for k in range(8):
b_np[i, j] = b_np[i, j] | (
(b_np_int[i, j * 8 + k] & 0xF) << ((7 - k) * 4)
)
elif dtype == "int1":
c_np_type = np.int32
a_np_int = np.random.randint(low=0, high=1, size=shape_a).astype(np.int32)
b_np_int = np.random.randint(low=0, high=1, size=shape_b).astype(np.int32)
# "TN"
c_np = np.dot(a_np_int.astype(np.int32), b_np_int.astype(np.int32).T)
a_np = np.zeros(shape=(N, int(L / 32)), dtype=np.int32)
b_np = np.zeros(shape=(M, int(L / 32)), dtype=np.int32)
for i in range(N):
for j in range(int(L / 32)):
for k in range(32):
a_np[i, j] = a_np[i, j] | (
(a_np_int[i, j * 32 + k] & 0xF) << (31 - k)
)
for i in range(M):
for j in range(int(L / 32)):
for k in range(32):
b_np[i, j] = b_np[i, j] | (
(b_np_int[i, j * 32 + k] & 0xF) << (31 - k)
)
c_tvm = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np_type), device=ctx)
a_tvm = tvm.nd.array(a_np, device=ctx)
b_tvm = tvm.nd.array(b_np, device=ctx)
func(a_tvm, b_tvm, c_tvm)
tvm.testing.assert_allclose(c_np, c_tvm.asnumpy(), rtol=1e-3)
evaluator = func.time_evaluator(func.entry_name, ctx, number=100)
print("Time cost of this operator: %f" % evaluator(a_tvm, b_tvm, c_tvm).mean)
func.save("gemm.o")
func.imported_modules[0].save("gemm.ptx")
cc.create_shared("gemm.so", ["gemm.o"])
if __name__ == "__main__":
tune_and_evaluate(M, N, L, dtype, layout)
|
11559705
|
import os
import sys
from subprocess import run, PIPE, STDOUT
import click
from hobbit import ROOT_PATH
def dev_init(all_, hooks, pipenv):
run('git init', shell=True)
if all_ or hooks:
HOOKS_PATH = os.path.join(ROOT_PATH, 'static', 'hooks')
run(f'cp -r {HOOKS_PATH}/* .git/hooks', shell=True)
if all_ or pipenv:
sub = run('which pipenv', shell=True, stdout=PIPE, stderr=STDOUT)
if sub.returncode != 0:
click.echo(click.style('cmd pipenv not exist.', fg='red'))
sys.exit(sub.returncode)
pipenv_path = sub.stdout.strip().decode('utf8')
pipenv_cmds = [
f'{pipenv_path} install --dev pytest pytest-cov pytest-env flake8',
]
if 'requirements.txt' in os.listdir():
pipenv_cmds.insert(
0, f'{pipenv_path} install -r requirements.txt --pre')
cmd = ' && '.join(pipenv_cmds)
click.echo(click.style(cmd, fg='green'))
# force pipenv to ignore that environment and create its own instead
env = os.environ.copy()
env.update({'PIPENV_IGNORE_VIRTUALENVS': '1'})
run(cmd, shell=True, env=env)
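# Example invocation sketch: initialize the repo, install the bundled git
# hooks, and set up a pipenv environment in one go.
#
#   dev_init(all_=True, hooks=False, pipenv=False)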
|
11559769
|
import fastai
from fastai.vision import *
from torch.utils.data.dataloader import default_collate
from torch.utils.data import Sampler, SequentialSampler, RandomSampler
import sklearn
# Modification of ImageDataBunch to allow passing a list of custom samplers.
class ImageDataBunch(ImageDataBunch):
@classmethod
def create(cls, train_ds:Dataset, valid_ds:Dataset, test_ds:Optional[Dataset]=None, path:PathOrStr='.', bs:int=64,
val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False, samplers=None, **dl_kwargs)->'DataBunch':
"Create a `DataBunch` from `train_ds`, `valid_ds` and maybe `test_ds` with a batch size of `bs` and optionally a list of samplers."
datasets = cls._init_ds(train_ds, valid_ds, test_ds)
val_bs = ifnone(val_bs, bs)
if samplers is None: samplers = [RandomSampler] + 3*[SequentialSampler]
dls = [DataLoader(d, b, sampler=s(d, bs=b), num_workers=num_workers, **dl_kwargs) for d,b,s in
zip(datasets, (bs,val_bs,val_bs,val_bs), samplers) if d is not None]
return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check)
class ImageList(ImageList):
_bunch = ImageDataBunch
class SequentialSampler(SequentialSampler):
def __init__(self, data_source, **kwargs):
self.data_source = data_source
class RandomSampler(RandomSampler):
def __init__(self, data_source, replacement=False, num_samples=None, **kwargs):
self.data_source = data_source
self.replacement = replacement
        try:
            self.num_samples = num_samples
        except AttributeError:
            # some torch versions expose num_samples as a read-only property
            self._num_samples = num_samples
class FixedLenRandomSampler(RandomSampler):
"""Sample epochs with a fixed length"""
def __init__(self, data_source, bs, epoch_size, *args, **kwargs):
super().__init__(data_source)
self.epoch_size = epoch_size*bs
def __iter__(self):
return iter(np.random.choice(range(len(self.data_source)), size=len(self), replace=True).tolist())
def __len__(self):
return self.epoch_size
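# A hedged usage sketch (illustrative values): FixedLenRandomSampler draws
# epoch_size * bs indices with replacement, so every epoch yields the same
# number of batches regardless of the dataset size.
#
#     sampler = FixedLenRandomSampler(list(range(10)), bs=4, epoch_size=3)
#     assert len(sampler) == 12              # 3 batches of 4
#     assert len(list(iter(sampler))) == 12  # indices sampled with replacement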
def load_image(fn:PathOrStr, div:bool=True, convert_mode:str='RGB', cls:type=Image,
after_open:Callable=None)->Image:
"Return `Image` cropped and resized."
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning) # EXIF warning from TiffPlugin
if Path(fn).parent.name == 'train':
ind = train_df.loc[Path(fn).name, 'ind']
x = X_train[ind]
else:
ind = test_df.loc[Path(fn).name, 'ind']
x = X_test[ind]
_, time_dim = x.shape
if time_dim - base_dim > 0:
crop_x = np.random.randint(0, time_dim - base_dim)
x = x[:, crop_x:crop_x+base_dim]
x = PIL.Image.fromarray(x).resize((SZ,SZ)).convert(convert_mode)
if after_open: x = after_open(x)
x = pil2tensor(x,np.float32)
if div: x.div_(255)
return cls(x)
def load_image_tta(fn:PathOrStr, div:bool=True, convert_mode:str='RGB', cls:type=Image,
after_open:Callable=None, flip=False, vert=False, step=128)->Image:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning) # EXIF warning from TiffPlugin
if Path(fn).parent.name == 'train':
ind = train_df.loc[Path(fn).name, 'ind']
x = X_train[ind]
else:
ind = test_df.loc[Path(fn).name, 'ind']
x = X_test[ind]
if flip: x = np.fliplr(x)
if vert: x = np.flipud(x)
_, time_dim = x.shape
xb = []
for n in range(0, max(1, time_dim-base_dim), step):
x0 = PIL.Image.fromarray(x[:,n:n+base_dim]).resize((SZ,SZ)).convert(convert_mode)
if after_open: x0 = after_open(x0)
x0 = pil2tensor(x0, np.float32)
if div: x0.div_(255)
x0 = normalize(x0, mean=tensor([0.2932, 0.2932, 0.2932]), std=tensor([0.2556, 0.2556, 0.2556]))
xb.append(x0[None])
xb = torch.cat(xb, dim=0)
return xb
class ImageListMemory(ImageList):
"""ImageList that load images from memory using load_image function"""
def __init__(self, *args, convert_mode='L', after_open:Callable=None, **kwargs):
super().__init__(*args, **kwargs)
self.convert_mode,self.after_open = convert_mode,after_open
self.copy_new.append('convert_mode')
self.c,self.sizes = 1,{}
def open(self, fn):
"Open image in `fn`, subclass and overwrite for custom behavior."
return load_image(fn, convert_mode=self.convert_mode, after_open=self.after_open)
def _cutout(x, n_holes:uniform_int=1, length:uniform_int=40):
"Cut out `n_holes` number of rectangular bands of size `length` in image at random locations."
h,w = x.shape[1:]
for n in range(n_holes):
h_y = np.random.randint(0, h)
h_x = np.random.randint(0, w)
y1 = int(np.clip(h_y - length / 2, 0, h))
y2 = int(np.clip(h_y + length / 2, 0, h))
x1 = int(np.clip(h_x - length / 2, 0, w))
x2 = int(np.clip(h_x + length / 2, 0, w))
x[:, y1:y2, :] = 0
x[:, :, x1:x2] = 0
return x
cutout2 = TfmPixel(_cutout)
class BCELoss(nn.Module):
def __init__(self, reduce=False):
super().__init__()
self.reduce = reduce
def forward(self, logit, target):
target = target.float()
loss = nn.BCEWithLogitsLoss()(logit, target)
if len(loss.size())==2:
loss = loss.sum(dim=1)
if not self.reduce:
return loss
else:
return loss.mean()
# Adapted from https://www.kaggle.com/c/human-protein-atlas-image-classification/discussion/78109
class FocalLoss(nn.Module):
def __init__(self, gamma=2, reduce=False):
super().__init__()
self.gamma = gamma
self.reduce = reduce
def forward(self, logit, target):
target = target.float()
max_val = (-logit).clamp(min=0)
loss = logit - logit * target + max_val + \
((-max_val).exp() + (-logit - max_val).exp()).log()
invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
if len(loss.size())==2:
loss = loss.sum(dim=1)
if not self.reduce:
return loss
else:
return loss.mean()
def fbeta2(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, eps:float=1e-9, sigmoid:bool=True)->Rank0Tensor:
"Computes the f_beta between `preds` and `targets`"
beta2 = beta ** 2
if sigmoid: y_pred = y_pred.sigmoid()
y_pred = (y_pred>thresh).float()
y_true = y_true.float()
TP = (y_pred*y_true).sum(dim=1)
prec = TP/(y_pred.sum(dim=1)+eps)
rec = TP/(y_true.sum(dim=1)+eps)
res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2)
return res
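# Quick sanity check for fbeta2 (illustrative): if thresholded predictions
# exactly match the targets for a sample, TP == y_true.sum(dim=1), so
# prec == rec == 1 and the score is (1 * 1) / (beta2 + 1 + eps) * (1 + beta2),
# i.e. approximately 1 up to the eps smoothing.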
class MixupBCELoss(BCELoss):
def forward(self, x, y):
if isinstance(y, dict):
y0, y1, a = y['y0'], y['y1'], y['a']
loss = a*super().forward(x, y0) + (1-a)*super().forward(x, y1)
if f2cl is not None:
                # Keep only the samples whose F2 score is below f2cl
fbs = fbeta2(x, y0*a.view(-1,1)+(1-a.view(-1,1))*y1)
loss = loss[(fbs<f2cl).byte()]
else:
loss = super().forward(x, y)
return 100*loss.mean()
class MixupFocalLoss(FocalLoss):
def forward(self, x, y):
if isinstance(y, dict):
y0, y1, a = y['y0'], y['y1'], y['a']
loss = a*super().forward(x, y0) + (1-a)*super().forward(x, y1)
if f2cl is not None:
                # Keep only the samples whose F2 score is below f2cl
fbs = fbeta2(x, y0*a.view(-1,1)+(1-a.view(-1,1))*y1)
loss = loss[(fbs<f2cl).byte()]
else:
loss = super().forward(x, y)
return loss.mean()
# Calculate the overall lwlrap using sklearn.metrics function.
def lwlrap(scores, truth):
"""Calculate the overall lwlrap using sklearn.metrics.lrap."""
# sklearn doesn't correctly apply weighting to samples with no labels, so just skip them.
scores = scores.detach().cpu().numpy()
truth = truth.detach().cpu().numpy()
sample_weight = np.sum(truth > 0, axis=1)
nonzero_weight_sample_indices = np.flatnonzero(sample_weight > 0)
overall_lwlrap = sklearn.metrics.label_ranking_average_precision_score(
truth[nonzero_weight_sample_indices, :] > 0,
scores[nonzero_weight_sample_indices, :],
sample_weight=sample_weight[nonzero_weight_sample_indices])
return tensor(overall_lwlrap)
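# Illustrative lwlrap check: a perfect ranking scores 1.0. With
# truth = tensor([[1., 0.]]) and scores = tensor([[0.9, 0.1]]) the positive
# label is ranked first, so label_ranking_average_precision_score returns 1.0.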
class AudioMixup(LearnerCallback):
def __init__(self, learn):
super().__init__(learn)
def on_batch_begin(self, last_input, last_target, train, **kwargs):
if train:
bs = last_input.size()[0]
lambd = np.random.uniform(0, 0.5, bs)
shuffle = torch.randperm(last_target.size(0)).to(last_input.device)
x1, y1 = last_input[shuffle], last_target[shuffle]
a = tensor(lambd).float().view(-1, 1, 1, 1).to(last_input.device)
last_input = a*last_input + (1-a)*x1
last_target = {'y0':last_target, 'y1':y1, 'a':a.view(-1)}
return {'last_input': last_input, 'last_target': last_target}
def get_preds_tta(learn, valid=True, flip=False, vert=False):
with torch.no_grad():
preds0 = []
N = len(learn.data.valid_ds) if valid else len(learn.data.test_ds)
for i in progress_bar(range(N), total=N):
if valid:
xb = load_image_tta(learn.data.valid_ds.items[i], flip=flip, vert=vert, step=base_dim)
else:
xb = load_image_tta(learn.data.test_ds.items[i], flip=flip, vert=vert, step=base_dim)
out = learn.model(xb.cuda())
out = out.sigmoid().max(0)[0]
preds0.append(out[None].cpu())
preds0 = torch.cat(preds0, dim=0)
return preds0
def print_scores(name, preds, ys):
print(f'{name} | F2={fbeta(preds, ys).item():.4f}; LWL={lwlrap(preds, ys).item():.4f}')
|
11559799
|
from __future__ import division
import numpy as np
# Single value as opposed to a mean/std image.
# Perhaps we want to change this to one value per color channel.
def calc_mean_std(X):
mean = np.mean(X)
std = np.std(X)
return mean, std
def normalize(data, mean, std):
return (data-mean)/std
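# Minimal usage sketch (the array below is an illustrative placeholder):
if __name__ == "__main__":
    X = np.arange(12, dtype=float).reshape(3, 4)
    mean, std = calc_mean_std(X)
    X_norm = normalize(X, mean, std)
    # the normalized data has zero mean and unit std by construction
    print(np.isclose(np.mean(X_norm), 0.0), np.isclose(np.std(X_norm), 1.0))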
|
11559816
|
from bag_transfer.rights.views import (RightsAPIAdminView, RightsCreateView,
RightsDetailView, RightsUpdateView)
from django.conf.urls import url
app_name = 'rights'
urlpatterns = [
url(r"^add/$", RightsCreateView.as_view(), name="add"),
url(r"^(?P<pk>\d+)/$", RightsDetailView.as_view(), name="detail"),
url(r"^(?P<pk>\d+)/edit$", RightsUpdateView.as_view(), name="edit"),
url(
r"^(?P<pk>\d+)/(?P<action>(delete))/$", RightsAPIAdminView.as_view(), name="api"
),
]
|
11559848
|
from . import UVSubToolBar as _UVSubToolBar
import re
import maya.mel as mel
import sys
import maya.cmds as cmds
import os
from PySide.QtCore import *
from PySide.QtGui import *
from maya.app.general.UVSubToolBar import UVSubToolBar
from random import randint
class UVEditBar(UVSubToolBar):
def UVLatticeButtoncmd(self):
pass
def UVSelectShortestEdgecmd(self):
pass
def UVSmudgeToolcmd(self):
pass
def UVTweakcmd(self):
pass
def __init__(self):
pass
def changeGrid(self):
pass
def moveUVShellcmd(self):
pass
def texSmoothcmd(self):
pass
COLUMN_COUNT = 3
staticMetaObject = None
|
11559852
|
import sqlite3
import pandas as pd
conn = sqlite3.connect("menu.db")
c = conn.cursor()
print(pd.read_sql_query("SELECT * FROM Sandwiches", conn))
print("-------")
print(pd.read_sql_query("SELECT name, my_cost FROM Sandwiches WHERE my_cost>5", conn))
print("-------")
x = pd.read_sql_query("SELECT name, my_cost FROM Sandwiches WHERE my_cost>5", conn)
print(x['name'])
print('-')
print(x['my_cost'][0])
# Close the connection to the database file (commit is a no-op here since we only read)
conn.commit()
conn.close()
|
11559882
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
x = [-26, -15.6464, -9.8422, -6.0, -4.0, -2.68, -2.3, -1.8, -1.26, -0.61, 0, 0.61, 1.26, 2.1, 2.68, 4.4704] # relative velocity values
y = [.76, .504, 0.34, 0.29, 0.25, 0.22, 0.19, 0.13, 0.053, 0.017, 0, -0.015, -0.042, -0.13, -0.19, -.315] # modification values
TR = 1.6
old_y = []
new_y = []
for _x, _y in zip(x, y):
old_y.append(_y)
_y = _y + 1
new_y.append(_y)
# assert np.isclose(TR + old_y[-1], TR * new_y[-1])
new_TR = 1.2
plt.plot(x, np.array(old_y) + new_TR, label='old_y')
plt.plot(x, ((np.array(new_y) - 1) / new_TR + 1) * new_TR, label='new_y')
plt.legend()
print(np.round(new_y, 4).tolist())
|
11559948
|
from flask import Blueprint, render_template
from platypush.backend.http.app import template_folder
from platypush.backend.http.app.utils import authenticate, get_websocket_port
from platypush.backend.http.utils import HttpUtils
dashboard = Blueprint('dashboard', __name__, template_folder=template_folder)
# Declare routes list
__routes__ = [
dashboard,
]
# noinspection PyUnusedLocal
@dashboard.route('/dashboard/<name>', methods=['GET'])
@authenticate()
def render_dashboard(name):
""" Route for the dashboard """
return render_template('index.html',
utils=HttpUtils,
websocket_port=get_websocket_port())
# vim:sw=4:ts=4:et:
|
11559969
|
import time
from os import PathLike
from typing import Iterable
from typing import Optional
from loguru import logger
from extract_emails.errors import BrowserImportError
try:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
except ModuleNotFoundError:
msg = "ChromeBrowser require selenium library:\n\n\tpip install selenium\n\tpoetry add selenium\n"
raise BrowserImportError(msg)
from extract_emails.browsers.page_source_getter import PageSourceGetter
class ChromeBrowser(PageSourceGetter):
"""Getting page sources with selenium and chromedriver
Examples:
>>> from extract_emails.browsers.chrome_browser import ChromeBrowser
>>> browser = ChromeBrowser()
>>> browser.open()
>>> page_source = browser.get_page_source('https://example.com')
>>> browser.close()
>>> from extract_emails.browsers.chrome_browser import ChromeBrowser
>>> with ChromeBrowser() as browser:
... page_source = browser.get_page_source('https://example.com')
"""
default_options = {
"--disable-gpu",
"--disable-software-rasterizer",
"--disable-dev-shm-usage",
"--window-size=1920x1080",
"--disable-setuid-sandbox",
"--no-sandbox",
}
wait_seconds_after_get = 0
def __init__(
self,
executable_path: PathLike = "/usr/bin/chromedriver",
headless_mode: bool = True,
        options: Optional[Iterable[str]] = None,
) -> None:
"""ChromeBrowser initialization
Args:
executable_path: path to chromedriver, use `which chromedriver` to get the path.
Default: /usr/bin/chromedriver
headless_mode: run browser with headless mode or not. Default: True
options: arguments for chrome.Options().
Default: set("--disable-gpu", "--disable-software-rasterizer", "--disable-dev-shm-usage",
"--window-size=1920x1080", "--disable-setuid-sandbox", "--no-sandbox", )
"""
self.executable_path = executable_path
self.headless_mode = headless_mode
self.options = options if options is not None else self.default_options
self.driver: Optional[webdriver.Chrome] = None
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self):
"""Add arguments to chrome.Options() and run the browser"""
options = Options()
for option in self.options:
options.add_argument(option)
if self.headless_mode:
options.add_argument("--headless")
self.driver = webdriver.Chrome(
options=options, executable_path=self.executable_path
)
def close(self):
"""Close the browser"""
self.driver.close()
self.driver.quit()
def get_page_source(self, url: str) -> str:
"""Get page source text from URL
Args:
url: URL
Returns:
page source as text
"""
try:
self.driver.get(url)
time.sleep(self.wait_seconds_after_get)
page_source = self.driver.page_source
except Exception as e:
logger.error(f"Could not get page source from {url}: {e}")
return ""
if "<html><head></head><body></body></html>" == page_source:
logger.error(f"Could not get page source from {url}: Unknown reason")
return page_source
|
11559987
|
from typing import List, Optional
import logging
from collections import Counter
from itertools import cycle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from pandas import DataFrame
from scipy.cluster import hierarchy
from scipy.spatial.distance import pdist
from hypercluster.constants import param_delim
from hypercluster.utilities import convert_to_multiind, evaluate_one
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
sns.set(font="arial", style="white", color_codes=True, font_scale=1.3)
matplotlib.rcParams.update({"savefig.bbox": "tight"})
cmap = sns.cubehelix_palette(
start=0,
rot=0.4,
gamma=1.0,
hue=0.82,
light=1,
dark=0,
reverse=False,
as_cmap=True
)
cmap.set_over('black')
cmap.set_under('white')
cmap.set_bad("#DAE0E6")
def zscore(df):
"""Row zscores a DataFrame, ignores np.nan
Args:
df (DataFrame): DataFrame to z-score
Returns (DataFrame):
Row-zscored DataFrame.
"""
return df.subtract(df.mean(axis=1), axis=0).divide(df.std(axis=1), axis=0)
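# Illustrative: zscore operates row-wise, so each row of the result has mean 0
# and (sample) standard deviation 1, e.g.
#
#     zscore(DataFrame([[1.0, 2.0, 3.0]]))  # -> a single row close to [-1, 0, 1]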
def compute_order(
df,
dist_method: str = "euclidean",
cluster_method: str = "average"
):
"""Gives hierarchical clustering order for the rows of a DataFrame
Args:
df (DataFrame): DataFrame with rows to order.
        dist_method (str): Distance metric to pass to scipy.spatial.distance.pdist.
        cluster_method (str): Linkage method to pass to scipy.cluster.hierarchy.linkage.
Returns (pandas.Index):
Ordered row index.
"""
dist_mat = pdist(df, metric=dist_method)
link_mat = hierarchy.linkage(dist_mat, method=cluster_method)
return df.index[hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(link_mat, dist_mat))]
def visualize_evaluations(
evaluations_df: DataFrame,
savefig: bool = False,
output_prefix: str = "evaluations",
**heatmap_kws
) -> List[matplotlib.axes.Axes]:
"""Makes a z-scored visualization of all evaluations.
Args:
evaluations_df (DataFrame): Evaluations dataframe from clustering.optimize_clustering
output_prefix (str): If saving a figure, file prefix to use.
savefig (bool): Whether to save a pdf
**heatmap_kws: Additional keyword arguments to pass to seaborn.heatmap.
Returns (List[matplotlib.axes.Axes]):
List of all matplotlib axes.
"""
clusterers = sorted(
list(set([i.split(param_delim, 1)[0] for i in evaluations_df.columns]))
)
width_ratios = [
dict(
Counter(
[i.split(param_delim, 1)[0] for i in evaluations_df.columns]
)
)[clus]
for clus in clusterers
]
evaluations_df = zscore(evaluations_df)
width = 0.18 * (len(evaluations_df.columns) + 2 + (0.01 * (len(clusterers) - 1)))
height = 0.22 * (len(evaluations_df))
fig, axs = plt.subplots(
figsize=(width, height),
nrows=1,
ncols=(len(clusterers) + 1),
gridspec_kw=dict(
width_ratios=width_ratios + [2],
wspace=0.01,
left=0,
right=1,
top=1,
bottom=0,
),
)
vmin = np.nanquantile(evaluations_df, 0.1)
vmax = np.nanquantile(evaluations_df, 0.9)
heatmap_kws['cmap'] = heatmap_kws.get('cmap', cmap)
heatmap_kws['vmin'] = heatmap_kws.get('vmin', vmin)
heatmap_kws['vmax'] = heatmap_kws.get('vmax', vmax)
for i, clus in enumerate(clusterers):
temp = convert_to_multiind(clus, evaluations_df)
ax = axs[i]
sns.heatmap(
temp,
ax=ax,
yticklabels=temp.index,
xticklabels=["-".join([str(i) for i in col]) for col in temp.columns],
cbar_ax=axs[-1],
cbar_kws=dict(label="z-score"),
**heatmap_kws
)
ax.set_ylabel("")
ax.set_title(clus)
ax.set_yticklabels([])
axs[0].set_ylabel("evaluation method")
axs[0].set_yticklabels(temp.index, rotation=0)
if savefig:
plt.savefig("%s.pdf" % output_prefix)
return axs
def visualize_pairwise(
df: DataFrame,
savefig: bool = False,
output_prefix: Optional[str] = None,
method: Optional[str] = None,
**heatmap_kws
) -> List[matplotlib.axes.Axes]:
"""Visualize symmetrical square DataFrames.
Args:
df (DataFrame): DataFrame to visualize.
savefig (bool): Whether to save a pdf.
output_prefix (str): If saving a pdf, file prefix to use.
method (str): Label for cbar, if relevant.
**heatmap_kws: Additional keywords to pass to `seaborn.heatmap`_
Returns (List[matplotlib.axes.Axes]):
List of matplotlib axes for figure.
.. _seaborn.heatmap:
https://seaborn.pydata.org/generated/seaborn.heatmap.html
"""
heatmap_kws = {**heatmap_kws}
vmin = np.nanquantile(df, 0.1)
vmax = np.nanquantile(df, 0.9)
heatmap_kws['cmap'] = heatmap_kws.get('cmap', cmap)
heatmap_kws['vmin'] = heatmap_kws.get('vmin', vmin)
heatmap_kws['vmax'] = heatmap_kws.get('vmax', vmax)
cbar_kws = heatmap_kws.get('cbar_kws', {})
cbar_kws['label'] = cbar_kws.get('label', method)
heatmap_kws['cbar_kws'] = cbar_kws
cbar_ratio = 2
wspace = 0.01
height = 0.18 * len(df)
width = 0.18 * (len(df.columns)+cbar_ratio+wspace)
fig, axs = plt.subplots(
figsize=(width, height),
nrows=1,
ncols=2,
gridspec_kw=dict(
width_ratios=[len(df.columns), cbar_ratio],
wspace=wspace,
left=0,
right=1,
top=1,
bottom=0,
)
)
try:
order = compute_order(df.fillna(df.median()))
except ValueError:
order = df.index
df = df.loc[order, order]
sns.heatmap(
df,
xticklabels=order,
yticklabels=order,
ax=axs[0],
cbar_ax=axs[1],
**heatmap_kws
)
if savefig:
if output_prefix is None:
output_prefix = "heatmap.pairwise"
plt.savefig('%s.pdf' % output_prefix)
return axs
def visualize_label_agreement(
labels: DataFrame,
method: Optional[str] = None,
savefig: bool = False,
output_prefix: Optional[str] = None,
**heatmap_kws
) -> List[matplotlib.axes.Axes]:
"""Visualize similarity between clustering results given an evaluation metric.
Args:
labels (DataFrame): Labels DataFrame, e.g. from optimize_clustering or \
AutoClusterer.labels_
method (str): Method with which to compare labels. Must be a metric like the ones in \
constants.need_ground_truth, which takes two sets of labels.
savefig (bool): Whether to save a pdf.
output_prefix (str): If saving a pdf, file prefix to use.
**heatmap_kws: Additional keywords to pass to `seaborn.heatmap`_
Returns (List[matplotlib.axes.Axes]):
List of matplotlib axes
.. _seaborn.heatmap:
https://seaborn.pydata.org/generated/seaborn.heatmap.html
"""
if savefig and output_prefix is None:
output_prefix = 'heatmap.labels.pairwise'
if method is None:
method = 'adjusted_rand_score'
labels = labels.astype(float).corr(
lambda x, y: evaluate_one(x, method=method, gold_standard=y)
)
return visualize_pairwise(labels, savefig, output_prefix, method=method, **heatmap_kws)
def visualize_sample_label_consistency(
labels: DataFrame,
savefig: bool = False,
output_prefix: Optional[str] = None,
**heatmap_kws
) -> List[matplotlib.axes.Axes]:
"""Visualize how often two samples are labeled in the same group across conditions. Interpret
with care--if you use more conditions for some type of clusterers, e.g. more n_clusters for
KMeans, those cluster more similarly across conditions than between clusterers. This means
that more agreement in labeling could be due to the choice of clusterers rather than true
similarity between samples.
Args:
labels (DataFrame): Labels DataFrame, e.g. from optimize_clustering or \
AutoClusterer.labels_
savefig (bool): Whether to save a pdf.
output_prefix (str): If saving a pdf, file prefix to use.
**heatmap_kws: Additional keywords to pass to `seaborn.heatmap`_
Returns (List[matplotlib.axes.Axes]):
List of matplotlib axes
.. _seaborn.heatmap:
https://seaborn.pydata.org/generated/seaborn.heatmap.html
"""
if savefig and output_prefix is None:
output_prefix = "heatmap.sample.pairwise"
#TODO change this to much faster matmult
labels = labels.transpose().astype(float).corr(lambda x, y: sum(
np.equal(x[((x != -1) | (y != -1))], y[((x != -1) | (y != -1))])
))
return visualize_pairwise(labels, savefig, output_prefix, method='# same label', **heatmap_kws)
def visualize_for_picking_labels(
evaluation_df: DataFrame,
method: Optional[str] = None,
savefig_prefix: Optional[str] = None
):
"""Generates graphs similar to a `scree graph`_ for PCA for each parameter and each clusterer.
Args:
evaluation_df (DataFrame): DataFrame of evaluations to visualize. Clusterer.evaluation_df.
method (str): Which metric to visualize.
savefig_prefix (str): If not None, save a figure with give prefix.
Returns:
matplotlib axes.
.. _scree graph:
https://en.wikipedia.org/wiki/Scree_plot
"""
if method is None:
method = "silhouette_score"
cluss_temp = list(set([i.split(param_delim, 1)[0] for i in evaluation_df.columns]))
# get figure dimensions
ncols = 0
cluss = []
for ploti, clus in enumerate(cluss_temp):
scores = convert_to_multiind(
clus, evaluation_df.loc[[method], :]
).transpose().dropna(how='any')
if len(scores) == 0:
logging.error(
'Score %s is missing for clusterer %s, skipping visualization' % (method, clus)
)
continue
indep = scores.index.to_frame().reset_index(drop=True)
try:
indep.astype(float)
        except (ValueError, AssertionError):
logging.error('Cannot convert %s data to floats, skipping visualization' % clus)
continue
cluss.append(clus)
if scores.index.nlevels > ncols:
ncols = scores.index.nlevels
if not cluss:
logging.error('No valid clusterers, cannot visualize. ')
return None
cluss.sort()
ybuff = np.abs(np.nanquantile(evaluation_df.loc[method], 0.05))
ylim = (evaluation_df.loc[method].min() - ybuff, evaluation_df.loc[method].max() + ybuff)
colors = cycle(sns.color_palette('twilight', n_colors=len(cluss) * ncols))
fig = plt.figure(figsize=(5 * (ncols), 5 * len(cluss)))
gs = plt.GridSpec(nrows=len(cluss), ncols=ncols, wspace=0.25, hspace=0.25)
for ploti, clus in enumerate(cluss):
scores = convert_to_multiind(
clus, evaluation_df.loc[[method], :]
).transpose().dropna(how='any')
indep = scores.index.to_frame().reset_index(drop=True)
for whcol, col in enumerate(indep.columns):
if whcol == 0:
saveax = plt.subplot(gs[ploti, whcol])
ax = saveax
ax.set_ylim(ylim)
ax.set_ylabel(clus)
else:
ax = plt.subplot(gs[ploti, whcol], sharey=saveax)
color = next(colors)
# plot eval results
sns.regplot(
indep[col],
scores[method].values,
color=color,
ax=ax,
logistic=True,
)
axs = fig.get_axes()
axs[0].set_title('%s results per parameter' % method, ha='left')
if savefig_prefix:
plt.savefig('%s.pdf' % savefig_prefix)
return axs
|
11560016
|
import random
import re
import shlex
import subprocess
import time
from threading import Thread
import httpx # type: ignore
import pytest
import mosec
TEST_PORT = "5000"
URL = f"http://0.0.0.0:{TEST_PORT}"
@pytest.fixture
def http_client():
client = httpx.Client()
yield client
client.close()
@pytest.fixture(scope="session")
def mosec_service(request):
name, wait = request.param
service = subprocess.Popen(
shlex.split(f"python -u tests/{name}.py --port {TEST_PORT}"),
)
time.sleep(wait) # wait for service to start
assert service.poll() is None, "service failed to start"
yield service
service.terminate()
time.sleep(2) # wait for service to stop
@pytest.mark.parametrize(
"mosec_service, http_client",
[
pytest.param(("square_service", 2), "", id="basic"),
pytest.param(
("square_service_shm", 5),
"",
marks=pytest.mark.arrow,
id="shm_arrow",
),
],
indirect=["mosec_service", "http_client"],
)
def test_square_service(mosec_service, http_client):
resp = http_client.get(URL)
assert resp.status_code == 200
assert f"mosec/{mosec.__version__}" == resp.headers["server"]
resp = http_client.get(f"{URL}/metrics")
assert resp.status_code == 200
resp = http_client.post(f"{URL}/inference", json={"msg": 2})
assert resp.status_code == 422
assert resp.text == "validation error: 'x'"
resp = http_client.post(f"{URL}/inference", content=b"bad-binary-request")
assert resp.status_code == 400
validate_square_service(http_client, URL, 2)
@pytest.mark.parametrize(
"mosec_service, http_client",
[
pytest.param(("square_service", 2), "", id="basic"),
pytest.param(
("square_service_shm", 5),
"",
marks=pytest.mark.arrow,
id="shm_arrow",
),
],
indirect=["mosec_service", "http_client"],
)
def test_square_service_mp(mosec_service, http_client):
threads = []
for _ in range(20):
t = Thread(
target=validate_square_service,
args=(http_client, URL, random.randint(-500, 500)),
)
t.start()
threads.append(t)
for t in threads:
t.join()
assert_batch_larger_than_one(http_client, URL)
assert_empty_queue(http_client, URL)
def validate_square_service(http_client, url, x):
resp = http_client.post(f"{url}/inference", json={"x": x})
assert resp.json()["x"] == x**2
def assert_batch_larger_than_one(http_client, url):
metrics = http_client.get(f"{url}/metrics").content.decode()
bs = re.findall(r"batch_size_bucket.+", metrics)
get_bs_int = lambda x: int(x.split(" ")[-1]) # noqa
assert get_bs_int(bs[-1]) > get_bs_int(bs[0])
def assert_empty_queue(http_client, url):
metrics = http_client.get(f"{url}/metrics").content.decode()
remain = re.findall(r"mosec_service_remaining_task \d+", metrics)[0]
assert int(remain.split(" ")[-1]) == 0
|
11560020
|
import os
import Ngl, Nio
from utils import *
dirc = os.path.join("$NCARGTEST","nclscripts","cdf_files")
b = Nio.open_file(os.path.join(dirc,"wrftest_for.nc"))
tk_for = b.variables["tk"][:]
rh_for = b.variables["rh"][:]
tk_m_for = multid(tk_for,[3,2,2])
rh_m_for = multid(rh_for,[3,2,2])
filename = "wrfout_d01_2005-12-14_13:00:00.GWATC_FCST"
a = Nio.open_file(os.path.join(dirc,filename+".nc"))
Qv = a.variables["QVAPOR"][:]
P = a.variables["P"][:] # perturbation
Pb = a.variables["PB"][:] # base state pressure"
P = P + Pb # total pressure
theta = a.variables["T"][:] # perturbation potential temperature (theta-t0)"]
theta = theta + 300.
Pm = multid(P,[3,2,2])
Qvm = multid(Qv,[3,2,2])
thetam = multid(theta,[3,2,2])
#---Test the float case
TK_ncl = Ngl.wrf_tk (P, theta)
RH_ncl = Ngl.wrf_rh (Qv, P, TK_ncl)
test_values("wrf_tk",TK_ncl,tk_for)
test_values("wrf_rh",RH_ncl,rh_for)
TK_m_ncl = Ngl.wrf_tk (Pm, thetam)
RH_m_ncl = Ngl.wrf_rh (Qvm, Pm, TK_m_ncl)
test_values("wrf_tk",TK_m_ncl,tk_m_for)
test_values("wrf_rh",RH_m_ncl,rh_m_for)
|
11560026
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
import math
from transformers import Wav2Vec2Model,Wav2Vec2Config
from transformers.modeling_outputs import BaseModelOutput
from typing import Optional, Tuple
_CONFIG_FOR_DOC = "Wav2Vec2Config"
# the implementation of Wav2Vec2Model is borrowed from https://huggingface.co/transformers/_modules/transformers/models/wav2vec2/modeling_wav2vec2.html#Wav2Vec2Model
# initialize our encoder with the pre-trained wav2vec 2.0 weights.
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.Tensor] = None,
min_masks: int = 0,
) -> np.ndarray:
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
padding_mask = attention_mask.ne(1) if attention_mask is not None else None
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
lengths = np.full(num_mask, mask_length)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray([mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j])])
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
# linear interpolation layer
def linear_interpolation(features, input_fps, output_fps, output_len=None):
features = features.transpose(1, 2)
seq_len = features.shape[2] / float(input_fps)
if output_len is None:
output_len = int(seq_len * output_fps)
output_features = F.interpolate(features,size=output_len,align_corners=True,mode='linear')
return output_features.transpose(1, 2)
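# Illustrative rate conversion: wav2vec 2.0 features arrive at 50 fps, so for
# vocaset's 30 fps targets, 100 input frames cover seq_len = 100 / 50 = 2 s and
# produce int(2 * 30) = 60 output frames when output_len is not given.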
class Wav2Vec2Model(Wav2Vec2Model):
def __init__(self, config):
super().__init__(config)
def forward(
self,
input_values,
dataset,
attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
frame_num=None
):
self.config.output_attentions = True
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.feature_extractor(input_values)
hidden_states = hidden_states.transpose(1, 2)
if dataset == "BIWI":
# cut audio feature
if hidden_states.shape[1]%2 != 0:
hidden_states = hidden_states[:, :-1]
if frame_num and hidden_states.shape[1]>frame_num*2:
hidden_states = hidden_states[:, :frame_num*2]
elif dataset == "vocaset":
hidden_states = linear_interpolation(hidden_states, 50, 30,output_len=frame_num)
if attention_mask is not None:
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
attention_mask = torch.zeros(
hidden_states.shape[:2], dtype=hidden_states.dtype, device=hidden_states.device
)
attention_mask[
(torch.arange(attention_mask.shape[0], device=hidden_states.device), output_lengths - 1)
] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
hidden_states = self.feature_projection(hidden_states)
if self.config.apply_spec_augment and self.training:
batch_size, sequence_length, hidden_size = hidden_states.size()
if self.config.mask_time_prob > 0:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
self.config.mask_time_prob,
self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=2,
)
hidden_states[torch.from_numpy(mask_time_indices)] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0:
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
self.config.mask_feature_prob,
self.config.mask_feature_length,
)
mask_feature_indices = torch.from_numpy(mask_feature_indices).to(hidden_states.device)
hidden_states[mask_feature_indices[:, None].expand(-1, sequence_length, -1)] = 0
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
11560060
|
from binding import *
from ..namespace import llvm
@llvm.Class()
class AssemblyAnnotationWriter:
_include_ = "llvm/Assembly/AssemblyAnnotationWriter.h"
|
11560076
|
import requests
import json
import pymysql
import sys
SUCCESS = "SUCCESS"
FAILED = "FAILED"
def create_db(dbname, dbuser, dbpassword, rdsendpoint, rdsuser, rdspassword):
create_db_query = f"CREATE DATABASE {dbname};"
create_user_query = f"CREATE USER '{dbuser}'@'%' IDENTIFIED BY '{dbpassword}';"
grant_query = f"GRANT ALL PRIVILEGES ON {dbname}.* TO '{dbuser}'@'%';"
flush_query = "FLUSH PRIVILEGES;"
try:
conn = pymysql.connect(host=rdsendpoint,
user=rdsuser,
password=<PASSWORD>)
cursor = conn.cursor()
cursor.execute(create_db_query)
cursor.execute(create_user_query)
cursor.execute(grant_query)
cursor.execute(flush_query)
cursor.close()
conn.commit()
conn.close()
except Exception as err:
return err
return None
def delete_db(dbname, dbuser, rdsendpoint, rdsuser, rdspassword):
delete_db_query = f"DROP DATABASE {dbname}"
delete_user_query = f"DROP USER '{dbuser}'"
db_exists_query = f"SHOW DATABASES LIKE '{dbname}'"
user_exists_query = f"SELECT user FROM mysql.user where user='{dbuser}'"
try:
conn = pymysql.connect(host=rdsendpoint,
user=rdsuser,
password=<PASSWORD>)
cursor = conn.cursor()
db_exists = cursor.execute(db_exists_query)
if db_exists:
cursor.execute(delete_db_query)
user_exists = cursor.execute(user_exists_query)
if user_exists:
cursor.execute(delete_user_query)
cursor.close()
conn.commit()
conn.close()
except Exception as err:
return err
return None
def handler(event, context):
input_props = event['ResourceProperties']
required_props = ["DBName", "RDSEndpoint", "RDSUser", "RDSPassword"]
missing_props = [prop for prop in required_props if prop not in input_props]
if missing_props:
if event['RequestType'] == "Delete":
send(event, context, SUCCESS, responseData={})
sys.exit(0)
reason = f"Required properties are missing: {missing_props}"
send(event, context, FAILED, responseReason=reason, responseData={})
sys.exit(1)
db_name = input_props['DBName']
rds_endpoint = input_props['RDSEndpoint']
rds_user = input_props['RDSUser']
rds_password = input_props['<PASSWORD>']
if "DBUser" not in input_props or len(input_props['DBUser']) == 0:
db_user = db_name
else:
db_user = input_props['DBUser']
if "DBPassword" not in input_props or len(input_props['DBPassword']) == 0:
db_password = db_name
else:
db_password = input_props['DBPassword']
if event['RequestType'] == "Delete":
err = delete_db(db_name, db_user, rds_endpoint, rds_user, rds_password)
elif event['RequestType'] in ["Create", "Update"]:
err = create_db(db_name, db_user, db_password, rds_endpoint, rds_user, rds_password)
if err:
print(err)
send(event, context, FAILED, physicalResourceId="", responseData={})
sys.exit(1)
send(event, context, SUCCESS, physicalResourceId=db_name, responseData={})
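# Illustrative shape of the custom-resource event consumed by handler() (all
# field values are placeholders, not real stack data):
#
#     {
#         "RequestType": "Create",                      # or "Update" / "Delete"
#         "ResponseURL": "https://example.invalid/presigned-url",
#         "StackId": "...", "RequestId": "...", "LogicalResourceId": "...",
#         "ResourceProperties": {
#             "DBName": "mydb", "RDSEndpoint": "...",
#             "RDSUser": "admin", "RDSPassword": "...",
#             "DBUser": "optional", "DBPassword": "optional"
#         }
#     }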
def send(event, context, responseStatus, responseData, responseReason="", physicalResourceId=None, noEcho=False):
responseUrl = event['ResponseURL']
print(responseUrl)
responseBody = {}
responseBody['Status'] = responseStatus
responseBody['Reason'] = responseReason
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
responseBody['StackId'] = event['StackId']
responseBody['RequestId'] = event['RequestId']
responseBody['LogicalResourceId'] = event['LogicalResourceId']
responseBody['NoEcho'] = noEcho
responseBody['Data'] = responseData
json_responseBody = json.dumps(responseBody)
print("Response body:\n" + json_responseBody)
headers = {
'content-type': '',
'content-length': str(len(json_responseBody))
}
try:
response = requests.put(responseUrl,
data=json_responseBody,
headers=headers)
print("Status code: " + response.reason)
except Exception as e:
print("send(..) failed executing requests.put(..): " + str(e))
|
11560083
|
import re
from tempfile import TemporaryDirectory
import pytest
from aqt.archives import TargetConfig
from aqt.exceptions import UpdaterError
from aqt.helper import Settings
from aqt.updater import Updater
@pytest.fixture(autouse=True)
def setup_settings():
Settings.load_settings()
@pytest.mark.parametrize(
"target_config, expected_err_pattern",
(
(
TargetConfig("5.15.2", "winrt", "win64_msvc2019_winrt_x86", "windows"),
re.compile(
r"Updater caused an IO error: .*No such file or directory: "
# '.*' wildcard used to match path separators on windows/*nix
r".*5\.15\.2.*winrt_x86_msvc2019.*mkspecs.*qconfig.pri.*"
),
),
(
TargetConfig("5.15.2", "desktop", "win64_msvc2019_64", "windows"),
re.compile(
r"Updater caused an IO error: .*No such file or directory: "
# '.*' wildcard used to match path separators on windows/*nix
r".*5\.15\.2.*msvc2019_64.*mkspecs.*qconfig.pri.*"
),
),
(
TargetConfig("6.1.2", "desktop", "clang_64", "mac"),
re.compile(
r"Updater caused an IO error: .*No such file or directory: "
# '.*' wildcard used to match path separators on windows/*nix
r".*6\.1\.2.*macos.*mkspecs.*qconfig.pri.*"
),
),
(
TargetConfig("6.1.1", "desktop", "clang_64", "mac"),
re.compile(
r"Updater caused an IO error: .*No such file or directory: "
# '.*' wildcard used to match path separators on windows/*nix
r".*6\.1\.1.*clang_64.*mkspecs.*qconfig.pri.*"
),
),
),
)
def test_updater_update_license_io_error(monkeypatch, target_config: TargetConfig, expected_err_pattern: re.Pattern):
"""
All of these tests will raise IOError when they attempt to patch the license file.
"""
with pytest.raises(UpdaterError) as err:
with TemporaryDirectory() as empty_dir:
# Try to update a Qt installation that does not exist
Updater.update(target_config, base_dir=empty_dir)
assert err.type == UpdaterError
err_msg = format(err.value)
assert expected_err_pattern.match(err_msg)
|
11560132
|
from models import db, Product
import setup
app = setup.create_app()
items = [
{
'name': 'Product 1',
'slug': 'product-1',
'image': 'apple.png',
'price': 1
},
{
'name': 'Product 2',
'slug': 'product-2',
'image': 'banana.png',
'price': 2
},
{
'name': 'Product 3',
'slug': 'product-3',
'image': 'coffee.png',
'price': 3
},
{
'name': 'Product 4',
'slug': 'product-4',
'image': 'rubber_duck.png',
'price': 4
},
{
'name': 'Product 5',
'slug': 'product-5',
'image': 'tomato.png',
'price': 1
},
{
'name': 'Product 6',
'slug': 'product-6',
'image': 'Fidget_spinner_in_blue.png',
'price': 3
},
]
for item in items:
record = Product.query.filter_by(slug=item['slug']).first()
if record is None:
print("Adding product " + item['slug'] + "\n")
record = Product()
record.name = item['name']
record.slug = item['slug']
record.image = item['image']
record.price = item['price']
db.session.add(record)
db.session.commit()
else:
print("product " + item['slug'] + " has already been added ...... Skipping \n")
|
11560175
|
import gym
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'FrozenLake-v0'
env = gym.make(ENV_NAME)
np.random.seed(1)
env.seed(1)
Actions = env.action_space.n
model = Sequential()
model.add(Embedding(16, 4, input_length=1))
model.add(Reshape((4,)))
print(model.summary())
memory = SequentialMemory(limit=10000, window_length=1)
policy = BoltzmannQPolicy()
Dqn = DQNAgent(model=model, nb_actions=Actions,
memory=memory, nb_steps_warmup=500,
target_model_update=1e-2, policy=policy,
enable_double_dqn=False, batch_size=512
)
Dqn.compile(Adam())
Dqn.fit(env, nb_steps=1e5, visualize=False, verbose=1, log_interval=10000)
Dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
Dqn.test(env, nb_episodes=20, visualize=False)
|
11560244
|
from ursina import *
from ursina.shaders import basic_lighting_shader
from ursinanetworking import *
BLOCKS_PARENT = Entity()
class Block(Button):
def __init__(self, position = (0,0,0)):
super().__init__(
parent = BLOCKS_PARENT,
position = position,
model = "block",
origin_y = .5,
color = color.white,
highlight_color = color.white,
scale = .5,
shader = basic_lighting_shader
)
self.name = "unnamed_block"
self.sound = None
self.break_sound = None
self.client = None
self.breakable = True
|
11560261
|
import unittest
from ccdc.protein import Protein
from ccdc.io import MoleculeReader
from hotspots.hs_io import HotspotReader, HotspotWriter
from hotspots.result import Results
from hotspots.grid_extension import Grid
from hotspots.calculation import Runner
from hotspots.wrapper_pymol import PyMOLFile, PyMOLCommands
from pprint import pprint
import numpy as np
import os
import tempfile
import zipfile
class TestHotspotReader(unittest.TestCase):
def test_read(self):
path = "testdata/hs_io/minimal_multi_all_grids/out.zip"
with HotspotReader(path=path) as r:
hr = r.read(identifier="hotspot-1")
self.assertIsInstance(hr, Results)
with HotspotReader(path=path) as r:
hr = r.read()
self.assertIsInstance(hr, list)
class TestRun():
def test_generate_real(self):
runner = Runner()
hr = runner.from_pdb(pdb_code="2vta", buriedness_method='ghecom')
settings = HotspotWriter.Settings()
settings.output_superstar = True
parent = "testdata/2vta"
with HotspotWriter(parent) as w:
w.write(hr)
def generate_fake(self, buriedness=False, weighted=False, superstar=True):
"""
create a small set of grids for testing
:param buriedness:
:param weighted:
:param superstar:
:return:
"""
def populate_grid(template, num_spheres, radius=1, value=8, scaling='linear'):
h = template.copy_and_clear()
for i in range(1, num_spheres):
x, y, z = [np.random.randint(low=2, high=ax - 2, size=1) for ax in h.nsteps]
h.set_sphere(point=h.indices_to_point(x, y, z),
radius=radius,
value=value,
scaling=scaling)
return h
protein = Protein.from_file("testdata/6y2g_A/binding_site.pdb")
mol = MoleculeReader("testdata/6y2g_A/A_mol.mol2")[0]
g = Grid.initalise_grid([a.coordinates for a in mol.atoms])
if buriedness:
buriedness_grid = Grid.from_molecule(mol)
else:
buriedness_grid = None
interactions = ["apolar", "donor", "acceptor"]
super_grids = {p: populate_grid(template=g, num_spheres=3) for p in interactions}
if superstar:
superstar_grids = {p: populate_grid(template=g, num_spheres=3) for p in interactions}
else:
superstar_grids = None
if weighted:
weighted_superstar_grids = {p: populate_grid(template=g, num_spheres=3) for p in interactions}
else:
weighted_superstar_grids = None
return Results(super_grids=super_grids,
protein=protein,
buriedness=buriedness_grid,
superstar=superstar_grids,
weighted_superstar=weighted_superstar_grids)
def test_write_real_single(self):
base = "testdata/1hcl"
interactions = ["donor", "acceptor", "apolar"]
super_grids = {p: Grid.from_file(os.path.join(base, f"{p}.grd")) for p in interactions}
superstar_grids = {p: Grid.from_file(os.path.join(base, f"superstar_{p}.grd")) for p in interactions}
buriedness = Grid.from_file(os.path.join(base, "buriedness.grd"))
prot = Protein.from_file(os.path.join(base, "protein.pdb"))
hr = Results(super_grids=super_grids,
protein=prot,
buriedness=buriedness,
superstar=superstar_grids)
settings = HotspotWriter.Settings()
settings.output_superstar = True
with HotspotWriter("testdata/hs_io/minimal_all_grids_real", settings=settings) as w:
w.write(hr)
def test_write_fake_single(self):
a = self.generate_fake(buriedness=True, superstar=True)
settings = HotspotWriter.Settings()
settings.output_superstar = True
with HotspotWriter("testdata/hs_io/minimal_all_grids", settings=settings) as w:
w.write(a)
def test_write_fake_multi(self):
a = self.generate_fake(buriedness=True, superstar=True)
b = self.generate_fake(buriedness=True, superstar=True)
settings = HotspotWriter.Settings()
settings.output_superstar = True
with HotspotWriter("testdata/hs_io/minimal_multi_all_grids", settings=settings) as w:
w.write([a, b])
class TestHotspotWriter(unittest.TestCase):
def test_write_pymol_isosurfaces(self):
# test out.zip prepared, generate minimal pymol commands to test isosurface gen code
settings = HotspotWriter.Settings()
writer = HotspotWriter("testdata/hs_io/minimal_all_grids", settings=settings) # we won't actually write
# pymol file initialised in the writer init function, therefore the unzip code is already in place
writer.pymol_out.commands += writer._write_pymol_isosurfaces({"apolar": None, "donor": None, "acceptor": None},
"hotspot",
"hotspot",
"fhm")
writer.pymol_out.commands += writer._write_pymol_isosurfaces({"apolar": None, "donor": None, "acceptor": None},
"hotspot",
"hotspot",
"superstar")
writer.pymol_out.write("testdata/hs_io/minimal_all_grids/test_write_pymol_isosurfaces.py")
def test_write_pymol_isoslider(self):
# read in manually
path = "testdata/hs_io/minimal_all_grids/out.zip"
base = tempfile.mkdtemp()
with zipfile.ZipFile(path) as hs_zip:
hs_zip.extractall(base)
base = os.path.join(base, "hotspot")
interactions = ["donor", "acceptor", "apolar"]
super_grids = {p: Grid.from_file(os.path.join(base, f"{p}.grd")) for p in interactions}
superstar_grids = {p: Grid.from_file(os.path.join(base, f"superstar_{p}.grd")) for p in interactions}
prot = Protein.from_file(os.path.join(base, "protein.pdb"))
hr = Results(super_grids=super_grids,
protein=prot,
superstar=superstar_grids)
hr.identifier = "hotspot"
settings = HotspotWriter.Settings()
settings.output_superstar = True
writer = HotspotWriter("testdata/hs_io/minimal_all_grids", settings=settings) # we won't actually write
writer.pymol_out.commands += writer._write_pymol_isosurfaces(hr.super_grids,
"hotspot",
"hotspot",
"fhm")
writer.pymol_out.commands += writer._write_pymol_isosurfaces(hr.superstar,
"hotspot",
"hotspot",
"superstar")
writer._write_pymol_isoslider(hr)
writer.pymol_out.write("testdata/hs_io/minimal_all_grids/test_write_pymol_isoslider.py")
# def test_get_labels(self):
# labs = self.result.grid_labels()
|
11560266
|
import click
from typing import Dict
from dumpit import pdumpit
from aenum import MultiValueEnum
class DefaultQueryParams(MultiValueEnum):
"""Default set of query parameters."""
msg_type = 'msgtype', 'simple or extended (extended cost more credits)'
protocol = 'protocol', 'Response type. Use one of the following: '\
'xml, csv, json, jsono (object)'
class QueryParams(MultiValueEnum):
"""Query parameters for API calls."""
@classmethod
def get_params(cls) -> Dict:
"""Merges default query parameters with call specific query parameters."""
return {**{param.name: param.value for param in cls},
**{param.name: param.value for param in DefaultQueryParams}}
@classmethod
def print_params(cls) -> None:
"""Prints all query parameters."""
for param in DefaultQueryParams:
param.__doc__ = param.values[1]
click.echo('Default params:')
pdumpit(DefaultQueryParams, all_=False)
for param in cls:
param.__doc__ = param.values[1]
click.echo('{} params:'.format(cls.__name__))
pdumpit(cls, all_=False)
|
11560299
|
import numpy as np
class BaseDataLoaders(object):
def __init__(self, name):
self.data_size = None
self.indexes = None
self.name = name
def _shuffle_indexes(self):
np.random.shuffle(self.indexes)
def _shuffle_batch_indexes(self):
np.random.shuffle(self.batch_indexes)
def epoch_init(self, config, shuffle=True, verbose=True, fix_batch=False):
self.ptr = 0
self.batch_size = config.batch_size
self.num_batch = self.data_size // config.batch_size
if verbose:
            print('Number of left-over samples = %d' % (self.data_size - config.batch_size * self.num_batch))
if shuffle and not fix_batch:
self._shuffle_indexes()
self.batch_indexes = []
for i in range(self.num_batch):
self.batch_indexes.append(self.indexes[i*self.batch_size: (i+1)*self.batch_size])
if shuffle and fix_batch:
self._shuffle_batch_indexes()
if verbose:
print('%s begins with %d batches' % (self.name, self.num_batch))
def next_batch(self):
if self.ptr < self.num_batch:
selected_ids = self.batch_indexes[self.ptr]
self.ptr += 1
return self._prepare_batch(selected_index=selected_ids)
else:
return None
def _prepare_batch(self, *args, **kwargs):
raise NotImplementedError('Have to override _prepare_batch()')
def pad_to(self, max_len, tokens, do_pad):
if len(tokens) >= max_len:
return tokens[: max_len-1] + [tokens[-1]]
elif do_pad:
return tokens + [0] * (max_len - len(tokens))
else:
return tokens
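# A hedged sketch of a concrete subclass: _prepare_batch must be overridden to
# turn the selected indexes into model inputs. ToyDataLoader and its fields are
# illustrative names, not part of the original code.
class ToyDataLoader(BaseDataLoaders):
    def __init__(self, name, samples):
        super(ToyDataLoader, self).__init__(name)
        self.samples = samples
        self.data_size = len(samples)
        self.indexes = np.arange(self.data_size)
    def _prepare_batch(self, selected_index):
        # gather the raw samples for this batch
        return [self.samples[i] for i in selected_index]
# usage: loader = ToyDataLoader('train', list(range(100))), then call
# loader.epoch_init(config) with any object exposing a batch_size attribute.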
|