11488981
|
import numpy as np
from random import shuffle
from scipy.sparse import csr_matrix
from deepneuro.utilities.util import add_parameter
from deepneuro.augmentation.augment import Augmentation
class ExtractPatches(Augmentation):
def load(self, kwargs):
# Patch Parameters
add_parameter(self, kwargs, 'patch_shape', None)
add_parameter(self, kwargs, 'patch_extraction_conditions', None)
add_parameter(self, kwargs, 'patch_region_conditions', None)
add_parameter(self, kwargs, 'patch_dimensions', {})
# Derived Parameters
self.patch_regions = []
self.patches = None
self.patch_corner = None
self.patch_slice = None
self.leading_dims = {}
self.input_shape = {}
self.output_shape = {} # Redundant
self.augmentation_string = '_patch_'
def initialize_augmentation(self):
""" There are some batch dimension problems with output_shape here. Hacky fixes for now, but revisit. TODO
"""
if not self.initialization:
# A weird way to proportionally divvy up patch conditions.
# TODO: Rewrite
self.condition_list = [None] * (self.multiplier)
self.region_list = [None] * (self.multiplier)
if self.patch_extraction_conditions is not None:
start_idx = 0
for condition_idx, patch_extraction_condition in enumerate(self.patch_extraction_conditions):
end_idx = start_idx + int(np.ceil(patch_extraction_condition[1] * self.multiplier))
self.condition_list[start_idx:end_idx] = [condition_idx] * (end_idx - start_idx)
start_idx = end_idx
if self.patch_region_conditions is not None:
start_idx = 0
for condition_idx, patch_region_condition in enumerate(self.patch_region_conditions):
end_idx = start_idx + int(np.ceil(patch_region_condition[1] * self.multiplier))
self.region_list[start_idx:end_idx] = [condition_idx] * (end_idx - start_idx)
start_idx = end_idx
shuffle(self.region_list)
for label, data_group in list(self.data_groups.items()):
self.input_shape[label] = data_group.get_shape()
if label not in list(self.patch_dimensions.keys()):
# If no provided patch dimensions, just presume the format is [batch, patch_dimensions, channel]
# self.patch_dimensions[label] = [-4 + x for x in xrange(len(self.input_shape[label]) - 1)]
self.patch_dimensions[label] = [x + 1 for x in range(len(self.input_shape[label]) - 1)]
# This is a little goofy.
self.output_shape[label] = np.array(self.input_shape[label])
# self.output_shape[label][self.patch_dimensions[label]] = list(self.patch_shape)
self.output_shape[label][[x - 1 for x in self.patch_dimensions[label]]] = list(self.patch_shape)
self.output_shape[label] = tuple(self.output_shape[label])
# Batch dimension correction, revisit
# self.patch_dimensions[label] = [x + 1 for x in self.patch_dimensions[label]]
self.initialization = True
def iterate(self):
super(ExtractPatches, self).iterate()
self.generate_patch_corner()
def reset(self, augmentation_num=0):
self.patch_regions = []
region_input_data = {label: self.data_groups[label].augmentation_cases[augmentation_num] for label in list(self.data_groups.keys())}
if self.patch_region_conditions is not None:
for region_condition in self.patch_region_conditions:
self.patch_regions += [np.where(region_condition[0](region_input_data))]
# self.patch_regions += self.get_indices_sparse(region_condition[0](region_input_data))
return
def augment(self, augmentation_num=0):
# Any more sensible way to deal with this case?
if self.patches is None:
self.generate_patch_corner(augmentation_num)
for label, data_group in list(self.data_groups.items()):
# A bit lengthy. Also unnecessarily rebuffers patches
data_group.augmentation_cases[augmentation_num + 1] = self.patches[label]
def generate_patch_corner(self, augmentation_num=0):
""" Think about how one could to this, say, with 3D and 4D volumes at the same time.
Also, patching across the modality dimension..? Interesting..
"""
# TODO: Escape clause in case acceptable patches cannot be found.
if self.patch_region_conditions is None:
corner_idx = None
else:
region = self.patch_regions[self.region_list[self.iteration]]
# print(self.region_list[self.iteration])
# TODO: Make errors like these more ubiquitous.
if len(region[0]) == 0:
# raise ValueError('The region ' + str(self.patch_region_conditions[self.region_list[self.iteration]][0]) + ' has no voxels to select patches from. Please modify your patch-sampling region')
# Tempfix -- Eek
region = self.patch_regions[self.region_list[1]]
if len(region[0]) == 0:
print('Provided patch extraction region has selected 0 voxels. Selecting non-zero patch.')
region = np.where(self.data_groups['input_data'].augmentation_cases[augmentation_num] != 0)
self.patch_regions[self.region_list[0]] = region
corner_idx = np.random.randint(len(region[0]))
self.patches = {}
# Pad edge patches.
for label, data_group in list(self.data_groups.items()):
input_data = self.data_groups[label].augmentation_cases[augmentation_num]
# TODO: Some redundancy here
if corner_idx is None:
corner = np.array([np.random.randint(0, self.input_shape[label][i]) for i in range(len(self.input_shape[label]))])[self.patch_dimensions[label]]
else:
corner = np.array([d[corner_idx] for d in region])[self.patch_dimensions[label]]
patch_slice = [slice(None)] * (len(self.input_shape[label]) + 1)
# Will run into problems with odd-shaped patches.
for idx, patch_dim in enumerate(self.patch_dimensions[label]):
patch_slice[patch_dim] = slice(max(0, corner[idx] - self.patch_shape[idx] // 2), corner[idx] + self.patch_shape[idx] // 2, 1)
input_shape = input_data.shape
self.patches[label] = input_data[tuple(patch_slice)]
# More complicated padding needed for center-voxel based patches.
pad_dims = [(0, 0)] * len(self.patches[label].shape)
for idx, patch_dim in enumerate(self.patch_dimensions[label]):
pad = [0, 0]
if corner[idx] > input_shape[patch_dim] - self.patch_shape[idx] // 2:
pad[1] = self.patch_shape[idx] // 2 - (input_shape[patch_dim] - corner[idx])
if corner[idx] < self.patch_shape[idx] // 2:
pad[0] = self.patch_shape[idx] // 2 - corner[idx]
pad_dims[patch_dim] = tuple(pad)
self.patches[label] = np.lib.pad(self.patches[label], tuple(pad_dims), 'edge')
# print(self.patches[label].shape)
# if label == 'ground_truth':
# for i in range(4):
# print(np.sum(self.patches[label][..., i]))
# print(label, np.sum(self.patches[label]))
return
def compute_M(self, data):
# Magic, vectorized sparse matrix calculation method to replace np.where
# https://stackoverflow.com/questions/33281957/faster-alternative-to-numpy-where
cols = np.arange(data.size)
return csr_matrix((cols, (data.ravel(), cols)), shape=(data.max() + 1, data.size))
def get_indices_sparse(self, data):
# Magic, vectorized sparse matrix calculation method to replace np.where
# https://stackoverflow.com/questions/33281957/faster-alternative-to-numpy-where
M = self.compute_M(data)
return [np.unravel_index(row.data, data.shape) for row in M]
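# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demonstration of the sparse-matrix trick used by
# compute_M / get_indices_sparse above: it groups the flat indices of an integer
# label array by value in a single pass, as a faster alternative to calling
# np.where once per label. The toy label array below is made up for the example.
def _demo_get_indices_sparse():
    labels = np.array([[0, 1, 1],
                       [2, 0, 2]])
    cols = np.arange(labels.size)
    M = csr_matrix((cols, (labels.ravel(), cols)),
                   shape=(labels.max() + 1, labels.size))
    # One entry per label value; each entry holds that label's coordinates.
    return [np.unravel_index(row.data, labels.shape) for row in M]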
class ChooseData(Augmentation):
def load(self, kwargs):
# Add functionality for choosing multiple axes
# Choose Parameters
add_parameter(self, kwargs, 'axis', {})
add_parameter(self, kwargs, 'choices', None)
add_parameter(self, kwargs, 'num_chosen', 1)
add_parameter(self, kwargs, 'random_sample', True)
# Derived Parameters
self.input_shape = {}
self.augmentation_string = '_choose_'
def initialize_augmentation(self):
if not self.initialization:
self.choices = np.array(self.choices)
for label, data_group in list(self.data_groups.items()):
input_shape = data_group.get_shape()
self.output_shape[label] = np.array(input_shape)
self.output_shape[label][self.axis[label]] = self.num_chosen
self.output_shape[label] = tuple(self.output_shape[label])
self.initialization = True
def iterate(self):
super(ChooseData, self).iterate()
def augment(self, augmentation_num=0):
choice = None # This is messed up
for label, data_group in list(self.data_groups.items()):
# Wrote this function while half-asleep; revisit
input_data = data_group.augmentation_cases[augmentation_num]
if self.choices is None:
choices = np.arange(input_data.shape[self.axis[label]])
else:
choices = self.choices
if choice is None:
if self.random_sample:
choice = np.random.choice(choices, self.num_chosen, replace=False)
else:
idx = [x % len(choices) for x in range(self.iteration, self.iteration + self.num_chosen)]
choice = choices[idx]
# Temporary
if input_data.shape[-1] == 6:
choice = choice.tolist()
choice = list(range(4)) + choice
choice_slice = [slice(None)] * (len(input_data.shape))
choice_slice[self.axis[label]] = choice
# Currently only works if applied to channels; revisit
            data_group.augmentation_cases[augmentation_num + 1] = input_data[tuple(choice_slice)]  # index with a tuple; newer numpy rejects indexing with a list of slices
data_group.augmentation_strings[augmentation_num + 1] = data_group.augmentation_strings[augmentation_num] + self.augmentation_string + str(choice).strip('[]').replace(' ', '')
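# --- Illustrative sketch (not part of the original module) ---
# A standalone illustration of the channel-selection slicing that ChooseData.augment
# performs: pick a subset of indices along one axis with np.random.choice, then index
# the array with a per-axis slice list converted to a tuple. The shape and axis below
# are made up for the example.
def _demo_choose_data_slicing():
    data = np.random.rand(1, 8, 8, 4)  # e.g. [batch, x, y, channel]
    axis, num_chosen = -1, 2
    choice = np.random.choice(data.shape[axis], num_chosen, replace=False)
    choice_slice = [slice(None)] * data.ndim
    choice_slice[axis] = choice
    return data[tuple(choice_slice)]  # shape (1, 8, 8, 2)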
|
11488982
|
from __future__ import print_function
import unittest
import numpy as np
from simpegEM1D import (
GlobalEM1DProblemTD, GlobalEM1DSurveyTD,
get_vertical_discretization_time
)
from SimPEG import (
regularization, Inversion, InvProblem,
DataMisfit, Utils, Mesh, Maps, Optimization,
Tests
)
from simpegEM1D import skytem_HM_2015
wave = skytem_HM_2015()
np.random.seed(41)
class GlobalEM1DTD(unittest.TestCase):
def setUp(self, parallel=True):
time = np.logspace(-6, -3, 21)
hz = get_vertical_discretization_time(
time, facter_tmax=0.5, factor_tmin=10.
)
time_input_currents = wave.current_times[-7:]
input_currents = wave.currents[-7:]
n_sounding = 5
dx = 20.
hx = np.ones(n_sounding) * dx
mesh = Mesh.TensorMesh([hx, hz], x0='00')
inds = mesh.gridCC[:, 1] < 25
inds_1 = mesh.gridCC[:, 1] < 50
sigma = np.ones(mesh.nC) * 1./100.
sigma[inds_1] = 1./10.
sigma[inds] = 1./50.
sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten()
mSynth = np.log(sigma_em1d)
x = mesh.vectorCCx
y = np.zeros_like(x)
z = np.ones_like(x) * 30.
rx_locations = np.c_[x, y, z]
src_locations = np.c_[x, y, z]
topo = np.c_[x, y, z-30.].astype(float)
n_sounding = rx_locations.shape[0]
rx_type_global = np.array(
["dBzdt"], dtype=str
).repeat(n_sounding, axis=0)
field_type_global = np.array(
['secondary'], dtype=str
).repeat(n_sounding, axis=0)
wave_type_global = np.array(
['general'], dtype=str
).repeat(n_sounding, axis=0)
time_global = [time for i in range(n_sounding)]
src_type_global = np.array(
["CircularLoop"], dtype=str
).repeat(n_sounding, axis=0)
a_global = np.array(
[13.], dtype=float
).repeat(n_sounding, axis=0)
input_currents_global = [
input_currents for i in range(n_sounding)
]
time_input_currents_global = [
time_input_currents for i in range(n_sounding)
]
mapping = Maps.ExpMap(mesh)
survey = GlobalEM1DSurveyTD(
rx_locations=rx_locations,
src_locations=src_locations,
topo=topo,
time=time_global,
src_type=src_type_global,
rx_type=rx_type_global,
field_type=field_type_global,
wave_type=wave_type_global,
a=a_global,
input_currents=input_currents_global,
time_input_currents=time_input_currents_global
)
problem = GlobalEM1DProblemTD(
mesh, sigmaMap=mapping, hz=hz, parallel=parallel, n_cpu=2
)
problem.pair(survey)
survey.makeSyntheticData(mSynth)
# Now set up the problem to do some minimization
dmis = DataMisfit.l2_DataMisfit(survey)
reg = regularization.Tikhonov(mesh)
opt = Optimization.InexactGaussNewton(
maxIterLS=20, maxIter=10, tolF=1e-6,
tolX=1e-6, tolG=1e-6, maxIterCG=6
)
invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
inv = Inversion.BaseInversion(invProb)
self.inv = inv
self.reg = reg
self.p = problem
self.mesh = mesh
self.m0 = mSynth
self.survey = survey
self.dmis = dmis
def test_misfit(self):
passed = Tests.checkDerivative(
lambda m: (
self.survey.dpred(m),
lambda mx: self.p.Jvec(self.m0, mx)
),
self.m0,
plotIt=False,
num=3
)
self.assertTrue(passed)
def test_adjoint(self):
# Adjoint Test
v = np.random.rand(self.mesh.nC)
w = np.random.rand(self.survey.dobs.shape[0])
wtJv = w.dot(self.p.Jvec(self.m0, v))
vtJtw = v.dot(self.p.Jtvec(self.m0, w))
passed = np.abs(wtJv - vtJtw) < 1e-10
print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
self.assertTrue(passed)
def test_dataObj(self):
passed = Tests.checkDerivative(
lambda m: [self.dmis(m), self.dmis.deriv(m)],
self.m0,
plotIt=False,
num=3
)
self.assertTrue(passed)
class GlobalEM1DTD_Height(unittest.TestCase):
def setUp(self, parallel=True):
time = np.logspace(-6, -3, 21)
time_input_currents = wave.current_times[-7:]
input_currents = wave.currents[-7:]
hz = get_vertical_discretization_time(
time, facter_tmax=0.5, factor_tmin=10.
)
hz = np.r_[1.]
n_sounding = 10
dx = 20.
hx = np.ones(n_sounding) * dx
e = np.ones(n_sounding)
mSynth = np.r_[e*np.log(1./100.), e*20]
x = np.arange(n_sounding)
y = np.zeros_like(x)
z = np.ones_like(x) * 30.
rx_locations = np.c_[x, y, z]
src_locations = np.c_[x, y, z]
topo = np.c_[x, y, z-30.].astype(float)
rx_type_global = np.array(
["dBzdt"], dtype=str
).repeat(n_sounding, axis=0)
field_type_global = np.array(
['secondary'], dtype=str
).repeat(n_sounding, axis=0)
wave_type_global = np.array(
['general'], dtype=str
).repeat(n_sounding, axis=0)
time_global = [time for i in range(n_sounding)]
src_type_global = np.array(
["CircularLoop"], dtype=str
).repeat(n_sounding, axis=0)
a_global = np.array(
[13.], dtype=float
).repeat(n_sounding, axis=0)
input_currents_global = [
input_currents for i in range(n_sounding)
]
time_input_currents_global = [
time_input_currents for i in range(n_sounding)
]
wires = Maps.Wires(('sigma', n_sounding),('h', n_sounding))
expmap = Maps.ExpMap(nP=n_sounding)
sigmaMap = expmap * wires.sigma
survey = GlobalEM1DSurveyTD(
rx_locations=rx_locations,
src_locations=src_locations,
topo=topo,
time=time_global,
src_type=src_type_global,
rx_type=rx_type_global,
field_type=field_type_global,
wave_type=wave_type_global,
a=a_global,
input_currents=input_currents_global,
time_input_currents=time_input_currents_global,
half_switch=True
)
problem = GlobalEM1DProblemTD(
[], sigmaMap=sigmaMap, hMap=wires.h, hz=hz, parallel=parallel, n_cpu=2
)
problem.pair(survey)
survey.makeSyntheticData(mSynth)
# Now set up the problem to do some minimization
mesh = Mesh.TensorMesh([int(n_sounding * 2)])
dmis = DataMisfit.l2_DataMisfit(survey)
reg = regularization.Tikhonov(mesh)
opt = Optimization.InexactGaussNewton(
maxIterLS=20, maxIter=10, tolF=1e-6,
tolX=1e-6, tolG=1e-6, maxIterCG=6
)
invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
inv = Inversion.BaseInversion(invProb)
self.inv = inv
self.reg = reg
self.p = problem
self.mesh = mesh
self.m0 = mSynth
self.survey = survey
self.dmis = dmis
def test_misfit(self):
passed = Tests.checkDerivative(
lambda m: (
self.survey.dpred(m),
lambda mx: self.p.Jvec(self.m0, mx)
),
self.m0,
plotIt=False,
num=3
)
self.assertTrue(passed)
def test_adjoint(self):
# Adjoint Test
v = np.random.rand(self.mesh.nC)
w = np.random.rand(self.survey.dobs.shape[0])
wtJv = w.dot(self.p.Jvec(self.m0, v))
vtJtw = v.dot(self.p.Jtvec(self.m0, w))
passed = np.abs(wtJv - vtJtw) < 1e-10
print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
self.assertTrue(passed)
def test_dataObj(self):
passed = Tests.checkDerivative(
lambda m: [self.dmis(m), self.dmis.deriv(m)],
self.m0,
plotIt=False,
num=3
)
self.assertTrue(passed)
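# --- Illustrative sketch (not part of the original tests) ---
# The test_adjoint methods above check the identity w.(J v) == v.(J^T w) up to
# round-off. A tiny standalone version of the same check, with an explicit dense
# matrix standing in for the sensitivity J (all values below are made up):
def _demo_adjoint_identity():
    rng = np.random.RandomState(0)
    J = rng.rand(7, 5)            # stand-in for the sensitivity matrix
    v = rng.rand(5)               # model-space vector
    w = rng.rand(7)               # data-space vector
    wtJv = w.dot(J.dot(v))        # w . (J v)
    vtJtw = v.dot(J.T.dot(w))     # v . (J^T w)
    return abs(wtJv - vtJtw) < 1e-10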
if __name__ == '__main__':
unittest.main()
|
11489084
|
import torch
import torch.utils.data as data
import os
import urllib.request
import zipfile
import json
from survae.data import TrainValidTestLoader, DATA_PATH
class Text8(TrainValidTestLoader):
def __init__(self, root=DATA_PATH, seq_len=256, download=True):
self.train = Text8Dataset(root, seq_len=seq_len, split='train', download=download)
self.valid = Text8Dataset(root, seq_len=seq_len, split='valid')
self.test = Text8Dataset(root, seq_len=seq_len, split='test')
class Text8Dataset(data.Dataset):
"""
    The text8 dataset consists of 100M characters (with vocab size 27).
We here split the dataset into (90M, 5M, 5M) characters for
(train, val, test) as in [1,2,3].
The sets are then split into chunks of equal length as specified by `seq_len`.
The default is 256, corresponding to what was used in [1]. Other choices
include 180, as [2] reports using.
[1] Discrete Flows: Invertible Generative Models of Discrete Data
Tran et al., 2019, https://arxiv.org/abs/1905.10347
[2] Architectural Complexity Measures of Recurrent Neural Networks
Zhang et al., 2016, https://arxiv.org/abs/1602.08210
[3] Subword Language Modeling with Neural Networks
Mikolov et al., 2013, http://www.fit.vutbr.cz/~imikolov/rnnlm/char.pdf
"""
def __init__(self, root=DATA_PATH, seq_len=256, split='train', download=False):
assert split in {'train', 'valid', 'test'}
self.root = os.path.join(os.path.expanduser(root), 'text8')
self.seq_len = seq_len
self.split = split
if not self._check_raw_exists():
if download:
self.download()
else:
raise RuntimeError('Dataset not found. You can use download=True to download it.')
if not self._check_processed_exists(split):
self._preprocess_data(split)
# Load data
self.data = torch.load(self.processed_filename(split))
# Load lookup tables
char2idx_file = os.path.join(self.root, 'char2idx.json')
idx2char_file = os.path.join(self.root, 'idx2char.json')
with open(char2idx_file) as f:
self.char2idx = json.load(f)
with open(idx2char_file) as f:
self.idx2char = json.load(f)
def __getitem__(self, index):
return self.data[index], self.seq_len
def __len__(self):
return len(self.data)
def s2t(self, s):
assert len(s) == self.seq_len, 'String not of length {}'.format(self.seq_len)
return torch.tensor([self.char2idx[char] for char in s])
def t2s(self, t):
return ''.join([self.idx2char[t[i]] if t[i] < len(self.idx2char) else ' ' for i in range(self.seq_len)])
def text2tensor(self, text):
if isinstance(text, str):
tensor = self.s2t(text).unsqueeze(0)
else:
tensor = torch.stack([self.s2t(s) for s in text], dim=0)
return tensor.unsqueeze(1) # (B, 1, L)
def tensor2text(self, tensor):
assert tensor.dim() == 3, 'Tensor should have shape (batch_size, 1, {})'.format(self.seq_len)
assert tensor.shape[1] == 1, 'Tensor should have shape (batch_size, 1, {})'.format(self.seq_len)
assert tensor.shape[2] == self.seq_len, 'Tensor should have shape (batch_size, 1, {})'.format(self.seq_len)
bsize = tensor.shape[0]
text = [self.t2s(tensor[b].squeeze(0)) for b in range(bsize)]
return text
def _preprocess_data(self, split):
# Read raw data
rawdata = zipfile.ZipFile(self.local_filename).read('text8').decode('utf-8')
# Extract vocab
vocab = sorted(list(set(rawdata)))
char2idx, idx2char = {}, []
for i, char in enumerate(vocab):
char2idx[char] = i
idx2char.append(char)
# Extract subset
if split == 'train':
rawdata = rawdata[:90000000]
elif split == 'valid':
rawdata = rawdata[90000000:95000000]
elif split == 'test':
rawdata = rawdata[95000000:]
# Encode characters
data = torch.tensor([char2idx[char] for char in rawdata])
# Split into chunks
data = data[:self.seq_len*(len(data)//self.seq_len)]
data = data.reshape(-1, 1, self.seq_len)
# Save processed data
torch.save(data, self.processed_filename(split))
# Save lookup tables
char2idx_file = os.path.join(self.root, 'char2idx.json')
idx2char_file = os.path.join(self.root, 'idx2char.json')
with open(char2idx_file, 'w') as f:
json.dump(char2idx, f)
with open(idx2char_file, 'w') as f:
json.dump(idx2char, f)
@property
def local_filename(self):
return os.path.join(self.root, 'text8.zip')
def processed_filename(self, split):
return os.path.join(self.root, '{}.pt'.format(split))
def download(self):
if not os.path.exists(self.root):
os.makedirs(self.root)
print('Downloading text8...')
url = 'http://mattmahoney.net/dc/text8.zip'
print('Downloading from {}...'.format(url))
urllib.request.urlretrieve(url, self.local_filename)
print('Saved to {}'.format(self.local_filename))
def _check_raw_exists(self):
return os.path.exists(self.local_filename)
def _check_processed_exists(self, split):
return os.path.exists(self.processed_filename(split))
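# --- Illustrative sketch (not part of the original module) ---
# A minimal standalone version of the chunking done in _preprocess_data above:
# encode a character string with a vocabulary lookup, truncate it to a multiple
# of seq_len and reshape it to (num_chunks, 1, seq_len). The toy string and
# seq_len below are made up for the example.
def _demo_chunking(raw='abcabcabcabc', seq_len=4):
    vocab = sorted(set(raw))
    char2idx = {c: i for i, c in enumerate(vocab)}
    data = torch.tensor([char2idx[c] for c in raw])
    data = data[:seq_len * (len(data) // seq_len)]
    return data.reshape(-1, 1, seq_len)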
|
11489086
|
from django.conf.urls import url
from django.urls import path
from . import views
from archeutils import views as arche_views
app_name = 'shps'
urlpatterns = [
url(
r'^ids$',
arche_views.get_ids,
name='get_ids'
),
url(
r'^arche$',
arche_views.project_as_arche_graph,
name='project_as_arche'
),
url(
r'^arche-title-img$',
arche_views.get_title_img,
name='arche_title_img'
),
url(r'^where-was/$', views.WhereWas.as_view(), name='where_was'),
url(r'^shapes/$', views.TempSpatialListView.as_view(), name='browse_shapes'),
url(
r'^shape/arche/(?P<pk>[0-9]+)$',
arche_views.res_as_arche_graph,
name='arche_md'
),
path('permalink/<unique>/', views.PermaLinkView.as_view(), name='permalink-view'),
url(r'^shape/detail/(?P<pk>[0-9]+)$', views.TempSpatialDetailView.as_view(),
name='shape_detail'),
url(r'^shape/delete/(?P<pk>[0-9]+)$', views.TempSpatialDelete.as_view(),
name='shape_delete'),
url(r'^shape/edit/(?P<pk>[0-9]+)$', views.TempSpatialUpdate.as_view(),
name='shape_edit'),
url(r'^shape/create/$', views.TempSpatialCreate.as_view(),
name='shape_create'),
url(r'^sources/$', views.SourceListView.as_view(), name='browse_sources'),
url(r'^source/detail/(?P<pk>[0-9]+)$', views.SourceDetailView.as_view(),
name='source_detail'),
url(r'^source/delete/(?P<pk>[0-9]+)$', views.SourceDelete.as_view(),
name='source_delete'),
url(r'^source/edit/(?P<pk>[0-9]+)$', views.SourceUpdate.as_view(),
name='source_edit'),
url(r'^source/create/$', views.SourceCreate.as_view(),
name='source_create')
]
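# --- Illustrative sketch (not part of the original module) ---
# With app_name = 'shps' and the named patterns above, routes can be resolved by
# name with django.urls.reverse inside a configured project; the pk below is a
# made-up example value.
def _demo_reverse_shape_detail():
    from django.urls import reverse
    return reverse('shps:shape_detail', kwargs={'pk': 1})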
|
11489115
|
import shutil
from os import path
from pathlib import Path
from states.binfetchers import BinFetcher
class LocalFetcher(BinFetcher):
def __fetch_impl__(self, bin_info, bin_file):
if not path.exists(bin_file):
raise Exception("missing bin: {}".format(bin_file))
class FSCopyFetcher(BinFetcher):
def __fetch_impl__(self, bin_info, bin_file):
if "src" not in bin_info:
raise Exception("Missing src info")
src = bin_info["src"]
if not path.exists(src):
raise Exception("missing bin: {}".format(src))
self.app.detail("Copying {}".format(src))
shutil.copy(src, bin_file)
p = Path(bin_file).stat()
self.app.detail("{:,d} bytes copied".format(p.st_size))
|
11489120
|
from datetime import datetime
class Person:
def __init__(self, name, birthday):
self.name = name
self.birthday = birthday
self.year_detector()
def year_detector(self):
"""
Метод определения возраста на основе полной даты рождения + datetime
"""
now = datetime.now()
year_now = int(now.strftime("%Y"))
year, _, _ = map(int, self.birthday.split("-"))
self.year = year_now - year
def info(self):
print(
"\n*Класс персона*\nФИО: "
+ self.name
+ "\n"
+ "Дата рождения: "
+ self.birthday
)
def years_old(self):
print("Возраст: " + str(self.year))
def years_old_int(self):
return self.year
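# --- Illustrative usage sketch (not part of the original module) ---
# The name and date below are made up; the birthday is expected in "YYYY-MM-DD"
# form, since year_detector() splits the string on "-".
if __name__ == '__main__':
    person = Person("Ivan Ivanov", "1990-05-17")
    person.info()
    person.years_old()
    print(person.years_old_int())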
|
11489154
|
from scipy.stats import mannwhitneyu,wilcoxon
import numpy as np
from scipy.io import mmread
import pandas as pd
X = mmread('RFiles/all_data.mtx')
X = X.tocsr()
celllabels = np.load('Notebooks/meta/celllabels.npy')
isCSF = np.load('Notebooks/meta/isCSF.npy')
isMS = np.load('Notebooks/meta/isMS.npy')
logX = np.log10(1+X.todense())
scaling_factor = logX.mean(axis=1)
norm_X = logX - scaling_factor.reshape(len(scaling_factor), 1)
# def MannWhitneyUTest(norm_X, idx1, idx2):
# res = []
# for i in range(X.shape[1]):
# x= np.asarray(X[idx1,i].todense()).ravel()
# y= np.asarray(X[idx2,i].todense()).ravel()
# if(len(np.unique(np.concatenate([x,y])))==1):
# res.append([-1,-1])
# else:
# res.append(mannwhitneyu(x,y,alternative = 'two-sided'))
# stat = np.asarray([x[0] for x in res])
# pvalue = np.asarray([x[1] for x in res])
# return(stat,pvalue)
def MannWhitneyUTest(X, idx1, idx2):
res = []
for i in range(X.shape[1]):
x= np.asarray(X[idx1,i]).ravel()
y= np.asarray(X[idx2,i]).ravel()
if(len(np.unique(np.concatenate([x,y])))==1):
res.append([-1,-1])
else:
res.append(mannwhitneyu(x,y,alternative = 'two-sided'))
stat = np.asarray([x[0] for x in res])
pvalue = np.asarray([x[1] for x in res])
return(stat,pvalue)
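# --- Illustrative sketch (not part of the original analysis) ---
# What a single Mann-Whitney U comparison looks like on toy data (values made up);
# MannWhitneyUTest above simply repeats this per column and collects stat/pvalue.
def _demo_single_mannwhitney():
    x = np.array([1.2, 0.0, 3.1, 0.4, 2.2])
    y = np.array([0.1, 0.0, 0.3, 0.2, 0.5])
    return mannwhitneyu(x, y, alternative='two-sided')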
celltypes = ['B1', 'B2', 'CD4', 'CD8a', 'CD8n', 'Gran', 'MegaK', 'Mono', 'NK1',
'NK2', 'Tdg', 'Tregs', 'mDC1', 'mDC2', 'ncMono', 'pDC', 'plasma']
for i in celltypes:
idx1 = (celllabels==i) & isCSF & isMS
idx2 = (celllabels==i) & isCSF & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.norm.MSinCSF.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & isCSF & (isMS == False)
idx2 = (celllabels==i) & (isCSF == False) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.norm.tissue_control.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & (isCSF == False) & isMS
idx2 = (celllabels==i) & (isCSF == False) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.norm.MSinPBMC.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & isMS
idx2 = (celllabels==i) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.norm.MS.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i)
idx2 = (celllabels!=i)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.norm.allclusters.%s.csv'%i)
norm_X = norm_X[celllabels=='CD4',:]
# batchid = batchid[celllabels=='CD4']  # NOTE: batchid is never defined in this script; commented out to avoid a NameError
isCSF = isCSF[celllabels=='CD4']
isMS = isMS[celllabels=='CD4']
celllabels = np.load('Notebooks/meta/CD4.clusters.npy')
celltypes = np.unique(celllabels)
for i in celltypes:
idx1 = (celllabels==i) & isCSF & isMS
idx2 = (celllabels==i) & isCSF & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.CD4.MSinCSF.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & isCSF & (isMS == False)
idx2 = (celllabels==i) & (isCSF == False) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.CD4.tissue_control.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & (isCSF == False) & isMS
idx2 = (celllabels==i) & (isCSF == False) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.CD4.MSinPBMC.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i) & isMS
idx2 = (celllabels==i) & (isMS == False)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.CD4.MS.%s.csv'%i)
for i in celltypes:
idx1 = (celllabels==i)
idx2 = (celllabels!=i)
if (np.sum(idx1)>10) and (np.sum(idx2)>10):
stat,pvalue = MannWhitneyUTest(norm_X, idx1, idx2)
clusterid = np.repeat(i, len(stat))
res = pd.DataFrame([clusterid,stat,pvalue],index=['clusterid','stat','pvalue']).T
res.to_csv('DE/wilcoxon/MannWhitneyU.CD4.allclusters.%s.csv'%i)
|
11489162
|
from . import gainesville
from . import cheminf
from . import jena
from girder.utility.model_importer import ModelImporter
from molecules.constants import PluginSettings
def upload_molecule(mol):
settings = ModelImporter.model('setting')
uri_base = settings.get(PluginSettings.SEMANTIC_URI_BASE)
if uri_base is None:
uri_base = 'http://localhost:8888'
uri_base = uri_base.rstrip('/')
gainesville_graph = gainesville.create_molecule_graph(uri_base, mol)
gainesville_id = '%s_gainesville' % mol['_id']
jena.upload_rdf(gainesville_id, gainesville_graph)
cheminf_graph = cheminf.create_molecule_graph(uri_base, mol)
cheminf_id = '%s_cheminf' % mol['_id']
jena.upload_rdf(cheminf_id, cheminf_graph)
|
11489175
|
import numpy as np
import cv2
import random
import colorsys
import collections
import time
# from playsound import playsound
from pprint import pprint
# # Per-class counters for the tracked targets, e.g.:
# OrderedDict([('river_boat', {'down': 0, 'left': 0, 'right': 0, 'up': 0}),
# ('speedboat', {'down': 0, 'left': 1, 'right': 0, 'up': 0})])
counter_dict = collections.OrderedDict()
# # Counting memory (prevents double counting); keep the range argument on the same order of magnitude as the number of targets in your video
counter_memory = dict.fromkeys(range(54000), 0)
COLORS_10 =[(144,238,144),(178, 34, 34),(221,160,221),( 0,255, 0),( 0,128, 0),(210,105, 30),(220, 20, 60),
(192,192,192),(255,228,196),( 50,205, 50),(139, 0,139),(100,149,237),(138, 43,226),(238,130,238),
(255, 0,255),( 0,100, 0),(127,255, 0),(255, 0,255),( 0, 0,205),(255,140, 0),(255,239,213),
(199, 21,133),(124,252, 0),(147,112,219),(106, 90,205),(176,196,222),( 65,105,225),(173,255, 47),
(255, 20,147),(219,112,147),(186, 85,211),(199, 21,133),(148, 0,211),(255, 99, 71),(144,238,144),
(255,255, 0),(230,230,250),( 0, 0,255),(128,128, 0),(189,183,107),(255,255,224),(128,128,128),
(105,105,105),( 64,224,208),(205,133, 63),( 0,128,128),( 72,209,204),(139, 69, 19),(255,245,238),
(250,240,230),(152,251,152),( 0,255,255),(135,206,235),( 0,191,255),(176,224,230),( 0,250,154),
(245,255,250),(240,230,140),(245,222,179),( 0,139,139),(143,188,143),(255, 0, 0),(240,128,128),
(102,205,170),( 60,179,113),( 46,139, 87),(165, 42, 42),(178, 34, 34),(175,238,238),(255,248,220),
(218,165, 32),(255,250,240),(253,245,230),(244,164, 96),(210,105, 30)]
### Class names printed on the info panel
class_names = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
#### Enable restricted-area (warning zone) detection
#warning_area_monitoring_switch = True
warning_area_monitoring_switch = False
### Warning zone: vertex coordinates listed clockwise, from left to right
if warning_area_monitoring_switch:
    # warning-zone vertex coordinates (should be consistent with the counting line's orientation)
polys = np.array([[800, 150],[1100, 150],[1050, 230],[750, 230],], np.int32)
poly_points = polys.tolist()
polys = polys.reshape((-1,1,2))
else:
pass
### Font style
font_style = cv2.FONT_HERSHEY_SIMPLEX
frame_num =0
############### Two endpoints of the counting line ##################
# approximately vertical lines
# line = [(680, 100), (265, 720)]
# line = [(812, 89), (692, 1078)]
# line = [(748, 218), (656, 756)]
# line = [(920, 0), (920, 1080)] # HPJ001
# line = [(623, 0), (623, 1080)] # HPJ002
#line = [(922, 0), (922, 1200)] # WT002 WT001
# # approximately horizontal lines
# line = [(583, 839), (1906, 652)] # WSK001X24
# line = [(0, 600), (1920, 600)] # HZNH1
# line = [(0, 830), (1920, 830)] # HZNH2
# line = [(0, 530), (2100, 530)] # for vehicle.mp4
line = [(0, 430), (2100, 430)] # for people.mp4
##### Roughly determine whether the counting line is vertical or horizontal
def assess_horizontal_or_vertical(line):
hrz_difference = line[1][0]-line[0][0]
vtc_difference = line[1][1]-line[0][1]
squared_difference = hrz_difference ** 2 - vtc_difference ** 2
if squared_difference >= 0:
horizontal_true_vertical_false = True
else:
horizontal_true_vertical_false = False
return horizontal_true_vertical_false
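# --- Illustrative check (not part of the original script) ---
# For the default counting line above, the horizontal span (2100 px) dominates the
# vertical span (0 px), so the helper reports a horizontal line (True); the second
# line below is one of the commented-out near-vertical examples and returns False.
def _demo_assess_horizontal_or_vertical():
    assert assess_horizontal_or_vertical([(0, 430), (2100, 430)]) is True
    assert assess_horizontal_or_vertical([(920, 0), (920, 1080)]) is False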
##### Whether the current counting line is horizontal or vertical; set the value manually in special cases: horizontal (True) or vertical (False)
horizontal_True_vertical_False = assess_horizontal_or_vertical(line)
def draw_data_panel(img, bboxes, fps):
"""Draw the panel.
Args:
img: image
bbox: bbox
identities: identities
offset: offset
Returns:
The new image.
"""
if bboxes is None:
target_num = 0
else:
target_num = len(bboxes)
global counter_dict
global font_style
num_recorded_class = len(counter_dict)
    # # Layer 1 (old version): draw the info-panel rectangle first to keep a translucent background
    # alpha = 0.3
    # image_h, image_w, _ = img.shape
    # overlay = img.copy() # copy of img, used for the blended overlay
    # cv2.rectangle(img, (0,0), (image_w//3 ,num_recorded_class * 40 + 70), (32,36,46), thickness=-1)
    # img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0) # blend the copy back in
    # Layer 1: draw the info-panel rectangle first to keep a translucent background
    alpha = 0.3
    image_h, image_w, _ = img.shape
    overlay = img.copy() # copy of img, used for the blended overlay
    cv2.rectangle(img, (0,0), (image_w//3 -70 ,num_recorded_class * 40 + 100), (32,36,46), thickness=-1)
    img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0) # blend the copy back in
    # Layer 2: info-panel text
vertical_increment = 20
vertical_correction = 20
horizontal_increment = image_w // 5
up_or_left_sum = 0
down_or_right_sum = 0
text_thickness = int(0.6 * (image_h + image_w) / 1000)
font_scale = 0.5
    # vertical offset of the summary rows
    sum_increment = num_recorded_class * 20 +40
    ################ Fill in the per-class counting info ####################
# counter_dict = { speedboat {'up': 0, 'down': 0, 'left': 0, 'right': 0}, ..., river_boat {'up': 0, 'down': 0, 'left': 0, 'right': 0} }
for key,values in counter_dict.items():
        vertical_correction += vertical_increment # for each new key/value pair move down by vertical_increment
        # class name of the detected object (the "type" column)
cv2.putText(img, " {}".format(key) ,(0, vertical_correction), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # counting line is placed approximately horizontally
if horizontal_True_vertical_False:
            # "up" count
cv2.putText(img,"{}".format(values['up']),(horizontal_increment, vertical_correction), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
            # "down" count
cv2.putText(img,"{}".format(values['down']),(horizontal_increment+50, vertical_correction), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
            # cumulative up/down counts
up_or_left_sum += values['up']
down_or_right_sum += values['down']
        # counting line is placed approximately vertically
else:
            # "left" count
cv2.putText(img,"{}".format(values['left']),(horizontal_increment, vertical_correction), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
            # "right" count
cv2.putText(img,"{}".format(values['right']),(horizontal_increment+50, vertical_correction), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
            # cumulative left/right counts
up_or_left_sum += values['left']
down_or_right_sum += values['right']
    ################ Direction-independent info-panel rows ####################
    # bottom left: cumulative count label
cv2.putText(img, " cumulative count" ,(0, sum_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # bottom left: current value (current_targets)
cv2.putText(img, " target number" ,(0, sum_increment+20), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # bottom left: current FPS
cv2.putText(img, " fps" ,(0, sum_increment+40), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # # bottom left: time
# cv2.putText(img, " time" ,(0, sum_increment+60), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # bottom left: detector (backbone network)
cv2.putText(img, " detector" ,(0, sum_increment+60), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # bottom left: author credit
cv2.putText(img, " author" ,(0, sum_increment+80), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    # top left: per-class count header
cv2.putText(img," type",(0, vertical_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
if horizontal_True_vertical_False:
# up_count
cv2.putText(img,"up",(horizontal_increment, vertical_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
# down_count
cv2.putText(img,"down",(horizontal_increment+50, vertical_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current cumulative up/down count values
cv2.putText(img,"{}".format(up_or_left_sum),(horizontal_increment, sum_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{}".format(down_or_right_sum),(horizontal_increment+50, sum_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current number of targets (current_targets)
cv2.putText(img,"{}".format(target_num),(horizontal_increment , sum_increment + 20 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{}".format(target_num),(horizontal_increment + 50, sum_increment + 20 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current FPS
cv2.putText(img,"{:0.1f}".format(fps),(horizontal_increment , sum_increment + 40 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{:0.1f}".format(fps),(horizontal_increment + 50, sum_increment + 40), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # # current time
# cv2.putText(img,"{}".format( time.strftime("%Y%m%d %H:%M:%S", time.localtime()) ),(horizontal_increment , sum_increment + 60 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # detector backbone
cv2.putText(img,"{}".format("CenterNet"),(horizontal_increment , sum_increment + 60 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # author credit
cv2.putText(img,"{}".format("Clemente420"),(horizontal_increment , sum_increment + 80 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
else:
# left_count
cv2.putText(img,"left",(horizontal_increment, vertical_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
# right_count
cv2.putText(img,"right",(horizontal_increment+50, vertical_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current cumulative left/right count values
cv2.putText(img,"{}".format(up_or_left_sum),(horizontal_increment, sum_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{}".format(down_or_right_sum),(horizontal_increment+50, sum_increment), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current number of targets (current_targets)
cv2.putText(img,"{}".format(target_num),(horizontal_increment , sum_increment + 20 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{}".format(target_num),(horizontal_increment + 50, sum_increment + 20 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # current FPS
cv2.putText(img,"{:0.1f}".format(fps),(horizontal_increment , sum_increment + 40 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
cv2.putText(img,"{:0.1f}".format(fps),(horizontal_increment + 50, sum_increment + 40), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # # current time
# cv2.putText(img,"{}".format( time.strftime("%Y%m%d %H:%M:%S", time.localtime()) ),(horizontal_increment , sum_increment + 60 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # detector backbone
cv2.putText(img,"{}".format("CenterNet"),(horizontal_increment , sum_increment + 60 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
        # author credit
cv2.putText(img,"{}".format("Clemente420"),(horizontal_increment , sum_increment + 80 ), font_style, font_scale, (8,196,254), thickness = text_thickness, lineType=cv2.LINE_AA)
    ################## Log the number of currently detected targets #####################
    # global frame_num
    # frame_num += 1
    # print("target_num {} {}\n".format(frame_num, target_num ))
    ########### Direction-independent info-panel rows #######################
    # current FPS
    # time
    # author credit
return img
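# --- Illustrative usage sketch (not part of the original script) ---
# draw_data_panel only needs an image, the current detections and an fps value;
# the blank frame below is made up so the call can be exercised in isolation.
def _demo_draw_data_panel():
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    return draw_data_panel(frame, bboxes=[], fps=25.0)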
def draw_line_and_area(img,image_h, image_w,line=line):
"""Draw the panel.
Args:
img: image
bbox: bbox
identities: identities
offset: offset
Returns:
The new image.
"""
#############################################################
    # draw the warning zone
if warning_area_monitoring_switch:
cv2.polylines(img, [polys], isClosed=True, color=(0,0,255), thickness=1, lineType=cv2.LINE_AA)
else:
pass
#############################################################
    # decide whether counting is horizontal or vertical, then draw the line
    if horizontal_True_vertical_False:
        cv2.line(img, line[0], line[1], (8,196,254), thickness=2, lineType=cv2.LINE_AA) # coordinates must be integers; use // for division
    else:
        cv2.line(img, line[0], line[1], (8,196,254), thickness=2, lineType=cv2.LINE_AA) # coordinates must be integers; use // for division
return img
# Check whether a point lies inside the warning zone
def is_point_in(x, y, polygon_points):
count = 0
x1, y1 = polygon_points[0]
    x1_part = (y1 > y) or ((x1 - x > 0) and (y1 == y)) # which half-plane x1 falls in
x2, y2 = '', '' # points[1]
polygon_points.append((x1, y1))
for point in polygon_points[1:]:
x2, y2 = point
        x2_part = (y2 > y) or ((x2 > x) and (y2 == y)) # which half-plane x2 falls in
if x2_part == x1_part:
x1, y1 = x2, y2
continue
mul = (x1 - x)*(y2 - y) - (x2 - x)*(y1 - y)
        if mul > 0: # cross product > 0: counter-clockwise
count += 1
elif mul < 0:
count -= 1
x1, y1 = x2, y2
x1_part = x2_part
if count == 2 or count == -2:
return True
else:
return False
def intersect(A,B,C,D): # A and B are two recent centre points of a target box; C and D are the endpoints of the counting line
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
def ccw(A,B,C):
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
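# --- Illustrative check (not part of the original script) ---
# intersect() above is the standard CCW segment-intersection test used to detect
# when a track crosses the counting line; the coordinates below are made up to
# show one crossing and one non-crossing case.
def _demo_intersect():
    assert intersect((0, 0), (10, 10), (0, 10), (10, 0)) is True   # segments cross
    assert intersect((0, 0), (1, 1), (5, 5), (6, 4)) is False      # no crossing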
def draw_bbox(img, box, cls_name, identity=None, offset=(0,0)):
'''
draw box of an id
'''
x1,y1,x2,y2 = [int(i+offset[idx%2]) for idx,i in enumerate(box)]
# set color and label text
color = COLORS_10[identity%len(COLORS_10)] if identity is not None else COLORS_10[0]
label = '{} {}'.format(cls_name, identity)
# box text and bar
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
cv2.rectangle(img,(x1, y1),(x2,y2),color,2)
cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1)
cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 1, [255,255,255], 1)
return img
def draw_bboxes(img, bbox, identities=None, offset=(0,0)):
"""Draw the bboxes.
Args:
img: image
bbox: bbox
identities: identities
offset: offset
Returns:
The new image.
"""
for i,box in enumerate(bbox):
x1,y1,x2,y2 = [int(i) for i in box]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
# box text and bar
id = int(identities[i]) if identities is not None else 0
color = COLORS_10[id%len(COLORS_10)]
label = '{} {}'.format("object", id)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0]
cv2.rectangle(img,(x1, y1),(x2,y2),color,3)
cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1)
cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2)
return img
def add_cls_confi_draw_bboxes(img, bbox, identities=None, confidences=None, class_nums=None,points=None, offset=(0,0)):
image_h, image_w, _ = img.shape
num_classes = len(class_names)
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
    # colours are reversed: convert BGR to RGB, i.e. channel order (0,1,2) -> (2,1,0)
p0 = (0, 0)
p1 = (0, 0)
global font_style
for i,box in enumerate(bbox):
        # top-left and bottom-right corner coordinates of the detection box
        x1,y1,x2,y2 = [int(i) for i in box]
        # apply the offset correction
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        # tracking ID
        track_id = int(identities[i]) if identities is not None else 0
        # class name of this target
        class_name = class_names[class_nums[i]]
        # confidence of this target
        confidence = confidences[i]/100
        # colour of the detection box
        bbox_color = colors[class_nums[i]]
        # thickness of the detection box
        bbox_thick = int(0.7 * (image_h + image_w) / 500)
        fontScale = 0.5 # font and label-background size
#############################################################
        # per-class counting dictionary for this target
global counter_dict
        # if this class has no entry yet, initialise its counters to 0
if class_name not in counter_dict:
counter_dict[class_name] = {}
counter_dict[class_name]['up'] =0
counter_dict[class_name]['down'] =0
counter_dict[class_name]['left'] =0
counter_dict[class_name]['right'] =0
else:
pass
#############################################################
        # counting logic
        if len(points[track_id]) >= 3:
            p0 = points[track_id][-1] # p0 is the most recent centre point of the target
            p1 = points[track_id][-3] # p1 is the centre point from an earlier frame
            # decide the crossing: p0 is the current centre, p1 the earlier one
            if intersect(p0, p1, line[0], line[1]) and counter_memory[track_id] != 1:
                # counting along a horizontal line
                if horizontal_True_vertical_False:
                    if p0[1] < p1[1]: # newest y is smaller than the earlier y: moving up
                        counter_dict[class_name]['up'] += 1 # nested dictionary
                        counter_memory[track_id] = 1
                    elif p0[1] > p1[1]: # newest y is larger than the earlier y: moving down
                        counter_dict[class_name]['down'] += 1
                        counter_memory[track_id] = 1
                    else:
                        pass
                # counting along a vertical line (left/right)
                else:
                    if p0[0] < p1[0]: # newest x is smaller than the earlier x: moving left
                        counter_dict[class_name]['left'] += 1 # nested dictionary
                        counter_memory[track_id] = 1
                    elif p0[0] > p1[0]: # newest x is larger than the earlier x: moving right
                        counter_dict[class_name]['right'] += 1
                        counter_memory[track_id] = 1
                    else:
                        pass
#############################################################
            # warning-zone detection enabled?
if warning_area_monitoring_switch:
                # target has entered the warning zone
if is_point_in(p0[0], p0[1], poly_points):
                    # alarm sound TODO
                    # show a WARNING label in the restricted area
                    # cv2.putText(img,"WARNING!!!",(p0[0] - 30, p0[1] + 20), font_style, 0.6, (0,0,0), 1,lineType=cv2.LINE_AA)
                    # cv2.putText(img,"RESTRICTED AREA",(p0[0] - 70, p0[1] + 40), font_style, 0.6, (0,0,0), 1,lineType=cv2.LINE_AA)
                    cv2.arrowedLine(img, (p0[0]-80, p0[1]-10), (p0[0]-40, p0[1]-10), (0, 0, 0), thickness=2, line_type=cv2.LINE_AA, shift=0, tipLength=0.2)
                    cv2.arrowedLine(img, (p0[0]+80, p0[1]-10), (p0[0]+40, p0[1]-10), (0, 0, 0), thickness=2, line_type=cv2.LINE_AA, shift=0, tipLength=0.2)
                    cv2.putText(img,"RESTRICTED WARNING!",(771, 296), font_style, 1, (31,31,197), 2,lineType=cv2.LINE_AA)
                    cv2.rectangle(img,(746, 262),(1135 ,320),(31,31,197),3)
                    # fill the interior of the zone
                    overlay = img.copy() # copy of img for the blended overlay
                    cv2.fillPoly(overlay, [polys], color=(0,0,255))
                    alpha = 0.5
                    img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0) # blend the copy back in
                # the point is outside the warning zone
else:
pass
            # warning-zone detection disabled
else:
pass
#######################################################
            ##### TODO safe-distance warning #########################
# cv2.putText(img,"DISTANCE WARNING!",(705, 515), font_style, 1.5, (31,31,197), 3,lineType=cv2.LINE_AA)
# cv2.rectangle(img,(675, 432),(1200,574),(31,31,197),5)
#############################################################
        # detection box and its label
        # detection box
cv2.rectangle(img,(x1, y1),(x2,y2), bbox_color, bbox_thick)
        # label text and its background box
label = "{}: {}".format(class_name, confidence)
t_size = cv2.getTextSize(label, 0, fontScale, thickness=bbox_thick)[0]
        # draw the filled background box for the class label
        cv2.rectangle(img, (x1, y1 -3), (x1 + t_size[0], y1 - t_size[1] - 6), bbox_color, thickness=-1) # filled
cv2.putText(img,label,(x1,y1 - 5), font_style, fontScale, (0,0,0), bbox_thick//3,lineType=cv2.LINE_AA)
###############################################
        # draw the trajectory; the index is the corresponding tracking id; points = [ deque per tracking id, ... ], a list of deques
for j in range(1, len(points[track_id])):
if points[track_id][j - 1] is None or points[track_id][j] is None:
###############################################
            # TODO smooth the historical trajectory points with a moving average
            # points[track_id] = points[track_id] # moving-average smoothing
continue
            # thickness = int(np.sqrt(32 / float(j + 1)) * 2) # first point heaviest, later segments gradually thinner
            # draw the trajectory curve
cv2.line(img,(points[track_id][j-1]), (points[track_id][j]),(8,196,255),thickness = 2,lineType=cv2.LINE_AA)
        # connect the two tracked centre points with a straight line
        # cv2.line(img, p0, p1, (8,196,254), 5,lineType=cv2.LINE_AA) # connect these two centre points
        ## tracking sequence number
        # cv2.putText(img,"{}".format(track_id),(p0[0]+5, p0[1]+5), font_style, 0.8*fontScale, (32,36,46), 1,lineType=cv2.LINE_AA)
        # draw the centre point
        # cv2.circle(img, (p0), radius=3, color=(46, 36, 32), thickness=-1,lineType=cv2.LINE_AA)
        # small dot marking the tracked centre
cv2.circle(img, (p0), radius=3, color=(46, 36, 32), thickness=-1,lineType=cv2.LINE_AA)
        # tracking ID
cv2.putText(img,"{}".format(track_id),(p0[0]+5, p0[1]+5), font_style, 0.8*fontScale, (32,36,46), 1,lineType=cv2.LINE_AA)
    # # Old version: classic three-colour panel
    # # per-class count panel rectangle; draw it first, then the text
    # cv2.rectangle(img, (0,0), (horizontal_increment,image_h//15 * 5+20), (8,196,254), thickness=-1)
    # # up/left count panel rectangle
    # cv2.rectangle(img, (horizontal_increment,0), (horizontal_increment+50,image_h//15 * 5+20), (110,90,208), thickness=-1)
    # # down/right count panel rectangle
    # cv2.rectangle(img, (horizontal_increment+50,0), (horizontal_increment+100,image_h//15 * 5+20), (211,0,148), thickness=-1)
return img
def softmax(x):
assert isinstance(x, np.ndarray), "expect x be a numpy array"
x_exp = np.exp(x*5)
return x_exp/x_exp.sum()
def softmin(x):
assert isinstance(x, np.ndarray), "expect x be a numpy array"
x_exp = np.exp(-x)
return x_exp/x_exp.sum()
if __name__ == '__main__':
x = np.arange(10)/10.
x = np.array([0.5,0.5,0.5,0.6,1.])
y = softmax(x)
z = softmin(x)
import ipdb; ipdb.set_trace()
|
11489231
|
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# if you need to cache your requests to a DB
from datetime import timedelta
import requests_cache
from getting_realtime_data.data_retrieval import Data_Retrieval
from sending_orders.order_management import Order_Management
from management_of_position.position_management import Position_Management
from predefined_functions.initialisation import Initialisation
import time
from datetime import datetime, timedelta
from predefined_functions.defined_functionality import Defined_Functionality
import pandas as pd
import traceback
import matplotlib.pyplot as plt
import os
# the newest version, where you place a market order based on price movements of 5 or more and try to catch the trend
class Algo0:
def __init__(self, instrument_1, instrument_2, instrument_3, index):
self.set_of_outcomes = set()
logging.basicConfig(level=logging.INFO)
self.df = Defined_Functionality()
self.EUR_USD = instrument_1
self.GBP_USD = instrument_2
self.EUR_GBP = instrument_3
self.index = index
self.instrument_df = {
instrument_1:[],
instrument_2:[],
instrument_3:[]
}
self.diffs_df = {
instrument_1:[],
instrument_2:[],
instrument_3:[]
}
self.old_prices = {
instrument_1:0,
instrument_2:0,
instrument_3:0
}
# self.EUR_USD = "CS.D.EURUSD.TODAY.IP"
# self.GBP_USD = "CS.D.GBPUSD.TODAY.IP"
# self.EUR_GBP = "CS.D.EURGBP.TODAY.IP"
self.trigger_value = 0
list_of_epics = [self.GBP_USD, self.EUR_USD , self.EUR_GBP]
self.df.set_epics_to_look_for(epic_list=list_of_epics)
self.map_epic_data_minute = {}
self.map_epic_data_minute_average = {}
self.triggers = {
"down": False,
"down_up":False,
"up":False,
"up_down":False
}
for epic in list_of_epics:
self.map_epic_data_minute[epic] = []
self.map_epic_data_minute_average[epic] = []
self.first_timestamp = None
self.high = None
self.low = None
self.closing_increments = []
self.initial_position_size_limit = 0
self.df.start_data_from_market_data_socket(list_of_epics)
self.df.start_data_from_account_and_trade_data_socket()
# time to build up some data
self.flag = False
self.timestamp = 121
def run(self):
# build up data
# time.sleep(10)
while (True):
try:
position = self.df.get_open_positions()
tuple_of_commands = self.gather_data_create_signals_close_positions(position)
if tuple_of_commands != None:
position = self.create_initial_position(direction=tuple_of_commands[0], epic=tuple_of_commands[1])
except Exception as e:
print(e, " error in the algo")
# traceback.print_exc()
def gather_data_create_signals_close_positions(self, positions):
instruments = [self.EUR_USD, self.EUR_GBP, self.GBP_USD]
# new
EUR_GBP_data = self.df.get_quote_data_from_socket(epic=self.EUR_GBP)
EUR_USD_data = self.df.get_quote_data_from_socket(epic=self.EUR_USD)
GBP_USD_data = self.df.get_quote_data_from_socket(epic=self.GBP_USD)
market_data = [EUR_USD_data, EUR_GBP_data, GBP_USD_data]
if positions != None:
list_closing_positions = []
for single_position in positions:
epic = single_position["market"]["epic"]
if not epic in instruments:
continue
index_place = instruments.index(epic)
# closing position handled by the stop algo - for now
# closing = self.check_position_closing(single_position, market_data[index_place])
# if closing != None:
# print(closing)
return None
# old
EUR_GBP_data_old = self.df.get_old_quote_data_from_socket(epic=self.EUR_GBP)
if EUR_GBP_data_old == None: return None
EUR_USD_data_old = self.df.get_old_quote_data_from_socket(epic=self.EUR_USD)
if EUR_USD_data_old == None: return None
GBP_USD_data_old = self.df.get_old_quote_data_from_socket(epic=self.GBP_USD)
if GBP_USD_data_old == None: return None
# new
EUR_GBP_data["mid"] = (EUR_GBP_data["BID"] + EUR_GBP_data["OFFER"]) / 2.0
EUR_USD_data["mid"] = (EUR_USD_data["BID"] + EUR_USD_data["OFFER"]) / 2.0
GBP_USD_data["mid"] = (GBP_USD_data["BID"] + GBP_USD_data["OFFER"]) / 2.0
# old
EUR_GBP_data_old["mid"] = (EUR_GBP_data_old["BID"] + EUR_GBP_data_old["OFFER"]) / 2.0
EUR_USD_data_old["mid"] = (EUR_USD_data_old["BID"] + EUR_USD_data_old["OFFER"]) / 2.0
GBP_USD_data_old["mid"] = (GBP_USD_data_old["BID"] + GBP_USD_data_old["OFFER"]) / 2.0
if self.index == 0:
# EURUSD
# GBPUSD
# EURGBP
# outcome = 1/EUR_USD_data["mid"]*EUR_GBP_data["mid"]*GBP_USD_data["mid"]
outcome = 1/EUR_USD_data["mid"]*EUR_GBP_data["mid"]*GBP_USD_data["mid"]
elif self.index == 1:
# USDJPY
# GBPUSD
# GBPJPY
outcome = 1 * EUR_USD_data["mid"] / EUR_GBP_data["mid"] * GBP_USD_data["mid"]
        # this section has been edited to look at the signals we buy and sell on, to check that alpha and beta are okay
self.set_of_outcomes.add(round(outcome,2))
if not 9999 < outcome < 10001:
self.trigger_value = outcome
print("extreme event occured")
self.flag = True
self.timestamp = time.time()
self.instrument_df = {
instruments[0]: [],
instruments[1]: [],
instruments[2]: []
}
self.diffs_df = {
instruments[0]: [],
instruments[1]: [],
instruments[2]: []
}
self.old_prices={
instruments[0]: EUR_USD_data_old["mid"],
instruments[1]: EUR_GBP_data_old["mid"],
instruments[2]: GBP_USD_data_old["mid"]
}
# return None
# finding percentage change
diff_1 = (EUR_USD_data["mid"] - EUR_USD_data_old["mid"]) / EUR_USD_data_old["mid"]
diff_2 = (EUR_GBP_data["mid"] - EUR_GBP_data_old["mid"]) / EUR_GBP_data_old["mid"]
diff_3 = (GBP_USD_data["mid"] - GBP_USD_data_old["mid"]) / GBP_USD_data_old["mid"]
diffs_org = [diff_1, diff_2, diff_3]
diffs = [abs(diff_1), abs(diff_2), abs(diff_3)]
# starts with zero
max_value = max(diffs)
index_value = diffs.index(max_value)
# time
if (time.time() - self.timestamp) > 120 and self.flag:
self.flag = False
print("--end--")
instruments_dataframe = pd.DataFrame(self.instrument_df)
instruments_dataframe_normal = instruments_dataframe.copy(deep=True)
instruments_dataframe_normal[instruments[0]] = instruments_dataframe_normal[instruments[0]] / instruments_dataframe_normal[instruments[0]].max()
instruments_dataframe_normal[instruments[1]] = instruments_dataframe_normal[instruments[1]] / instruments_dataframe_normal[instruments[1]].max()
instruments_dataframe_normal[instruments[2]] = instruments_dataframe_normal[instruments[2]] / instruments_dataframe_normal[instruments[2]].max()
            # check whether the old price at the time an extreme outcome event occurs gives the correct buy/sell signals -------------------------------------------------------------------
instruments_dataframe_old_price = instruments_dataframe.copy(deep=True)
instruments_dataframe_old_price[instruments[0]] = instruments_dataframe_old_price[instruments[0]] / self.old_prices[instruments[0]]
instruments_dataframe_old_price[instruments[1]] = instruments_dataframe_old_price[instruments[1]] / self.old_prices[instruments[1]]
instruments_dataframe_old_price[instruments[2]] = instruments_dataframe_old_price[instruments[2]] / self.old_prices[instruments[2]]
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
diffs_dataframe = pd.DataFrame(self.diffs_df)
diffs_dataframe_normal = diffs_dataframe.copy(deep=True)
diffs_dataframe_normal[instruments[0]] = diffs_dataframe_normal[instruments[0]] / diffs_dataframe_normal[instruments[0]].max()
diffs_dataframe_normal[instruments[1]] = diffs_dataframe_normal[instruments[1]] / diffs_dataframe_normal[instruments[1]].max()
diffs_dataframe_normal[instruments[2]] = diffs_dataframe_normal[instruments[2]] / diffs_dataframe_normal[instruments[2]].max()
path = 'D:/currency_arbitrage_data/'+instruments[0]+instruments[1]+instruments[2]
if not os.path.exists(path):
os.makedirs(path)
path = path+"/"
dt_string = datetime.now().strftime("%d-%m-%Y %H_%M_%S")
string_trigger_value = str(self.trigger_value).replace(".","_")
ax = instruments_dataframe.plot()
ax.get_figure().savefig(path+dt_string+"_outcome_"+string_trigger_value+"_instruments_dataframe.png")
plt.close(ax.get_figure())
ax = instruments_dataframe_normal.plot()
ax.get_figure().savefig(path+dt_string+"_outcome_"+string_trigger_value+"_instruments_dataframe_normal.png")
plt.close(ax.get_figure())
ax = diffs_dataframe.plot()
ax.get_figure().savefig(path+dt_string+"_outcome_"+string_trigger_value+"_diffs_dataframe.png")
plt.close(ax.get_figure())
ax = diffs_dataframe_normal.plot()
ax.get_figure().savefig(path+dt_string+"_outcome_"+string_trigger_value+"_diffs_dataframe_normal.png")
plt.close(ax.get_figure())
if self.flag:
print(outcome)
print(diffs_org)
print("1_data:", EUR_USD_data["mid"], " 2_data:", EUR_GBP_data["mid"], " 3_data:", GBP_USD_data["mid"])
print("largest index: ", index_value)
self.instrument_df[instruments[0]].append(EUR_USD_data["mid"])
self.instrument_df[instruments[1]].append(EUR_GBP_data["mid"])
self.instrument_df[instruments[2]].append(GBP_USD_data["mid"])
self.diffs_df[instruments[0]].append(diffs_org[0])
self.diffs_df[instruments[1]].append(diffs_org[1])
self.diffs_df[instruments[2]].append(diffs_org[2])
print("-------------")
# we determine buy / sell signals here -----------------------------------------------------------------------------
# price has gone down and is going to bounce back up so we buy
# if diffs_org[index_value] < 0:
# #buy
# print("BUY signal on", instruments[index_value])
# return "BUY", instruments[index_value]
# elif diffs_org[index_value] > 0:
# #sell
# print("SELL signal on", instruments[index_value])
# return "SELL", instruments[index_value]
def check_position_closing(self, position, market_data):
spread = 1
direction = position["position"]["direction"]
# long
if direction == "BUY":
# we sell at the bid
if (market_data["BID"] - position["position"]["openLevel"]) > spread:
#close
return self.close_position(position)
elif direction == "SELL":
if (position["position"]["openLevel"] - market_data["OFFER"]) > spread:
#close
return self.close_position(position)
def create_initial_position(self, epic, direction):
position = self.df.find_open_position_by_epic(epic=epic)
if position == "error":
return position
if len(position) >= 1:
return position
if direction == "BUY":
position = self.df.create_open_position(epic=epic, direction="BUY", size=1,)
else:
position = self.df.create_open_position(epic=epic, direction="SELL", size=1)
return position
def close_position(self, position):
if len(position) == 0:
self.initial_position_size_limit = 0
return
size = position["position"]["dealSize"]
closing = self.df.close_position(size=size, position=position[0])
return closing
def signal_generation(self, epic):
signals_levels = None
# minute_10 = 60 * 10
# minute_10 = 60
minute_10 = 6
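# NOTE: despite the name, the polling interval actually in force is 6 seconds; the 60 s / 600 s variants are commented out above.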
datetime_now = datetime.now()
data = None
if self.first_timestamp is not None:
difference = (datetime_now - self.first_timestamp)
data = self.df.get_quote_data_from_socket(epic=epic)
# self.finding_lows_highs(data=data)
if (difference.seconds > minute_10):
data = self.df.get_quote_data_from_socket(epic=epic)
self.first_timestamp = datetime_now
self.map_epic_data_minute[epic].append(data)
# self.finding_lows_highs(data=data, reset=True)
else:
data = self.df.get_quote_data_from_socket(epic=epic)
self.first_timestamp = datetime_now
self.map_epic_data_minute[epic].append(data)
# self.finding_lows_highs(data=data)
if len(self.map_epic_data_minute[epic]) > 3:
self.map_epic_data_minute[epic].pop(0)
sell_level = None
buy_level = None
object_epic_data = self.map_epic_data_minute[epic][-1]
bid = object_epic_data["BID"]
offer = object_epic_data["OFFER"]
high = object_epic_data["HIGH"]
low = object_epic_data["LOW"]
object_epic_data = self.map_epic_data_minute[epic][-2]
bid_second = object_epic_data["BID"]
offer_second = object_epic_data["OFFER"]
high_second = object_epic_data["HIGH"]
low_second = object_epic_data["LOW"]
# object_epic_data = self.map_epic_data_minute[epic][-3]
# bid_third = object_epic_data["snapshot"]["bid"]
# offer_third = object_epic_data["snapshot"]["offer"]
# high_third = object_epic_data["snapshot"]["high"]
# low_third = object_epic_data["snapshot"]["low"]
# # the price is going down there you should buy
# if offer_third > offer_second > offer:
# buy_level = 1
# # the price is going up therefore you should sell
# elif bid_third < bid_second < bid:
# sell_level = 1
# instead of highs/lows we work with the bid/offer mid-price here
bid_offer_second_average = (bid_second + offer_second)/2.0
self.map_epic_data_minute_average[epic].append(bid_offer_second_average)
bid_offer_average = (bid + offer)/2.0
self.map_epic_data_minute_average[epic].append(bid_offer_average)
if len(self.map_epic_data_minute_average[epic]) > 10:
self.map_epic_data_minute_average[epic].pop(0)
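# Trigger state machine: "down"/"up" latch the prevailing direction of the
# mid-price; "down_up"/"up_down" fire on the first tick against that trend,
# i.e. at a local extremum, which emits a signal below and resets all flags.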
if self.triggers["down"] == True:
# sudden change up
if bid_offer_second_average < bid_offer_average:
self.triggers["down_up"] = True
elif self.triggers["up"] == True:
# sudden change down
if bid_offer_second_average > bid_offer_average:
self.triggers["up_down"] = True
# going down - trailing
if (self.triggers["up"] != True) and bid_offer_second_average > bid_offer_average:
self.triggers["down"] = True
self.triggers["up"] = False
# going up - trailing
if (self.triggers["down"] != True) and bid_offer_second_average < bid_offer_average:
self.triggers["up"] = True
self.triggers["down"] = False
# we are going down and found a downwards peak as the price rises up
if self.triggers["down"] and self.triggers["down_up"]:
sell_level = 1
self.triggers = {
"down": False,
"down_up": False,
"up": False,
"up_down": False
}
elif self.triggers["up"] and self.triggers["up_down"]:
buy_level = 1
self.triggers = {
"down": False,
"down_up": False,
"up": False,
"up_down": False
}
if (sell_level is None) and (buy_level is None):
return None
signals_levels = {
"SELL": sell_level,
"BUY": buy_level
}
return signals_levels
def finding_lows_highs(self, data, reset=False):
bid = data["snapshot"]["bid"]
offer = data["snapshot"]["offer"]
if reset:
epic = data["instrument"]["epic"]
object_dict = self.map_epic_data_minute[epic][-1]
object_dict["snapshot"]["high"] = self.high
object_dict["snapshot"]["low"] = self.low
self.map_epic_data_minute[epic][-1] = object_dict
# start looking at the new interval
if self.high is None or reset:
self.high = offer
if self.low is None or reset:
self.low = bid
if bid < self.low:
self.low = bid
if offer > self.high:
self.high = offer
|
11489306
|
info = {
"UNIT_NUMBERS": {
"cero": 0,
"un": 1,
"una": 1,
"uno": 1,
"dos": 2,
"tres": 3,
"cuatro": 4,
"cinco": 5,
"seis": 6,
"siete": 7,
"ocho": 8,
"nueve": 9
},
"DIRECT_NUMBERS": {
"diez": 10,
"once": 11,
"doce": 12,
"trece": 13,
"catorce": 14,
"quince": 15,
"dieciséis": 16,
"diecisiete": 17,
"dieciocho": 18,
"diecinueve": 19,
"veinte": 20,
"veintiuna": 21,
"veintiuno": 21,
"veintiún": 21,
"veintidós": 22,
"veintitrés": 23,
"veinticuatro": 24,
"veinticinco": 25,
"veintiséis": 26,
"veintisiete": 27,
"veintiocho": 28,
"veintinueve": 29
},
"TENS": {
"treinta": 30,
"cuarenta": 40,
"cincuenta": 50,
"sesenta": 60,
"setenta": 70,
"ochenta": 80,
"noventa": 90
},
"HUNDREDS": {
"cien": 100,
"ciento": 100,
"doscientas": 200,
"doscientos": 200,
"trescientas": 300,
"trescientos": 300,
"cuatrocientas": 400,
"cuatrocientos": 400,
"quinientas": 500,
"quinientos": 500,
"seiscientas": 600,
"seiscientos": 600,
"setecientas": 700,
"setecientos": 700,
"ochocientas": 800,
"ochocientos": 800,
"novecientas": 900,
"novecientos": 900
},
"BIG_POWERS_OF_TEN": {
"mil": 1000,
"millones": 1000000,
"millón": 1000000,
"millardo": 1000000000,
"billones": 1000000000000,
"billón": 1000000000000,
"trillones": 1000000000000000000,
"trillón": 1000000000000000000,
"cuatrillones": 1000000000000000000000000,
"cuatrillón": 1000000000000000000000000,
"quintillones": 1000000000000000000000000000000,
"quintillón": 1000000000000000000000000000000,
"sextillones": 1000000000000000000000000000000000000,
"sextillón": 1000000000000000000000000000000000000,
"septillones": 1000000000000000000000000000000000000000000,
"septillón": 1000000000000000000000000000000000000000000,
"octillones": 1000000000000000000000000000000000000000000000000,
"octillón": 1000000000000000000000000000000000000000000000000,
"nonillones": 1000000000000000000000000000000000000000000000000000000,
"nonillón": 1000000000000000000000000000000000000000000000000000000,
"decillón": 1000000000000000000000000000000000000000000000000000000000000,
"decillones": 1000000000000000000000000000000000000000000000000000000000000000000,
"undecillones": 1000000000000000000000000000000000000000000000000000000000000000000,
"undecillón": 1000000000000000000000000000000000000000000000000000000000000000000,
"duodecillones": 1000000000000000000000000000000000000000000000000000000000000000000000000,
"duodecillón": 1000000000000000000000000000000000000000000000000000000000000000000000000,
"tredecillones": 1000000000000000000000000000000000000000000000000000000000000000000000000000000,
"tredecillón": 1000000000000000000000000000000000000000000000000000000000000000000000000000000,
"gúgol": 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,
"vigintillones": 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,
"vigintillón": 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000,
"centillón": 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
},
"SKIP_TOKENS": [
"y"
],
"USE_LONG_SCALE": True
}
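# Illustrative composition under the long scale:
#   "dos millones trescientos cuatro mil cinco"
#   -> 2 * 1_000_000 + (300 + 4) * 1_000 + 5 = 2_304_005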
|
11489358
|
from .core import *
from .about import __version__
from .about import __author__
from .about import __title__
from .about import __summary__
from .about import __email__
|
11489363
|
import torch
import numpy as np
import numba
import copy
from ...utils import common_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...ops.iou3d_nms import iou3d_nms_utils
import warnings
try:
from numba.errors import NumbaPerformanceWarning
warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
except ImportError:
pass
def random_flip_along_x(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
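# mirroring across the x axis maps y -> -y and a heading angle measured from +x to its negative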
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
points[:, 1] = -points[:, 1]
return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
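# mirroring across the y axis maps x -> -x and heading theta to pi - theta, written here as -(theta + pi) (equal modulo 2*pi)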
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
points[:, 0] = -points[:, 0]
return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
rot_range: [min, max]
Returns:
"""
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
return gt_boxes, points
def global_scaling(gt_boxes, points, scale_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
scale_range: [min, max]
Returns:
"""
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
def global_sampling(gt_boxes, points, gt_boxes_mask, sample_ratio_range, prob):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C)
gt_boxes_mask: (N), boolean mask for gt_boxes
sample_ratio_range: [min, max], fraction of points to keep
prob: probability of subsampling this frame
Returns:
"""
if np.random.uniform(0, 1) > prob:
return gt_boxes, points, gt_boxes_mask
num_points = points.shape[0]
sample_ratio = np.random.uniform(sample_ratio_range[0], sample_ratio_range[1])
remain_points_num = int(num_points * sample_ratio)
# shuffle points
shuffle_idx = np.random.permutation(points.shape[0])
points = points[shuffle_idx]
# sample points
points = points[:remain_points_num]
# mask empty gt_boxes
num_points_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, :3]),
torch.from_numpy(gt_boxes[:, :7])
).numpy().sum(axis=1)
mask = (num_points_in_gt >= 1)
gt_boxes_mask = gt_boxes_mask & mask
return gt_boxes, points, gt_boxes_mask
def scale_pre_object(gt_boxes, points, gt_boxes_mask, scale_perturb, num_try=50):
"""
uniformly scale objects within a given perturbation range
Args:
gt_boxes: (N, 7) under unified coordinates
points: (M, 3 + C) points in lidar
gt_boxes_mask: (N), boolean mask for gt_boxes
scale_perturb: scalar or [min, max] scale perturbation range
num_try: number of scale attempts per box
Returns:
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(scale_perturb, (list, tuple, np.ndarray)):
scale_perturb = [-scale_perturb, scale_perturb]
# boxes wise scale ratio
scale_noises = np.random.uniform(scale_perturb[0], scale_perturb[1], size=[num_boxes, num_try])
for k in range(num_boxes):
if gt_boxes_mask[k] == 0:
continue
scl_box = copy.deepcopy(gt_boxes[k])
scl_box = scl_box.reshape(1, -1).repeat([num_try], axis=0)
scl_box[:, 3:6] = scl_box[:, 3:6] * scale_noises[k].reshape(-1, 1).repeat([3], axis=1)
# detect conflict
# [num_try, N-1]
if num_boxes > 1:
self_mask = np.ones(num_boxes, dtype=np.bool_)
self_mask[k] = False
iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(scl_box, gt_boxes[self_mask])
ious = np.max(iou_matrix, axis=1)
no_conflict_mask = (ious == 0)
# all tries conflict with other gts
if no_conflict_mask.sum() == 0:
continue
# scale points and assign new box
try_idx = no_conflict_mask.nonzero()[0][0]
else:
try_idx = 0
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
points[:, 0:3], np.expand_dims(gt_boxes[k], axis=0)).squeeze(0)
obj_points = points[point_masks > 0]
obj_center, lwh, ry = gt_boxes[k, 0:3], gt_boxes[k, 3:6], gt_boxes[k, 6]
# relative coordinates
obj_points[:, 0:3] -= obj_center
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), -ry).squeeze(0)
new_lwh = lwh * scale_noises[k][try_idx]
obj_points[:, 0:3] = obj_points[:, 0:3] * scale_noises[k][try_idx]
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), ry).squeeze(0)
# recompute the object center so the box stays on the ground instead of floating
obj_center[2] += (new_lwh[2] - lwh[2]) / 2
obj_points[:, 0:3] += obj_center
points[point_masks > 0] = obj_points
gt_boxes[k, 3:6] = new_lwh
# if enlarge boxes, remove bg points
if scale_noises[k][try_idx] > 1:
points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(gt_boxes[k],
axis=0)).squeeze(0)
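# XOR flags points whose box membership changed, i.e. background points swallowed by the enlarged box; keep everything else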
keep_mask = ~np.logical_xor(point_masks, points_dst_mask)
points = points[keep_mask]
return points, gt_boxes
def normalize_object_size(boxes, points, boxes_mask, size_res):
"""
:param boxes: (N, 7) under unified boxes
:param points: (N, 3 + C)
:param boxes_mask
:param size_res: (3) [l, w, h]
:return:
"""
points = copy.deepcopy(points)
boxes = copy.deepcopy(boxes)
for k in range(boxes.shape[0]):
# skip boxes that not need to normalize
if boxes_mask[k] == 0:
continue
masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes[k:k+1]).squeeze(0)
obj_points = points[masks > 0]
obj_center, lwh, ry = boxes[k, 0:3], boxes[k, 3:6], boxes[k, 6]
obj_points[:, 0:3] -= obj_center
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), -ry).squeeze(0)
new_lwh = lwh + np.array(size_res)
# skip boxes whose shifted size would become negative
if (new_lwh < 0).any():
boxes_mask[k] = False
continue
scale_lwh = new_lwh / lwh
obj_points[:, 0:3] = obj_points[:, 0:3] * scale_lwh
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), ry).squeeze(0)
# recompute the object center so the box stays on the ground instead of floating
obj_center[2] += size_res[2] / 2
obj_points[:, 0:3] += obj_center
points[masks > 0] = obj_points
boxes[k, 3:6] = new_lwh
# if enlarge boxes, remove bg points
if (np.array(size_res) > 0).any():
points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(boxes[k],
axis=0)).squeeze(0)
keep_mask = ~np.logical_xor(masks, points_dst_mask)
points = points[keep_mask]
return points, boxes
def rotate_objects(gt_boxes, points, gt_boxes_mask, rotation_perturb, prob, num_try=50):
"""
Args:
gt_boxes: [N, 7] (x, y, z, dx, dy, dz, heading) on unified coordinate
points: (M, 3 + C)
gt_boxes_mask: [N] bool
rotation_perturb: scalar or [min, max] rotation noise range
prob: probability of rotating each object
num_try: number of rotation attempts per object
Returns:
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
# decide per object whether it gets rotated
rot_mask = np.random.uniform(0, 1, size=[num_boxes]) < prob
# generate random rotation noise for each box
rot_noise = np.random.uniform(rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
for idx in range(num_boxes):
# don't need to rotate this object
if (not rot_mask[idx]) or (not gt_boxes_mask[idx]):
continue
# generate rotated boxes num_try times
rot_box = copy.deepcopy(gt_boxes[idx])
# [num_try, 7]
rot_box = rot_box.reshape(1, -1).repeat([num_try], axis=0)
rot_box[:, 6] += rot_noise[idx]
# detect conflict
# [num_try, N-1]
if num_boxes > 1:
self_mask = np.ones(num_boxes, dtype=np.bool_)
self_mask[idx] = False
iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(rot_box, gt_boxes[self_mask])
ious = np.max(iou_matrix, axis=1)
no_conflict_mask = (ious == 0)
# all tries conflict with other gts
if no_conflict_mask.sum() == 0:
continue
# rotate points and assign new box
try_idx = no_conflict_mask.nonzero()[0][0]
else:
try_idx = 0
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(gt_boxes[idx], axis=0)).squeeze(0)
object_points = points[point_masks > 0]
object_center = gt_boxes[idx][0:3]
object_points[:, 0:3] -= object_center
object_points = common_utils.rotate_points_along_z(object_points[np.newaxis, :, :],
np.array([rot_noise[idx][try_idx]]))[0]
object_points[:, 0:3] += object_center
points[point_masks > 0] = object_points
# remove bg points that lie the position we want to place object
points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(rot_box[try_idx], axis=0)).squeeze(0)
keep_mask = ~np.logical_xor(point_masks, points_dst_mask)
points = points[keep_mask]
gt_boxes[idx] = rot_box[try_idx]
return gt_boxes, points
|
11489367
|
import aiohttp
import asyncio
import logging
from typing import (
Any,
Dict,
Optional
)
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.logger import HummingbotLogger
from hummingbot.core.utils.async_utils import safe_ensure_future
class CoinGeckoDataFeed(DataFeedBase):
cgdf_logger: Optional[HummingbotLogger] = None
_cgdf_shared_instance: "CoinGeckoDataFeed" = None
BASE_URL = "https://api.coingecko.com/api/v3"
@classmethod
def get_instance(cls) -> "CoinGeckoDataFeed":
if cls._cgdf_shared_instance is None:
cls._cgdf_shared_instance = CoinGeckoDataFeed()
return cls._cgdf_shared_instance
@classmethod
def logger(cls) -> HummingbotLogger:
if cls.cgdf_logger is None:
cls.cgdf_logger = logging.getLogger(__name__)
return cls.cgdf_logger
def __init__(self, update_interval: float = 30.0):
super().__init__()
self._ev_loop = asyncio.get_event_loop()
self._price_dict: Dict[str, float] = {}
self._update_interval = update_interval
self.fetch_data_loop_task: Optional[asyncio.Task] = None
@property
def name(self) -> str:
return "coin_gecko_api"
@property
def price_dict(self) -> Dict[str, float]:
return self._price_dict.copy()
@property
def health_check_endpoint(self) -> str:
return f"{self.BASE_URL}/ping"
def get_price(self, asset: str) -> Optional[float]:
return self._price_dict.get(asset.upper())
async def fetch_data_loop(self):
while True:
try:
await self.fetch_data()
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(f"Error getting data from {self.name}", exc_info=True,
app_warning_msg="Couldn't fetch newest prices from Coin Gecko. "
"Check network connection.")
await asyncio.sleep(self._update_interval)
async def update_asset_prices(self):
try:
client: aiohttp.ClientSession = await self._http_client()
price_url: str = f"{self.BASE_URL}/coins/markets"
price_dict: Dict[str, float] = {}
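# pages 1-4 at 250 coins per page cover the top 1000 coins by market cap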
for i in range(1, 5):
params: Dict[str, str] = {"vs_currency": "usd", "order": "market_cap_desc", "per_page": 250,
"page": i, "sparkline": "false"}
try:
async with client.request("GET", price_url, params=params) as resp:
results = await resp.json()
if 'error' in results:
raise Exception(f"{results['error']}")
for result in results:
symbol = result["symbol"].upper()
price = float(result["current_price"]) if result["current_price"] is not None else 0.0
if symbol not in price_dict:
price_dict[symbol] = price
except Exception as e:
self.logger().warning(f"Coin Gecko API request failed. Exception: {str(e)}")
raise e
await asyncio.sleep(0.1)
self._price_dict = price_dict
except Exception:
raise
async def fetch_data(self):
await self.update_asset_prices()
self._ready_event.set()
async def start_network(self):
await self.stop_network()
self.fetch_data_loop_task = safe_ensure_future(self.fetch_data_loop())
async def stop_network(self):
if self.fetch_data_loop_task is not None:
self.fetch_data_loop_task.cancel()
self.fetch_data_loop_task = None
|
11489373
|
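# Expected parsed output for a CDP neighbors fixture (apparently a Cisco "show cdp neighbors detail" parser test).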
expected_output = {
"total_entries_displayed": 3,
"index": {
1: {
"advertisement_ver": 2,
"capabilities": "Router Switch CVTA phone port",
"device_id": "R6(9P57K4EJ8CA)",
"duplex_mode": "full",
"entry_addresses": {"172.16.1.203": {}},
"hold_time": 133,
"local_interface": "GigabitEthernet0/0",
"management_addresses": {"172.16.1.203": {}},
"native_vlan": "",
"platform": "N9K-9000v",
"port_id": "mgmt0",
"software_version": "Cisco Nexus Operating System (NX-OS) Software, Version 9.2(1)",
"vtp_management_domain": "",
},
2: {
"advertisement_ver": 2,
"capabilities": "Router Switch CVTA phone port",
"device_id": "R7(9QBDKB58F76)",
"duplex_mode": "full",
"entry_addresses": {"172.16.1.204": {}},
"hold_time": 126,
"local_interface": "GigabitEthernet0/0",
"management_addresses": {"172.16.1.204": {}},
"native_vlan": "",
"platform": "N9K-9000v",
"port_id": "mgmt0",
"software_version": "Cisco Nexus Operating System (NX-OS) Software, Version 9.2(1)",
"vtp_management_domain": "",
},
3: {
"advertisement_ver": 2,
"capabilities": "Router Source-Route-Bridge",
"device_id": "R5.cisco.com",
"duplex_mode": "",
"entry_addresses": {"172.16.1.202": {}},
"hold_time": 177,
"local_interface": "GigabitEthernet0/0",
"management_addresses": {"172.16.1.202": {}},
"native_vlan": "",
"platform": "Cisco ",
"port_id": "GigabitEthernet0/0",
"software_version": "Cisco IOS Software, IOSv Software (VIOS-ADVENTERPRISEK9-M), Version 15.7(3)M3, RELEASE SOFTWARE (fc2)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2018 by Cisco Systems, Inc.\nCompiled Wed 01-Aug-18 16:45 by prod_rel_team",
"vtp_management_domain": "",
},
},
}
|
11489378
|
import sys, os
sys.path.insert(0,os.getcwd())
# dictionary to check valid words
import enchant
D = enchant.Dict("en_US")
# code for bot ai
import ai
from typing import Any, Dict
# implementation class
'''
class ScrabbleBotHandler(object):
def usage(scrabble):
return 'bot for playing scrabble'
def handle_message(scrabble, message, bot_handler):
response = get_response(message, bot_handler)
bot_handler.send_reply(message, response)
handler_class = ScrabbleBotHandler
'''
def get_response(message: Dict[str, str], bot_handler: Any):
data = message['content'].split()
if len(data) == 3:
if(data[2] == "start"):
bot_handler.storage.put("his_points", "0")
bot_handler.storage.put("bot_points", "0")
board = [['#' for i in range(9)] for j in range(9)]
board_str = show_board(board)
bot_handler.storage.put("board", board_str)
bot_handler.storage.put("game_on", "true")
return board_str + show_points(0, 0)
else:
return "Invalid Input."
elif len(data) == 5:
if bot_handler.storage.get("game_on") != "true":
return "No Paused Game!"
row = int(data[2])
col = int(data[3])
c = data[4][0].lower()
his_points = int(bot_handler.storage.get("his_points"))
bot_points = int(bot_handler.storage.get("bot_points"))
board_str = bot_handler.storage.get("board")
board = get_board(board_str)
if valid_move(row, col, c, board):
board[row][col] = c
his_points += get_points(row, col, board)
board_str = show_board(board)
ret = game_result(board, his_points, bot_points)
if ret != "":
bot_handler.storage.put("game_on", "false")
return ret
x, y, z = ai.medium_bot(board)
ret = ""
if x != -1:
board[x][y] = z
# print ("board x,y=", board[x][y])
bot_points += get_points(x, y, board)
ret = "Bot Moves " + z + " At (" + str(x) + ", " + str(y) + ")\n"
board_str = show_board(board)
bot_handler.storage.put("board", board_str)
bot_handler.storage.put("his_points", his_points)
bot_handler.storage.put("bot_points", bot_points)
ret += board_str+show_points(his_points, bot_points)
return ret
else:
return "Invalid Input."
else:
return "Invalid Input."
# how many points will you get for (row, col) move
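# every horizontal and vertical substring through (row, col) is checked; each dictionary word (read forwards or backwards) longer than one letter scores its length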
def get_points(row, col, board):
ret = 0
for i in range(0, col+1):
for j in range(col, 9):
s = ""
for k in range(i, j+1):
s += board[row][k]
if (D.check(s) or D.check(s[::-1])) and len(s) > 1:
# print(s)
ret += len(s)
for i in range(0, row+1):
for j in range(row, 9):
s = ""
for k in range(i, j+1):
s += board[k][col]
if (D.check(s) or D.check(s[::-1])) and len(s) > 1:
# print(s)
ret += len(s)
return ret
# check if a (row, col, c) move is valid
def valid_move(row, col, c, board):
if row < 0 or row >= 9:
return False
if col < 0 or col >= 9:
return False
# the character must be a lowercase letter and the square still empty
if not ('a' <= c <= 'z') or board[row][col] != '#':
return False
return True
# parse board from string
def get_board(board_str):
board = []
i = 0
for row_str in board_str.split("\n"):
if i > 0:
board.append(row_str.split(" ")[1:])
i += 1
return board
# parse board to string
def show_board(board):
ret = "0 "
for i in range(9):
ret += str(i) + " "
ret += "\n"
for i in range(9):
ret += str(i) + " "
for j in range(9):
ret += board[i][j] + " "
ret += "\n"
return ret
# show points of user and bot
def show_points(his_points, bot_points):
ret = ""
ret += "Your Points: " + str(his_points)
ret += "\tBot Points: " + str(bot_points)
return ret
# check result of game
def game_result(board, his_points, bot_points):
for i in range(9):
for j in range(9):
if board[i][j] == '#':
return ""
if his_points > bot_points:
return "You Win :)"
elif bot_points > his_points:
return "Bot Wins :("
else:
return "It's a Draw :|"
|
11489397
|
import sys
from autonetkit.workflow.workflow import BaseWorkflow
def main(filename):
"""
@param filename: path to the input topology file
"""
workflow = BaseWorkflow()
network_model = workflow.load(filename)
workflow.run(network_model, target_platform="kathara")
if __name__ == '__main__':
filename = sys.argv[1]
main(filename)
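# usage: python <this script> <topology file>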
|
11489451
|
import subprocess
import click
def ping_ip(ip_address, count):
"""
Ping IP_ADDRESS and return True/False
"""
reply = subprocess.run(
f"ping -c {count} -n {ip_address}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
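# ping exits with status 0 when at least one reply was received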
if reply.returncode == 0:
return True
else:
return False
def ping_ip_addresses(ip_addresses, count):
reachable = []
unreachable = []
with click.progressbar(ip_addresses, label="Пингую адреса") as bar:
for ip in bar:
if ping_ip(ip, count):
reachable.append(ip)
else:
unreachable.append(ip)
return reachable, unreachable
@click.command()
@click.argument("ip_addresses", nargs=-1, required=True)
@click.option("--count", "-c", default=2, type=int, help="Number of packets")
def main(ip_addresses, count):
"""
Ping the given IP addresses
"""
reachable, unreachable = ping_ip_addresses(ip_addresses, count)
for ip in reachable:
click.secho(f"IP-адрес {ip:15} пингуется", fg="green", bold=True)
for ip in unreachable:
click.secho(f"IP-адрес {ip:15} не пингуется", fg="red", bold=True)
if __name__ == "__main__":
main()
"""
$ python example_03_ping_ip_list_progress_bar.py 8.8.8.8 8.8.4.4 10.1.1.1 192.168.100.1
Pinging addresses [####################################] 100%
IP address 8.8.8.8         is reachable
IP address 8.8.4.4         is reachable
IP address 192.168.100.1   is reachable
IP address 10.1.1.1        is unreachable
"""
|
11489486
|
from tool.runners.python import SubmissionPy
class ThChSubmission(SubmissionPy):
def run(self, input):
seats = {
get_seat_id(*get_row_and_seat(boarding_pass))
for boarding_pass in input.split("\n")
}
all_seats = set(range(2 ** 10))  # seat ids run 0..1023
for seat_id in all_seats - seats:
if seat_id - 1 in seats and seat_id + 1 in seats:
return seat_id
def get_row_and_seat(boarding_pass):
row_min, row_max = 0, 127
for c in boarding_pass[:7]:
middle = (row_max + row_min) // 2
row_min, row_max = (row_min, middle) if c == "F" else (middle + 1, row_max)
row = row_min
seat_min, seat_max = 0, 7
for c in boarding_pass[-3:]:
middle = (seat_max + seat_min) // 2
seat_min, seat_max = (seat_min, middle) if c == "L" else (middle + 1, seat_max)
seat = seat_min
return (row, seat)
def get_seat_id(row, seat):
return row * 8 + seat
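# Note: F/B and L/R are simply the bits of one 10-bit number, so the seat id
# could equivalently be computed as
#   int(boarding_pass.translate(str.maketrans("FBLR", "0101")), 2)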
|
11489488
|
import os, sys, inspect
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from core.bounds import WSR_mu_plus
from core.concentration import get_tlambda, get_lhat_from_table, get_lhat_from_table_binarysearch
import numpy as np
from scipy.optimize import brentq
from tqdm import tqdm
if __name__ == "__main__":
n_cal = int(4000)
n_val = int(1000)
n_lambda = 10000
n_reps = int(1e2)
epsilon = 1e-10
maxiters = int(1e5)
num_grid_bennett = 1000
mus = [0.05, 0.1, 0.2]
deltas = [0.001, 0.01, 0.05, 0.1]
lambdas_table = np.linspace(0,1,n_lambda)
delta = .1
gamma = .1
# set up random monotone loss curves over the lambda grid.
example_loss_table = np.random.uniform(size=(n_cal+n_val,n_lambda))
np.cumsum(example_loss_table,axis=1, out=example_loss_table)
example_loss_table[:,1:] = example_loss_table[:,:-1]
example_loss_table[:,0] = 0
example_loss_table = example_loss_table / (example_loss_table.max(axis=1)[:,None])
example_loss_table = example_loss_table[:,::-1]
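# each row is now non-increasing in lambda: loss 1 at lambda = 0 decaying to 0 at lambda = 1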
risks = np.zeros((n_reps,))
# get the bound
bound_str = 'WSR'
bound_fn = WSR_mu_plus
tlambda = get_tlambda(1500,deltas,n_cal,None,None,None,epsilon,maxiters,bound_str,bound_fn)
for j in tqdm(range(n_reps)):
np.random.shuffle(example_loss_table)
calib_loss_table, val_loss_table = (example_loss_table[:n_cal], example_loss_table[n_cal:])
# get lhat (the validation risk at lhat should be controlled at level gamma)
lhat = get_lhat_from_table_binarysearch(calib_loss_table, lambdas_table, gamma, delta, tlambda, bound_str)
val_losses = val_loss_table[:,np.argmax(lambdas_table == lhat)]
risks[j] = val_losses.mean()
print((risks > gamma).mean())
print(risks)
#sigmahat = np.sqrt(2*muhat*(1-muhat))
#ucb = HBB_mu_plus(muhat, sigmahat, n_cal, delta, num_grid_bennett, maxiters) # 1 and 100 are dummy arguments.
#x = np.random.binomial(n_cal,ucb,size=(n_reps,))/n_cal
#print( (x <= muhat).mean() * np.e / delta ) # Should be near 1
#for mu in mus:
# for delta in deltas:
# print(f"mu: {mu}, delta: {delta}")
# def _to_invert(muhat):
# sigmahat = np.sqrt(2*muhat*(1-muhat))
# return HBB_mu_plus(muhat, sigmahat, n_cal, delta, num_grid_bennett, maxiters) - mu
# thresh = brentq(_to_invert, 1e-10, mu, maxiter=maxiters)
# x = np.random.binomial(n_cal,mu,size=(n_reps,))/n_cal
# print(f"empirical/theory: { (x <= thresh).mean() * np.e / delta }")
|
11489667
|
import bacon
shader = bacon.Shader(vertex_source=
"""
precision highp float;
attribute vec3 a_Position;
attribute vec2 a_TexCoord0;
attribute vec4 a_Color;
varying vec2 v_TexCoord0;
varying vec4 v_Color;
uniform mat4 g_Projection;
void main()
{
gl_Position = g_Projection * vec4(a_Position, 1.0);
v_TexCoord0 = a_TexCoord0;
v_Color = a_Color;
}
""",
fragment_source=
"""
precision highp float;
uniform sampler2D g_Texture0;
uniform float brightness;
uniform float contrast;
varying vec2 v_TexCoord0;
varying vec4 v_Color;
void main()
{
// Standard vertex color and texture
vec4 color = v_Color * texture2D(g_Texture0, v_TexCoord0);
// Brightness / contrast
color = vec4(brightness + 0.5) + (color - vec4(0.5)) * vec4(contrast);
gl_FragColor = color;
}
""")
brightness = shader.uniforms['brightness']
contrast = shader.uniforms['contrast']
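# uniform handles are looked up once; their .value is updated every frame in Game.on_tick below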
kitten = bacon.Image('res/kitten.png')
class Game(bacon.Game):
def on_tick(self):
bacon.clear(0, 0, 0, 1)
bacon.set_shader(shader)
brightness.value = bacon.mouse.y / float(bacon.window.height)
contrast.value = bacon.mouse.x / float(bacon.window.width)
bacon.draw_image(kitten, 0, 0)
bacon.run(Game())
|
11489691
|
from zzcore import StdAns
import re, requests
from subprocess import getoutput,call
from config import REMOTE_MC_URL
class Ans(StdAns):
AllowGroup = [959613860, 125733077, 204097403, 1140391080]
def GETMSG(self):
if len(self.parms) < 2:
return 'Running this with no arguments is bad manners!'
cmd = self.parms[1]
AllowCmd = ['list','status','say']
if cmd in AllowCmd:
if cmd == 'status':
msg = getStatus()
elif cmd == 'list':
msg = getList()
elif cmd == 'say':
saywhat = self.raw_msg['raw_message'][8:]
msg = say(saywhat)
else:
msg = 'Did you put some odd argument after "mc"? I only know status, list and say.'
return msg
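# Each helper below talks either to a remote HTTP bridge (REMOTE_MC_URL) or to the local "papermc" wrapper CLI.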
def getStatus():
if REMOTE_MC_URL:
output = requests.post(f'{REMOTE_MC_URL}/status').text
else:
output = getoutput('papermc status')
p = re.compile(r'processes = ([0-9]*) \(')
prsnum = re.findall(p,output)[0]
p = re.compile(r' \((.*?)\)',re.S)
prsnames = re.findall(p,output)[0].split(', ')
p = re.compile(r'Total memory usage = (.*)$')
memory = re.findall(p,output)[0]
msg = 'My MC server is currently running '
for prsname in prsnames:
msg = msg + prsname + ' '
msg = msg + '- ' + prsnum + ' processes in total,\nusing ' + memory + ' of memory.'
return msg
def getList():
if REMOTE_MC_URL:
output = requests.post(f'{REMOTE_MC_URL}/list').text
else:
output = getoutput('papermc command list')
p = re.compile(r'There are (.*?)[ of a max]', re.S)
online = re.findall(p,output)[0]
if online == '0':
msg = 'Looks like nobody is online\n_(-ω-`_)⌒)_'
else:
msg = 'There are ' + online + ' players online!'
p = re.compile(r'online: (.*?)[\n>]', re.S)
players = re.findall(p,output)[0].split(', ')
for player in players:
msg = msg + '\n' + player
return msg
def say(saywhat):
if not saywhat:
return 'You want me to say... what? o(≧口≦)o'
if REMOTE_MC_URL:
code = requests.post(f'{REMOTE_MC_URL}/say',data=saywhat).text
else:
shellcmd = ['papermc','command','say',saywhat]
code = call(shellcmd)
if str(code) == '0':
msg = "I've passed the message along."
else:
msg = '٩(ŏ﹏ŏ、)۶ It failed! Did you ask me to send something weird?'
return msg
|
11489701
|
import structlog
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
log = structlog.get_logger(__name__)
class Config(AppConfig):
name = 'readthedocs.builds'
label = 'builds'
verbose_name = _("Builds")
def ready(self):
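# imported for its side effect of registering this app's tasks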
import readthedocs.builds.tasks
|
11489723
|
from setuptools import setup, find_packages
version = '{{ cookiecutter.version }}dev'
install_requires = [
'pytest-pypom-navigation',
'colander',
'pytest-variables[yaml]',
'pytest-bdd',
'pytest-splinter',
'pypom_form',
{%- if cookiecutter.testrail == 'y' %}
'pytest-testrail',
{%- endif %}
]
{%- if cookiecutter.pytest_play == 'y' %}
play_require = [
'pytest-play',
'play_selenium',
'play_requests',
'play_sql',
'play_cassandra',
'play_dynamodb',
'play_websocket',
'play_mqtt',
]
{%- endif %}
tests_require = [
'pycodestyle',
'pytest-cov',
'tox',
'mock',
'pytest-html',
'pytest-repeat',
'pytest-randomly',
]
{%- set license_classifiers = {
'MIT license': 'License :: OSI Approved :: MIT License',
'BSD license': 'License :: OSI Approved :: BSD License',
'ISC license': 'License :: OSI Approved :: ISC License (ISCL)',
'Apache Software License 2.0': 'License :: OSI Approved :: Apache Software License',
'GNU General Public License v3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
} %}
setup(name='{{cookiecutter.project_slug}}',
version=version,
description="{{ cookiecutter.project_short_description }}",
long_description=open("README.rst").read() + "\n" +
open("CHANGES.rst").read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Developers",
"Framework :: Pytest",
"Topic :: Software Development :: Testing",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords='{{ cookiecutter.project_slug }}',
author_email='{{ cookiecutter.email }}',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
""",
extras_require={
'tests': tests_require,
{%- if cookiecutter.pytest_play == 'y' %}
'play': play_require,
{%- endif %}
},
)
|
11489812
|
import keras.backend as K
import numpy as np
from kfs.layers.convolutional import (Convolution2DEnergy_TemporalBasis,
Convolution2DEnergy_TemporalCorrelation)
def _test_smoke(channel_order=None):
from keras.models import Sequential
#from keras.layers import Flatten, Dense
input_shape = (12, 3, 64, 64)
if channel_order is None:
channel_order = K.image_data_format()
if channel_order == 'channels_last':
input_shape = (12, 64, 64, 3)
rng = np.random.RandomState(42)
datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
if channel_order == 'channels_last':
datums = datums.transpose(0, 1, 3, 4, 2)
nn2 = Sequential()
nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
padding='same',
temporal_kernel_size=5,
input_shape=input_shape))
nn2.compile(loss='mse', optimizer='sgd')
pred2 = nn2.predict(datums)
return nn2, pred2
def test_smoke_channels_first():
K.set_image_data_format('channels_first')
_test_smoke('channels_first')
def test_smoke_channels_last():
K.set_image_data_format('channels_last')
_test_smoke('channels_last')
def _test_equivalence(channel_order=None):
from keras.models import Sequential
#from keras.layers import Flatten, Dense
input_shape = (12, 3, 64, 64)
if channel_order is None:
channel_order = K.image_data_format()
if channel_order == 'channels_last':
input_shape = (12, 64, 64, 3)
nn = Sequential()
nn.add(Convolution2DEnergy_TemporalBasis(8, 16, 4, (5, 5), 7,
padding='same',
input_shape=input_shape,
data_format=channel_order))
rng = np.random.RandomState(42)
datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
if channel_order == 'channels_last':
datums = datums.transpose(0, 1, 3, 4, 2)
nn.compile(loss='mse', optimizer='sgd')
nn2 = Sequential()
nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
padding='same',
input_shape=input_shape,
data_format=channel_order))
nn2.compile(loss='mse', optimizer='sgd')
nn2.set_weights(nn.get_weights())
pred1 = nn.predict(datums)
pred2 = nn2.predict(datums)
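# with identical weights the two layer implementations must agree exactly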
assert ((pred1 - pred2) == 0.).all()
return nn, nn.predict(datums), nn2, nn2.predict(datums)
def test_equivalence_channels_first():
K.set_image_data_format('channels_first')
_test_equivalence('channels_first')
def test_equivalence_channels_last():
K.set_image_data_format('channels_last')
_test_equivalence('channels_last')
|
11489818
|
import numpy as np
import pycountry_convert as pc
def get_continent(country):
try:
country_code = pc.country_name_to_country_alpha2(country, cn_name_format='default')
return pc.country_alpha2_to_continent_code(country_code)
except (KeyError, TypeError):
return country
def fix_country(country):
if country == 'US':
return 'United States'
elif country == 'Korea, South':
return 'South Korea'
elif country == 'Taiwan*':
return 'Taiwan'
return country
def wrangle_data(covid_df, pop_df):
covid_df = covid_df.assign(Date=covid_df['Date'].astype(np.datetime64))
covid_df['Longitude'] = covid_df['Long']
covid_df['Latitude'] = covid_df['Lat']
covid_df['Country'] = covid_df['Country/Region'].fillna('')
covid_df['Country'] = covid_df['Country'].apply(fix_country)
covid_df['Continent'] = covid_df['Country'].apply(get_continent)
covid_df = covid_df.merge(pop_df, how='left', left_on='Country', right_on='Country')
covid_df['State'] = covid_df['Province/State'].fillna(covid_df['Country'])
covid_df['StateCountry'] = covid_df['State'] + ' ' + covid_df['Country']
covid_df['Active'] = covid_df['Confirmed'] - covid_df['Recovered']
covid_df = covid_df.assign(
logCumConf=np.where(
covid_df['Confirmed'] > 0,
np.log(covid_df['Confirmed']) /
np.where(
covid_df['Confirmed'] > 700,
np.log(1.01),
np.log(1.05)
),
0
)
)
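# logCumConf rescales log(confirmed cases), using base ~1.01 above 700 cases and ~1.05 below, presumably to keep marker sizes readable across magnitudes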
per_capita_adjust = 100000.0
def per_capita(col):
return (
covid_df[col]
/ covid_df['Population']
* per_capita_adjust
).round(0)
covid_df['ActivePerCapita'] = per_capita('Active')
covid_df['ConfirmedPerCapita'] = per_capita('Confirmed')
covid_df['RecoveredPerCapita'] = per_capita('Recovered')
covid_df['DeathsPerCapita'] = per_capita('Deaths')
covid_df['log10'] = np.where(covid_df['Confirmed'] > 0,
np.ceil(np.log10(covid_df['Confirmed'])), 0)
covid_df['log_group'] = np.power(10, covid_df['log10'] - 1).astype(int).astype(str) \
+ '-' + np.power(10, covid_df['log10']).astype(int).astype(str)
covid_df['Description'] = covid_df['State'] + ', ' \
+ covid_df['Country'] + ', ' \
+ covid_df['Continent'] + '<br>' \
+ 'Confirmed: ' + covid_df['Confirmed'].astype(str) + '<br>' \
+ 'Confirmed Per Capita: ' + covid_df['ConfirmedPerCapita'].astype(str) + '<br>' \
+ 'Recovered: ' + covid_df['Recovered'].astype(str) + '<br>' \
+ 'Active: ' + covid_df['Active'].astype(str) + '<br>' \
+ 'Deaths: ' + covid_df['Deaths'].astype(str) + '<br>' \
+ 'Confirmed Range: ' + covid_df['log_group'].astype(str) + '<br>'
return covid_df
|
11489842
|
from sentimentja import Analyzer
from pprint import pprint
analyzer = Analyzer()
pprint(analyzer.analyze([
"final fantasy 14 超楽しい",
"クソゲーはつまらん",
"エアリスが死んで悲しい",
"冒険の書が消える音こわい",
"廃人ゲーマーのスキルすごい",
"ケフカキモい"
]))
|
11489878
|
import rmf_adapter.type as types
# TYPES ======================================================================
def test_types():
# Test CPPDeliveryMsg
msg = types.CPPDeliveryMsg("pickup_place",
"pickup_dispenser",
"dropoff_place",
"dropoff_ingestor")
assert msg.pickup_place_name == "pickup_place"
assert msg.pickup_dispenser == "pickup_dispenser"
assert msg.dropoff_place_name == "dropoff_place"
assert msg.dropoff_ingestor == "dropoff_ingestor"
msg.pickup_place_name += "_rawr"
msg.pickup_dispenser += "_rawr"
msg.dropoff_place_name += "_rawr"
msg.dropoff_ingestor += "_rawr"
assert msg.pickup_place_name == "pickup_place_rawr"
assert msg.pickup_dispenser == "pickup_dispenser_rawr"
assert msg.dropoff_place_name == "dropoff_place_rawr"
assert msg.dropoff_ingestor == "dropoff_ingestor_rawr"
|
11489881
|
import logging
# from uuid import UUID
from weakref import WeakValueDictionary
from openpathsampling.netcdfplus.base import StorableNamedObject, StorableObject
from openpathsampling.netcdfplus.cache import MaxCache, Cache, NoCache, \
WeakLRUCache
from openpathsampling.netcdfplus.proxy import LoaderProxy
from future.utils import iteritems
import sys
if sys.version_info > (3, ):
long = int
unicode = str
logger = logging.getLogger(__name__)
init_log = logging.getLogger('openpathsampling.initialization')
class HashedList(dict):
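"""An insertion-ordered mapping from keys (uuids) to their integer index.

`_list` keeps the keys in insertion order, so `index(i)` performs the
reverse lookup (position -> key).
"""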
def __init__(self):
super(HashedList, self).__init__()
self._list = []
def append(self, key):
dict.__setitem__(self, key, len(self))
self._list.append(key)
# noinspection PyCallByClass
def extend(self, t):
n = len(self)
dict.update(self, zip(t, range(n, n + len(t))))
self._list.extend(t)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self._list[value] = key
def __getitem__(self, key):
return dict.__getitem__(self, key)
def index(self, key):
return self._list[key]
def mark(self, key):
if key not in self:
dict.__setitem__(self, key, -2)
def unmark(self, key):
if key in self:
dict.__delitem__(self, key)
def clear(self):
dict.clear(self)
self._list = []
@property
def list(self):
return self._list
class ObjectStore(StorableNamedObject):
"""
Base Class for storing complex objects in a netCDF4 file. It holds a
reference to the store file.
Attributes
----------
content_class : :obj:`openpathsampling.netcdfplus.base.StorableObject`
a reference to the class type to be stored using this Storage. Must be
subclassed from :obj:`openpathsampling.netcdfplus.base.StorableObject`
json : string
if already computed a JSON Serialized string of the object
cache : :py:class:`openpathsampling.netcdfplus.cache.Cache`
a dictionary that holds references to all stored elements by index
or string for named objects. This is only used for cached access
if caching is not `False`. Must be of type
:obj:`openpathsampling.netcdfplus.base.StorableObject` or subclassed.
"""
_restore_non_initial_attr = False
allowed_types = [
'int', 'float', 'long', 'str', 'bool',
'numpy.float32', 'numpy.float64',
'numpy.int8', 'numpy.int16', 'numpy.int32', 'numpy.int64',
'numpy.uint8', 'numpy.uint16', 'numpy.uint32', 'numpy.uint64',
'index', 'length', 'uuid'
]
default_store_chunk_size = 256
_log_debug = False
class DictDelegator(object):
def __init__(self, store, dct):
self.prefix = store.prefix + '_'
self.dct = dct
def __getitem__(self, item):
return self.dct[self.prefix + item]
def __contains__(self, item):
return (self.prefix + item) in self.dct
def prefix_delegate(self, dct):
return ObjectStore.DictDelegator(self, dct)
default_cache = 10000
def __init__(self, content_class, json=True, nestable=False):
"""
Parameters
----------
content_class
json : bool or str `json` or `jsonobj`
if `False` the store will not create a json variable for
serialization; if `True` the store will use json pickling to
store objects, and a single storable object will be serialized and
not referenced. If a string is given the string is taken as the
variable type of the json variable. Here only two values are
allowed: `jsonobj` (equivalent to `True`) or `json` which will
also reference directly given storable objects.
nestable : bool
if `True` this marks the content_class to be saved as nested dict
objects rather than as pointers to saved objects. So the saved complex
object is only stored once and not split into several objects that
are referenced by each other in a tree-like fashion
Notes
-----
Usually you want caching, but limited. An LRUCache with a reasonable
maximum number of objects is recommended; the right size depends on the
typical number of objects to cache and their size.
The class that takes care of storing data in a file is called a
`Storage`, so the netCDF+ subclassed `Storage` is a storage.
The classes that know how to load and save an object from the storage
are called `Store`, like ObjectStore, SampleStore, etc...
The difference between `json` and `jsonobj` is subtle. Consider
storing a complex object. Then there are two ways to do that.
1. `json`: store a reference to the object (provided it is stored), and
2. `jsonobj`: serialize the object and only use references for contained
objects. All inner objects will always be stored using references.
The only exception is using nestable. Consider objects that contain
references to objects of the same type, like e.g. operations in an
equation (2*3 + 3). Each operation represents a value but each
operation needs values to operate on. To save such an object you have
again two options:
1. `nestable=False`. Store all single objects and always reference
the contained objects. For an equation that would mean to store several
objects `op1 = plus(op2, 3), op2 = times(2, 3)`. Since this is correct
though not intuitive you can also use
2. `nestable=True`. Store all the serialized objects nested into one
object (string). For our example this corresponds to
`plus(times(2,3), 3)`.
"""
super(ObjectStore, self).__init__()
self._storage = None
self.content_class = content_class
self.prefix = None
self.cache = NoCache()
self._free = set()
self._cached_all = False
self.nestable = nestable
self._created = False
self.attribute_list = {}
self.cv = {}
# This will not be stored since its information is contained in the
# dimension names
self._dimension_prefix_store = None
self.variables = dict()
self.vars = dict()
self.units = dict()
self.index = None
self.proxy_index = WeakValueDictionary()
if json in [True, False, 'json', 'jsonobj']:
self.json = json
else:
raise ValueError(
'Valid settings for json are only True, False, `json` or '
'`jsonobj`.')
if self.content_class is not None \
and not issubclass(self.content_class, StorableObject):
raise ValueError(
'Content class "%s" must be subclassed from StorableObject.' %
self.content_class.__name__)
self.fallback_store = None
def is_created(self):
return self._created
def to_dict(self):
return {
'content_class': self.content_class,
'json': self.json,
'nestable': self.nestable
}
def register_fallback(self, store):
self.fallback_store = store
def register(self, storage, prefix):
"""
Associate the object store to a specific storage with a given prefix
Parameters
----------
storage : :class:`openpathsampling.netcdfplus.NetCDFPlus`
the storage to be associated with
prefix : str
the name under which this store's variables are prefixed in the storage
"""
self._storage = storage
self.prefix = prefix
self.variables = self.prefix_delegate(self.storage.variables)
self.units = self.prefix_delegate(self.storage.units)
self.vars = self.prefix_delegate(self.storage.vars)
self.index = self.create_uuid_index()
def create_uuid_index(self):
return HashedList()
def restore(self):
self.load_indices()
def load_indices(self):
self.index.clear()
self.index.extend(self.vars['uuid'][:])
@property
def storage(self):
"""Return the associated storage object
Returns
-------
:class:`openpathsampling.netcdfplus.NetCDFPlus`
the referenced storage object
"""
if self._storage is None:
raise RuntimeError(
'A storage needs to be added to this store to be used! '
'Use .register() to do so.')
return self._storage
def __str__(self):
return repr(self)
def __repr__(self):
return 'store.%s[%s] : %s' % (
self.prefix,
self.content_class.__name__ if self.content_class is not None else
'None/ANY',
str(len(self)) + ' object(s)' if self._created else
'(not created)'
)
@property
def simplifier(self):
"""
Return the simplifier instance used to create JSON serialization
Returns
-------
:class:`openpathsampling.netcdfplus.dictify.StorableObjectJSON`
the simplifier object used in the associated storage
"""
return self.storage.simplifier
def set_caching(self, caching):
"""
Set the caching mode for this store
Parameters
----------
caching : :class:`openpathsampling.netcdfplus.Cache`, bool, int or None
the cache to use: `True` selects a `MaxCache`, `False` a `NoCache`,
an integer a `WeakLRUCache` of that size, and `None` the store's
default cache setting
"""
if caching is None:
caching = self.default_cache
if caching is True:
caching = MaxCache()
elif caching is False:
caching = NoCache()
elif type(caching) is int:
caching = WeakLRUCache(caching)
if isinstance(caching, Cache):
self.cache = caching.transfer(self.cache)
def idx(self, obj):
"""
Return the index in this store for a given object
Parameters
----------
obj : :class:`openpathsampling.netcdfplus.base.StorableObject`
the object that can be stored in this store for which its index is
to be returned
Returns
-------
int or `None`
The integer index of the given object or `None` if it is not
stored yet
"""
return self.index[obj.__uuid__]
def __iter__(self):
"""
Add iteration over all elements in the storage
"""
# we want to iterate in the order objects were saved!
for uuid in self.index._list:
yield self.load(uuid)
def __len__(self):
"""
Return the number of stored objects
Returns
-------
int
number of stored objects
"""
return len(self.storage.dimensions[self.prefix])
def write(self, variable, idx, obj, attribute=None):
if attribute is None:
attribute = variable
var = self.vars[variable]
val = getattr(obj, attribute)
var[int(idx)] = val
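# for lazy variables, swap the freshly stored value for a proxy so the attribute is loaded on demand from now on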
if var.var_type.startswith('lazy'):
proxy = var.store.proxy(val)
if isinstance(obj, LoaderProxy):
# for a loader proxy apply it to the real object
setattr(obj.__subject__, attribute, proxy)
else:
setattr(obj, attribute, proxy)
def proxy(self, item):
"""
Return a proxy of a object for this store
Parameters
----------
item : :py:class:`openpathsampling.netcdfplus.base.StorableObject`
or int The item or index that points to an object in this store
and to which a proxy is requested.
Returns
-------
:class:`openpathsampling.netcdfplus.proxy.LoaderProxy` or None
a proxy pointing to the requested object in this store
"""
if item is None:
return None
try:
idx = item.__uuid__
except AttributeError:
idx = item
# tt = type(item)
# if tt is int:
# idx = self.vars['uuid'][item]
# elif tt is long:
# idx = item
# elif tt in [str, unicode]:
# if item[0] == '-':
# return None
# idx = int(UUID(item))
# else:
#
return LoaderProxy.new(self, idx)
def __contains__(self, item):
if item.__uuid__ in self.index:
return True
if self.fallback_store is not None and item in self.fallback_store:
return True
if self.storage.fallback is not None and item in self.storage.fallback:
return True
return False
def __getitem__(self, item):
"""
Enable numpy style selection of object in the store
"""
try:
if isinstance(item, (long, int)):
if item < 0:
item += len(self)
return self.load(item)
elif type(item) is str:
return self.load(item)
elif type(item) is slice:
return [self.load(idx)
for idx in range(*item.indices(len(self)))]
elif type(item) is list:
return [self.load(idx) for idx in item]
elif item is Ellipsis:
return iter(self)
except KeyError:
return None
def get(self, item):
try:
return self[item]
except KeyError:
return None
def _load(self, idx):
obj = self.vars['json'][idx]
return obj
def clear_cache(self):
"""Clear the cache and force reloading"""
self.cache.clear()
self._cached_all = False
def cache_all(self):
"""Load all samples as fast as possible into the cache"""
if not self._cached_all:
idxs = range(len(self))
jsons = self.variables['json'][:]
[self.add_single_to_cache(i, j) for i, j in zip(
idxs,
jsons)]
self._cached_all = True
def _save(self, obj, idx):
self.vars['json'][idx] = obj
@property
def last(self):
"""
Returns the last stored object. Useful to continue a run.
Returns
-------
:py:class:`openpathsampling.netcdfplus.base.StorableObject`
the last stored object in this store
"""
return self.load(len(self) - 1)
@property
def first(self):
"""
Returns the first stored object.
Returns
-------
:py:class:`openpathsampling.netcdfplus.base.StorableObject`
the actual first stored object
"""
return self.load(0)
def free(self):
"""
Return the number of the next free index for this store
Returns
-------
index : int
the number of the next free index in the storage.
Used to store a new object.
"""
# start at first free position in the storage
idx = len(self)
# # and skip also reserved potential stored ones
# while idx in self._free:
# idx += 1
return idx
# def reserve_idx(self, idx):
# """
# Locks an idx as used
#
# Parameters
# ----------
# idx : int
# the integer index to be reserved
# """
# self._free.add(idx)
#
# def release_idx(self, idx):
# """
# Releases a lock on an idx
#
# Parameters
# ----------
# idx : int
# the integer index to be released
# """
# self._free.discard(idx)
def initialize(self):
"""
Initialize the associated storage to allow for object storage. Mainly
creates an index dimension with the name of the object.
"""
# define dimensions used for the specific object
self.storage.create_dimension(self.prefix, 0)
if self.json:
jsontype = 'jsonobj'
if type(self.json) is str:
jsontype = self.json
self.create_variable(
"json",
jsontype,
description='A json serialized version of the object',
chunksizes=tuple([65536])
)
# TODO: Change to 16byte string
self.create_variable(
"uuid", 'uuid',
description='The uuid of the object',
chunksizes=tuple([65536])
)
self._created = True
# ==========================================================================
# INITIALISATION UTILITY FUNCTIONS
# ==========================================================================
def create_variable(
self,
var_name,
var_type,
dimensions=None,
chunksizes=None,
description=None,
simtk_unit=None,
maskable=False
):
"""
Create a new variable in the netCDF storage. This is just a helper
function to structure the code better.
Parameters
==========
var_name : str
The var_name of the variable to be created
var_type : str
The string representing the type of the data stored in the
variable. Allowed are strings of native python types in which
case the variables will be treated as python or a string of the
form 'numpy.type' which will refer to the numpy data types.
Numpy is preferred since the api to netCDF uses numpy and thus
it is faster. Possible input strings are
`int`, `float`, `long`, `str`, `numpy.float32`, `numpy.float64`,
`numpy.int8`, `numpy.int16`, `numpy.int32`, `numpy.int64`, `json`,
`obj.<store>`, `lazyobj.<store>`
dimensions : str or tuple of str
A tuple representing the dimensions used for the netcdf variable.
If not specified then the default dimension of the storage is used.
If the last dimension is `'...'` then it is assumed that the
objects are of variable length. In netCDF this is usually
referred to as a VLType. We will treat it just as another
dimension, but it can only be the last dimension.
description : str
A string describing the variable in a readable form.
chunksizes : tuple of int
A tuple of ints, one per dimension. This specifies in what
block sizes a variable is stored. Usually for object-related data
we want to store everything belonging to one object at once, so this is often
(1, ..., ...)
simtk_unit : str
A string representing the units used for this variable. Can be
used with all var_types although it makes sense only for numeric
ones.
maskable : bool, default: False
If set to `True` the values in this variable can only partially
exist and if they have not yet been written they are filled with
a fill_value which is treated as a non-set variable. The created
variable will interpret these values as `None` when returned
"""
# add the main dimension to the var_type
if type(dimensions) is str:
dimensions = [dimensions]
if type(dimensions) is int:
if dimensions == 1:
dimensions = ['scalar']
else:
dimensions = [dimensions]
if dimensions is None:
dimensions = (self.prefix,)
else:
dimensions = tuple([self.prefix] + list(dimensions))
store_chunk_size = ObjectStore.default_store_chunk_size
if chunksizes is None and len(dimensions) == 1:
chunksizes = (store_chunk_size, )
elif chunksizes is not None and dimensions[-1] == '...' \
and len(dimensions) == len(chunksizes) + 2:
chunksizes = tuple([store_chunk_size] + list(chunksizes))
elif chunksizes is not None and dimensions[-1] != '...' \
and len(dimensions) == len(chunksizes) + 1:
chunksizes = tuple([store_chunk_size] + list(chunksizes))
if self.dimension_prefix:
dimensions = tuple(
[dimensions[0]] +
[
self.dimension_prefix + dim if type(dim) is str and
dim != '...' else dim for dim in dimensions[1:]
]
)
chunksizes = tuple(
[chunksizes[0]] +
[
self.dimension_prefix + chs
if type(chs) is str else chs for chs in chunksizes[1:]
]
)
self.storage.create_variable(
self.prefix + '_' + var_name,
var_type=var_type,
dimensions=dimensions,
chunksizes=chunksizes,
description=description,
simtk_unit=simtk_unit,
maskable=maskable
)
@property
def dimension_prefix(self):
if self._dimension_prefix_store is not None:
return self._dimension_prefix_store.prefix
else:
return ''
def set_dimension_prefix_store(self, prefix_store=None):
"""
Select which store or none should be used to prefix dimension names
If you want to create multiple instances of a store whose dimensions
should have different lengths, you need unique names for them. This
way you can select a store and the dimensions will be prefixed with
that store's prefix.
Parameters
----------
prefix_store : :obj:`openpathsampling.netcdf.ObjectStore`
the store from which to use its prefix / name to prefix
dimension names
"""
self._dimension_prefix_store = prefix_store
# ==========================================================================
# LOAD/SAVE DECORATORS FOR CACHE HANDLING
# ==========================================================================
def load(self, idx):
"""
Returns an object from the storage.
Parameters
----------
idx : int
the integer index of the object to be loaded
Returns
-------
:py:class:`openpathsampling.netcdfplus.base.StorableObject`
the loaded object
"""
if isinstance(idx, (long, int)):
if idx < 1000000000:
n_idx = idx
elif idx in self.index:
n_idx = self.index[idx]
else:
if self.fallback_store is not None:
return self.fallback_store.load(idx)
elif self.storage.fallback is not None:
return self.storage.fallback.stores[self.name].load(idx)
else:
raise ValueError(
'str %s not found in storage or fallback' % idx)
else:
raise ValueError(
'indices need to be a UUID in long (integer) format or a simple int')
if n_idx < 0:
return None
# if it is in the cache, return it
try:
obj = self.cache[n_idx]
if self._log_debug:
logger.debug(
'Found IDX #' + str(idx) + ' in cache. Not loading!')
return obj
except KeyError:
pass
if self._log_debug:
logger.debug(
'Calling load object of type `%s` @ IDX #%d' %
(self.content_class.__name__, n_idx))
if n_idx >= len(self):
logger.warning(
'Trying to load from IDX #%d > number of objects %d' %
(n_idx, len(self)))
return None
elif n_idx < 0:
logger.warning((
'Trying to load negative IDX #%d < 0. '
'This should never happen!!!') % n_idx)
raise RuntimeError(
'Loading of negative int should result in no object. '
'This should never happen!')
else:
obj = self._load(n_idx)
if self._log_debug:
logger.debug(
'Calling load object of type %s and IDX # %d ... DONE' %
(self.content_class.__name__, n_idx))
if obj is not None:
self._get_id(n_idx, obj)
# update cache there might have been a change due to naming
self.cache[n_idx] = obj
if self._log_debug:
logger.debug(
'Try loading UUID object of type %s and IDX # %d ... DONE' %
(self.content_class.__name__, n_idx))
if self._log_debug:
logger.debug(
'Finished load object of type %s and IDX # %d ... DONE' %
(self.content_class.__name__, n_idx))
return obj
@staticmethod
def reference(obj):
return obj.__uuid__
def remember(self, obj):
"""
Tell a store that an obj should be assumed as stored
This is useful if you do not want to store an object in a specific
store, especially to make sure attributes are not stored multiple times.
Parameters
----------
obj : :py:class:`openpathsampling.netcdfplus.base.StorableObject`
the object to be fake stored
"""
self.index.mark(obj.__uuid__)
def forget(self, obj):
"""
This will revert remembering non-stored objects.
Stored objects cannot be forgotten
Parameters
----------
obj : :py:class:`openpathsampling.netcdfplus.base.StorableObject`
the object to be forgotten
"""
self.index.unmark(obj.__uuid__)
def save(self, obj, idx=None):
"""
Saves an object to the storage.
Parameters
----------
obj : :class:`openpathsampling.netcdfplus.base.StorableObject`
the object to be stored
idx : int or string or `None`
the index to be used for storing. This is highly discouraged since
it changes an immutable object (at least in the storage). It is
better to also store the new object and just ignore the
previously stored one.
"""
uuid = obj.__uuid__
if uuid in self.index:
# has been saved so quit and do nothing
if not self.index[uuid] == -1:
return self.reference(obj)
# numbers other than -1 are reserved for other things
if isinstance(obj, LoaderProxy):
if obj._store is self:
# is a proxy of a saved object so do nothing
return uuid
else:
# it is stored, but not in this store, so we try storing the
# full object, which might still be in cache or memory; if that
# is not the case it will be stored again. This can happen when
# you load from one store, save to another, and after some time
# (once the cache has changed) try to save the loaded object
# again. We will not explicitly store a table that matches
# objects between different storages.
return self.save(obj.__subject__)
if self.fallback_store is not None and \
self.storage.exclude_from_fallback:
if obj in self.fallback_store:
return self.reference(obj)
elif self.storage.fallback is not None and \
self.storage.exclude_from_fallback:
if obj in self.storage.fallback:
return self.reference(obj)
if not isinstance(obj, self.content_class):
raise ValueError((
'This store can only store object of base type "%s". Given '
'obj is of type "%s". You might need to use another store.')
% (self.content_class, obj.__class__.__name__)
)
# n_idx = self.free()
n_idx = len(self.index)
# mark as saved so circular dependencies will not cause infinite loops
self.index.append(uuid)
# make sure in nested saving that an IDX is not used twice!
# self.reserve_idx(n_idx)
logger.debug('Saving ' + str(type(obj)) + ' using IDX #' + str(n_idx))
try:
self._save(obj, n_idx)
self._auto_complete(obj, n_idx)
self.cache[n_idx] = obj
except:
# in case we did not succeed remove the mark as being saved
del self.index[uuid]
raise
# self.release_idx(n_idx)
self._set_id(n_idx, obj)
return self.reference(obj)
def __setitem__(self, key, value):
"""
Enable saving using __setitem__
"""
self.save(value, key)
# def load_single(self, idx):
# return self._load(idx)
#
# def load_range(self, start, end):
# return map(self._load, range(start, end))
def add_single_to_cache(self, idx, json):
"""
Add a single object to cache by json
Parameters
----------
idx : int
the index where the object was stored
json : str
json string that represents a serialized version of the stored object
"""
if idx not in self.cache:
obj = self.simplifier.from_json(json)
self._get_id(idx, obj)
self.cache[idx] = obj
self.index[obj.__uuid__] = idx
return obj
# def uuid(self, uuid):
# """
# Return last object with a given uuid
#
# Parameters
# ----------
# uuid : str
# the uuid to be searched for
#
# Returns
# -------
# :py:class:`openpathsampling.netcdfplus.base.StorableObject`
# the last object with a given uuid. This is to mimic an immutable
# object. Once you (re-)save with the same uuid you replace the old
# one and hence you leed to load the last stored one.
#
# """
# return self.load(uuid)
def _set_id(self, idx, obj):
self.vars['uuid'][idx] = obj.__uuid__
def _get_id(self, idx, obj):
obj.__uuid__ = self.index.index(int(idx))
# CV SUPPORT
def _auto_complete(self, obj, pos):
for attribute, attribute_store in self.attribute_list.items():
if not attribute_store.allow_incomplete:
# value = attribute._cache_dict._get(obj)
# if value is None:
# # not in cache so compute it if possible
# if attribute._eval_dict:
# value = attribute._eval_dict([obj])[0]
value = attribute(obj)
if value is not None:
if attribute_store.allow_incomplete:
attribute_store[obj] = value
else:
n_idx = pos
attribute_store.vars['value'][n_idx] = value
attribute_store.cache[n_idx] = value
def complete_attribute(self, attribute):
"""
Compute all missing values of a CV and store them
Parameters
----------
attribute : :obj:`openpathsampling.netcdfplus.PseudoAttribute`
"""
if attribute not in self.attribute_list:
return
attribute_store = self.attribute_list[attribute]
key_store = self.storage.attributes.key_store(attribute)
if attribute_store.allow_incomplete:
# for complete this does not make sense
# TODO: Make better looping over this to not have
# to load all the indices at once
# can be problematic for 10M+ stored attributes
indices = self.vars['uuid'][:]
for pos, idx in enumerate(indices):
if pos not in attribute_store.index:
# this value is not stored yet, so go ahead and compute it
proxy = LoaderProxy.new(key_store, idx)
# # get from cache first, this is fastest
# value = attribute._cache_dict._get(proxy)
#
# if value is None:
# # not in cache so compute it if possible
# if attribute._eval_dict:
# value = attribute._eval_dict([proxy])[0]
# else:
# value = None
value = attribute(proxy)
if value is not None:
n_idx = attribute_store.free()
attribute_store.vars['value'][n_idx] = value
attribute_store.vars['index'][n_idx] = pos
attribute_store.index[pos] = n_idx
attribute_store.cache[n_idx] = value
def sync_attribute(self, attribute):
"""
Store all cached values of a CV in the diskcache
Parameters
----------
attribute : :obj:`openpathsampling.CollectiveVariable`
"""
if attribute not in self.attribute_list:
return
attribute_store = self.attribute_list[attribute]
# for complete this does not make sense
if attribute_store.allow_incomplete:
# loop all objects in the fast CV cache
for obj, value in iteritems(attribute._cache_dict.cache):
if value is not None:
pos = self.pos(obj)
# if the attribute is not saved, there is nothing we can do
if pos is None:
continue
# if the value is stored, skip it
if pos in attribute_store.index:
continue
n_idx = attribute_store.free()
attribute_store.vars['value'][n_idx] = value
attribute_store.vars['index'][n_idx] = pos
attribute_store.index[pos] = n_idx
attribute_store.cache[n_idx] = value
@staticmethod
def _get_attribute_name(attribute_idx):
return 'attribute' + str(attribute_idx)
def pos(self, obj):
return self.index.get(obj.__uuid__)
def pos_uuid(self, uid):
return self.index.get(uid)
def add_attribute(
self, store_cls, attribute, template,
allow_incomplete=None, chunksize=None):
"""
Parameters
----------
store_cls : :obj:`openpathsampling.netcdfplus.ValueStore`
attribute : :obj:`openpathsampling.CollectiveVariable`
template : :obj:`openpathsampling.engines.Baseattribute`
chunksize : int
allow_incomplete : bool
Returns
-------
:obj:`openpathsampling.netcdfplus.ObjectStore`
int
"""
if attribute in self.attribute_list:
return self.attribute_list[attribute]
key_store = self.storage.attributes.key_store(attribute)
if allow_incomplete is None:
allow_incomplete = attribute.diskcache_allow_incomplete
if chunksize is None:
chunksize = attribute.diskcache_chunksize
if template is None:
template = attribute.diskcache_template
if not allow_incomplete:
# in complete mode we force chunk size one to match it to attributes
# chunksize = self.default_store_chunk_size
chunksize = self.variables['uuid'].chunking()[0]
# determine value type and shape
params = self.storage.get_value_parameters(attribute(template))
shape = params['dimensions']
if shape is None:
chunksizes = None
else:
chunksizes = tuple(params['dimensions'])
# attribute_idx = self.storage.attributes.index[attribute.__uuid__]
value_store = store_cls(
attribute.key_class,
allow_incomplete=allow_incomplete,
chunksize=chunksize
)
store_name = self.name + '_' + attribute.name
self.storage.create_store(store_name, value_store, False)
if value_store.allow_incomplete:
# we are not using the .initialize function here since we
# only have one variable and only here know its shape
self.storage.create_dimension(value_store.prefix, 0)
if shape is not None:
shape = tuple(list(shape))
chunksizes = tuple([chunksize] + list(chunksizes))
else:
shape = tuple()
chunksizes = tuple([chunksize])
# create the variable
value_store.create_variable(
'value',
var_type=params['var_type'],
dimensions=shape,
chunksizes=chunksizes,
simtk_unit=params['simtk_unit'],
)
value_store.create_variable('index', 'index')
else:
# todo: seems to be a bug in NetCDF4. Need to set chunksize to 1
# see Issue https://github.com/Unidata/netcdf4-python/issues/566
# I assume this will still work as expected.
# chunksize = self.default_store_chunk_size
# chunksize = self.variables['uuid'].chunking()[0]
chunksize = 1
if shape is not None:
shape = tuple([self.name] + list(shape))
chunksizes = tuple([chunksize] + list(chunksizes))
else:
shape = tuple([self.name])
chunksizes = tuple([chunksize])
# create the variable
value_store.storage.create_variable(
store_name + '_value',
var_type=params['var_type'],
dimensions=shape,
chunksizes=chunksizes,
simtk_unit=params['simtk_unit'],
)
value_store.initialize()
# the value
self.attribute_list[attribute] = value_store
attribute_idx = self.storage.attributes.index[attribute.__uuid__]
self.storage.attributes.vars['cache'][attribute_idx] = value_store
# use the cache and function of the CV to fill the store when it is made
if not allow_incomplete:
indices = self.vars['uuid'][:]
for pos, idx in enumerate(indices):
proxy = LoaderProxy.new(key_store, idx)
# value = attribute._cache_dict._get(proxy)
#
# if value is None:
# # not in cache so compute it if possible
# if attribute._eval_dict:
# value = attribute._eval_dict([proxy])[0]
# else:
# value = None
value = attribute(proxy)
if value is not None:
value_store.vars['value'][pos] = value
value_store.cache[pos] = value
attribute.set_cache_store(value_store)
return value_store
|
11489885
|
from os import environ
MINECRAFT_POCKET_EDITION = 0
MINECRAFT_PI = 1
MINECRAFT_DESKTOP = 2
minecraftType = MINECRAFT_POCKET_EDITION
try:
minecraftType = int(environ['MINECRAFT_TYPE'])
except (KeyError, ValueError):
pass
isPE = ( minecraftType != MINECRAFT_DESKTOP )
|
11489886
|
import numpy as np
from lifelong_rl.optimizers.random_shooting.rs_optimizer import RSOptimizer
class CEMOptimizer(RSOptimizer):
def __init__(
self,
sol_dim,
num_iters,
population_size,
elites_frac,
cost_function,
upper_bound=1,
lower_bound=-1,
epsilon=1e-3,
polyak=0.2,
):
super().__init__(
sol_dim,
num_iters,
population_size,
cost_function,
upper_bound=upper_bound,
lower_bound=lower_bound,
epsilon=epsilon,
polyak=polyak,
)
self.elites_frac = max(min(elites_frac, 1), .01)
def update_sol(self, costs, samples, noise, init_mean, init_var):
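# Cross-Entropy Method update: keep the elites_frac fraction of samples with
# the lowest cost and refit the sampling distribution (mean and variance) to
# those elites. The remaining arguments are accepted for interface
# compatibility with the parent RSOptimizer.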
elites = samples[np.argsort(costs)][:int(self.elites_frac * self.population_size)]
updated_mean = np.mean(elites, axis=0)
updated_var = np.var(elites, axis=0)
return updated_mean, updated_var
|
11489903
|
import enolib
from enolib import TerminalReporter
from tests.util import snapshot
input = '''
> comment
# section
field: value
list:
- item
- item
> comment
- item
## subsection
fieldset:
entry = value
> comment
entry = value
'''.strip()
def test_terminal_reporter_produces_colored_terminal_output():
document = enolib.parse(input, reporter=TerminalReporter)
snippet = document._context.reporter(document._context).report_element(document._context.document['elements'][0]).snippet()
# Uncomment this to inspect the snippet correctness in a terminal for review
# print(snippet)
assert snippet == snapshot(snippet, 'tests/reporters/snapshots/terminal_reporter_produces_colored_terminal_output.snap.sh')
|
11489957
|
import unittest
import numpy as np
from tf2rl.misc.huber_loss import huber_loss
class TestHuberLoss(unittest.TestCase):
def test_huber_loss(self):
"""Test of huber loss
huber_loss() allows two types of inputs:
- `y_target` and `y_pred`
- `diff`
"""
# [1, 1] -> [0.5, 0.5]
loss = huber_loss(np.array([1., 1.]), delta=1.)
np.testing.assert_array_equal(
np.array([0.5, 0.5]),
loss.numpy())
# [0,0] and [10, 10] -> [9.5, 9.5]
loss = huber_loss(np.array([10., 10.]), delta=1.)
np.testing.assert_array_equal(
np.array([9.5, 9.5]),
loss.numpy())
# [0,0] and [-1, -2] -> [0.5, 1.5]
loss = huber_loss(np.array([-1., -2.]), delta=1.)
np.testing.assert_array_equal(
np.array([0.5, 1.5]),
loss.numpy())
if __name__ == '__main__':
unittest.main()
|
11489978
|
from enum import Enum
from typing import Dict, Type, Tuple
from nonebot.typing import overrides
from nonebot.utils import escape_tag
from nonebot.adapters import Event as BaseEvent
from .message import Message
from .api import Message as GuildMessage
from .api import User, Guild, Member, Channel
from .api import MessageAudited, MessageReaction
class EventType(str, Enum):
# Init Event
READY = "READY"
RESUMED = "RESUMED"
# GUILDS
GUILD_CREATE = "GUILD_CREATE"
GUILD_UPDATE = "GUILD_UPDATE"
GUILD_DELETE = "GUILD_DELETE"
CHANNEL_CREATE = "CHANNEL_CREATE"
CHANNEL_UPDATE = "CHANNEL_UPDATE"
CHANNEL_DELETE = "CHANNEL_DELETE"
# GUILD_MEMBERS
GUILD_MEMBER_ADD = "GUILD_MEMBER_ADD"
GUILD_MEMBER_UPDATE = "GUILD_MEMBER_UPDATE"
GUILD_MEMBER_REMOVE = "GUILD_MEMBER_REMOVE"
# GUILD_MESSAGES
MESSAGE_CREATE = "MESSAGE_CREATE"
# GUILD_MESSAGE_REACTIONS
MESSAGE_REACTION_ADD = "MESSAGE_REACTION_ADD"
MESSAGE_REACTION_REMOVE = "MESSAGE_REACTION_REMOVE"
# DIRECT_MESSAGE
DIRECT_MESSAGE_CREATE = "DIRECT_MESSAGE_CREATE"
# MESSAGE_AUDIT
MESSAGE_AUDIT_PASS = "MESSAGE_AUDIT_PASS"
MESSAGE_AUDIT_REJECT = "MESSAGE_AUDIT_REJECT"
# FORUM_EVENT
THREAD_CREATE = "THREAD_CREATE"
THREAD_UPDATE = "THREAD_UPDATE"
THREAD_DELETE = "THREAD_DELETE"
POST_CREATE = "POST_CREATE"
POST_DELETE = "POST_DELETE"
REPLY_CREATE = "REPLY_CREATE"
REPLY_DELETE = "REPLY_DELETE"
# AUDIO_ACTION
AUDIO_START = "AUDIO_START"
AUDIO_FINISH = "AUDIO_FINISH"
AUDIO_ON_MIC = "AUDIO_ON_MIC"
AUDIO_OFF_MIC = "AUDIO_OFF_MIC"
# AT_MESSAGES
AT_MESSAGE_CREATE = "AT_MESSAGE_CREATE"
class Event(BaseEvent):
__type__: EventType
@overrides(BaseEvent)
def get_event_name(self) -> str:
return self.__type__
@overrides(BaseEvent)
def get_event_description(self) -> str:
return escape_tag(str(self.dict()))
@overrides(BaseEvent)
def get_message(self) -> Message:
raise ValueError("Event has no message!")
@overrides(BaseEvent)
def get_user_id(self) -> str:
raise ValueError("Event has no context!")
@overrides(BaseEvent)
def get_session_id(self) -> str:
raise ValueError("Event has no context!")
@overrides(BaseEvent)
def is_tome(self) -> bool:
return False
# Meta Event
class MetaEvent(Event):
@overrides(BaseEvent)
def get_type(self) -> str:
return "meta_event"
class ReadyEvent(MetaEvent):
__type__ = EventType.READY
version: int
session_id: str
user: User
shard: Tuple[int, int]
class ResumedEvent(MetaEvent):
__type__ = EventType.RESUMED
# Guild Event
class GuildEvent(Event, Guild):
op_user_id: str
@overrides(BaseEvent)
def get_type(self) -> str:
return "notice"
class GuildCreateEvent(GuildEvent):
__type__ = EventType.GUILD_CREATE
class GuildUpdateEvent(GuildEvent):
__type__ = EventType.GUILD_UPDATE
class GuildDeleteEvent(GuildEvent):
__type__ = EventType.GUILD_DELETE
# Channel Event
class ChannelEvent(Event, Channel):
op_user_id: str
@overrides(BaseEvent)
def get_type(self) -> str:
return "notice"
class ChannelCreateEvent(ChannelEvent):
__type__ = EventType.CHANNEL_CREATE
class ChannelUpdateEvent(ChannelEvent):
__type__ = EventType.CHANNEL_UPDATE
class ChannelDeleteEvent(ChannelEvent):
__type__ = EventType.CHANNEL_DELETE
# Guild Member Event
class GuildMemberEvent(Event, Member):
guild_id: str
op_user_id: str
@overrides(BaseEvent)
def get_type(self) -> str:
return "notice"
@overrides(Event)
def get_user_id(self) -> str:
return str(self.user.id) # type: ignore
@overrides(Event)
def get_session_id(self) -> str:
return str(self.user.id) # type: ignore
class GuildMemberAddEvent(GuildMemberEvent):
__type__ = EventType.GUILD_MEMBER_ADD
class GuildMemberUpdateEvent(GuildMemberEvent):
__type__ = EventType.GUILD_MEMBER_UPDATE
class GuildMemberRemoveEvent(GuildMemberEvent):
__type__ = EventType.GUILD_MEMBER_REMOVE
# Message Event
class MessageEvent(Event, GuildMessage):
to_me: bool = False
@overrides(BaseEvent)
def get_type(self) -> str:
return "message"
@overrides(Event)
def get_user_id(self) -> str:
return str(self.author.id) # type: ignore
@overrides(Event)
def get_session_id(self) -> str:
return str(self.author.id) # type: ignore
@overrides(Event)
def get_message(self) -> Message:
if not hasattr(self, "_message"):
setattr(self, "_message", Message.from_guild_message(self))
return getattr(self, "_message")
@overrides(Event)
def is_tome(self) -> bool:
return self.to_me
class MessageCreateEvent(MessageEvent):
__type__ = EventType.MESSAGE_CREATE
class AtMessageCreateEvent(MessageEvent):
__type__ = EventType.AT_MESSAGE_CREATE
to_me: bool = True
class DirectMessageCreateEvent(MessageEvent):
__type__ = EventType.DIRECT_MESSAGE_CREATE
to_me: bool = True
# Message Audit Event
class MessageAuditEvent(Event, MessageAudited):
@overrides(BaseEvent)
def get_type(self) -> str:
return "notice"
class MessageAuditPassEvent(MessageAuditEvent):
__type__ = EventType.MESSAGE_AUDIT_PASS
class MessageAuditRejectEvent(MessageAuditEvent):
__type__ = EventType.MESSAGE_AUDIT_REJECT
# Message Reaction Event
class MessageReactionEvent(Event, MessageReaction):
@overrides(BaseEvent)
def get_type(self) -> str:
return "notice"
@overrides(Event)
def get_user_id(self) -> str:
return str(self.user_id)
@overrides(Event)
def get_session_id(self) -> str:
return str(self.user_id)
class MessageReactionAddEvent(MessageReactionEvent):
__type__ = EventType.MESSAGE_REACTION_ADD
class MessageReactionRemoveEvent(MessageReactionEvent):
__type__ = EventType.MESSAGE_REACTION_REMOVE
# TODO: Audio Event
event_classes: Dict[str, Type[Event]] = {
EventType.READY.value: ReadyEvent,
EventType.RESUMED.value: ResumedEvent,
EventType.GUILD_CREATE.value: GuildCreateEvent,
EventType.GUILD_DELETE.value: GuildDeleteEvent,
EventType.GUILD_UPDATE.value: GuildUpdateEvent,
EventType.CHANNEL_CREATE.value: ChannelCreateEvent,
EventType.CHANNEL_DELETE.value: ChannelDeleteEvent,
EventType.CHANNEL_UPDATE.value: ChannelUpdateEvent,
EventType.GUILD_MEMBER_ADD.value: GuildMemberAddEvent,
EventType.GUILD_MEMBER_UPDATE.value: GuildMemberUpdateEvent,
EventType.GUILD_MEMBER_REMOVE.value: GuildMemberRemoveEvent,
EventType.MESSAGE_CREATE.value: MessageCreateEvent,
EventType.AT_MESSAGE_CREATE.value: AtMessageCreateEvent,
EventType.DIRECT_MESSAGE_CREATE.value: DirectMessageCreateEvent,
EventType.MESSAGE_AUDIT_PASS.value: MessageAuditPassEvent,
EventType.MESSAGE_AUDIT_REJECT.value: MessageAuditRejectEvent,
EventType.MESSAGE_REACTION_ADD.value: MessageReactionAddEvent,
EventType.MESSAGE_REACTION_REMOVE.value: MessageReactionRemoveEvent,
}
__all__ = [
"EventType",
"Event",
"GuildEvent",
"GuildCreateEvent",
"GuildUpdateEvent",
"GuildDeleteEvent",
"ChannelEvent",
"ChannelCreateEvent",
"ChannelUpdateEvent",
"ChannelDeleteEvent",
"GuildMemberEvent",
"GuildMemberAddEvent",
"GuildMemberUpdateEvent",
"GuildMemberRemoveEvent",
"MessageEvent",
"MessageCreateEvent",
"AtMessageCreateEvent",
"DirectMessageCreateEvent",
"MessageAuditEvent",
"MessageAuditPassEvent",
"MessageAuditRejectEvent",
"MessageReactionEvent",
"MessageReactionAddEvent",
"MessageReactionRemoveEvent",
]
|
11489982
|
import json
import pytest
import requests
from .fixtures import tornado_server, tornado_app, sample_data1_server, sample_data2_server
from .util import (
assert_error, assert_success, assert_created, assert_deleted, Client
)
def test_malformed(sample_data1_server):
client = sample_data1_server
assert_error(client.post("/events", data="Non-JSON"), 400)
assert_error(
client.create(
"/events",
hostname="example",
user="<EMAIL>",
note="This is a test event"
),
400
)
assert_error(
client.create(
"/events",
hostname="example",
category="blah",
user="<EMAIL>",
note="This is a test event"
),
500
)
assert_error(
client.create(
"/events",
hostname="example",
state="blah",
user="<EMAIL>",
note="This is a test event"
),
500
)
def test_creation(sample_data1_server):
client = sample_data1_server
assert_success(
client.get("/eventtypes"),
{
"eventTypes": [{"category": "system-reboot",
"description": "This system requires a reboot.",
"restricted": False,
"id": 1,
"state": "required"},
{"category": "system-reboot",
"description": "This system rebooted.",
"restricted": False,
"id": 2,
"state": "completed"},
{"category": "system-maintenance",
"description": "This system requires maintenance.",
"restricted": False,
"id": 3,
"state": "required"},
{"category": "system-maintenance",
"description": "This system is ready for maintenance.",
"restricted": False,
"id": 4,
"state": "ready"},
{"category": "system-maintenance",
"description": "System maintenance completed.",
"restricted": False,
"id": 5,
"state": "completed"},
{"category": "system-shutdown",
"description": "System shutdown required.",
"restricted": False,
"id": 6,
"state": "required"},
{"category": "system-shutdown",
"description": "System shutdown completed.",
"restricted": False,
"id": 7,
"state": "completed"}],
"limit": 10,
"offset": 0,
"totalEventTypes": 7,
}
)
assert_success(
client.get("/events"),
{
"events": [{"eventTypeId": 1,
"hostId": 1,
"id": 1,
"note": "example needs a reboot",
"user": "<EMAIL>"},
{"eventTypeId": 2,
"hostId": 1,
"id": 2,
"note": "example needs a rebooted",
"user": "<EMAIL>"}],
"limit": 10,
"offset": 0,
"totalEvents": 2
},
strip=["timestamp"]
)
assert_created(
client.create(
"/events",
hostname="example",
user="<EMAIL>",
eventTypeId=1,
note="This is a test event"
),
"/api/v1/events/3"
)
assert_success(
client.get("/events/3"),
{
"id": 3,
"hostId": 1,
"note": "This is a test event",
"eventTypeId": 1,
"user": "<EMAIL>"
},
strip=["timestamp"]
)
assert_created(
client.create(
"/events",
hostname="example",
user="<EMAIL>",
category="system-reboot",
state="completed",
note="This is another test event"
),
"/api/v1/events/4"
)
assert_success(
client.get("/events/4"),
{
"id": 4,
"hostId": 1,
"note": "This is another test event",
"eventTypeId": 2,
"user": "<EMAIL>"
},
strip=["timestamp"]
)
def test_update(sample_data1_server):
client = sample_data1_server
assert_created(
client.create(
"/events",
hostname="example",
user="<EMAIL>",
eventTypeId=1,
note="This is a test event"
),
"/api/v1/events/3"
)
assert_success(
client.get("/events/3"),
{
"id": 3,
"hostId": 1,
"note": "This is a test event",
"eventTypeId": 1,
"user": "<EMAIL>"
},
strip=["timestamp"]
)
assert_error(client.put("/events/3", json={"note": "New note"}), 405)
def test_multi_host_events(sample_data1_server):
client = sample_data1_server
result = client.create(
"/events",
hostnames=["example","sample","test"],
user="<EMAIL>",
eventTypeId=1,
note="This is a test event"
)
result_json = result.json()
assert result_json['totalEvents'] == 3
def test__before_after_query(sample_data1_server):
client = sample_data1_server
event2 = client.get("/events/2").json()
assert event2['timestamp'] is not None
assert_created(
client.create(
"/events",
hostname="example",
user="<EMAIL>",
eventTypeId=1,
note="This is a test event"
),
"/api/v1/events/3"
)
new_event = client.get("/events/3").json()
new_timestamp = new_event['timestamp']
result = client.get("/events/?after={}".format(new_timestamp)).json()
assert result['totalEvents'] == 1
result = client.get("/events/?before={}".format(new_timestamp)).json()
assert result['totalEvents'] == 2
def test_after_event_type_query(sample_data2_server):
client = sample_data2_server
assert_success(
client.get("/events"),
{
"limit": 10,
"offset": 0,
"totalEvents": 15
},
strip=["timestamp", "events"]
)
assert_success(
client.get("/events?hostname=example&afterEventType=3"),
{
"limit": 10,
"offset": 0,
"totalEvents": 3
},
strip=["timestamp", "events"]
)
def test_after_event_id_query(sample_data2_server):
"""
Test the afterEventId param to the events endpoint.
"""
client = sample_data2_server
assert_success(
client.get("/events"),
{
"limit": 10,
"offset": 0,
"totalEvents": 15
},
strip=["timestamp", "events"]
)
# There are 8 events with hostname=example in the data set,
# with ID values 1-8. After event Id 2, there should be 7 total events.
assert_success(
client.get("/events?hostname=example&afterEventId=2"),
{
"limit": 10,
"offset": 0,
"totalEvents": 7
},
strip=["timestamp", "events"]
)
# After event Id 3 there should be 6 total events.
assert_success(
client.get("/events?hostname=example&afterEventId=3"),
{
"limit": 10,
"offset": 0,
"totalEvents": 6
},
strip=["timestamp", "events"]
)
# Test with the after argument.
assert_success(
client.get("/events?hostname=example&afterEventId=2&after=1970-01-01"),
{
"limit": 10,
"offset": 0,
"totalEvents": 7
},
strip=["timestamp", "events"]
)
# Test with the before argument.
assert_success(
client.get("/events?hostname=example&afterEventId=2&before=2050-01-01"),
{
"limit": 10,
"offset": 0,
"totalEvents": 7
},
strip=["timestamp", "events"]
)
# Test with the before and after argument.
assert_success(
client.get("/events?hostname=example&afterEventId=2&after=1970-01-01&before=2050-01-01"),
{
"limit": 10,
"offset": 0,
"totalEvents": 7
},
strip=["timestamp", "events"]
)
# Test with the before and after argument, with no events.
assert_success(
client.get("/events?hostname=example&afterEventId=2&after=1970-01-01&before=1970-02-01"),
{
"limit": 10,
"offset": 0,
"totalEvents": 0
},
strip=["timestamp", "events"]
)
def test_count_events_disabled(sample_data2_server):
client = sample_data2_server
client.tornado_server.tornado_app.my_settings["count_events"] = False
assert_success(
client.get("/events"),
{
"limit": 10,
"offset": 0,
},
strip=["timestamp", "events"]
)
|
11489989
|
import json
from django import forms
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from u2flib_server import u2f
class SecondFactorForm(forms.Form):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.request = kwargs.pop('request')
self.appId = kwargs.pop('appId')
return super(SecondFactorForm, self).__init__(*args, **kwargs)
class KeyResponseForm(SecondFactorForm):
response = forms.CharField()
def __init__(self, *args, **kwargs):
super(KeyResponseForm, self).__init__(*args, **kwargs)
if self.data:
self.sign_request = self.request.session['u2f_sign_request']
else:
self.sign_request = u2f.begin_authentication(self.appId, [
d.to_json() for d in self.user.u2f_keys.all()
])
self.request.session['u2f_sign_request'] = self.sign_request
def validate_second_factor(self):
response = json.loads(self.cleaned_data['response'])
try:
device, login_counter, _ = u2f.complete_authentication(self.sign_request, response)
# TODO: store login_counter and verify it's increasing
device = self.user.u2f_keys.get(key_handle=device['keyHandle'])
device.last_used_at = timezone.now()
device.save()
del self.request.session['u2f_sign_request']
return True
except ValueError:
self.add_error('__all__', 'U2F validation failed -- bad signature.')
return False
class KeyRegistrationForm(SecondFactorForm):
response = forms.CharField()
class BackupCodeForm(SecondFactorForm):
INVALID_ERROR_MESSAGE = _("That is not a valid backup code.")
code = forms.CharField(label=_("Code"), widget=forms.TextInput(attrs={'autocomplete': 'off'}))
def validate_second_factor(self):
count, _ = self.user.backup_codes.filter(code=self.cleaned_data['code']).delete()
if count == 0:
self.add_error('code', self.INVALID_ERROR_MESSAGE)
return False
elif count == 1:
return True
else:
assert False, \
"Impossible, there should never be more than one object with the same code."
class TOTPForm(SecondFactorForm):
INVALID_ERROR_MESSAGE = _("That token is invalid.")
token = forms.CharField(
min_length=6,
max_length=6,
label=_("Token"),
widget=forms.TextInput(attrs={'autocomplete': 'off'})
)
def validate_second_factor(self):
for device in self.user.totp_devices.all():
if device.validate_token(self.cleaned_data['token']):
device.last_used_at = timezone.now()
device.save()
return True
self.add_error('token', self.INVALID_ERROR_MESSAGE)
return False
|
11489993
|
import sys
class Progress:
def __init__(self, total, message='progress'):
self.message = message
self.total = total
self.current = 0
def next(self, step=1, extra_message=''):
bar_length, status = 20, ""
self.current += step
progress = float(self.current) / float(self.total)
if progress >= 1.:
progress = 1
block = int(round(bar_length * progress))
text = "\r{} [{}] {:.0f}% {} {}".format(self.message,
"#" * block + "-" * (bar_length - block), round(progress * 100, 0),
status, extra_message)
sys.stdout.write(text)
sys.stdout.flush()
@staticmethod
def finish():
sys.stdout.write("\n")
sys.stdout.flush()
@staticmethod
def print_counter(counter, message):
text = "\r{}: {}".format(message, counter)
sys.stdout.write(text)
sys.stdout.flush()
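# Minimal usage sketch for the class above (loop count is illustrative):
#     progress = Progress(total=100, message='processing')
#     for _ in range(100):
#         progress.next()
#     Progress.finish()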
|
11490017
|
from aiogram.types import InlineKeyboardMarkup
from aiogram.types import InlineKeyboardButton
from aiogram.types import ChatType
from bot.platforms.telegram.utilities.keyboards import cancel_button
from bot.utilities.types import Command
def login_way_chooser(is_old: bool, chat_type: ChatType) -> InlineKeyboardMarkup:
setup_way_chooser_keyboard: InlineKeyboardMarkup = InlineKeyboardMarkup(row_width=1)
if is_old:
setup_way_chooser_keyboard.row(cancel_button())
if chat_type == ChatType.PRIVATE:
setup_way_chooser_keyboard.add(
InlineKeyboardButton(text="по логину-паролю от ББ", callback_data=Command.LOGIN_BB.value),
InlineKeyboardButton(text="по номеру группы", callback_data=Command.LOGIN_COMPACT.value)
)
else:
setup_way_chooser_keyboard.row(InlineKeyboardButton(text="продолжить", callback_data=Command.LOGIN_COMPACT.value))
return setup_way_chooser_keyboard
def againer() -> InlineKeyboardMarkup:
againer_keyboard: InlineKeyboardMarkup = InlineKeyboardMarkup(row_width=2)
againer_keyboard.add(
cancel_button(),
InlineKeyboardButton(text="продолжить", callback_data=Command.LOGIN_COMPACT.value)
)
return againer_keyboard
def guess_approver() -> InlineKeyboardMarkup:
guess_approver_keyboard: InlineKeyboardMarkup = InlineKeyboardMarkup(row_width=2)
guess_approver_keyboard.row(cancel_button())
guess_approver_keyboard.add(
InlineKeyboardButton(text="нет", callback_data=Command.LOGIN_WRONG_GROUP_GUESS.value),
InlineKeyboardButton(text="да", callback_data=Command.LOGIN_CORRECT_GROUP_GUESS.value)
)
return guess_approver_keyboard
|
11490025
|
cn_formats = [
"rdf-xml",
"turtle",
"citeproc-json",
"citeproc-json-ish",
"text",
"ris",
"bibtex",
"crossref-xml",
"datacite-xml",
"bibentry",
"crossref-tdm",
]
cn_format_headers = {
"rdf-xml": "application/rdf+xml",
"turtle": "text/turtle",
"citeproc-json": "transform/application/vnd.citationstyles.csl+json",
"text": "text/x-bibliography",
"ris": "application/x-research-info-systems",
"bibtex": "application/x-bibtex",
"crossref-xml": "application/vnd.crossref.unixref+xml",
"datacite-xml": "application/vnd.datacite.datacite+xml",
"bibentry": "application/x-bibtex",
"crossref-tdm": "application/vnd.crossref.unixsd+xml",
}
cn_types = {
"rdf-xml": "text/xml",
"turtle": "text/plain",
"citeproc-json": "application/json",
"citeproc-json-ish": "application/json",
"text": "text/plain",
"ris": "text/plain",
"bibtex": "text/plain",
"crossref-xml": "text/xml",
"datacite-xml": "text/xml",
"bibentry": "text/plain",
"crossref-tdm": "text/xml",
}
|
11490067
|
import codecs
from reamber.algorithms.convert.ConvertBase import ConvertBase
from reamber.bms.BMSMap import BMSMap
from reamber.bms.lists.BMSBpmList import BMSBpmList
from reamber.bms.lists.notes.BMSHitList import BMSHitList
from reamber.bms.lists.notes.BMSHoldList import BMSHoldList
from reamber.quaver.QuaMap import QuaMap
class QuaToBMS(ConvertBase):
@classmethod
def convert(cls, qua: QuaMap, move_right_by: int = 0) -> BMSMap:
""" Converts qua to a BMS map
Note that column 0 is the scratch column, e.g. if you're converting a 7K map you should have
``move_right_by == 1`` so that the first column is not the scratch. A short usage sketch follows the class.
:param move_right_by: Moves every column to the right by this many columns
:param qua: the source Quaver map
:return: the converted BMS map
"""
bms = BMSMap()
bms.hits = cls.cast(qua.hits, BMSHitList, dict(offset='offset', column='column'))
bms.holds = cls.cast(qua.holds, BMSHoldList, dict(offset='offset', column='column', length='length'))
bms.bpms = cls.cast(qua.bpms, BMSBpmList, dict(offset='offset', bpm='bpm'))
bms.stack().column += move_right_by
bms.title = codecs.encode(qua.title, encoding='shift_jis')
bms.artist = codecs.encode(qua.artist, encoding='shift_jis')
bms.version = codecs.encode(qua.difficulty_name, encoding='shift_jis')
return bms
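# Minimal usage sketch (file paths are hypothetical, and the reader/writer
# calls assume reamber's usual QuaMap/BMSMap file API):
#     qua = QuaMap.read_file('chart.qua')
#     bms = QuaToBMS.convert(qua, move_right_by=1)  # keep column 0 free for scratch
#     bms.write_file('chart.bme')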
|
11490108
|
from django.contrib import admin
from oldp.apps.topics.models import Topic
@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
date_hierarchy = 'updated_date'
list_display = ('title', 'created_date', 'updated_date')
search_fields = ['title']
|
11490243
|
import numpy as np
import pyqg
import pytest
from pyqg.diagnostic_tools import calc_ispec
def test_calc_ispec():
# Create a radial sine wave spiraling out from the center of the model's
# spatial field (with a given frequency)
m = pyqg.QGModel()
radius = np.sqrt((m.x-m.x.mean())**2 + (m.y-m.y.mean())**2)
frequency = m.k[0][20]
radial_sine = np.sin(radius * frequency)
# Take its FFT
radial_sine_fft = m.fft(np.array([radial_sine, radial_sine]))[0]
# Compute an isotropic spectrum
iso_wavenumbers, iso_spectrum = calc_ispec(m, radial_sine_fft)
# Its peak should be at the closest frequency to the true frequency
spectrum_peak_idx = np.argmax(iso_spectrum)
sinewave_freq_idx = np.argmin(np.abs(iso_wavenumbers - frequency))
assert spectrum_peak_idx == sinewave_freq_idx
|
11490279
|
import sys
import random
def gen_labels(inputFile, outputFile, l_num):
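# Read an edge list (one "u v" pair per line), infer the number of vertices,
# and write a uniformly random label in [0, l_num) for every vertex id.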
v_num = 0
with open(inputFile, 'r') as fin:
for line in fin:
u,v = map(int, line.strip().split())
v_num = max(v_num, max(u, v))
v_num += 1
with open(outputFile, 'w') as fout:
for u in range(v_num):
l = random.randint(0, l_num - 1)
fout.write(str(u) + ' ' + str(l) + '\n')
def trans_newid_labels(labelFile, idFile, outputFile):
v_num = 0
org2newid = {}
with open(idFile, 'r') as fin:
for line in fin:
org_id, new_id = map(int, line.rstrip().split())
org2newid[org_id] = new_id
v_num = max(v_num, org_id)
v_num += 1
newid2label = {}
with open(labelFile, 'r') as fin:
for line in fin:
org_id,label = map(int, line.strip().split())
newid2label[org2newid[org_id]] = label
with open(outputFile, 'w') as fout:
for u in range(v_num):
new_label = newid2label[u]
fout.write(str(u) + ' ' + str(new_label) + '\n')
if __name__ == "__main__":
if (sys.argv[1] == '-l'):
gen_labels(sys.argv[2], sys.argv[3], 100)
elif (sys.argv[1] == '-t'):
trans_newid_labels(sys.argv[2], sys.argv[3], sys.argv[4])
|
11490303
|
import torch.nn as nn
import torch.optim as optim
import sys
sys.path.append('../vanilla_densenet_small/')
from densenet import DenseNet
def get_model(learning_rate=1e-3):
model = DenseNet(
growth_rate=12, block_config=(8, 12, 10),
num_init_features=48, bn_size=4, drop_rate=0.25,
final_drop_rate=0.25, num_classes=200
)
# set the first layer not trainable
model.features.conv0.weight.requires_grad = False
# the last fc layer
weights = [
p for n, p in model.named_parameters()
if 'classifier.weight' in n
]
biases = [model.classifier.bias]
# all conv layers except the first
weights_to_be_quantized = [
p for n, p in model.named_parameters()
if 'conv' in n and ('dense' in n or 'transition' in n)
]
# parameters of batch_norm layers
bn_weights = [
p for n, p in model.named_parameters()
if 'norm' in n and 'weight' in n
]
bn_biases = [
p for n, p in model.named_parameters()
if 'norm' in n and 'bias' in n
]
params = [
{'params': weights, 'weight_decay': 1e-4},
{'params': weights_to_be_quantized},
{'params': biases},
{'params': bn_weights},
{'params': bn_biases}
]
optimizer = optim.Adam(params, lr=learning_rate)
loss = nn.CrossEntropyLoss().cuda()
model = model.cuda() # move the model to gpu
return model, loss, optimizer
|
11490316
|
import sqlite3
import pickle
from datetime import datetime
import logging
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
class BlackjackSQL(object):
def __init__(self, filename):
self.sql = sqlite3.connect(filename)
self.cursor = self.sql.cursor()
self.build_db()
def build_db(self):
"""
Run all commands necessary to build database from scratch
:return: None
"""
self.cursor.execute('''CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY,
reddit_name TEXT,
bankroll INTEGER DEFAULT 500,
created_date TEXT)''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS games (
game_id INTEGER PRIMARY KEY,
user_id INTEGER,
bet INTEGER,
pickled_game TEXT,
created_date TEXT,
completed_date TEXT,
FOREIGN KEY(user_id) REFERENCES users(user_id));''')
self.cursor.execute('''PRAGMA foreign_keys = ON;''')
def get_user(self, name):
self.try_insert_new_user(name)
query = '''SELECT user_id, reddit_name, bankroll FROM users WHERE reddit_name = ?'''
self.cursor.execute(query, (name,))
user_id, name, bankroll = self.cursor.fetchone()
game = self.get_current_game(user_id)
return User(user_id, name, bankroll, game)
def try_insert_new_user(self, name):
query = '''SELECT EXISTS (SELECT 1 FROM users WHERE reddit_name = ? LIMIT 1);'''
self.cursor.execute(query, (name,)) # Returns 1 or 0
user_exists = self.cursor.fetchone()[0]
if not user_exists:
logging.info('Creating new user: %s', name)
self.cursor.execute('INSERT INTO users (reddit_name, created_date) VALUES (?,?)',
(name, datetime.now().isoformat()))
self.sql.commit()
def get_current_game(self, user_id):
query = '''SELECT pickled_game FROM games WHERE user_id = ? and completed_date is null'''
self.cursor.execute(query, (user_id,))
try:
return pickle.loads(self.cursor.fetchone()[0])
except TypeError:
return None
def insert_new_game(self, user):
self.cursor.execute('INSERT INTO games (user_id, created_date) VALUES (?,?)',
(user.user_id, datetime.now().isoformat()))
self.sql.commit()
return self.cursor.lastrowid
def store_hand_state(self, user):
if user.game.game_complete:
self.cursor.execute('UPDATE games SET pickled_game=?, completed_date=? where game_id=?',
(pickle.dumps(user.game), datetime.now().isoformat(), user.game.game_id))
else:
self.cursor.execute('UPDATE games SET pickled_game=? where game_id=?',
(pickle.dumps(user.game), user.game.game_id))
self.sql.commit()
def pay_user(self, user):
self.cursor.execute('UPDATE users SET bankroll=? where user_id=?',
(user.bankroll + user.game.payout, user.user_id))
self.sql.commit()
def charge_user(self, user):
self.cursor.execute('UPDATE users SET bankroll=? where user_id=?',
(user.bankroll - user.game.bet, user.user_id))
self.sql.commit()
class User(object):
def __init__(self, user_id, name, bankroll, game=None):
self.user_id = user_id
self.name = name
self.bankroll = bankroll
self.game = game
self.history = None
|
11490336
|
import msgDB
import _thread
import requests
import urllib3
import random
import time
import sys
import os
urllib3.disable_warnings()
def rest_program():
print('restart')
#python = sys.executable
#os.execl(python, python, * sys.argv)
#return "a"
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def local_picture(name):
img_url ='https://api.dongmanxingkong.com/suijitupian/acg/1080p/index.php'
img = requests.get(img_url,verify=False)
f = open('C:\\pic\\'+str(name)+'.jpg','ab') # save the image; binary (multimedia) files need the 'b' mode flag
f.write(img.content) # write the binary content of the response
f.close()
def out():
time.sleep(36000)
msgDB.initDB()
msgDB.delMsg()
#_thread.start_new_thread(out,())
#for i in range(1000):# clear all pending messages
# msgDB.delMsg()
for i in range(1000):
try:
res=msgDB.listen_wxMsg()
if res==False:# no message received
continue
if res[3]=="菜单":
print(res[0])
msgDB.send_wxMsg(res[0],'''功能列表:
1.汤圆刷数据
2.小姐姐连抽
3.待开发''')
msgDB.delMsg()
continue
if res[3].split()[0]=="小姐姐连抽":
print(res[0])
if len(res[3].split())!=2 or is_int(res[3].split()[1])==False:
msgDB.send_wxMsg(res[0],"参数错误")
msgDB.delMsg()
continue
for i in range(int(res[3].split()[1])):
local_picture("test")
msgDB.send_wxPicture(res[0],"C:\\pic\\"+str(random.randint(0,1000))+".jpg")
#msgDB.send_wxPicture(res[0],"C:\\1.jpg")
msgDB.delMsg()
continue
if res[3]=="debug":
msgDB.delMsg()
continue
if res[3]=="rst":
exit()
continue
msgDB.delMsg()
except Exception:
print("error")
#msgDB.delMsg()
#msgDB.endDb()
#msgDB.initDB()
exit()
|
11490398
|
from docx import Document
from docxcompose.composer import Composer
from utils import ComposedDocument
from utils import docx_path
from utils import FixtureDocument
import pytest
def test_contains_predefined_styles_in_masters_language(merged_styles):
style_ids = [s.style_id for s in merged_styles.doc.styles]
assert 'Heading1' in style_ids
assert 'Heading2' in style_ids
assert 'Strong' in style_ids
assert 'Quote' in style_ids
def test_does_not_contain_predefined_styles_in_appended_language(merged_styles):
style_ids = [s.style_id for s in merged_styles.doc.styles]
assert 'berschrift1' not in style_ids
assert 'berschrift2' not in style_ids
assert 'Fett' not in style_ids
assert 'Zitat' not in style_ids
def test_contains_custom_styles_from_both_docs(merged_styles):
style_ids = [s.style_id for s in merged_styles.doc.styles]
assert 'MyStyle1' in style_ids
assert 'MyStyle1Char' in style_ids
assert 'MeineFormatvorlage' in style_ids
assert 'MeineFormatvorlageZchn' in style_ids
def test_contains_linked_styles(merged_styles):
style_ids = [s.style_id for s in merged_styles.doc.styles]
assert 'QuoteChar' in style_ids
def test_merged_styles_de():
doc = FixtureDocument("styles_de.docx")
composed = ComposedDocument(
"styles_de.docx", "styles_en.docx")
assert composed == doc
def test_merged_styles_en():
doc = FixtureDocument("styles_en.docx")
composed = ComposedDocument(
"styles_en.docx", "styles_de.docx")
assert composed == doc
def test_styles_are_not_switched_for_first_numbering_element():
doc = FixtureDocument("switched_listing_style.docx")
composed = ComposedDocument(
"master_switched_listing_style.docx", "switched_listing_style.docx")
assert composed == doc
@pytest.fixture
def merged_styles():
composer = Composer(Document(docx_path("styles_en.docx")))
composer.append(Document(docx_path("styles_de.docx")))
return composer
|
11490405
|
import math
from pandac.PandaModules import NodePath, Point3
from . import PartyGlobals
inverse_e = 1.0 / math.e
def getCogDistanceUnitsFromCenter(distance):
return int(round(distance * (PartyGlobals.CogActivityArenaLength / 2.0)))
class CameraManager:
nextID = 0
def __init__(self, cameraNP):
self.cameraNP = cameraNP
self.id = CameraManager.nextID
CameraManager.nextID += 1
self.otherNP = render
self.lookAtNP = NodePath('CameraManager%d.lookAtNP' % self.id)
self.lookAtEnabled = False
self.targetPos = Point3(0.0, 0.0, 0.0)
self.targetLookAtPos = Point3(0.0, 1.0, 0.0)
self.enabled = False
self.rate = 10.0
def destroy(self):
if self.enabled:
self.setEnabled(False)
self.lookAtNP.removeNode()
del self.lookAtNP
del self.targetPos
del self.targetLookAtPos
del self.otherNP
def setEnabled(self, enabled):
if enabled != self.enabled:
if enabled:
taskMgr.add(self.updateTask, 'CameraManager%d.update' % self.id)
else:
taskMgr.remove('CameraManager%d.update' % self.id)
self.enabled = enabled
def setTargetPos(self, p):
self.targetPos = p
def setPos(self, p):
self.targetPos = p
self.cameraNP.setPos(self.otherNP, p)
def setTargetLookAtPos(self, p):
self.lookAtEnabled = True
self.targetLookAtPos = p
def setLookAtPos(self, p):
self.lookAtEnabled = True
self.targetLookAtPos = p
self.lookAtNP.setPos(p)
def setHpr(self, hpr):
self.lookAtEnabled = False
self.cameraNP.setHpr(self.otherNP, hpr)
def updateTask(self, task):
newCameraPos = self.rateInterpolate(self.cameraNP.getPos(self.otherNP), self.targetPos)
self.cameraNP.setPos(self.otherNP, newCameraPos)
if self.lookAtEnabled:
newLookAtPos = self.rateInterpolate(self.lookAtNP.getPos(self.otherNP), self.targetLookAtPos)
self.lookAtNP.setPos(self.otherNP, newLookAtPos)
self.cameraNP.lookAt(self.lookAtNP)
return task.cont
def rateInterpolate(self, currentPos, targetPos):
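# Frame-rate independent exponential smoothing: inverse_e ** (dt * rate)
# equals e ** (-dt * rate), so the result decays from currentPos toward
# targetPos, approaching faster for larger self.rate.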
dt = globalClock.getDt()
vec = currentPos - targetPos
return targetPos + vec * inverse_e ** (dt * self.rate)
class StrafingControl:
def __init__(self, player):
self.player = player
self.defaultOffset = Point3(1.0, -7.5, self.player.toon.getHeight() + 1.0)
def destroy(self):
self.player = None
del self.player
self.defaultOffset = None
del self.defaultOffset
return
def update(self):
self.player.tempNP.setPos(self.player.locator, self.player.toon.getPos() + self.defaultOffset)
self.player.cameraManager.setTargetPos(self.player.tempNP.getPos(render))
self.player.tempNP.setPos(self.player.locator, self.player.toon.getPos() + self.defaultOffset + Point3(0, 20, 0))
self.player.cameraManager.setTargetLookAtPos(self.player.tempNP.getPos(render))
if not self.player._aimMode and self.player.input.throwPiePressed:
self.toggleAim()
if self.player._aimMode and not self.player.input.throwPiePressed and (self.player.input.upPressed or self.player.input.downPressed or self.player.input.leftPressed or self.player.input.rightPressed):
self.toggleAim()
if not self.player._aimMode:
if not (self.player.input.upPressed or self.player.input.downPressed or self.player.input.leftPressed or self.player.input.rightPressed):
self.player.faceForward()
return
if self.player.input.throwPiePressed:
self.player.gui.updatePiePowerMeter(self.player.getPieThrowingPower(globalClock.getFrameTime()))
def toggleAim(self):
self.player._aimMode = not self.player._aimMode
if not self.player._aimMode:
self.player.orthoWalking = True
self.player.orthoWalk.start()
self.player._rotation = 0.0
self.player._prevRotation = 0.0
self.player.gui.hidePiePowerMeter()
self.player.toon.setH(0.0)
else:
self.player.orthoWalk.stop()
self.player.orthoWalking = False
self.player.toon.setH(0.0)
self.player.gui.showPiePowerMeter()
def enable(self):
self.player._aimMode = False
camera.wrtReparentTo(self.player.locator)
self.player.cameraManager.setEnabled(True)
activityView = self.player.activity.view
pos = activityView.teamCamPosLocators[self.player.team].getPos(render)
aim = activityView.teamCamAimLocators[self.player.team].getPos(render)
self.player.cameraManager.setPos(pos)
self.player.cameraManager.setLookAtPos(aim)
self.player.tempNP.reparentTo(self.player.locator)
self.player.tempNP.setPos(self.player.locator, self.player.toon.getPos() + self.defaultOffset)
self.player.cameraManager.setTargetPos(self.player.tempNP.getPos(render))
|
11490428
|
import argparse
import os
import json
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
def load_qid2query(filename):
qid2query = {}
with open(filename, 'r') as f:
for l in f:
l = l.strip().split('\t')
qid2query[int(l[0])] = l[1]
return qid2query
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--collection_file",
default="data/collection.tsv",
type=str,
help="The msmarco passage collection file",
)
parser.add_argument(
"--model_name_or_path",
type=str,
default=None,
help="Doc2Query predictions",
)
parser.add_argument(
"--augmented_collection_file",
type=str,
default="data/augmented_collection.jsonl",
help="The output_file for augmented doc 2 query index",
)
parser.add_argument(
"--beam_size",
type=int,
default=3,
help="number of queries to generate per passage",
)
parser.add_argument(
"--max_length",
type=int,
default=32,
help="length of document queries",
)
parser.add_argument(
'--no_cuda',
action="store_true",
help="Use this to not use cuda")
args = parser.parse_args()
print("Loading collection")
collection = load_qid2query(args.collection_file)
print("Collection loaded")
device='cuda'
if args.no_cuda:
device='cpu'
print("Loading model")
config = AutoConfig.from_pretrained(args.model_name_or_path,)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path,)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
model.to(device)
model.resize_token_embeddings(len(tokenizer))
print("Model Loaded")
print("Augmenting passages")
augmentations = 0
#TODO Introduce batching at inference time as right now runs 1 by 1
with open(args.augmented_collection_file, 'w') as w:
for doc_id in collection:
if augmentations % 5000 == 0:
print("{} passages augmented".format(augmentations))
document_text = collection[doc_id]
input_ids = tokenizer.encode(document_text, return_tensors='pt').to(device)
outputs = model.generate(
input_ids=input_ids,
max_length=args.max_length,
do_sample=True,
top_k=10,
num_return_sequences=args.beam_size)
query_augment = ''
for i in range(args.beam_size):
query_augment += ' '
query_augment += tokenizer.decode(outputs[i], skip_special_tokens=True)
output_dict = {'id': doc_id, 'contents': document_text + query_augment}
w.write(json.dumps(output_dict) + '\n')
augmentations += 1
if __name__ == "__main__":
main()
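# Example invocation (script name and model checkpoint are placeholders):
#     python augment_collection.py \
#         --collection_file data/collection.tsv \
#         --model_name_or_path <doc2query-t5-checkpoint> \
#         --beam_size 3 --max_length 32 \
#         --augmented_collection_file data/augmented_collection.jsonl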
|
11490449
|
import http.server
import socketserver
PORT = 89
HOST = "0.0.0.0"
DIRECTORY = '.' # 'livedash' when served from OpenPilot location 'openpilot/selfdrive/livedash'
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=DIRECTORY, **kwargs)
def main():
with socketserver.ThreadingTCPServer((HOST, PORT), Handler) as httpd:
print(f"Serving at host {HOST} port {PORT}")
while True:
httpd.serve_forever()
if __name__ == "__main__":
main()
|
11490545
|
from __future__ import print_function
import lldb
import argparse
def parse_args(raw_args):
"""Parse the arguments given to write"""
# Need to provide 'prog' (name of program) here otherwise
# argparse tries to get it from sys.argv[0], which breaks
# when called in lldb.
parser = argparse.ArgumentParser(
prog='write',
description='Write the output of an lldb command to file'
)
parser.add_argument('filename')
parser.add_argument('command', nargs='+')
args = parser.parse_args(raw_args.split(' '))
# The parser splits the command into a list of strings e.g.
# ['register', 'read']
# we convert it back to a string so we can later pass it to
# lldb for evaluation
args.command = ' '.join(args.command)
return args
def write_to_file(filename, command, output):
"""Write the output to the given file, headed by the command"""
with open(filename, 'w') as f:
f.write("(lldb) " + command + '\n\n')
f.write(output)
def handle_call(debugger, raw_args, result, internal_dict):
"""Receives and handles the call to write from lldb"""
args = parse_args(raw_args)
# Run the command and store the result
res = lldb.SBCommandReturnObject()
interpreter = lldb.debugger.GetCommandInterpreter()
interpreter.HandleCommand(args.command, res)
    # Get the output, falling back to the error text if the command failed
output = res.GetOutput() or res.GetError()
print(output, end='')
write_to_file(args.filename, args.command, output)
def __lldb_init_module(debugger, internal_dict):
"""Initialise the write command within lldb"""
# Tell lldb to import this script and alias it as 'write'.
# > Note: 'write' (from 'write.handle_call') is taken from the
# name of this file
debugger.HandleCommand('command script add -f write.handle_call write')
print('The "write" command has been loaded and is ready for use.')
|
11490566
|
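# Sentences from Han Yu's classical essay 进学解 ("Jin Xue Jie"); the script
# below tallies how many sentences have each character length.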
jinxuejie = [
'国子先生晨入太学',
'招诸生立馆下',
'诲之曰',
'业精于勤荒于嬉',
'行成于思毁于随',
'方今圣贤相逢',
'治具毕张',
'拔去凶邪',
'登崇畯良',
'占小善者率以录',
'名一艺者无不庸',
'爬罗剔抉',
'刮垢磨光',
'盖有幸而获选',
'孰云多而不扬',
'诸生业患不能精',
'无患有司之不明',
'行患不能成',
'无患有司之不公',
'言未既',
'有笑于列者曰',
'先生欺余哉',
'弟子事先生于兹有年矣',
'先生口不绝吟于六艺之文',
'手不停披于百家之编',
'纪事者必提其要',
'纂言者必钩其玄',
'贪多务得',
'细大不捐',
'焚膏油以继晷',
'恒兀兀以穷年',
'先生之业',
'可谓勤矣',
'觝排异端',
'攘斥佛老',
'补苴罅漏',
'张皇幽眇',
'寻坠绪之茫茫',
'独旁搜而远绍',
'障百川而东之',
'回狂澜于既倒',
'先生之于儒',
'可谓有劳矣',
'沉浸醲郁',
'含英咀华',
'作为文章',
'其书满家',
'上规姚姒',
'浑浑无涯',
'周诰殷盘',
'佶屈聱牙',
'春秋谨严',
'左氏浮夸',
'易奇而法',
'诗正而葩',
'下逮庄骚',
'太史所录',
'子云相如',
'同工异曲',
'先生之于文',
'可谓闳其中而肆其外矣',
'少始知学',
'勇于敢为',
'长通于方',
'左右具宜',
'先生之于为人',
'可谓成矣',
'然而公不见信于人',
'私不见助于友',
'跋前踬后',
'动辄得咎',
'暂为御史',
'遂窜南夷',
'三年博士',
'冗不见治',
'命与仇谋',
'取败几时',
'冬暖而儿号寒',
'年丰而妻啼饥',
'头童齿豁',
'竟死何裨',
'不知虑此而反教人为',
'先生曰',
'吁',
'子来前',
'夫大木为杗',
'细木为桷',
'欂栌侏儒',
'椳闑扂楔',
'各得其宜',
'施以成室者',
'匠氏之工也',
'玉札丹砂',
'赤箭青芝',
'牛溲马勃',
'败鼓之皮',
'俱收并蓄',
'待用无遗者',
'医师之良也',
'登明选公',
'杂进巧拙',
'纡馀为妍',
'卓荦为杰',
'校短量长',
'惟器是适者',
'宰相之方也',
'昔者孟轲好辩',
'孔道以明',
'辙环天下',
'卒老于行',
'荀卿守正',
'大论是弘',
'逃谗于楚',
'废死兰陵',
'是二儒者',
'吐辞为经',
'举足为法',
'绝类离伦',
'优入圣域',
'其遇于世何如也',
'今先生学虽勤而不繇其统',
'言虽多而不要其中',
'文虽奇而不济于用',
'行虽修而不显于众',
'犹且月费俸钱',
'岁靡廪粟',
'子不知耕',
'妇不知织',
'乘马从徒',
'安坐而食',
'踵常途之役役',
'窥陈编以盗窃',
'然而圣主不加诛',
'宰臣不见斥',
'兹非其幸欤',
'动而得谤',
'名亦随之',
'投闲置散',
'乃分之宜',
'若夫商财贿之有亡',
'计班资之崇庳',
'忘己量之所称',
'指前人之瑕疵',
'是所谓诘匠氏之不以杙为楹',
'而訾医师以昌阳引年',
'欲进其豨苓也'
]
len_lst = [len(sent) for sent in jinxuejie]
len_set_lst = list(set(len_lst))
# Count how many sentences have each length from 0 to 19 characters
for slen in range(20):
    print(slen, '\t', len_lst.count(slen))
|
11490573
|
import FWCore.ParameterSet.Config as cms
from Configuration.Geometry.GeometryDD4hepExtended2021_cff import *
|
11490585
|
import unittest
from uuid import uuid4
from arangodb.api import Database
from arangodb.orm.fields import NumberField, ForeignKeyField, BooleanField, CharField
from arangodb.orm.models import CollectionModel
from arangodb.query.advanced import Query
class CollectionModelManagerTestCase(unittest.TestCase):
def setUp(self):
self.database_name = 'testcase_collection_model_manager_123'
self.db = Database.create(name=self.database_name)
def tearDown(self):
Database.remove(name=self.database_name)
def test_retrieve_one_specific_model_by_char(self):
class TestModel(CollectionModel):
uuid = CharField(null=False)
TestModel.init()
model1 = TestModel()
model1.uuid = str(uuid4())
model1.save()
model2 = TestModel()
model2.uuid = str(uuid4())
model2.save()
specific_model = TestModel.objects.get(uuid=model2.uuid)
self.assertEqual(specific_model.uuid, model2.uuid)
TestModel.destroy()
def test_retrieve_one_specific_model_by_bool(self):
class TestModel(CollectionModel):
active = BooleanField(null=False)
TestModel.init()
model1 = TestModel()
model1.active = False
model1.save()
model2 = TestModel()
model2.active = True
model2.save()
specific_model1 = TestModel.objects.get(active=model1.active)
specific_model2 = TestModel.objects.get(active=model2.active)
self.assertEqual(specific_model1.document.id, model1.document.id)
self.assertEqual(specific_model2.document.id, model2.document.id)
TestModel.destroy()
def test_queryset_clone(self):
class TestModel(CollectionModel):
active = BooleanField(null=False)
TestModel.init()
model1 = TestModel()
model1.active = False
model1.save()
model2 = TestModel()
model2.active = True
model2.save()
qs1 = TestModel.objects.all()
self.assertEqual(len(qs1), 2)
self.assertEqual(len(qs1._cache), 2)
cloned_qs = qs1._clone()
self.assertEqual(len(cloned_qs._cache), 0)
TestModel.destroy()
def test_retrieve_all_models(self):
class TestModel(CollectionModel):
pass
TestModel.init()
model1 = TestModel()
model1.save()
model2 = TestModel()
model2.save()
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 2)
TestModel.destroy()
def test_filter_directly(self):
class TestModel(CollectionModel):
name = CharField()
TestModel.init()
model1 = TestModel()
model1.name = 'test'
model1.save()
model2 = TestModel()
model2.name = 'foo'
model2.save()
all_models = TestModel.objects.filter(name='foo')
self.assertEqual(len(all_models), 1)
model = all_models[0]
self.assertEqual(model.id, model2.id)
self.assertTrue(isinstance(model, TestModel))
TestModel.destroy()
def test_exclude_directly(self):
class TestModel(CollectionModel):
name = CharField()
TestModel.init()
model1 = TestModel()
model1.name = 'test'
model1.save()
model2 = TestModel()
model2.name = 'foo'
model2.save()
all_models = TestModel.objects.exclude(name='foo')
self.assertEqual(len(all_models), 1)
model = all_models[0]
self.assertEqual(model.id, model1.id)
self.assertTrue(isinstance(model, TestModel))
TestModel.destroy()
def test_iterate_over_queryset(self):
class TestModel(CollectionModel):
pass
TestModel.init()
model1 = TestModel()
model1.save()
model2 = TestModel()
model2.save()
all_models = TestModel.objects.all()
for model in all_models:
self.assertTrue(isinstance(model, TestModel))
TestModel.destroy()
def test_get_value_from_queryset_model(self):
class TestModel(CollectionModel):
text = CharField(null=False)
TestModel.init()
model1 = TestModel()
model1.text = 'dd'
model1.save()
self.assertEqual(model1.text, 'dd')
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 1)
model = all_models[0]
self.assertEqual(model.text, 'dd')
TestModel.destroy()
def test_retrieve_all_models_and_update_one(self):
class TestModel(CollectionModel):
text = CharField(null=False)
TestModel.init()
model1 = TestModel()
model1.text = 'aa'
model1.save()
model2 = TestModel()
model2.text = 'aa'
model2.save()
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 2)
model = all_models[0]
model.text = 'xx'
model.save()
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 2)
TestModel.destroy()
def test_get_or_create_model(self):
class TestModel(CollectionModel):
active = BooleanField(null=False, default=False)
TestModel.init()
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 0)
model, is_created = TestModel.objects.get_or_create(active=True)
self.assertEqual(is_created, True)
self.assertEqual(model.active, True)
model.save()
model, is_created = TestModel.objects.get_or_create(active=True)
self.assertEqual(is_created, False)
self.assertEqual(model.active, True)
all_models = TestModel.objects.all()
self.assertEqual(len(all_models), 1)
TestModel.destroy()
def test_get_or_create_with_foreign_key_model(self):
class ForeignTestModel(CollectionModel):
active = BooleanField(null=False, default=False)
class TestModel(CollectionModel):
active = BooleanField(null=False, default=False)
foreigner = ForeignKeyField(to=ForeignTestModel, related_name='other')
ForeignTestModel.init()
TestModel.init()
normal_model = ForeignTestModel()
normal_model.active = True
normal_model.save()
test_model, is_created = TestModel.objects.get_or_create(foreigner=normal_model)
if is_created:
test_model.save()
TestModel.destroy()
ForeignTestModel.destroy()
def test_order_by_model_field_attribute_asc(self):
class TestModel(CollectionModel):
order = NumberField()
TestModel.init()
model1 = TestModel()
model1.order = 3
model1.save()
model2 = TestModel()
model2.order = 1
model2.save()
model3 = TestModel()
model3.order = 2
model3.save()
all_models = TestModel.objects.all().order_by(field='order', order=Query.SORTING_ASC)
self.assertEqual(len(all_models), 3)
model = all_models[0]
self.assertEqual(model.id, model2.id)
model = all_models[1]
self.assertEqual(model.id, model3.id)
model = all_models[2]
self.assertEqual(model.id, model1.id)
TestModel.destroy()
def test_order_by_model_field_attribute_desc(self):
class TestModel(CollectionModel):
order = NumberField()
TestModel.init()
model1 = TestModel()
model1.order = 3
model1.save()
model2 = TestModel()
model2.order = 1
model2.save()
model3 = TestModel()
model3.order = 2
model3.save()
all_models = TestModel.objects.all().order_by(field='order', order=Query.SORTING_DESC)
self.assertEqual(len(all_models), 3)
model = all_models[0]
self.assertEqual(model.id, model1.id)
model = all_models[1]
self.assertEqual(model.id, model3.id)
model = all_models[2]
self.assertEqual(model.id, model2.id)
TestModel.destroy()
def test_limit_model_list(self):
class TestModel(CollectionModel):
order = NumberField()
TestModel.init()
model1 = TestModel()
model1.order = 3
model1.save()
model2 = TestModel()
model2.order = 1
model2.save()
model3 = TestModel()
model3.order = 2
model3.save()
all_models = TestModel.objects.limit(1).order_by(field='order', order=Query.SORTING_ASC)
self.assertEqual(len(all_models), 1)
model = all_models[0]
self.assertEqual(model.id, model2.id)
TestModel.destroy()
def test_limit_with_start_model_list(self):
class TestModel(CollectionModel):
order = NumberField()
TestModel.init()
model1 = TestModel()
model1.order = 3
model1.save()
model2 = TestModel()
model2.order = 1
model2.save()
model3 = TestModel()
model3.order = 2
model3.save()
all_models = TestModel.objects.all().order_by(field='order', order=Query.SORTING_ASC).limit(1, 1)
self.assertEqual(len(all_models), 1)
model = all_models[0]
self.assertEqual(model.id, model3.id)
TestModel.destroy()
|
11490588
|
class _InteropInterface:
    """Stub interop interface; a single shared instance is created below."""
    def __init__(self):
        pass
InteropInterface = _InteropInterface()
|
11490603
|
import random, heapq
# Assuming `online` is the set of users that is online, find a path to
# send `amount` coins from `frm` to `to` through `coins` where each
# step along the path is between users that have adjacent fragments.
# A transfer done in this way does not contribute to fragmentation.
def find_path(coins, frm, to, amount, online):
# Determine who is whose neighbor
neighbor_map = {}
for i in range(amount, len(coins) - amount + 1):
if coins[i-1] != coins[i]:
if coins[i-1] in online and coins[i] in online:
if coins[i-amount:i] == [coins[i-1]] * amount:
neighbor_map[coins[i-1]] = list(set(neighbor_map.get(coins[i-1], []) + [coins[i]]))
if coins[i:i+amount] == [coins[i]] * amount:
neighbor_map[coins[i]] = list(set(neighbor_map.get(coins[i], []) + [coins[i-1]]))
# Search for the path
parents = {frm: None}
q = [(0, frm)]
while q:
dist, sender = heapq.heappop(q)
neighbors = neighbor_map.get(sender, [])
for n in neighbors:
if n not in parents:
heapq.heappush(q, (dist+1, n))
parents[n] = sender
if n == to:
o = [n]
while o[0] != frm:
o.insert(0, parents[o[0]])
return o
return False
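# Tiny illustration of find_path (a sketch for readers; not invoked by the
# simulation below). With coins [0, 0, 1, 1, 2, 2] every user owns a single
# 2-coin fragment, so 2 coins can hop 0 -> 1 -> 2 along adjacent fragments.
def _find_path_example():
    coins = [0, 0, 1, 1, 2, 2]
    path = find_path(coins, frm=0, to=2, amount=2, online={0, 1, 2})
    assert path == [0, 1, 2]
    return path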
# How many fragments are in this set of coins?
def fragments(vals):
tot = 1
for i in range(1, len(vals)):
if vals[i] != vals[i-1]:
tot += 1
return tot
# Send `amt` coins from `frm` to `to`. Increases fragmentation by
# maximum 1
def send_coins(coins, frm, to, amt):
coins_to_send = amt
for i in range(len(coins)):
if coins[i] == frm:
coins[i] = to
coins_to_send -= 1
if coins_to_send == 0:
return True
return False
# Get the concrete range to transfer if we are transfering `amt`
# coins from `frm` to `to` (must be neighboring fragments)
def get_coin_shunt(coins, frm, to, amt):
i = 1
L = len(coins)
while i < L:
        while i < L and coins[i] not in (frm, to):
            i += 1
        if i >= L:
            break
        if not((coins[i-1] == frm and coins[i] == to) or (coins[i-1] == to and coins[i] == frm)):
            i += 1
            continue
if coins[i-amt:i] == [frm] * amt and coins[i] == to:
coins[i-amt:i] = [to] * amt
return (i-amt, i, to)
if coins[i:i+amt] == [frm] * amt and coins[i-1] == to:
coins[i:i+amt] = [to] * amt
return (i, i+amt, to)
i += 1
return False
# Find the largest slice controlled by `acct`
def maxslice(coins, acct):
maxsz = 0
sz = 0
for i in range(len(coins)):
if coins[i] == acct:
sz += 1
maxsz = max(sz, maxsz)
else:
sz = 0
return maxsz
# Count the number of coins and the number of fragments
# held by each user
def count_coins_and_fragments(coins):
user_count = max(coins) + 1
coin_count = [0] * user_count
frag_count = [0] * user_count
for i in range(len(coins)):
coin_count[coins[i]] += 1
if i > 0 and coins[i] != coins[i-1]:
frag_count[coins[i]] += 1
return coin_count, frag_count
userz = 25
coinz = 50000
part_online = 0.1
initial_fragments_per_user = 100
ordering = list(range(userz)) * initial_fragments_per_user
random.shuffle(ordering)
c = [ordering[i * len(ordering) //coinz] for i in range(coinz)]
balances = count_coins_and_fragments(c)[0]
for i in range(250000):
if i%100 == 0:
print(i, fragments(c))
# if i%2000 == 0:
# coin_count, frag_count = count_coins_and_fragments(c)
# print(sorted(zip(coin_count, frag_count)))
# Randomly select sender, recipient and amount
frm = random.randrange(userz)
to = random.randrange(userz)
if frm == to:
continue
pre_balance = balances[frm]
amount = random.randrange(1, 1 + int(pre_balance ** random.random())) if pre_balance >= 2 else pre_balance
full_amount = amount
# print("Paying %d coins from %d to %d" % (amount, frm, to))
# Randomly select the users that are online
online = [i for i in range(userz) if random.random() < part_online or i in (frm, to)]
while amount > 0:
maxpay = maxslice(c, frm)
pay_this_round = min(amount, maxpay)
path = find_path(c, frm, to, pay_this_round, online)
if path:
#print("Found path for %d coins (%d hops)" % (pay_this_round, len(path)-1))
assert path[0] == frm
assert path[-1] == to
shunts = []
for i in range(1, len(path)):
shunts.append(get_coin_shunt(c, path[i-1], path[i], pay_this_round))
assert shunts[-1]
for shunt in shunts:
                # unpack into a local name so the outer `to` (the final
                # recipient) is not clobbered while applying each hop
                start, end, receiver = shunt
                c[start:end] = [receiver] * (end-start)
amount -= pay_this_round
else:
# print('No path, paying remaining amount %d via fragmentation' % amount)
# print('%d fragments' % fragments(c))
assert send_coins(c, frm, to, amount)
break
balances[frm] -= full_amount
balances[to] += full_amount
|
11490623
|
import math
import multiprocessing
import random
from contextlib import contextmanager, ExitStack
from functools import partial
from math import log2, floor
from pathlib import Path
from random import random, randint
import torch
import torch.nn.functional as F
from gsa_pytorch import GSA
import trainer.losses as L
import torchvision
from PIL import Image
from einops import rearrange, reduce
from kornia import filter2d
from torch import nn, einsum
from torch.utils.data import Dataset
from torchvision import transforms
from models.stylegan.stylegan2_lucidrains import gradient_penalty
from trainer.networks import register_model
from utils.util import opt_get
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous()
# """
# Augmentation functions got images as `x`
# where `x` is tensor with this dimensions:
# 0 - count of images
# 1 - channels
# 2 - width
# 3 - height of image
# """
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_offset(x, ratio=1, ratio_h=1, ratio_v=1):
w, h = x.size(2), x.size(3)
imgs = []
for img in x.unbind(dim = 0):
max_h = int(w * ratio * ratio_h)
max_v = int(h * ratio * ratio_v)
        # `random` is the function imported above, so call randint directly
        value_h = randint(0, max_h) * 2 - max_h
        value_v = randint(0, max_v) * 2 - max_v
if abs(value_h) > 0:
img = torch.roll(img, value_h, 2)
if abs(value_v) > 0:
img = torch.roll(img, value_v, 1)
imgs.append(img)
return torch.stack(imgs)
def rand_offset_h(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=ratio, ratio_v=0)
def rand_offset_v(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=0, ratio_v=ratio)
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'offset': [rand_offset],
'offset_h': [rand_offset_h],
'offset_v': [rand_offset_v],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
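# Illustration only (not used by the trainer): DiffAugment expects a float
# image batch shaped (batch, channels, height, width) and applies every
# augmentation registered above for each requested type.
def _diff_augment_example():
    imgs = torch.rand(4, 3, 64, 64)
    return DiffAugment(imgs, types=['color', 'translation', 'cutout'])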
# constants
NUM_CORES = multiprocessing.cpu_count()
EXTS = ['jpg', 'jpeg', 'png']
# helpers
def exists(val):
return val is not None
@contextmanager
def null_context():
yield
def combine_contexts(contexts):
@contextmanager
def multi_contexts():
with ExitStack() as stack:
yield [stack.enter_context(ctx()) for ctx in contexts]
return multi_contexts
def is_power_of_two(val):
return log2(val).is_integer()
def default(val, d):
return val if exists(val) else d
def set_requires_grad(model, bool):
for p in model.parameters():
p.requires_grad = bool
def cycle(iterable):
while True:
for i in iterable:
yield i
def raise_if_nan(t):
if torch.isnan(t):
raise NanException
def gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps):
if is_ddp:
num_no_syncs = gradient_accumulate_every - 1
head = [combine_contexts(map(lambda ddp: ddp.no_sync, ddps))] * num_no_syncs
tail = [null_context]
contexts = head + tail
else:
contexts = [null_context] * gradient_accumulate_every
for context in contexts:
with context():
yield
def hinge_loss(real, fake):
return (F.relu(1 + real) + F.relu(1 - fake)).mean()
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def slerp(val, low, high):
low_norm = low / torch.norm(low, dim=1, keepdim=True)
high_norm = high / torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(1))
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
return res
def safe_div(n, d):
try:
res = n / d
except ZeroDivisionError:
prefix = '' if int(n >= 0) else '-'
res = float(f'{prefix}inf')
return res
# helper classes
class NanException(Exception):
pass
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if not exists(old):
return new
return old * self.beta + (1 - self.beta) * new
class EMAWrapper(nn.Module):
def __init__(self, wrapped_module, following_module, rate=.995, steps_per_ema=10, steps_per_reset=1000, steps_after_no_reset=25000, reset=True):
super().__init__()
self.wrapped = wrapped_module
self.following = following_module
self.ema_updater = EMA(rate)
self.steps_per_ema = steps_per_ema
self.steps_per_reset = steps_per_reset
self.steps_after_no_reset = steps_after_no_reset
if reset:
self.wrapped.load_state_dict(self.following.state_dict())
for p in self.wrapped.parameters():
p.DO_NOT_TRAIN = True
def reset_parameter_averaging(self):
self.wrapped.load_state_dict(self.following.state_dict())
def update_moving_average(self):
for current_params, ma_params in zip(self.following.parameters(), self.wrapped.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
for current_buffer, ma_buffer in zip(self.following.buffers(), self.wrapped.buffers()):
new_buffer_value = self.ema_updater.update_average(ma_buffer, current_buffer)
ma_buffer.copy_(new_buffer_value)
def custom_optimizer_step(self, step):
if step % self.steps_per_ema == 0:
self.update_moving_average()
        if step % self.steps_per_reset == 0 and step < self.steps_after_no_reset:
self.reset_parameter_averaging()
def forward(self, x):
with torch.no_grad():
return self.wrapped(x)
class RandomApply(nn.Module):
def __init__(self, prob, fn, fn_else=lambda x: x):
super().__init__()
self.fn = fn
self.fn_else = fn_else
self.prob = prob
def forward(self, x):
fn = self.fn if random() < self.prob else self.fn_else
return fn(x)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.tensor(1e-3))
def forward(self, x):
return self.g * self.fn(x)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class SumBranches(nn.Module):
def __init__(self, branches):
super().__init__()
self.branches = nn.ModuleList(branches)
def forward(self, x):
return sum(map(lambda fn: fn(x), self.branches))
class Blur(nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer('f', f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f[None, :, None]
return filter2d(x, f, normalized=True)
# dataset
def convert_image_to(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
class identity(object):
def __call__(self, tensor):
return tensor
class expand_greyscale(object):
def __init__(self, transparent):
self.transparent = transparent
def __call__(self, tensor):
channels = tensor.shape[0]
num_target_channels = 4 if self.transparent else 3
if channels == num_target_channels:
return tensor
alpha = None
if channels == 1:
color = tensor.expand(3, -1, -1)
elif channels == 2:
color = tensor[:1].expand(3, -1, -1)
alpha = tensor[1:]
else:
raise Exception(f'image with invalid number of channels given {channels}')
if not exists(alpha) and self.transparent:
alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device)
return color if not self.transparent else torch.cat((color, alpha))
def resize_to_minimum_size(min_size, image):
if max(*image.size) < min_size:
return torchvision.transforms.functional.resize(image, min_size)
return image
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
transparent=False,
greyscale=False,
aug_prob=0.
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
assert len(self.paths) > 0, f'No images were found in {folder} for training'
if transparent:
num_channels = 4
pillow_mode = 'RGBA'
expand_fn = expand_greyscale(transparent)
elif greyscale:
num_channels = 1
pillow_mode = 'L'
expand_fn = identity()
else:
num_channels = 3
pillow_mode = 'RGB'
expand_fn = expand_greyscale(transparent)
convert_image_fn = partial(convert_image_to, pillow_mode)
self.transform = transforms.Compose([
transforms.Lambda(convert_image_fn),
transforms.Lambda(partial(resize_to_minimum_size, image_size)),
transforms.Resize(image_size),
RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)),
transforms.CenterCrop(image_size)),
transforms.ToTensor(),
transforms.Lambda(expand_fn)
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# augmentations
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size, prob, types):
super().__init__()
self.D = D
self.prob = prob
self.types = types
def forward(self, images, detach=False, **kwargs):
context = torch.no_grad if detach else null_context
with context():
if random() < self.prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=self.types)
return self.D(images, **kwargs)
# modifiable global variables
norm_class = nn.BatchNorm2d
def upsample(scale_factor=2):
return nn.Upsample(scale_factor=scale_factor)
# squeeze excitation classes
# global context network
# https://arxiv.org/abs/2012.13375
# similar to squeeze-excite, but with a simplified attention pooling and a subsequent layer norm
class GlobalContext(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out
):
super().__init__()
self.to_k = nn.Conv2d(chan_in, 1, 1)
chan_intermediate = max(3, chan_out // 2)
self.net = nn.Sequential(
nn.Conv2d(chan_in, chan_intermediate, 1),
nn.LeakyReLU(0.1),
nn.Conv2d(chan_intermediate, chan_out, 1),
nn.Sigmoid()
)
def forward(self, x):
context = self.to_k(x)
context = context.flatten(2).softmax(dim=-1)
out = einsum('b i n, b c n -> b c i', context, x.flatten(2))
out = out.unsqueeze(-1)
return self.net(out)
# frequency channel attention
# https://arxiv.org/abs/2012.11879
def get_1d_dct(i, freq, L):
result = math.cos(math.pi * freq * (i + 0.5) / L) / math.sqrt(L)
return result * (1 if freq == 0 else math.sqrt(2))
def get_dct_weights(width, channel, fidx_u, fidx_v):
dct_weights = torch.zeros(1, channel, width, width)
c_part = channel // len(fidx_u)
for i, (u_x, v_y) in enumerate(zip(fidx_u, fidx_v)):
for x in range(width):
for y in range(width):
coor_value = get_1d_dct(x, u_x, width) * get_1d_dct(y, v_y, width)
dct_weights[:, i * c_part: (i + 1) * c_part, x, y] = coor_value
return dct_weights
class FCANet(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out,
reduction=4,
width
):
super().__init__()
        freq_w, freq_h = ([0] * 8), list(range(8))  # in the paper, 16 frequencies seemed ideal
dct_weights = get_dct_weights(width, chan_in, [*freq_w, *freq_h], [*freq_h, *freq_w])
self.register_buffer('dct_weights', dct_weights)
chan_intermediate = max(3, chan_out // reduction)
self.net = nn.Sequential(
nn.Conv2d(chan_in, chan_intermediate, 1),
nn.LeakyReLU(0.1),
nn.Conv2d(chan_intermediate, chan_out, 1),
nn.Sigmoid()
)
def forward(self, x):
x = reduce(x * self.dct_weights, 'b c (h h1) (w w1) -> b c h1 w1', 'sum', h1=1, w1=1)
return self.net(x)
# generative adversarial network
class Generator(nn.Module):
def __init__(
self,
*,
image_size,
latent_dim=256,
fmap_max=512,
fmap_inverse_coef=12,
transparent=False,
greyscale=False,
freq_chan_attn=False
):
super().__init__()
resolution = log2(image_size)
assert is_power_of_two(image_size), 'image size must be a power of 2'
if transparent:
init_channel = 4
elif greyscale:
init_channel = 1
else:
init_channel = 3
fmap_max = default(fmap_max, latent_dim)
self.initial_conv = nn.Sequential(
nn.ConvTranspose2d(latent_dim, latent_dim * 2, 4),
norm_class(latent_dim * 2),
nn.GLU(dim=1)
)
num_layers = int(resolution) - 2
features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), range(2, num_layers + 2)))
features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features))
features = list(map(lambda n: 3 if n[0] >= 8 else n[1], features))
features = [latent_dim, *features]
in_out_features = list(zip(features[:-1], features[1:]))
self.res_layers = range(2, num_layers + 2)
self.layers = nn.ModuleList([])
self.res_to_feature_map = dict(zip(self.res_layers, in_out_features))
self.sle_map = ((3, 7), (4, 8), (5, 9), (6, 10))
self.sle_map = list(filter(lambda t: t[0] <= resolution and t[1] <= resolution, self.sle_map))
self.sle_map = dict(self.sle_map)
self.num_layers_spatial_res = 1
for (res, (chan_in, chan_out)) in zip(self.res_layers, in_out_features):
attn = None
sle = None
if res in self.sle_map:
residual_layer = self.sle_map[res]
sle_chan_out = self.res_to_feature_map[residual_layer - 1][-1]
if freq_chan_attn:
sle = FCANet(
chan_in=chan_out,
chan_out=sle_chan_out,
width=2 ** (res + 1)
)
else:
sle = GlobalContext(
chan_in=chan_out,
chan_out=sle_chan_out
)
layer = nn.ModuleList([
nn.Sequential(
upsample(),
Blur(),
nn.Conv2d(chan_in, chan_out * 2, 3, padding=1),
norm_class(chan_out * 2),
nn.GLU(dim=1)
),
sle,
attn
])
self.layers.append(layer)
self.out_conv = nn.Conv2d(features[-1], init_channel, 3, padding=1)
for m in self.modules():
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def forward(self, x):
x = rearrange(x, 'b c -> b c () ()')
x = self.initial_conv(x)
x = F.normalize(x, dim=1)
residuals = dict()
for (res, (up, sle, attn)) in zip(self.res_layers, self.layers):
if exists(attn):
x = attn(x) + x
x = up(x)
if exists(sle):
out_res = self.sle_map[res]
residual = sle(x)
residuals[out_res] = residual
next_res = res + 1
if next_res in residuals:
x = x * residuals[next_res]
return self.out_conv(x)
class SimpleDecoder(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out=3,
num_upsamples=4,
):
super().__init__()
self.layers = nn.ModuleList([])
final_chan = chan_out
chans = chan_in
for ind in range(num_upsamples):
last_layer = ind == (num_upsamples - 1)
chan_out = chans if not last_layer else final_chan * 2
layer = nn.Sequential(
upsample(),
nn.Conv2d(chans, chan_out, 3, padding=1),
nn.GLU(dim=1)
)
self.layers.append(layer)
chans //= 2
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class Discriminator(nn.Module):
def __init__(
self,
*,
image_size,
fmap_max=512,
fmap_inverse_coef=12,
transparent=False,
greyscale=False,
disc_output_size=5,
attn_res_layers=[]
):
super().__init__()
self.image_size = image_size
resolution = log2(image_size)
assert is_power_of_two(image_size), 'image size must be a power of 2'
assert disc_output_size in {1, 5}, 'discriminator output dimensions can only be 5x5 or 1x1'
resolution = int(resolution)
if transparent:
init_channel = 4
elif greyscale:
init_channel = 1
else:
init_channel = 3
num_non_residual_layers = max(0, int(resolution) - 8)
num_residual_layers = 8 - 3
non_residual_resolutions = range(min(8, resolution), 2, -1)
features = list(map(lambda n: (n, 2 ** (fmap_inverse_coef - n)), non_residual_resolutions))
features = list(map(lambda n: (n[0], min(n[1], fmap_max)), features))
if num_non_residual_layers == 0:
res, _ = features[0]
features[0] = (res, init_channel)
chan_in_out = list(zip(features[:-1], features[1:]))
self.non_residual_layers = nn.ModuleList([])
for ind in range(num_non_residual_layers):
first_layer = ind == 0
last_layer = ind == (num_non_residual_layers - 1)
chan_out = features[0][-1] if last_layer else init_channel
self.non_residual_layers.append(nn.Sequential(
Blur(),
nn.Conv2d(init_channel, chan_out, 4, stride=2, padding=1),
nn.LeakyReLU(0.1)
))
self.residual_layers = nn.ModuleList([])
for (res, ((_, chan_in), (_, chan_out))) in zip(non_residual_resolutions, chan_in_out):
attn = None
self.residual_layers.append(nn.ModuleList([
SumBranches([
nn.Sequential(
Blur(),
nn.Conv2d(chan_in, chan_out, 4, stride=2, padding=1),
nn.LeakyReLU(0.1),
nn.Conv2d(chan_out, chan_out, 3, padding=1),
nn.LeakyReLU(0.1)
),
nn.Sequential(
Blur(),
nn.AvgPool2d(2),
nn.Conv2d(chan_in, chan_out, 1),
nn.LeakyReLU(0.1),
)
]),
attn
]))
last_chan = features[-1][-1]
if disc_output_size == 5:
self.to_logits = nn.Sequential(
nn.Conv2d(last_chan, last_chan, 1),
nn.LeakyReLU(0.1),
nn.Conv2d(last_chan, 1, 4)
)
elif disc_output_size == 1:
self.to_logits = nn.Sequential(
Blur(),
nn.Conv2d(last_chan, last_chan, 3, stride=2, padding=1),
nn.LeakyReLU(0.1),
nn.Conv2d(last_chan, 1, 4)
)
self.to_shape_disc_out = nn.Sequential(
nn.Conv2d(init_channel, 64, 3, padding=1),
Residual(Rezero(GSA(dim=64, norm_queries=True, batch_norm=False))),
SumBranches([
nn.Sequential(
Blur(),
nn.Conv2d(64, 32, 4, stride=2, padding=1),
nn.LeakyReLU(0.1),
nn.Conv2d(32, 32, 3, padding=1),
nn.LeakyReLU(0.1)
),
nn.Sequential(
Blur(),
nn.AvgPool2d(2),
nn.Conv2d(64, 32, 1),
nn.LeakyReLU(0.1),
)
]),
Residual(Rezero(GSA(dim=32, norm_queries=True, batch_norm=False))),
nn.AdaptiveAvgPool2d((4, 4)),
nn.Conv2d(32, 1, 4)
)
self.decoder1 = SimpleDecoder(chan_in=last_chan, chan_out=init_channel)
self.decoder2 = SimpleDecoder(chan_in=features[-2][-1], chan_out=init_channel) if resolution >= 9 else None
for m in self.modules():
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def forward(self, x, calc_aux_loss=False):
orig_img = x
for layer in self.non_residual_layers:
x = layer(x)
layer_outputs = []
for (net, attn) in self.residual_layers:
if exists(attn):
x = attn(x) + x
x = net(x)
layer_outputs.append(x)
out = self.to_logits(x).flatten(1)
img_32x32 = F.interpolate(orig_img, size=(32, 32))
out_32x32 = self.to_shape_disc_out(img_32x32)
if not calc_aux_loss:
return out, out_32x32, None
# self-supervised auto-encoding loss
layer_8x8 = layer_outputs[-1]
layer_16x16 = layer_outputs[-2]
recon_img_8x8 = self.decoder1(layer_8x8)
aux_loss = F.mse_loss(
recon_img_8x8,
F.interpolate(orig_img, size=recon_img_8x8.shape[2:])
)
if exists(self.decoder2):
select_random_quadrant = lambda rand_quadrant, img: \
rearrange(img, 'b c (m h) (n w) -> (m n) b c h w', m=2, n=2)[rand_quadrant]
crop_image_fn = partial(select_random_quadrant, floor(random() * 4))
img_part, layer_16x16_part = map(crop_image_fn, (orig_img, layer_16x16))
recon_img_16x16 = self.decoder2(layer_16x16_part)
aux_loss_16x16 = F.mse_loss(
recon_img_16x16,
F.interpolate(img_part, size=recon_img_16x16.shape[2:])
)
aux_loss = aux_loss + aux_loss_16x16
return out, out_32x32, aux_loss
class LightweightGanDivergenceLoss(L.ConfigurableLoss):
def __init__(self, opt, env):
super().__init__(opt, env)
self.real = opt['real']
self.fake = opt['fake']
self.discriminator = opt['discriminator']
self.for_gen = opt['gen_loss']
self.gp_frequency = opt['gradient_penalty_frequency']
self.noise = opt['noise'] if 'noise' in opt.keys() else 0
# TODO: Implement generator top-k fractional loss compensation.
def forward(self, net, state):
real_input = state[self.real]
fake_input = state[self.fake]
if self.noise != 0:
fake_input = fake_input + torch.rand_like(fake_input) * self.noise
real_input = real_input + torch.rand_like(real_input) * self.noise
D = self.env['discriminators'][self.discriminator]
fake, fake32, _ = D(fake_input, detach=not self.for_gen)
if self.for_gen:
return fake.mean() + fake32.mean()
else:
real_input.requires_grad_() # <-- Needed to compute gradients on the input.
real, real32, real_aux = D(real_input, calc_aux_loss=True)
divergence_loss = hinge_loss(real, fake) + hinge_loss(real32, fake32) + real_aux
# Apply gradient penalty. TODO: migrate this elsewhere.
if self.env['step'] % self.gp_frequency == 0:
gp = gradient_penalty(real_input, real)
self.metrics.append(("gradient_penalty", gp.clone().detach()))
divergence_loss = divergence_loss + gp
real_input.requires_grad_(requires_grad=False)
return divergence_loss
@register_model
def register_lightweight_gan_g(opt_net, opt, other_nets):
gen = Generator(**opt_net['kwargs'])
if opt_get(opt_net, ['ema'], False):
following = other_nets[opt_net['following']]
return EMAWrapper(gen, following, opt_net['rate'])
return gen
@register_model
def register_lightweight_gan_d(opt_net, opt):
d = Discriminator(**opt_net['kwargs'])
if opt_net['aug']:
return AugWrapper(d, d.image_size, opt_net['aug_prob'], opt_net['aug_types'])
return d
if __name__ == '__main__':
g = Generator(image_size=256)
d = Discriminator(image_size=256)
j = torch.randn(1,256)
r = g(j)
a, b, c = d(r)
print(a.shape)
|
11490635
|
from pykdebugparser.kevent import Kevent
from pykdebugparser.trace_handlers.perf import CallstackFlag, KperfTiState, SamplerAction
def test_perf_event(traces_parser):
events = [
Kevent(timestamp=7006023115068,
data=(b'\t\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(9, 32, 0, 0), tid=1957, debugid=620756993, eventid=620756992, func_qualifier=1),
Kevent(timestamp=7006023115085,
data=(b'E\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(69, 5, 0, 0), tid=1957, debugid=620888088, eventid=620888088, func_qualifier=0),
Kevent(timestamp=7006023115105,
data=(b'\xf0[\xc0\xb5\x01\x00\x00\x00\xd4\xe4v\x93\x01\x00\x00\x000\x99\\\x02\x01\x00'
b'\x00\x00<\x0b\x16\xd1\x01\x00\x00\x00'),
values=(7344249840, 6769009876, 4334590256, 7802850108), tid=1957, debugid=620888080, eventid=620888080,
func_qualifier=0),
Kevent(timestamp=7006023115123,
data=(b'\xd4\xe6v\x93\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(6769010388, 0, 0, 0), tid=1957, debugid=620888080, eventid=620888080, func_qualifier=0),
Kevent(timestamp=7006023115140,
data=(b'\x95\x00\x00\x00\x00\x00\x00\x00\xa5\x07\x00\x00\x00\x00\x00\x00\x80\xb1\x94m\x01'
b'\x00\x00\x00\x03\x00\xfc\xff\x00\x00\x00\x00'),
values=(149, 1957, 6133428608, 4294705155), tid=1957, debugid=620822532, eventid=620822532,
func_qualifier=0),
Kevent(timestamp=7006023115153,
data=(b'\t\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(9, 0, 0, 0), tid=1957, debugid=620756994, eventid=620756992, func_qualifier=2)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
assert ret[0].sample_what == [SamplerAction.SAMPLER_TH_INFO, SamplerAction.SAMPLER_USTACK]
assert ret[0].actionid == 32
assert ret[0].th_info.pid == 149
assert ret[0].th_info.tid == 1957
assert ret[0].th_info.dq_addr == 0x16d94b180
assert ret[0].th_info.runmode == [KperfTiState.KPERF_TI_RUNNING, KperfTiState.KPERF_TI_RUNNABLE]
assert ret[0].cs_flags == [CallstackFlag.CALLSTACK_VALID, CallstackFlag.CALLSTACK_64BIT,
CallstackFlag.CALLSTACK_KERNEL_WORDS]
assert ret[0].cs_frames == [0x1b5c05bf0, 0x19376e4d4, 0x1025c9930, 0x1d1160b3c, 0x19376e6d4]
def test_perf_event_without_stack(traces_parser):
events = [
Kevent(timestamp=7006023115068,
data=(b'\t\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(9, 32, 0, 0), tid=1957, debugid=620756993, eventid=620756992, func_qualifier=1),
Kevent(timestamp=7006023115153,
data=(b'\t\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(9, 0, 0, 0), tid=1957, debugid=620756994, eventid=620756992, func_qualifier=2)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
assert ret[0].sample_what == [SamplerAction.SAMPLER_TH_INFO, SamplerAction.SAMPLER_USTACK]
assert ret[0].actionid == 32
assert ret[0].th_info is None
assert ret[0].cs_flags is None
assert ret[0].cs_frames is None
def test_thd_data(traces_parser):
events = [
Kevent(timestamp=15773877915,
data=(b'P\x00\x00\x00\x00\x00\x00\x00\x9d\x04\x00\x00\x00\x00\x00\x00\x00'
b'\xfa\x17\x05\x01\x00\x00\x00\x03\x00\xfc\xff\x00\x00\x00\x00'),
values=(80, 1181, 4380424704, 4294705155), tid=1181, debugid=620822532, eventid=620822532,
func_qualifier=0)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
thd_data = ret[0]
assert thd_data.pid == 80
assert thd_data.tid == 1181
assert thd_data.dq_addr == 0x10517fa00
assert thd_data.runmode == [KperfTiState.KPERF_TI_RUNNING, KperfTiState.KPERF_TI_RUNNABLE]
def test_thd_cswitch(traces_parser):
events = [
Kevent(timestamp=15779569737,
data=(b'`\x10\x00\x00\x00\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(4192, 80, 0, 0), tid=4192, debugid=620822548, eventid=620822548, func_qualifier=0)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
thd_cswitch = ret[0]
assert thd_cswitch.tid == 4192
assert thd_cswitch.pid == 80
def test_stk_udata(traces_parser):
events = [
Kevent(timestamp=15771902115,
data=(b'\x94\xec\x12\x93\x01\x00\x00\x00\xa8\xf8\x12\x93\x01\x00\x00\x008\x93'
b'\x13\x93\x01\x00\x00\x00\xa4\xa5\xbe\xd9\x01\x00\x00\x00'),
values=(6762458260, 6762461352, 6762500920, 7948117412), tid=7565, debugid=620888080, eventid=620888080,
func_qualifier=0)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
stk_udata = ret[0]
assert stk_udata.frames == [0x19312ec94, 0x19312f8a8, 0x193139338, 0x1d9bea5a4]
def test_stk_uhdr(traces_parser):
events = [
Kevent(timestamp=15772304192,
data=(b'E\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
values=(69, 7, 0, 0), tid=6206, debugid=620888088, eventid=620888088, func_qualifier=0)
]
ret = list(traces_parser.feed_generator(events))
assert len(ret) == 1
stk_uhdr = ret[0]
assert stk_uhdr.flags == [CallstackFlag.CALLSTACK_VALID, CallstackFlag.CALLSTACK_64BIT,
CallstackFlag.CALLSTACK_KERNEL_WORDS]
assert stk_uhdr.nframes == 7
|
11490682
|
from lint import Linter
class Puppet(Linter):
language = 'puppet'
cmd = ('puppet', 'parser', 'validate', '--color=false')
regex = r'^([^:]+:){2}\s*(?P<error>(Syntax error at|Could not match) \'?(?P<near>[^ ]*?)\'?.*) at [^:]*:(?P<line>\d+)$'
def run(self, cmd, code):
return self.tmpfile(cmd, code, suffix='.puppet')
|
11490805
|
from pgopttune.config.config import Config
class TuneConfig(Config):
def __init__(self, conf_path, section='turning'):
super().__init__(conf_path)
self.config_dict = dict(self.config.items(section))
@property
def study_name(self):
return self.get_parameter_value('study_name')
@property
def required_recovery_time_second(self):
return self.get_parameter_value('required_recovery_time_second')
@property
def benchmark(self):
return self.get_parameter_value('benchmark')
@property
def parameter_json_dir(self):
return self.get_parameter_value('parameter_json_dir')
@property
def number_trail(self):
return int(self.get_parameter_value('number_trail'))
@property
def data_load_interval(self):
return int(self.get_parameter_value('data_load_interval'))
@property
def warm_up_interval(self):
return int(self.get_parameter_value('warm_up_interval'))
@property
def sample_mode(self):
return self.get_parameter_value('sample_mode')
@property
def debug(self):
return self.get_parameter_value('debug')
@property
def save_study_history(self):
return self.get_parameter_value('save_study_history')
@property
def load_study_history(self):
return self.get_parameter_value('load_study_history')
@property
def history_database_url(self):
return self.get_parameter_value('history_database_url')
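# Illustrative shape of the configuration section this class reads (the key
# names come from the properties above; the values here are made up):
#
#   [turning]
#   study_name = sample_study
#   required_recovery_time_second = 300
#   benchmark = pgbench
#   parameter_json_dir = ./conf
#   number_trail = 100
#   data_load_interval = 10
#   warm_up_interval = 10
#   sample_mode = default
#   debug = False
#   save_study_history = True
#   load_study_history = True
#   history_database_url = sqlite:///study-history.db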
|
11490807
|
from haystack import indexes
from mozdns.txt.models import TXT
from mozdns.mozdns_index import MozdnsIndex
class TXTIndex(MozdnsIndex, indexes.Indexable):
txt_data = indexes.CharField(model_attr='txt_data')
def get_model(self):
return TXT
|
11490819
|
from urllib.parse import quote
from bddrest import status, response, when, Given
import yhttp
def test_rewrite_nodefault():
log = []
foo = yhttp.Application()
bar = yhttp.Application()
app = yhttp.Rewrite()
app.route(r'/foo/?', r'/', foo)
app.route(r'/bar/?', r'/', bar)
@app.when
def endresponse(app):
log.append('app endresponse')
@foo.route()
@yhttp.statuscode('201 Created')
def get(req):
return 'foo'
@bar.route()
def get(req):
return 'bar'
with Given(app):
assert status == 404
when('/qux')
assert status == 404
when('/foo')
assert status == 201
assert response.text == 'foo'
when('/bar')
assert status == 200
assert response.text == 'bar'
assert log == [
'app endresponse',
'app endresponse',
]
def test_rewrite_default():
root = yhttp.Application()
foo = yhttp.Application()
app = yhttp.Rewrite(default=root)
app.route(r'/foo/?(.*)', r'/\1', foo)
@root.route()
def get(req):
return 'root'
@foo.route()
@yhttp.statuscode('201 Created')
def get(req):
resp = 'foo'
if req.query:
qs = ', '.join(f'{k}={v}' for k, v in req.query.items())
resp += f' qs: {qs}'
return resp
with Given(app):
assert status == 200
assert response.text == 'root'
when('/qux')
assert status == 404
when('/foo?bar=baz')
assert status == 201
assert response.text == 'foo qs: bar=baz'
when('/foo')
assert status == 201
assert response.text == 'foo'
when('/foo?bar=baz')
assert status == 201
assert response.text == 'foo qs: bar=baz'
def test_rewrite_hooks():
log = []
root = yhttp.Application()
foo = yhttp.Application()
app = yhttp.Rewrite(default=root)
app.route(r'/foo/?(.*)', r'/\1', foo)
@app.when
def ready(app):
log.append('app ready')
@root.when
def ready(app):
log.append('root ready')
@foo.when
def ready(app):
log.append('foo ready')
@root.when
def endresponse(app):
log.append('root endresponse')
@foo.when
def endresponse(app):
log.append('foo endresponse')
@app.when
def shutdown(app):
log.append('app shutdown')
@root.when
def shutdown(app):
log.append('root shutdown')
@foo.when
def shutdown(app):
log.append('foo shutdown')
@root.route()
def get(req):
return 'root'
@foo.route()
@yhttp.statuscode('201 Created')
def get(req):
return 'foo'
app.ready()
with Given(app):
assert status == 200
assert response.text == 'root'
when('/foo')
assert status == 201
assert response.text == 'foo'
when('/bar')
assert status == 404
app.shutdown()
assert log == [
'foo ready',
'root ready',
'app ready',
'root endresponse',
'foo endresponse',
'root endresponse',
'foo shutdown',
'root shutdown',
'app shutdown',
]
def test_rewrite_encodedurl():
root = yhttp.Application()
foo = yhttp.Application()
app = yhttp.Rewrite(default=root)
app.route(r'/foo/?(.*)', r'/\1', foo)
@root.route()
def get(req):
return 'root'
@foo.route(r'/(.+)')
@yhttp.statuscode('201 Created')
def get(req, arg):
resp = f'foo: {arg}'
if req.query:
qs = ', '.join(f'{k}={v}' for k, v in req.query.items())
resp += f' qs: {qs}'
return resp
with Given(app):
assert status == 200
assert response.text == 'root'
when('/foo/bar')
assert status == 201
assert response.text == 'foo: bar'
when(quote('/foo/الف'))
assert status == 201
assert response.text == 'foo: الف'
when(quote('/foo/الف?a=ابجد'))
assert status == 201
assert response.text == 'foo: الف?a=ابجد'
|
11490820
|
from __future__ import print_function
from functools import reduce
import re
import numpy as np
from keras.preprocessing.sequence import pad_sequences
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)
QFILE = {1: 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_train.txt',
2: 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_train.txt',
3: 'tasks_1-20_v1-2/en-10k/qa3_three-supporting-facts_train.txt',
4: 'tasks_1-20_v1-2/en-10k/qa4_two-arg-relations_train.txt',
5: 'tasks_1-20_v1-2/en-10k/qa5_three-arg-relations_train.txt',
6: 'tasks_1-20_v1-2/en-10k/qa6_yes-no-questions_train.txt',
7: 'tasks_1-20_v1-2/en-10k/qa7_counting_train.txt',
8: 'tasks_1-20_v1-2/en-10k/qa8_lists-sets_train.txt',
9: 'tasks_1-20_v1-2/en-10k/qa9_simple-negation_train.txt',
10: 'tasks_1-20_v1-2/en-10k/qa10_indefinite-knowledge_train.txt',
11: 'tasks_1-20_v1-2/en-10k/qa11_basic-coreference_train.txt',
12: 'tasks_1-20_v1-2/en-10k/qa12_conjunction_train.txt',
13: 'tasks_1-20_v1-2/en-10k/qa13_compound-coreference_train.txt',
14: 'tasks_1-20_v1-2/en-10k/qa14_time-reasoning_train.txt',
15: 'tasks_1-20_v1-2/en-10k/qa15_basic-deduction_train.txt',
16: 'tasks_1-20_v1-2/en-10k/qa16_basic-induction_train.txt',
17: 'tasks_1-20_v1-2/en-10k/qa17_positional-reasoning_train.txt',
18: 'tasks_1-20_v1-2/en-10k/qa18_size-reasoning_train.txt',
19: 'tasks_1-20_v1-2/en-10k/qa19_path-finding_train.txt',
20: 'tasks_1-20_v1-2/en-10k/qa20_agents-motivations_train.txt'}
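# Minimal sketch of how the helpers above are typically wired together
# (assumes the bAbI v1.2 archive has been extracted so that QFILE[1] exists
# relative to the working directory; the filename is illustrative).
if __name__ == '__main__':
    with open(QFILE[1], 'rb') as f:
        train = get_stories(f)
    vocab = sorted({w for story, q, answer in train for w in story + q + [answer]})
    word_idx = {w: i + 1 for i, w in enumerate(vocab)}  # index 0 is reserved for padding
    story_maxlen = max(len(story) for story, _, _ in train)
    query_maxlen = max(len(q) for _, q, _ in train)
    X, Xq, Y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
    print(X.shape, Xq.shape, Y.shape)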
|
11490835
|
from collections import OrderedDict
from wazimap.data.tables import get_model_from_fields, get_datatable
from wazimap.data.utils import (get_session, add_metadata, ratio, merge_dicts, group_remainder,
get_stat_data, get_objects_by_geo, percent)
from wazimap.geo import geo_data
PROFILE_SECTIONS = (
"demographics",
"hospitals",
"schools",
"ecd_centres",
"ecd_educators",
"ecd_budgets",
"households",
"service_delivery"
)
ECD_AGE_CATEGORIES = {
'0': '0-2',
'1': '0-2',
'2': '0-2',
'3': '3-5',
'4': '3-5',
'5': '3-5',
'6': '6',
}
TYPE_OF_DWELLING_RECODE = {
'House or brick/concrete block structure on a separate stand or yard or on a farm': 'House',
'Traditional dwelling/hut/structure made of traditional materials': 'Traditional',
'Flat or apartment in a block of flats': 'Apartment',
'Cluster house in complex': 'Cluster house',
'Townhouse (semi-detached house in a complex)': 'Townhouse',
'Semi-detached house': 'Semi-detached house',
'House/flat/room in backyard': 'Backyard in flat',
'Informal dwelling (shack; in backyard)': 'Shack',
'Informal dwelling (shack; not in backyard; e.g. in an informal/squatter settlement or on a farm)': 'Shack',
'Room/flatlet on a property or larger dwelling/servants quarters/granny flat': 'Room or flatlet',
'Caravan/tent': 'Caravan/tent',
'Other': 'Other',
'Unspecified': 'Unspecified',
'Not applicable': 'N/A',
}
SHORT_WATER_SOURCE_CATEGORIES = {
"Regional/local water scheme (operated by municipality or other water services provider)": "Service provider",
"Water tanker": "Tanker",
"Spring": "Spring",
"Other": "Other",
"Dam/pool/stagnant water": "Dam",
"River/stream": "River",
"Not applicable": "N/A",
"Borehole": "Borehole",
"Rain water tank": "Rainwater tank",
"Water vendor": "Vendor",
}
COLLAPSED_TOILET_CATEGORIES = {
"Flush toilet (connected to sewerage system)": "Flush toilet",
"Flush toilet (with septic tank)": "Flush toilet",
"Chemical toilet": "Chemical toilet",
"Pit toilet with ventilation (VIP)": "Pit toilet",
"Pit toilet without ventilation": "Pit toilet",
"Bucket toilet": "Bucket toilet",
"Other": "Other",
"None": "None",
"Unspecified": "Unspecified",
"Not applicable": "N/A",
}
def get_profile(geo, profile_name, request):
session = get_session()
try:
comp_geos = geo_data.get_comparative_geos(geo)
data = {}
sections = list(PROFILE_SECTIONS)
        if geo.geo_level not in ['country', 'province', 'municipality']:
            # TODO: raise an error here, since we don't have data at this geo level
            pass
for section in sections:
function_name = 'get_%s_profile' % section
if function_name in globals():
func = globals()[function_name]
data[section] = func(geo, session)
# get profiles for province and/or country
for comp_geo in comp_geos:
# merge summary profile into current geo profile
merge_dicts(data[section], func(comp_geo, session), comp_geo.geo_level)
group_remainder(data['households']['type_of_dwelling_distribution'], 5)
group_remainder(data['service_delivery']['water_source_distribution'], 5)
group_remainder(data['service_delivery']['toilet_facilities_distribution'], 5)
return data
finally:
session.close()
def get_demographics_profile(geo, session):
# population group
pop_dist_data, total_pop = get_stat_data(
['population group'], geo, session, table_dataset='Census 2011')
ecd_age_groups, ecd_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=['0', '1', '2', '3', '4', '5'],
recode=ECD_AGE_CATEGORIES)
ecd_gender, total_ecd_gender = get_stat_data(
['gender'], geo, session,
table_name='genderunder9')
women_child_bearing_age, total_women_child_bearing_age = get_stat_data(
['age groups in 5 years'], geo, session,
table_name='womenagegroupsin5years15to44',
order_by='age groups in 5 years'
)
final_data = {
'total_population': {
"name": "People",
"values": {"this": total_pop}
},
'ecd_age_groups': ecd_age_groups,
'ecd_children': {
"name": "Children 5 years and younger",
"values": {"this": ecd_children}
},
'ecd_gender': ecd_gender,
'women_child_bearing_age': women_child_bearing_age,
'total_women_child_bearing_age': {
"name": "Women between the age of 15-44 years",
"values": {"this": total_women_child_bearing_age}
}
}
if geo.square_kms:
final_data['population_density'] = {
'name': "people per square kilometre",
'values': {"this": total_pop / geo.square_kms}
}
final_data['child_population_density'] = {
'name': 'Children (0-5 years) per square kilometre',
'values': {"this": ecd_children / geo.square_kms}
}
ecd_pop_density = OrderedDict()
        for k, age_group in ecd_age_groups.items():
if k != 'metadata':
ecd_pop_density[age_group['name']] = {
"name": age_group['name'],
"values": {
"this": age_group['numerators']['this'] / geo.square_kms
},
"numerators": {
"this": age_group['numerators']['this']
}
}
ecd_pop_density['metadata'] = ecd_age_groups['metadata']
final_data['ecd_pop_density'] = ecd_pop_density
return final_data
def get_schools_profile(geo, session):
# population group
_, total_pop = get_stat_data(
['population group'], geo, session, table_dataset='Census 2011')
# Schools
table = get_datatable('schools_2015')
keys = ['primary_schools', 'combined_schools', 'intermediate_schools', 'secondary_schools']
school_breakdown, total_schools = table.get_stat_data(
geo, keys, percent=False)
primary_school_ages = ['6', '7', '8', '9', '10', '11', '12', '13']
secondary_school_ages = ['14', '15', '16', '17', '18']
_, total_primary_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=primary_school_ages)
_, total_secondary_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=secondary_school_ages)
children_per_primary_school = ratio(total_primary_children, school_breakdown['primary_schools']['values']['this'])
children_per_secondary_school = ratio(total_secondary_children, school_breakdown['secondary_schools']['values']['this'])
final_data = {
'total_schools': {
"name": "Schools",
"values": {"this": total_schools}
},
"school_breakdown": school_breakdown,
"children_per_primary_school": {
"name": "Children (6-13 years) in the area for each primary school",
"values": {"this": children_per_primary_school}
},
"children_per_secondary_school": {
"name": "Children (14-18 years) for each secondary school",
"values": {"this": children_per_secondary_school}
}
}
return final_data
def get_ecd_centres_profile(geo, session):
children_age_groups, total_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=['3', '4', '5', '6'],
recode=ECD_AGE_CATEGORIES,
percent=False,
key_order=['0-2', '3-5', '6-7'])
children_3_to_5 = children_age_groups['3-5']['values']['this']
# This will not be needed when the column names for centres are changed.
reg_recode = {
'registration_incomplete-access_denied': 'Registration incomplete',
'registration_incomplete-closed': 'Registration incomplete',
'registration_incomplete-not_found': 'Registration incomplete',
}
table = get_datatable('ecd_centres_by_registration')
ecd_centres_by_registration, total_ecd_centres = table.get_stat_data(
geo, percent=True, recode=reg_recode)
table = get_datatable('ecd_children_enrolled')
children_enrolled, _ = table.get_stat_data(
geo, percent=False)
children_enrolled['children_enrolled_age_3_to_5']['name'] = 'Children enrolled in ECD centres'
children_3_to_5_coverage = percent(
children_enrolled['children_enrolled_age_3_to_5']['values']['this'],
children_3_to_5)
children_3_to_5_per_ecd_centre = ratio(
children_3_to_5,
total_ecd_centres)
children_3_to_5_per_ecd_centre_enrolled = ratio(
children_enrolled['children_enrolled_age_3_to_5']['values']['this'],
total_ecd_centres)
table = get_datatable('ecd_centres_by_type')
ecd_centres_by_type, _ = table.get_stat_data(
geo,
key_order=['community_based', 'home_based', 'school_based', 'other', 'not_specified'])
table = get_datatable('ecd_grade_r')
grade_r, _ = table.get_stat_data(
geo, percent=False)
grade_r['centres_with_grade_r_learners']['name'] = "Centres with Grade R learners"
    # Currently there is no data available for these datapoints.
    # They are still displayed in the template to make that gap visible.
registered_ecd_programmes = {
"name": "Registered ECD programmes",
"values": {"this": None},
}
children_in_ecd_programmes = {
"name": "Children in programmes",
"values": {"this": None},
}
children_in_play_groups = {
"name": "Children in play groups",
"values": {"this": None},
}
children_grade_r_age = {
"name": "Children of Grade R age (6 years)",
"values": {"this": children_age_groups['6']['values']['this']}
}
schools_with_grade_r_learners = {
"name": "Schools with Grade R learners",
"values": {"this": None}
}
return {
"total_ecd_centres": {
"name": "Number of ECD centres",
"values": {"this": total_ecd_centres}
},
"ecd_centres_by_registration": ecd_centres_by_registration,
"ecd_centres_by_type": ecd_centres_by_type,
"registered_ecd_programmes": registered_ecd_programmes,
"children_enrolled_age_3_to_5": children_enrolled['children_enrolled_age_3_to_5'],
"children_3_to_5_coverage": {
"name": "Children living in the area who are enrolled in ECD centres. (Children enrolled in centres / Children living in the area)",
"values": {"this": children_3_to_5_coverage}
},
"children_3_to_5_per_ecd_centre": {
"name": "Average number of children living in the area for each ECD centre",
"values": {"this": children_3_to_5_per_ecd_centre}
},
"children_3_to_5_per_ecd_centre_enrolled": {
"name": "Average number of children enrolled in each ECD centre",
"values": {"this": children_3_to_5_per_ecd_centre_enrolled}
},
"children_in_ecd_programmes": children_in_ecd_programmes,
"children_in_play_groups": children_in_play_groups,
"children_grade_r_age": children_grade_r_age,
"ecd_centres_with_grade_r_learners": grade_r['centres_with_grade_r_learners'],
"schools_with_grade_r_learners": schools_with_grade_r_learners
}
def get_ecd_educators_profile(geo, session):
# These values will be filled as information becomes available.
table = get_datatable('ecd_educators')
ecd_educators, _ = table.get_stat_data(
geo, percent=False)
table = get_datatable('ecd_children_enrolled')
children_enrolled, _ = table.get_stat_data(
geo, percent=False)
children_per_practitioner = ratio(
children_enrolled['children_enrolled_age_3_to_5']['values']['this'],
ecd_educators['practitioners_for_ages_3_to_5']['values']['this'])
_, children_age_3_to_5_in_area = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=['3', '4', '5'],
recode=ECD_AGE_CATEGORIES)
return {
"children_per_practitioner": {
"name": "Number of children enrolled in centres for each practitioner",
"values": {"this": children_per_practitioner}
},
"children_per_trained_practitioner": {
"name": "Number of children enrolled in centres for each trained practitioner *",
"values": {"this": None}
},
"children_per_untrained_practitioner": {
"name": "Number of children enrolled in centres for each untrained practitioner *",
"values": {"this": None}
},
"practitioners_for_ages_3_to_5": {
"name": "Number of practitioners in the area for children aged 3-5.",
"values": {"this": ecd_educators['practitioners_for_ages_3_to_5']['values']['this']}
},
'children_age_3_to_5_in_area': {
"name": "Children (age 3-5) living in the area",
"values": {"this": children_age_3_to_5_in_area}
},
}
def get_ecd_budgets_profile(geo, session):
table = get_datatable('ecd_grants')
ecd_grants, _ = table.get_stat_data(
geo, percent=False)
# http://www.gov.za/services/child-care-social-benefits/child-support-grant
monthly_csg = 350.00
csg = ecd_grants['child_support_grant']['values']['this']
child_support_grants = {
"name": "Learners in centres receiving child support grants",
"values": {"this": csg}
}
child_support_grants_amount = {
"name": "Approximate monthly amount paid as child support grants to children in ECD centres. (Learners in centres receiving grants x Child support grant amount *)",
"values": {"this": csg * monthly_csg}
}
# These values will be filled as information becomes available.
ecd_subsidies_budgeted = {
"name": "Amount budgeted for early learning subsidies",
"values": {"this": None}
}
ecd_subsidies_paid = {
"name": "Amount paid for early learning subsidies",
"values": {"this": None}
}
children_receiving_subsidy = {
"name": "Children receiving an early learning subsidy",
"values": {"this": None}
}
return {
"ecd_subsidies_budgeted": ecd_subsidies_budgeted,
"ecd_subsidies_paid": ecd_subsidies_paid,
"child_support_grants": child_support_grants,
"child_support_grants_amount": child_support_grants_amount,
"children_receiving_subsidy": children_receiving_subsidy
}
def get_hospitals_profile(geo, session):
# population group
_, total_pop = get_stat_data(
['population group'], geo, session, table_dataset='Census 2011')
# Hospitals
table = get_datatable('hospitals_2012')
keys = ['regional_hospital', 'central_hospital', 'district_hospital', 'clinic', 'chc']
hospital_breakdown, total_hospitals = table.get_stat_data(
geo, keys, percent=False,
recode={'chc': 'Community health centre'})
people_per_hospital = ratio(total_pop, total_hospitals)
_, ecd_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=['0', '1', '2', '3', '4', '5'])
children_0_to_5_per_hospital = ratio(ecd_children, total_hospitals)
return {
"total_hospitals": {
"name": "Hospitals / Clinics",
"values": {"this": total_hospitals}
},
"hospital_breakdown": hospital_breakdown,
"people_per_hospital": {
"name": "People living in the area for each hospital / clinic",
"values": {"this": people_per_hospital}
},
"children_0_to_5_per_hospital": {
"name": "Children (aged 0-5 years) living in the area for each hospital / clinic",
"values": {"this": children_0_to_5_per_hospital}
},
}
def get_households_profile(geo, session):
# head of household
# gender
head_gender_dist, total_households = get_stat_data(
['gender of household head'], geo, session,
order_by='gender of household head')
female_heads = head_gender_dist['Female']['numerators']['this']
# age
db_model_u18 = get_model_from_fields(
['gender of head of household'], geo.geo_level,
table_name='genderofheadofhouseholdunder18'
)
objects = get_objects_by_geo(db_model_u18, geo, session)
total_under_18 = float(sum(o[0] for o in objects))
# type of dwelling
type_of_dwelling_dist, _ = get_stat_data(
['type of dwelling'], geo, session,
recode=TYPE_OF_DWELLING_RECODE,
order_by='-total')
informal = type_of_dwelling_dist['Shack']['numerators']['this']
_, total_ecd_children = get_stat_data(
['age in completed years'], geo, session,
table_name='ageincompletedyears',
only=['0', '1', '2', '3', '4', '5'])
ecd_children_per_household = ratio(total_ecd_children, total_households)
return {
'total_households': {
'name': 'Households',
'values': {'this': total_households},
},
'type_of_dwelling_distribution': type_of_dwelling_dist,
'informal': {
'name': 'Households that are informal dwellings (shacks)',
'values': {'this': percent(informal, total_households)},
'numerators': {'this': informal},
},
'head_of_household': {
'gender_distribution': head_gender_dist,
'female': {
'name': 'Households with women as their head',
'values': {'this': percent(female_heads, total_households)},
'numerators': {'this': female_heads},
},
'under_18': {
'name': 'Households with heads under 18 years old',
'values': {'this': total_under_18},
}
},
'ecd_children_per_household': {
'name': 'Average number of children (aged 0-5) in each household',
'values': {'this': ecd_children_per_household},
},
}
def get_service_delivery_profile(geo, session):
# water source
water_src_data, total_wsrc = get_stat_data(
['source of water'], geo, session,
recode=SHORT_WATER_SOURCE_CATEGORIES,
order_by='-total')
if 'Service provider' in water_src_data:
total_water_sp = water_src_data['Service provider']['numerators']['this']
else:
total_water_sp = 0.0
# electricity
elec_attrs = ['electricity for cooking',
'electricity for heating',
'electricity for lighting']
db_model_elec = get_model_from_fields(elec_attrs, geo.geo_level)
objects = get_objects_by_geo(db_model_elec, geo, session)
total_elec = 0.0
total_some_elec = 0.0
elec_access_data = {
'total_all_elec': {
"name": "Have electricity for everything",
"numerators": {"this": 0.0},
},
'total_some_not_all_elec': {
"name": "Have electricity for some things",
"numerators": {"this": 0.0},
},
'total_no_elec': {
"name": "No electricity",
"numerators": {"this": 0.0},
}
}
for obj in objects:
total_elec += obj.total
has_some = False
has_all = True
for attr in elec_attrs:
val = not getattr(obj, attr).startswith('no ')
has_all = has_all and val
has_some = has_some or val
if has_some:
total_some_elec += obj.total
if has_all:
elec_access_data['total_all_elec']['numerators']['this'] += obj.total
elif has_some:
elec_access_data['total_some_not_all_elec']['numerators']['this'] += obj.total
else:
elec_access_data['total_no_elec']['numerators']['this'] += obj.total
for data, total in zip((elec_access_data,), (total_elec,)):
for fields in data.values():
fields["values"] = {"this": percent(fields["numerators"]["this"], total)}
add_metadata(elec_access_data, db_model_elec)
# toilets
toilet_data, total_toilet = get_stat_data(
['toilet facilities'], geo, session,
exclude_zero=True,
recode=COLLAPSED_TOILET_CATEGORIES,
order_by='-total')
total_flush_toilet = 0.0
total_no_toilet = 0.0
for key, data in toilet_data.iteritems():
if key.startswith('Flush') or key.startswith('Chemical'):
total_flush_toilet += data['numerators']['this']
if key == 'None':
total_no_toilet += data['numerators']['this']
return {
'water_source_distribution': water_src_data,
'percentage_water_from_service_provider': {
"name": "Are getting water from a regional or local service provider",
"numerators": {"this": total_water_sp},
"values": {"this": percent(total_water_sp, total_wsrc)},
},
'percentage_electricity_access': {
"name": "Have electricity for at least one of cooking, heating or lighting",
"numerators": {"this": total_some_elec},
"values": {"this": percent(total_some_elec, total_elec)},
},
'electricity_access_distribution': elec_access_data,
'percentage_flush_toilet_access': {
"name": "Have access to flush or chemical toilets",
"numerators": {"this": total_flush_toilet},
"values": {"this": percent(total_flush_toilet, total_toilet)},
},
'percentage_no_toilet_access': {
"name": "Have no access to any toilets",
"numerators": {"this": total_no_toilet},
"values": {"this": percent(total_no_toilet, total_toilet)},
},
'toilet_facilities_distribution': toilet_data,
}
|
11490856
|
import dt
import unittest
import numpy as np
# ----------------------------------------------------------------------------
# SYMMETRY
# ----------------------------------------------------------------------------
class TestDT(unittest.TestCase):
def test_identity(self):
"""Assert that equal potentials are already ground truth"""
x = np.ones((10,13))
xs,i = dt.compute(x)
self.assertTrue((xs == x).all())
def test_independence(self):
"""Assert the the order of transforms along axes does not matter"""
x = np.random.standard_normal((10,11,3))
xs1,i1 = dt.compute(x, axes=(0,1,2))
xs2,i2 = dt.compute(x, axes=(2,1,0))
self.assertTrue(np.linalg.norm(xs1 - xs2) == 0.0)
self.assertTrue(np.linalg.norm(i1[0] - i2[2]) == 0.0)
def test_distance_cost(self):
"""Assert that the minimum solution cost > the minimum potential"""
for n in xrange(100):
x = np.random.standard_normal((9,13))
xs,i = dt.compute(x)
self.assertTrue(xs.min() >= x.min())
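if __name__ == '__main__':
    # Convenience entry point so these tests can be run directly with `python <file>.py`
    # (a sketch; the original suite may instead be driven by an external test runner).
    unittest.main()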
|
11490858
|
import os
from pathlib import Path
DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("EQUALITY_ROOT", "~/.equality/mainnet"))).resolve()
|
11490880
|
import copy
import rdtest
import renderdoc as rd
from typing import Tuple
class GL_Shader_Editing(rdtest.TestCase):
demos_test_name = 'GL_Shader_Editing'
def check_capture(self):
eid = self.find_action("fixedprog").eventId
self.controller.SetFrameEvent(eid, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
fixedrefl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
eid = self.find_action("dynamicprog").eventId
self.controller.SetFrameEvent(eid, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
dynamicrefl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
vsrefl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
eid = self.find_action("sepprog").eventId
self.controller.SetFrameEvent(eid, False)
vsseprefl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
fsseprefl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
# Work at the last action, where the uniforms have been trashed
self.controller.SetFrameEvent(self.get_last_action().eventId, False)
tex: rd.ResourceId = pipe.GetOutputTargets()[0].resourceId
# On upper row: Left triangle is fully green, right triangle is half-green
# On lower row: Left triangle is fully green
self.check_pixel_value(tex, 0.25, 0.25, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.0, 0.5, 0.0, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.0, 1.0, 0.0, 1.0])
rdtest.log.success("Values are as expected initially")
source: bytes = fixedrefl.rawBytes.replace(b'.rgba', b'.rgga').replace(b'location = 9', b'location = 10')
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(fixedrefl.entryPoint,
fixedrefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Fragment)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
fixedFS = newShader[0]
source: bytes = dynamicrefl.rawBytes.replace(b'.rgba', b'.rgga').replace(b'#if 1', b'#if 0')
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(dynamicrefl.entryPoint,
dynamicrefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Fragment)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
dynamicFS = newShader[0]
source: bytes = vsrefl.rawBytes.replace(b'Position.xyz', b'Position.xyz+vec3(1.0)')
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(vsrefl.entryPoint,
vsrefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Vertex)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
offsetVS = newShader[0]
source: bytes = vsrefl.rawBytes
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(vsrefl.entryPoint,
vsrefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Vertex)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
nochangeVS = newShader[0]
source: bytes = vsseprefl.rawBytes.replace(b'Position.xyz', b'Position.xyz+vec3(1.0)')
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(vsseprefl.entryPoint,
vsseprefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Vertex)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
sepVS = newShader[0]
source: bytes = fsseprefl.rawBytes.replace(b'.rgba', b'.rgga')
newShader: Tuple[rd.ResourceId, str] = self.controller.BuildTargetShader(fsseprefl.entryPoint,
fsseprefl.encoding, source,
rd.ShaderCompileFlags(),
rd.ShaderStage.Fragment)
if len(newShader[1]) != 0:
raise rdtest.TestFailureException("Failed to compile edited shader: {}".format(newShader[1]))
sepFS = newShader[0]
# Edit both fragment shaders
self.controller.ReplaceResource(fixedrefl.resourceId, fixedFS)
self.controller.ReplaceResource(dynamicrefl.resourceId, dynamicFS)
# Refresh the replay if it didn't happen already
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Triangles have green propagated across to the blue channel
self.check_pixel_value(tex, 0.25, 0.25, [0.0, 1.0, 1.0, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.0, 0.5, 0.5, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.0, 1.0, 1.0, 1.0])
rdtest.log.success("Values are as expected after fragment editing")
# Now "edit" the VS but don't change it. We should still get the same values
self.controller.ReplaceResource(vsrefl.resourceId, nochangeVS)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Triangles have green propagated across to the blue channel
self.check_pixel_value(tex, 0.25, 0.25, [0.0, 1.0, 1.0, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.0, 0.5, 0.5, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.0, 1.0, 1.0, 1.0])
rdtest.log.success("Values are as expected after no-op vertex editing")
# Change the VS to one that has offset the triangles off-centre
self.controller.ReplaceResource(vsrefl.resourceId, offsetVS)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Original sample positions are now the clear color
self.check_pixel_value(tex, 0.25, 0.25, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.2, 0.2, 0.2, 1.0])
# The triangles are still the same colour but up and to the right
self.check_pixel_value(tex, 0.45, 0.05, [0.0, 1.0, 1.0, 1.0])
self.check_pixel_value(tex, 0.95, 0.05, [0.0, 0.5, 0.5, 1.0])
self.check_pixel_value(tex, 0.45, 0.55, [0.0, 1.0, 1.0, 1.0])
rdtest.log.success("Values are as expected after offset vertex editing")
# Now undo the first FS edit
self.controller.RemoveReplacement(fixedrefl.resourceId)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Original sample positions are still the clear color
self.check_pixel_value(tex, 0.25, 0.25, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.2, 0.2, 0.2, 1.0])
# The lower triangle is the edited colour, the other two have reverted to green channel only
self.check_pixel_value(tex, 0.45, 0.05, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(tex, 0.95, 0.05, [0.0, 0.5, 0.0, 1.0])
self.check_pixel_value(tex, 0.45, 0.55, [0.0, 1.0, 1.0, 1.0])
rdtest.log.success("Values are as expected after removing first fragment edit")
# Now undo the first VS edit
self.controller.RemoveReplacement(vsrefl.resourceId)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Only the lower triangle is the edited colour, but they are back in the original positions
self.check_pixel_value(tex, 0.25, 0.25, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.0, 0.5, 0.0, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.0, 1.0, 1.0, 1.0])
rdtest.log.success("Values are as expected after removing vertex edit")
# finally undo the second FS edit
self.controller.RemoveReplacement(dynamicrefl.resourceId)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# We should be back to where we started
self.check_pixel_value(tex, 0.25, 0.25, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(tex, 0.75, 0.25, [0.0, 0.5, 0.0, 1.0])
self.check_pixel_value(tex, 0.25, 0.75, [0.0, 1.0, 0.0, 1.0])
rdtest.log.success("Values are as expected after removing all edits")
rdtest.log.success("Linked program editing succeeded")
# Check that we can edit separable shaders
# Only looking at bottom left triangle, it should be green
self.check_pixel_value(tex, 0.75, 0.75, [0.0, 1.0, 0.0, 1.0])
self.controller.ReplaceResource(fsseprefl.resourceId, sepFS)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Now it should be green-blue
self.check_pixel_value(tex, 0.75, 0.75, [0.0, 1.0, 1.0, 1.0])
self.controller.ReplaceResource(vsseprefl.resourceId, sepVS)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Now it should be green-blue and offset
self.check_pixel_value(tex, 0.75, 0.75, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.95, 0.55, [0.0, 1.0, 1.0, 1.0])
self.controller.RemoveReplacement(fsseprefl.resourceId)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# Now it should be back to green and offset
self.check_pixel_value(tex, 0.75, 0.75, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(tex, 0.95, 0.55, [0.0, 1.0, 0.0, 1.0])
self.controller.RemoveReplacement(vsseprefl.resourceId)
self.controller.SetFrameEvent(self.get_last_action().eventId, True)
# We should be back to where we started
self.check_pixel_value(tex, 0.75, 0.75, [0.0, 1.0, 0.0, 1.0])
rdtest.log.success("Separable program editing succeeded")
self.controller.FreeTargetResource(nochangeVS)
self.controller.FreeTargetResource(offsetVS)
self.controller.FreeTargetResource(fixedFS)
self.controller.FreeTargetResource(dynamicFS)
self.controller.FreeTargetResource(sepVS)
self.controller.FreeTargetResource(sepFS)
|
11490909
|
import os
import cv2
import numpy as np
import depthai
import json
from lane_detection import Lanes
base = "/home/satinders/Documents/personal projects/deepway/depthai/resources/nn"
class DepthAi:
max_z = 6
min_z = 0
max_x = 1.3
min_x = -0.5
def __init__(self):
# self.lanes = Lanes()
self.device = depthai.Device('', False)
json_path = os.path.join(base, "mobilenet-ssd/mobilenet-ssd_depth.json")
config = {
"streams": ["metaout", "previewout"],
"ai": {
"calc_dist_to_bb": True,
"blob_file": os.path.join(base, "mobilenet-ssd/mobilenet-ssd.blob"),
"blob_file_config": json_path
}
}
with open(json_path, 'r') as f:
json_file = json.load(f)
self.labels = list(json_file['mappings']['labels'])
self.p = self.device.create_pipeline(config=config)
self.entries = []
# print(self.p)
# print(config)
def translate_x(self, val):
norm = min(self.max_x, max(val, self.min_x))
position = (norm - self.min_x) / (self.max_x - self.min_x) * 256
return position
def translate_y(self, val):
norm = min(self.max_z, max(val, self.min_z))
position = (1 - (norm - self.min_z) / (self.max_z - self.min_z)) * 256
return position
def run(self):
while 1:
net_packets, data_packets = self.p.get_available_nnet_and_data_packets()
for net_packet in net_packets:
self.entries = []
for e in net_packet.entries():
if e[0]['id'] == -1 or e[0]['confidence'] == 0.0:
break
if e[0]['confidence'] > 0.5:
self.entries.append(e[0])
for packet in data_packets:
if packet.stream_name == "previewout":
data = packet.getData()
blue, green, red = data[0, :, :], data[1, :, :], data[2, :, :]
frame = cv2.merge([blue, green, red])
img_h, img_w = frame.shape[:2]
results = []
for e in self.entries:
pt1 = int(e['left'] * img_w), int(e['top'] * img_h)
pt2 = int(e['right'] * img_w), int(e['bottom'] * img_h)
label = self.labels[int(e['label'])]
distance_x = self.translate_x(e['distance_x'])
distance_y = self.translate_y(e['distance_z'])
results.append((pt1, pt2, label, distance_x, distance_y))
yield frame, results
def __del__(self):
del self.p
del self.device
if __name__ == '__main__':
obj = DepthAi()
lanes = Lanes()
for frame, results in obj.run():
label_object_mapping = {}
for *_, label, x, y in results:
if label not in label_object_mapping:
label_object_mapping[label] = []
label_object_mapping[label].append([x, y])
lanes.get_lanes_prediction(frame, label_object_mapping, True)
for pt1, pt2, label, dist_x, dist_z in results:
cv2.rectangle(frame, pt1, pt2, (0, 255, 0), 2)
cv2.putText(frame, str(label), pt1, cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)
cv2.imshow("preview", frame)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
del obj
|
11490916
|
from globibot.lib.plugin import Plugin
from globibot.lib.decorators import command
from globibot.lib.helpers import parsing as p
from globibot.lib.helpers import formatting as f
from globibot.lib.helpers.hooks import master_only
from functools import reduce
from .permissions import permission_names, PERMISSION_NAMES
from discord import ChannelType, Game
class Meta(Plugin):
@command(
p.string('!permissions') + p.bind(p.maybe(p.channel), 'channel_id'),
master_only
)
async def permissions(self, message, channel_id=None):
if channel_id is None:
channel_id = message.channel.id
channel = next(
channel for channel in message.server.channels
if channel.id == str(channel_id)
)
perms = [' - {}'.format(perm) for perm in self.permissions_in(channel)]
await self.send_message(
message.channel,
'In {}, I can:\n{}'
.format(f.channel(channel_id), f.code_block(perms)),
delete_after = 30
)
@command(p.string('!channels'))
async def channels(self, message):
channels = [
(channel.name, channel.id)
for channel in message.server.channels
]
await self.send_message(
message.channel,
f.code_block(f.format_sql_rows(channels)),
delete_after = 30
)
@command(
p.string('!where-can-you') + p.bind(p.word, 'what'), master_only
)
async def where_can_you(self, message, what):
perm_name = PERMISSION_NAMES[what]
permissions = [
(channel, self.permissions_in(channel))
for channel in message.server.channels
]
where = [
channel.id for channel, perms in permissions
if perm_name in perms and channel.type == ChannelType.text
]
await self.send_message(
message.channel,
'On this server, I can `{}` {}'
.format(
PERMISSION_NAMES[what],
('in ' + ' '.join(map(f.channel, where))) if where else '`nowhere`'
),
delete_after = 30
)
@command(p.string('!status'), master_only)
async def status(self, message):
status = message.clean_content[len('!status'):].strip()
await self.bot.change_status(Game(name=status))
@command(p.string('!name'), master_only)
async def name(self, message):
name = message.clean_content[len('!name'):].strip()
await self.bot.edit_profile(username=name)
def permissions_in(self, channel):
member = channel.server.get_member(self.bot.user.id)
standard_perms = channel.permissions_for(member)
overwrites = [channel.overwrites_for(role) for role in member.roles]
sets = [permission_names(perms) for perms in [standard_perms] + overwrites]
return reduce(set.union, sets)
|
11490939
|
import pymysql.cursors
conexion = pymysql.connect(host='192.168.56.2',
user='pepito',
password='<PASSWORD>',
db='test',
cursorclass=pymysql.cursors.DictCursor)
try:
with conexion.cursor() as cursor:
sql = "INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)"
cursor.execute(sql, ('<EMAIL>', 'super-secreto'))
conexion.commit()
with conexion.cursor() as cursor:
sql = "SELECT `id`, `password` FROM `users` WHERE `email`=%s"
cursor.execute(sql, ('<EMAIL>',))
resultado = cursor.fetchone()
print(resultado)
finally:
conexion.close()
|
11490951
|
from mher.samplers.sampler import RandomSampler
from mher.samplers.her_sampler import HER_Sampler
from mher.samplers.nstep_sampler import Nstep_Sampler, Nstep_HER_Sampler
from mher.samplers.prioritized_sampler import PrioritizedSampler, PrioritizedHERSampler
|
11490959
|
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.binance.types import PairSymbolType
from cryptoxlib.clients.binance.exceptions import BinanceException
def map_pair(pair: Pair) -> str:
return f"{pair.base}{pair.quote}"
def map_ws_pair(pair: Pair) -> str:
return map_pair(pair).lower()
def extract_symbol(symbol: PairSymbolType) -> str:
if isinstance(symbol, str):
return symbol
elif isinstance(symbol, Pair):
return map_pair(symbol)
    raise BinanceException(f"Symbol [{symbol}] is neither a string nor a Pair.")
def extract_ws_symbol(symbol: PairSymbolType) -> str:
return extract_symbol(symbol).lower()
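if __name__ == '__main__':
    # Illustrative sanity check (a sketch, not part of the original module; it assumes
    # the Pair constructor takes base and quote, which is how map_pair reads them):
    btc_usdt = Pair('BTC', 'USDT')
    print(map_pair(btc_usdt))        # expected: 'BTCUSDT'
    print(map_ws_pair(btc_usdt))     # expected: 'btcusdt'
    print(extract_symbol('ETHBTC'))  # plain strings pass through unchanged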
|
11490978
|
from datetime import date
from typing import Any
from seedwork.domain.entities import Entity
from seedwork.domain.value_objects import Currency, UUID
from modules.catalog.domain.rules import ListingPriceMustBeGreaterThanZero
from .value_objects import ListingStatus
class Listing(Entity):
title: str
description: str
price: Currency
seller_id: UUID
status = ListingStatus.DRAFT
def change_main_attributes(self, title: str, description: str, price: Currency):
self.title = title
self.description = description
self.price = price
def publish(self):
self.status = ListingStatus.PUBLISHED
class Seller(Entity):
id: UUID
is_new: bool = True
currently_published_listings_count: int = 0
def publish_listing(self, listing):
self.check_rule(ListingPriceMustBeGreaterThanZero(price=listing.price))
# self.check_rule(ListingMustBeInDraftState(listing.status))
# self.check_rule(SellerMustBeEligibleForAddingNextListing(self))
listing.publish()
|
11491003
|
def get_cell_value(cell):
return type(lambda: 0)(
(lambda x: lambda: x)(0).func_code, {}, None, None, (cell,)
)()
# longer and more verbose version:
import new
def get_cell_value(cell):
def make_closure_that_returns_value(use_this_value):
def closure_that_returns_value():
return use_this_value
return closure_that_returns_value
dummy_function = make_closure_that_returns_value(0)
dummy_function_code = dummy_function.func_code
our_function = new.function(dummy_function_code, {}, None, None, (cell,))
value_from_cell = our_function()
return value_from_cell
# examples
>>> def make_list_appender(mylist):
... def append_to_mylist(newvalue):
... mylist.append(newvalue)
... return newvalue
... return append_to_mylist
...
>>> somelist = []
>>> somelist_appender = make_list_appender(somelist)
>>> somelist_appender(2)
2
>>> somelist_appender(3)
3
>>> somelist
[2, 3]
>>> somelist_appender
<function append_to_mylist at 0xb7df556c>
>>> somelist_appender.func_closure
(<cell at 0xb7e1d38c: list object at 0xb7e0f26c>,)
>>> cell = somelist_appender.func_closure[0]
>>> get_cell_value(cell)
[2, 3]
>>> get_cell_value(cell) is somelist
True
|
11491023
|
import json
from tornado.ioloop import IOLoop
from appscale.admin.constants import CONTROLLER_STATE_NODE
class ControllerState(object):
""" Keeps track of the latest controller state. """
def __init__(self, zk_client):
""" Creates a new ControllerState object.
Args:
zk_client: A KazooClient.
"""
self.options = None
zk_client.DataWatch(CONTROLLER_STATE_NODE, self._controller_state_watch)
def _update_controller_state(self, encoded_controller_state):
""" Handles updates to controller state.
Args:
encoded_controller_state: A JSON-encoded string containing controller
state.
"""
if not encoded_controller_state:
return
controller_state = json.loads(encoded_controller_state)
self.options = controller_state.get('@options')
def _controller_state_watch(self, encoded_controller_state, _):
""" Handles updates to controller state.
Args:
encoded_controller_state: A JSON-encoded string containing controller
state.
"""
IOLoop.instance().add_callback(self._update_controller_state,
encoded_controller_state)
|
11491047
|
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Literal
from .typing import ImplicitDict, StringBasedDateTime
import s2sphere
TIME_FORMAT_CODE = 'RFC3339'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
EARTH_CIRCUMFERENCE_M = 40.075e6
API_0_3_5 = '0.3.5'
API_0_3_17 = '0.3.17'
# In Both
SCOPE_SC = 'utm.strategic_coordination'
SCOPE_CM = 'utm.constraint_management'
# In 0.3.5
SCOPE_CI = 'utm.constraint_consumption'
# In 0.3.17
SCOPE_CP = 'utm.constraint_processing'
SCOPE_CM_SA = 'utm.conformance_monitoring_sa'
SCOPE_AA = 'utm.availability_arbitration'
NO_OVN_PHRASES = {'', 'Available from USS'}
def make_vol4(
t0: Optional[datetime] = None,
t1: Optional[datetime] = None,
alt0: Optional[float] = None,
alt1: Optional[float] = None,
circle: Dict = None,
polygon: Dict = None) -> Dict:
vol3 = dict()
if circle is not None:
vol3['outline_circle'] = circle
if polygon is not None:
vol3['outline_polygon'] = polygon
if alt0 is not None:
vol3['altitude_lower'] = make_altitude(alt0)
if alt1 is not None:
vol3['altitude_upper'] = make_altitude(alt1)
vol4 = {'volume': vol3}
if t0 is not None:
vol4['time_start'] = make_time(t0)
if t1 is not None:
vol4['time_end'] = make_time(t1)
return vol4
def make_time(t: datetime) -> Dict:
return {
'value': t.isoformat() + 'Z',
        'format': TIME_FORMAT_CODE
}
def make_altitude(alt: float) -> Dict:
return {
'value': alt,
'reference': 'W84',
'units': 'M'
}
def make_circle(lat: float, lng: float, radius: float) -> Dict:
return {
"center": {
"lat": lat,
"lng": lng,
},
"radius": {
"value": radius,
"units": "M"
}
}
def make_polygon(coords: List[Tuple[float, float]]=None, latlngrect: s2sphere.LatLngRect=None) -> Dict:
if coords is not None:
return {
"vertices": [ {'lat': lat, 'lng': lng} for (lat, lng) in coords]
}
return {
"vertices": [
{'lat': latlngrect.lat_lo().degrees, 'lng': latlngrect.lng_lo().degrees},
{'lat': latlngrect.lat_lo().degrees, 'lng': latlngrect.lng_hi().degrees},
{'lat': latlngrect.lat_hi().degrees, 'lng': latlngrect.lng_hi().degrees},
{'lat': latlngrect.lat_hi().degrees, 'lng': latlngrect.lng_lo().degrees},
]
}
def latitude_degrees(distance_meters: float) -> float:
return 360 * distance_meters / EARTH_CIRCUMFERENCE_M
def parse_time(time: Dict) -> datetime:
t_str = time['value']
if t_str[-1] == 'Z':
t_str = t_str[0:-1]
return datetime.fromisoformat(t_str)
def start_of(vol4s: List[Dict]) -> datetime:
return min([parse_time(vol4['time_start']) for vol4 in vol4s])
def offset_time(vol4s: List[Dict], dt: timedelta) -> List[Dict]:
for vol4 in vol4s:
vol4['time_start'] = make_time(parse_time(vol4['time_start']) + dt)
vol4['time_end'] = make_time(parse_time(vol4['time_end']) + dt)
return vol4s
class Subscription(dict):
@property
def valid(self) -> bool:
        return self.version is not None
@property
def version(self) -> Optional[int]:
return self.get('version', None)
################################################################################
#################### Start of ASTM-standard definitions #####################
#################### interfaces/astm-utm/Protocol/utm.yaml #####################
################################################################################
class LatLngPoint(ImplicitDict):
'''A class to hold information about a location as Latitude / Longitude pair '''
lat: float
lng: float
class Radius(ImplicitDict):
''' A class to hold the radius of a circle for the outline_circle object '''
value: float
units: str
class Polygon(ImplicitDict):
''' A class to hold the polygon object, used in the outline_polygon of the Volume3D object '''
vertices: List[LatLngPoint] # A minimum of three LatLngPoints are required
class Circle(ImplicitDict):
    ''' A class to hold the details of a circle object, used in the outline_circle object '''
center: LatLngPoint
radius: Radius
class Altitude(ImplicitDict):
''' A class to hold altitude information '''
value:float
reference:Literal['W84']
units: str
class Time(ImplicitDict):
''' A class to hold Time details '''
value: StringBasedDateTime
format:Literal['RFC3339']
class Volume3D(ImplicitDict):
'''A class to hold Volume3D objects '''
outline_circle: Circle
outline_polygon: Polygon
altitude_lower: Altitude
altitude_upper: Altitude
class Volume4D(ImplicitDict):
'''A class to hold Volume4D objects '''
volume: Volume3D
time_start: Time
time_end: Time
################################################################################
#################### End of ASTM-standard definitions #####################
#################### interfaces/astm-utm/Protocol/utm.yaml #####################
################################################################################
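if __name__ == '__main__':
    # Quick illustrative example (a sketch; the coordinates, altitudes and duration
    # below are arbitrary assumptions, not values taken from the ASTM spec):
    _t0 = datetime.utcnow()
    _example_vol4 = make_vol4(
        t0=_t0,
        t1=_t0 + timedelta(hours=1),
        alt0=0.0,
        alt1=120.0,
        circle=make_circle(34.0, -118.0, 50.0))
    print(_example_vol4['time_start']['value'])
    print(_example_vol4['volume']['outline_circle']['radius'])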
|
11491059
|
from openprocurement.api.utils import get_now, parse_date
from openprocurement.tender.core.tests.base import change_auth
from openprocurement.api.constants import RELEASE_2020_04_19
from openprocurement.tender.core.utils import calculate_tender_date, calculate_complaint_business_date
from openprocurement.tender.core.constants import ALP_MILESTONE_REASONS
from copy import deepcopy
from datetime import timedelta
from mock import patch
class TenderQualificationMilestone24HMixin(object):
docservice = True
context_name = "qualification" # can be also "award"
initial_bids_tokens = {}
context_id = None
tender_id = None
tender_token = None
app = None
def setUp(self):
super(TenderQualificationMilestone24HMixin, self).setUp()
if self.context_name == "qualification":
response = self.app.get("/tenders/{}/qualifications".format(self.tender_id))
self.assertEqual(response.content_type, "application/json")
qualifications = response.json["data"]
self.context_id = qualifications[0]["id"]
else:
self.context_id = self.award_id
def test_24hours_milestone(self):
self.app.authorization = ("Basic", ("broker", ""))
# try upload documents
response = self.app.get("/tenders/{}".format(self.tender_id))
context = response.json["data"]["{}s".format(self.context_name)][0]
bid_id = context.get("bid_id") or context.get("bidID") # awards and qualifications developed on different days
winner_token = self.initial_bids_tokens[bid_id]
upload_allowed_by_default = response.json["data"]["procurementMethodType"] in \
("aboveThresholdUA.defense", "simple.defense")
self.assert_upload_docs_status(bid_id, winner_token, success=upload_allowed_by_default)
# invalid creation
response = self.app.post_json(
"/tenders/{}/{}s/{}/milestones".format(self.tender_id, self.context_name, self.context_id),
{
"data": {}
},
status=403
)
self.assertEqual(
response.json,
{"status": "error", "errors": [{"location": "url", "name": "permission", "description": "Forbidden"}]}
)
response = self.app.post_json(
"/tenders/{}/{}s/{}/milestones?acc_token={}".format(
self.tender_id,
self.context_name,
self.context_id,
self.tender_token
),
{
"data": {
"code": "alp"
}
},
status=403
)
if get_now() > RELEASE_2020_04_19:
self.assertEqual(
response.json,
{"status": "error", "errors": [{"description": "The only allowed milestone code is '24h'",
"location": "body", "name": "data"}]}
)
else:
self.assertEqual(
response.json,
{"status": "error", "errors": [{"location": "body", "name": "data", "description": "Forbidden"}]}
)
return
# valid creation
request_data = {
"code": "24h",
"description": "One ring to bring them all and in the darkness bind them",
"dueDate": (get_now() + timedelta(days=10)).isoformat()
}
response = self.app.post_json(
"/tenders/{}/{}s/{}/milestones?acc_token={}".format(
self.tender_id, self.context_name, self.context_id, self.tender_token
),
{"data": request_data},
)
self.assertEqual(response.status, "201 Created")
created_milestone = response.json["data"]
# get milestone from tender
response = self.app.get("/tenders/{}".format(self.tender_id))
tender_data = response.json["data"]
context = tender_data["{}s".format(self.context_name)][0]
public_milestone = context["milestones"][0]
self.assertEqual(created_milestone, public_milestone)
self.assertEqual(
set(created_milestone.keys()),
{
"id",
"date",
"code",
"description",
"dueDate",
}
)
self.assertEqual(created_milestone["code"], request_data["code"])
self.assertEqual(created_milestone["description"], request_data["description"])
self.assertNotEqual(created_milestone["dueDate"], request_data["dueDate"])
expected_date = calculate_tender_date(
parse_date(created_milestone["date"]),
timedelta(hours=24),
tender_data
)
self.assertEqual(created_milestone["dueDate"], expected_date.isoformat())
# get milestone by its direct link
response = self.app.get("/tenders/{}/{}s/{}/milestones/{}".format(
self.tender_id, self.context_name, self.context_id, created_milestone["id"]
))
direct_milestone = response.json["data"]
self.assertEqual(created_milestone, direct_milestone)
# can't post another
response = self.app.post_json(
"/tenders/{}/{}s/{}/milestones?acc_token={}".format(
self.tender_id, self.context_name, self.context_id, self.tender_token
),
{"data": request_data},
status=422
)
self.assertEqual(
response.json,
{"status": "error", "errors": [{"description": [
{"milestones": ["There can be only one '24h' milestone"]}],
"location": "body", "name": "{}s".format(self.context_name)}]}
)
# can't update status of context until dueDate
activation_data = {"status": "active", "qualified": True, "eligible": True}
response = self.app.patch_json(
"/tenders/{}/{}s/{}?acc_token={}".format(
self.tender_id, self.context_name, self.context_id, self.tender_token
),
{"data": activation_data},
status=403
)
self.assertEqual(
response.json,
{
"status": "error", "errors": [
{
"description": "Can't change status to 'active' "
"until milestone.dueDate: {}".format(created_milestone["dueDate"]),
"location": "body", "name": "data"
}]
}
)
# try upload documents
self.assert_upload_docs_status(bid_id, winner_token)
# wait until milestone dueDate ends
with patch("openprocurement.tender.core.procedure.validation.get_now", lambda: get_now() + timedelta(hours=24)):
with patch("openprocurement.tender.core.validation.get_now", lambda: get_now() + timedelta(hours=24)):
self.assert_upload_docs_status(bid_id, winner_token, success=upload_allowed_by_default)
response = self.app.patch_json(
"/tenders/{}/{}s/{}?acc_token={}".format(
self.tender_id, self.context_name, self.context_id, self.tender_token
),
{"data": activation_data},
status=200
)
self.assertEqual(response.json["data"]["status"], "active")
# check appending milestone at active qualification status
# remove milestone to skip "only one" validator
tender = self.db.get(self.tender_id)
context = tender["{}s".format(self.context_name)][0]
context["milestones"] = []
self.db.save(tender)
response = self.app.post_json(
"/tenders/{}/{}s/{}/milestones?acc_token={}".format(
self.tender_id, self.context_name, self.context_id, self.tender_token
),
{"data": request_data},
status=403
)
self.assertEqual(
response.json,
{"status": "error", "errors": [
{"description": "Not allowed in current 'active' {} status".format(self.context_name),
"location": "body", "name": "data"}]}
)
def assert_upload_docs_status(self, bid_id, bid_token, success=True):
document = {
"title": "name.doc",
"url": self.generate_docservice_url(),
"hash": "md5:" + "0" * 32,
"format": "application/msword",
}
response = self.app.post_json(
"/tenders/{}/bids/{}/documents?acc_token={}".format(
self.tender_id, bid_id, bid_token),
{"data": document},
status=201 if success else 403
)
        if success:
document["title"] = "ham.jpeg"
self.app.put_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, response.json["data"]["id"], bid_token),
{"data": document},
)
self.app.patch_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, response.json["data"]["id"], bid_token),
{"data": {"title": "spam.doc"}},
status=200 if success else 403
)
class TenderQualificationMilestoneALPMixin(object):
docservice = True
initial_status = "active.auction"
initial_bids_tokens = {}
context_id = None
tender_id = None
tender_token = None
app = None
def setUp(self):
more_bids = 4 - len(self.initial_bids)
if more_bids > 0:
self.initial_bids = deepcopy(self.initial_bids) + deepcopy(self.initial_bids)[:more_bids]
self.initial_bids[0]["value"]["amount"] = 400
self.initial_bids[1]["value"]["amount"] = 425
self.initial_bids[2]["value"]["amount"] = 450
self.initial_bids[3]["value"]["amount"] = 500
self.assertEqual(len(self.initial_bids), 4)
super(TenderQualificationMilestoneALPMixin, self).setUp()
tender = self.db.get(self.tender_id)
for b in tender["bids"]:
b["status"] = "active"
for l in b["lotValues"]:
if "status" in l:
l["status"] = "active" # in case they were "pending" #openeu
self.db.save(tender)
def test_milestone(self):
"""
        Test that an ALP milestone is created in two cases:
        1. the amount is at least 40% lower than the mean amount before the auction
        2. the amount is at least 30% lower than the next amount
:return:
"""
# sending auction results
auction_results = [
{
"id": b["id"],
"lotValues": [{"relatedLot": l["relatedLot"], "value": l["value"]} for l in b["lotValues"]]
} for b in self.initial_bids
]
if self.initial_lots:
auction_results[0]["lotValues"][0]["value"]["amount"] = 200 # only 1 case
auction_results[1]["lotValues"][0]["value"]["amount"] = 201 # both 1 and 2 case
auction_results[2]["lotValues"][0]["value"]["amount"] = 350 # only 2 case
auction_results[3]["lotValues"][0]["value"]["amount"] = 500 # no milestones
else:
auction_results[0]["value"]["amount"] = 29 # only 1 case
auction_results[1]["value"]["amount"] = 30 # both 1 and 2 case
auction_results[2]["value"]["amount"] = 350 # only 2 case
auction_results[3]["value"]["amount"] = 500 # no milestones
with change_auth(self.app, ("Basic", ("auction", ""))):
if self.initial_lots:
lot_id = self.initial_lots[0]["id"]
for l in self.initial_lots:
response = self.app.post_json(
f"/tenders/{self.tender_id}/auction/{l['id']}",
{"data": {"bids": auction_results}},
status=200
)
else:
lot_id = None
response = self.app.post_json(
f"/tenders/{self.tender_id}",
{"data": {"bids": auction_results}},
status=200
)
tender = response.json["data"]
self.assertEqual("active.qualification", tender["status"])
self.assertGreater(len(tender["awards"]), 0)
for a in response.json["data"]["awards"]:
if a["status"] == "pending" and a.get("lotID") == lot_id:
award = a
break
bid_id = award["bid_id"]
self.assertEqual(bid_id, auction_results[0]["id"])
if get_now() < RELEASE_2020_04_19:
return self.assertEqual(len(award.get("milestones", [])), 0)
# check that a milestone's been created
self.assertEqual(len(award.get("milestones", [])), 1)
milestone = award["milestones"][0]
self.assertEqual(milestone["code"], "alp")
self.assertEqual(milestone["description"], ALP_MILESTONE_REASONS[0])
# try to change award status
unsuccessful_data = {"status": "unsuccessful"}
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(
self.tender_id, award["id"], self.tender_token
),
{"data": unsuccessful_data},
status=403
)
tender = self.db.get(self.tender_id)
expected_due_date = calculate_complaint_business_date(
parse_date(milestone["date"]),
timedelta(days=1),
tender,
working_days=True,
)
self.assertEqual(
response.json,
{
'status': 'error', 'errors': [{
'description': "Can't change status to 'unsuccessful' until milestone.dueDate: {}".format(
expected_due_date.isoformat()
),
'location': 'body', 'name': 'data'
}]
}
)
# try to post/put/patch docs
for doc_type in ["evidence", None]:
self._test_doc_upload(
tender["procurementMethodType"], doc_type,
bid_id, self.initial_bids_tokens[bid_id], expected_due_date
)
# setting "dueDate" to now
self.wait_until_award_milestone_due_date()
# after milestone dueDate tender owner can change award status
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(
self.tender_id, a["id"], self.tender_token
),
{"data": unsuccessful_data},
status=200
)
self.assertEqual(response.json["data"]["status"], "unsuccessful")
# check second award
response = self.app.get(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
status=200
)
self.assertGreater(len(response.json["data"]), 1)
for a in response.json["data"]:
if a["status"] == "pending" and a.get("lotID") == lot_id:
second_award = a
break
self.assertEqual(len(second_award.get("milestones", [])), 1)
self.assertEqual(second_award["milestones"][0]["description"], " / ".join(ALP_MILESTONE_REASONS))
# proceed to the third award
self.wait_until_award_milestone_due_date()
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(
self.tender_id, second_award["id"], self.tender_token
),
{"data": unsuccessful_data},
status=200
)
self.assertEqual(response.json["data"]["status"], "unsuccessful")
# checking 3rd award
response = self.app.get(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
status=200
)
for a in response.json["data"]:
if a["status"] == "pending" and a.get("lotID") == lot_id:
third_award = a
break
self.assertEqual(len(third_award.get("milestones", [])), 1)
self.assertEqual(third_award["milestones"][0]["description"], ALP_MILESTONE_REASONS[1])
# proceed to the last award
self.wait_until_award_milestone_due_date()
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(
self.tender_id, third_award["id"], self.tender_token
),
{"data": unsuccessful_data},
status=200
)
self.assertEqual(response.json["data"]["status"], "unsuccessful")
# checking last award
response = self.app.get(
"/tenders/{}/awards?acc_token={}".format(self.tender_id, self.tender_token),
status=200
)
for a in response.json["data"]:
if a["status"] == "pending" and a.get("lotID") == lot_id:
last_award = a
break
self.assertNotIn("milestones", last_award)
def wait_until_award_milestone_due_date(self):
tender = self.db.get(self.tender_id)
for a in tender["awards"]:
if a.get("milestones"):
a["milestones"][0]["dueDate"] = get_now().isoformat()
self.db.save(tender)
def _test_doc_upload(self, procurement_method, doc_type, bid_id, bid_token, due_date):
"""
expected that post/patch/put of docs is allowed during the period
"""
response = self.app.post_json(
"/tenders/{}/bids/{}/documents?acc_token={}".format(
self.tender_id, bid_id, bid_token),
{"data": {
"title": "lorem.doc",
"url": self.generate_docservice_url(),
"hash": "md5:" + "0" * 32,
"format": "application/msword",
"documentType": doc_type
}},
status=201
)
document = response.json["data"]
if doc_type is not None:
self.assertEqual(document["documentType"], doc_type)
else:
self.assertNotIn("documentType", document)
response = self.app.put_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, document["id"], bid_token),
{"data": {
"title": "lorem(1).doc",
"url": self.generate_docservice_url(),
"hash": "md5:" + "0" * 32,
"format": "application/msword",
"documentType": doc_type,
}},
status=200
)
document = response.json["data"]
self.assertEqual(document["title"], "lorem(1).doc")
if doc_type is not None:
self.assertEqual(document["documentType"], doc_type)
else:
self.assertNotIn("documentType", document)
response = self.app.patch_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, document["id"], bid_token),
{"data": {"title": "Spam.json"}},
status=200
)
document = response.json["data"]
self.assertEqual(document["title"], "Spam.json")
if doc_type is not None:
self.assertEqual(document["documentType"], doc_type)
else:
self.assertNotIn("documentType", document)
# can't post docs after milestone dueDate (except closeFrameworkAgreementUA)
if procurement_method == "closeFrameworkAgreementUA":
return
with patch("openprocurement.tender.core.validation.get_now", lambda: due_date + timedelta(seconds=1)):
with patch("openprocurement.tender.core.procedure.validation.get_now",
lambda: due_date + timedelta(seconds=1)):
self.app.post_json(
"/tenders/{}/bids/{}/documents?acc_token={}".format(
self.tender_id, bid_id, bid_token),
{"data": {
"title": "lorem.doc",
"url": self.generate_docservice_url(),
"hash": "md5:" + "0" * 32,
"format": "application/msword",
"documentType": doc_type
}},
status=403
)
self.app.put_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, document["id"], bid_token),
{"data": {
"title": "lorem(5).doc",
"url": self.generate_docservice_url(),
"hash": "md5:" + "0" * 32,
"format": "application/msword",
"documentType": doc_type
}},
status=403
)
self.app.patch_json(
"/tenders/{}/bids/{}/documents/{}?acc_token={}".format(
self.tender_id, bid_id, document["id"], bid_token),
{"data": {"title": "Spam(3).json"}},
status=403
)
|
11491066
|
import math
def harmonic_wavefunction(x, N):
    # Build a list of oscillator wavefunction values at x: psi[1] is the normalised
    # Gaussian seed, psi[0] is a zero placeholder so the two-term recurrence below
    # can always index psi[n-2].
    psi = [0, (math.pi) ** (-1. / 4) * math.exp(-x ** 2 / 2)]
    for n in range(2, N):
        # Explicit float division keeps the second coefficient non-zero under Python 2.
        psi += [math.sqrt(2. / n) * x * psi[n - 1] - math.sqrt((n - 1.) / n) * psi[n - 2]]
    return psi
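if __name__ == "__main__":
    # Minimal usage sketch (not part of the original snippet): print the first few
    # entries of the list returned for x = 0.5; psi[1] is the Gaussian seed value.
    for idx, val in enumerate(harmonic_wavefunction(0.5, 5)):
        print(idx, val)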
|
11491069
|
import pytest
from django.core.urlresolvers import reverse
from wunderhabit import views
from wunderhabit import default
from .utils import get_user
from .utils import mock_messages
from wunderlist.tests.utils import mock_wunderlist_api
from wh_habitica.tests.utils import mock_habitica_api
@pytest.mark.usefixtures('mock_messages', 'mock_wunderlist_api', 'mock_habitica_api')
@pytest.mark.django_db
def test_successfully_authenticated(rf):
request = rf.get(reverse('test_authentication'))
request.user = get_user()
response = views.test_authentication(request)
assert request._messages.messages[0] == default.MESSAGE_AUTH_SUCCESS
assert response.url == reverse('dashboard')
|
11491089
|
import pytest
import mimetypes
def verify_file(gb_api, httpserver, filename, custom_filename, content_type, expected_content_type):
if not content_type:
# guess content type
content_type = mimetypes.guess_type(filename)[0]
httpserver.serve_content(content=open(filename, 'rb').read(),
headers={'content-type': content_type})
# format url
file_url = httpserver.url + '/' + custom_filename
# add url
assert gb_api.add_url(file_url) == True
payload = {}
payload.update({'showerrors': '1'})
result = gb_api.search('url:' + file_url, payload)
assert len(result['results']) == 1
assert result['results'][0]['contentType'] == expected_content_type
@pytest.mark.parametrize('filename, custom_filename, content_type, expected_content_type', [
('src/example_cpp.cpp', 'example_cpp.cpp', 'text/x-c++src', 'text'),
('src/example_cpp.cpp', 'example_plain.cpp', 'text/plain', 'text'),
('src/example_cpp.cpp', 'example_audio.cpp', 'audio/3gpp', ''),
])
def test_file_cpp(gb_api, httpserver, filename, custom_filename, content_type, expected_content_type):
verify_file(gb_api, httpserver, 'data/' + filename, custom_filename, content_type, expected_content_type)
|
11491100
|
import tensorflow as tf
import numpy as np
#import os, sys, inspect
from datetime import datetime
import EmotionDetectorUtils
"""
lib_path = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
sys.path.insert(0, lib_path)
"""
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "EmotionDetector/", "Path to data files")
tf.flags.DEFINE_string("logs_dir", "logs/EmotionDetector_logs/", "Path to where log files are to be saved")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")
BATCH_SIZE = 128
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 1001
REGULARIZATION = 1e-2
IMAGE_SIZE = 48
NUM_LABELS = 7
VALIDATION_PERCENT = 0.1
def add_to_regularization_loss(W, b):
tf.add_to_collection("losses", tf.nn.l2_loss(W))
tf.add_to_collection("losses", tf.nn.l2_loss(b))
def weight_variable(shape, stddev=0.02, name=None):
initial = tf.truncated_normal(shape, stddev=stddev)
if name is None:
return tf.Variable(initial)
else:
return tf.get_variable(name, initializer=initial)
def bias_variable(shape, name=None):
initial = tf.constant(0.0, shape=shape)
if name is None:
return tf.Variable(initial)
else:
return tf.get_variable(name, initializer=initial)
def conv2d_basic(x, W, bias):
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
return tf.nn.bias_add(conv, bias)
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], \
strides=[1, 2, 2, 1], padding="SAME")
def emotion_cnn(dataset):
with tf.name_scope("conv1") as scope:
#W_conv1 = weight_variable([5, 5, 1, 32])
#b_conv1 = bias_variable([32])
tf.summary.histogram("W_conv1", weights['wc1'])
tf.summary.histogram("b_conv1", biases['bc1'])
conv_1 = tf.nn.conv2d(dataset, weights['wc1'],\
strides=[1, 1, 1, 1], padding="SAME")
h_conv1 = tf.nn.bias_add(conv_1, biases['bc1'])
#h_conv1 = conv2d_basic(dataset, W_conv1, b_conv1)
h_1 = tf.nn.relu(h_conv1)
h_pool1 = max_pool_2x2(h_1)
add_to_regularization_loss(weights['wc1'], biases['bc1'])
with tf.name_scope("conv2") as scope:
#W_conv2 = weight_variable([3, 3, 32, 64])
#b_conv2 = bias_variable([64])
tf.summary.histogram("W_conv2", weights['wc2'])
tf.summary.histogram("b_conv2", biases['bc2'])
conv_2 = tf.nn.conv2d(h_pool1, weights['wc2'], strides=[1, 1, 1, 1], padding="SAME")
h_conv2 = tf.nn.bias_add(conv_2, biases['bc2'])
#h_conv2 = conv2d_basic(h_pool1, weights['wc2'], biases['bc2'])
h_2 = tf.nn.relu(h_conv2)
h_pool2 = max_pool_2x2(h_2)
add_to_regularization_loss(weights['wc2'], biases['bc2'])
with tf.name_scope("fc_1") as scope:
prob = 0.5
        image_size = IMAGE_SIZE // 4
h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
#W_fc1 = weight_variable([image_size * image_size * 64, 256])
#b_fc1 = bias_variable([256])
tf.summary.histogram("W_fc1", weights['wf1'])
tf.summary.histogram("b_fc1", biases['bf1'])
h_fc1 = tf.nn.relu(tf.matmul(h_flat, weights['wf1']) + biases['bf1'])
h_fc1_dropout = tf.nn.dropout(h_fc1, prob)
with tf.name_scope("fc_2") as scope:
#W_fc2 = weight_variable([256, NUM_LABELS])
#b_fc2 = bias_variable([NUM_LABELS])
tf.summary.histogram("W_fc2", weights['wf2'])
tf.summary.histogram("b_fc2", biases['bf2'])
#pred = tf.matmul(h_fc1, weights['wf2']) + biases['bf2']
pred = tf.matmul(h_fc1_dropout, weights['wf2']) + biases['bf2']
return pred
weights = {
'wc1': weight_variable([5, 5, 1, 32], name="W_conv1"),
'wc2': weight_variable([3, 3, 32, 64],name="W_conv2"),
'wf1': weight_variable([(IMAGE_SIZE // 4) * (IMAGE_SIZE // 4) * 64, 256], name="W_fc1"),
'wf2': weight_variable([256, NUM_LABELS], name="W_fc2")
}
biases = {
'bc1': bias_variable([32], name="b_conv1"),
'bc2': bias_variable([64], name="b_conv2"),
'bf1': bias_variable([256], name="b_fc1"),
'bf2': bias_variable([NUM_LABELS], name="b_fc2")
}
def loss(pred, label):
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label))
tf.summary.scalar('Entropy', cross_entropy_loss)
reg_losses = tf.add_n(tf.get_collection("losses"))
tf.summary.scalar('Reg_loss', reg_losses)
return cross_entropy_loss + REGULARIZATION * reg_losses
def train(loss, step):
return tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss, global_step=step)
def get_next_batch(images, labels, step):
offset = (step * BATCH_SIZE) % (images.shape[0] - BATCH_SIZE)
batch_images = images[offset: offset + BATCH_SIZE]
batch_labels = labels[offset:offset + BATCH_SIZE]
return batch_images, batch_labels
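# get_next_batch slides a window of BATCH_SIZE over the training arrays; the
# offset wraps modulo (N - BATCH_SIZE), so batches cycle through the data
# deterministically without reshuffling between epochs.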
def main(argv=None):
train_images, train_labels, valid_images, valid_labels, test_images = EmotionDetectorUtils.read_data(FLAGS.data_dir)
print("Train size: %s" % train_images.shape[0])
print('Validation size: %s' % valid_images.shape[0])
print("Test size: %s" % test_images.shape[0])
global_step = tf.Variable(0, trainable=False)
dropout_prob = tf.placeholder(tf.float32)
input_dataset = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1],name="input")
input_labels = tf.placeholder(tf.float32, [None, NUM_LABELS])
pred = emotion_cnn(input_dataset)
output_pred = tf.nn.softmax(pred,name="output")
loss_val = loss(pred, input_labels)
train_op = train(loss_val, global_step)
summary_op = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model Restored!")
for step in range(MAX_ITERATIONS):
batch_image, batch_label = get_next_batch(train_images, train_labels, step)
feed_dict = {input_dataset: batch_image, input_labels: batch_label}
sess.run(train_op, feed_dict=feed_dict)
if step % 10 == 0:
train_loss, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step)
print("Training Loss: %f" % train_loss)
if step % 100 == 0:
valid_loss = sess.run(loss_val, feed_dict={input_dataset: valid_images, input_labels: valid_labels})
print("%s Validation Loss: %f" % (datetime.now(), valid_loss))
saver.save(sess, FLAGS.logs_dir + 'model.ckpt', global_step=step)
if __name__ == "__main__":
tf.app.run()
"""
>>>
Train size: 3761
Validation size: 417
Test size: 1312
WARNING:tensorflow:Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.
Training Loss: 1.962236
2016-11-05 22:39:36.645682 Validation Loss: 1.962719
Training Loss: 1.907290
Training Loss: 1.849100
Training Loss: 1.871116
Training Loss: 1.798998
Training Loss: 1.885601
Training Loss: 1.849380
Training Loss: 1.843139
Training Loss: 1.933691
Training Loss: 1.829839
Training Loss: 1.839772
2016-11-05 22:42:58.951699 Validation Loss: 1.822431
Training Loss: 1.772197
Training Loss: 1.666473
Training Loss: 1.620869
Training Loss: 1.592660
Training Loss: 1.422701
Training Loss: 1.436721
Training Loss: 1.348217
Training Loss: 1.432023
Training Loss: 1.347753
Training Loss: 1.299889
2016-11-05 22:46:55.144483 Validation Loss: 1.335237
Training Loss: 1.108747
Training Loss: 1.197601
Training Loss: 1.245860
Training Loss: 1.164120
Training Loss: 0.994351
Training Loss: 1.072356
Training Loss: 1.193485
Training Loss: 1.118093
Training Loss: 1.021220
Training Loss: 1.069752
2016-11-05 22:50:17.677074 Validation Loss: 1.111559
Training Loss: 1.099430
Training Loss: 0.966327
Training Loss: 0.960916
Training Loss: 0.844742
Training Loss: 0.979741
Training Loss: 0.891897
Training Loss: 1.013132
Training Loss: 0.936738
Training Loss: 0.911577
Training Loss: 0.862605
2016-11-05 22:53:30.999141 Validation Loss: 0.999061
Training Loss: 0.800337
Training Loss: 0.776097
Training Loss: 0.799260
Training Loss: 0.919926
Training Loss: 0.758807
Training Loss: 0.807968
Training Loss: 0.856378
Training Loss: 0.867762
Training Loss: 0.656170
Training Loss: 0.688761
2016-11-05 22:56:53.256991 Validation Loss: 0.931223
Training Loss: 0.696454
Training Loss: 0.725157
Training Loss: 0.674037
Training Loss: 0.719200
Training Loss: 0.749460
Training Loss: 0.741768
Training Loss: 0.702719
Training Loss: 0.734194
Training Loss: 0.669155
Training Loss: 0.641528
2016-11-05 23:00:06.530139 Validation Loss: 0.911489
Training Loss: 0.764550
Training Loss: 0.646964
Training Loss: 0.724712
Training Loss: 0.726692
Training Loss: 0.656019
Training Loss: 0.690552
Training Loss: 0.537638
Training Loss: 0.680097
Training Loss: 0.554115
Training Loss: 0.590837
2016-11-05 23:03:15.351156 Validation Loss: 0.818303
Training Loss: 0.656608
Training Loss: 0.567394
Training Loss: 0.545324
Training Loss: 0.611726
Training Loss: 0.600910
Training Loss: 0.526467
Training Loss: 0.584986
Training Loss: 0.567015
Training Loss: 0.555465
Training Loss: 0.630097
2016-11-05 23:06:26.575298 Validation Loss: 0.824178
Training Loss: 0.662920
Training Loss: 0.512493
Training Loss: 0.475912
Training Loss: 0.455112
Training Loss: 0.567875
Training Loss: 0.582927
Training Loss: 0.509225
Training Loss: 0.602916
Training Loss: 0.521976
Training Loss: 0.445122
2016-11-05 23:09:40.136353 Validation Loss: 0.803449
Training Loss: 0.435535
Training Loss: 0.459343
Training Loss: 0.481706
Training Loss: 0.460640
Training Loss: 0.554570
Training Loss: 0.427962
Training Loss: 0.512764
Training Loss: 0.531128
Training Loss: 0.364465
Training Loss: 0.432366
2016-11-05 23:12:50.769527 Validation Loss: 0.851074
>>>
"""
|
11491102
|
import grpc
import pytest
from google.protobuf import empty_pb2
from couchers import errors
from couchers.models import UserBlock
from couchers.sql import couchers_select as select
from proto import blocking_pb2
from tests.test_fixtures import blocking_session, db, generate_user, make_user_block, session_scope, testconfig # noqa
@pytest.fixture(autouse=True)
def _(testconfig):
pass
def test_BlockUser(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
with session_scope() as session:
blocked_user_list = (
session.execute(select(UserBlock).where(UserBlock.blocking_user_id == user1.id)).scalars().all()
)
assert len(blocked_user_list) == 0
with blocking_session(token1) as user_blocks:
with pytest.raises(grpc.RpcError) as e:
user_blocks.BlockUser(blocking_pb2.BlockUserReq(username=user1.username))
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert e.value.details() == errors.CANT_BLOCK_SELF
user_blocks.BlockUser(blocking_pb2.BlockUserReq(username=user2.username))
with pytest.raises(grpc.RpcError) as e:
user_blocks.BlockUser(blocking_pb2.BlockUserReq(username=user2.username))
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert e.value.details() == errors.USER_ALREADY_BLOCKED
with session_scope() as session:
blocked_user_list = (
session.execute(select(UserBlock).where(UserBlock.blocking_user_id == user1.id)).scalars().all()
)
assert len(blocked_user_list) == 1
def test_make_user_block(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
make_user_block(user1, user2)
with session_scope() as session:
blocked_user_list = (
session.execute(select(UserBlock).where(UserBlock.blocking_user_id == user1.id)).scalars().all()
)
assert len(blocked_user_list) == 1
def test_UnblockUser(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
make_user_block(user1, user2)
with blocking_session(token1) as user_blocks:
user_blocks.UnblockUser(blocking_pb2.UnblockUserReq(username=user2.username))
with session_scope() as session:
blocked_users = session.execute(select(UserBlock).where(UserBlock.blocking_user_id == user1.id)).scalars().all()
assert len(blocked_users) == 0
with blocking_session(token1) as user_blocks:
with pytest.raises(grpc.RpcError) as e:
user_blocks.UnblockUser(blocking_pb2.UnblockUserReq(username=user2.username))
assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
assert e.value.details() == errors.USER_NOT_BLOCKED
# Test re-blocking
user_blocks.BlockUser(blocking_pb2.BlockUserReq(username=user2.username))
with session_scope() as session:
blocked_users = session.execute(select(UserBlock).where(UserBlock.blocking_user_id == user1.id)).scalars().all()
assert len(blocked_users) == 1
def test_GetBlockedUsers(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
user3, token3 = generate_user()
with blocking_session(token1) as user_blocks:
# Check no blocked users to start
blocked_user_list = user_blocks.GetBlockedUsers(empty_pb2.Empty())
assert len(blocked_user_list.blocked_usernames) == 0
make_user_block(user1, user2)
make_user_block(user1, user3)
blocked_user_list = user_blocks.GetBlockedUsers(empty_pb2.Empty())
assert len(blocked_user_list.blocked_usernames) == 2
def test_relationships_userblock_dot_user(db):
user1, token1 = generate_user()
user2, token2 = generate_user()
make_user_block(user1, user2)
with session_scope() as session:
block = session.execute(
select(UserBlock).where((UserBlock.blocking_user_id == user1.id) & (UserBlock.blocked_user_id == user2.id))
).scalar_one_or_none()
assert block.blocking_user.username == user1.username
assert block.blocked_user.username == user2.username
|
11491105
|
from pytest import raises
from Authentication import Client
from CommonServerPython import DemistoException
import demistomock as demisto
BASE_URL = 'https://example.com/v1/'
GET_CREDENTIALS = {
'credential': [
{'username': 'User1', 'password': '<PASSWORD>', 'name': 'DBot Demisto'},
{'username': 'User2', 'password': '<PASSWORD>', 'name': 'Demisto DBot'}
]
}
GET_USER_LIST = {
'account': [
{'username': 'User1', 'name': 'DBot Demisto', 'isLocked': False},
{'username': 'User2', 'name': 'Demisto DBot', 'isLocked': True}
]
}
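# Canned API payloads: GET_CREDENTIALS mimics the /credential endpoint and
# GET_USER_LIST mimics the /account endpoint; requests_mock serves them in the
# tests below so no real server is contacted.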
client = Client('https://example.com/v1/')
class TestBuildContext:
def test_build_fetch_creds(self):
from Authentication import build_credentials_fetch
results = build_credentials_fetch([{'username': 'user1', 'name': 'name1', 'password': 'password'}])
assert results == [{'name': 'name1', 'password': 'password', 'user': 'user1'}]
def test_account_response_to_context(self):
from Authentication import account_response_to_context
results = account_response_to_context(GET_USER_LIST['account'])
assert results == [
{'IsLocked': False, 'Name': 'DBot Demisto', 'Username': 'User1'},
{'IsLocked': True, 'Name': 'Demisto DBot', 'Username': 'User2'}]
class TestCredentialsOperations:
def test_fetch_credentials_positive(self, mocker, requests_mock):
from Authentication import fetch_credentials
mocker.patch.object(demisto, 'credentials')
# list
requests_mock.get(
f'{BASE_URL}credential',
json=GET_CREDENTIALS)
results = fetch_credentials(client)
assert results == [{'user': 'User1', 'name': 'DBot Demisto', 'password': '<PASSWORD>'},
{'user': 'User2', 'name': 'Demisto DBot', 'password': '<PASSWORD>'}]
def test_fetch_credentials_negative(self, mocker, requests_mock):
from Authentication import fetch_credentials
mocker.patch.object(demisto, 'credentials')
# list
requests_mock.get(
f'{BASE_URL}credential',
json={})
with raises(DemistoException, match='`fetch-incidents` failed in'):
fetch_credentials(client)
def test_list_accounts_full(self, mocker):
from Authentication import list_accounts_command
mocker.patch.object(client, 'list_accounts', return_value=GET_USER_LIST)
_, _, raw_response = list_accounts_command(client, {})
assert raw_response == {'account': [{'username': 'User1', 'name': 'DBot Demisto', 'isLocked': False},
{'username': 'User2', 'name': 'Demisto DBot', 'isLocked': True}]}
def test_list_accounts_negative(self, requests_mock):
from Authentication import list_accounts_command
requests_mock.get(BASE_URL + 'account', json={'account': []})
human_readable, _, _ = list_accounts_command(client, {})
assert 'Could not find any users' in human_readable
class TestTestModule:
def test_test_module_positive(self, requests_mock):
from Authentication import test_module_command
requests_mock.get(BASE_URL + 'version', json={'version': '1'})
human_readable = test_module_command(client, None)
assert human_readable == 'ok'
def test_test_module_false(self, requests_mock):
from Authentication import test_module_command
requests_mock.get(BASE_URL + 'version', json={})
with raises(DemistoException, match='Test module failed'):
test_module_command(client, None)
class TestAccountOperations:
def test_lock_account_positive(self, requests_mock):
from Authentication import lock_account_command
requests_mock.post(BASE_URL + 'account/lock?account=111', json={
'account': [{'username': '111', 'isLocked': True}]})
human_readable, _, _ = lock_account_command(client, {'username': '111'})
assert 'Authentication Integration - Account `111`' in human_readable
def test_lock_account_negative(self, requests_mock):
from Authentication import lock_account_command
requests_mock.post(BASE_URL + 'account/lock?account=111', json={})
with raises(DemistoException, match='Could not lock account'):
lock_account_command(client, {'username': '111'})
def test_unlock_account_positive(self, requests_mock):
from Authentication import unlock_account_command
requests_mock.post(BASE_URL + 'account/unlock?account=111', json={
'account': [{'username': '111', 'isLocked': False}]})
human_readable, _, _ = unlock_account_command(client, {'username': '111'})
assert 'Authentication Integration - Account `111`' in human_readable
def test_unlock_account_negative(self, requests_mock):
from Authentication import unlock_account_command
requests_mock.post(BASE_URL + 'account/unlock?account=111', json={})
with raises(DemistoException, match='Could not unlock account'):
unlock_account_command(client, {'username': '111'})
def test_reset_account_positive(self, requests_mock):
from Authentication import reset_account_command
requests_mock.post(BASE_URL + 'account/reset?account=111', json={
'account': [{'username': '111', 'isLocked': False}]})
human_readable, _, _ = reset_account_command(client, {'username': '111'})
assert 'Authentication Integration - Account `111`' in human_readable
def test_reset_account_negative(self, requests_mock):
from Authentication import reset_account_command
requests_mock.post(BASE_URL + 'account/reset?account=111', json={})
with raises(DemistoException, match='Could not reset account'):
reset_account_command(client, {'username': '111'})
class TestVaultOperations:
def test_lock_vault_positive(self, requests_mock):
from Authentication import lock_vault_command
requests_mock.post(BASE_URL + 'vault/lock?vaultId=111', json={
'vault': [{'vaultId': '111', 'isLocked': True}]})
results = lock_vault_command(client, {'vault_id': '111'})
assert 'Vault 111 has been locked' in results[0]
def test_lock_vault_negative(self, requests_mock):
from Authentication import lock_vault_command
requests_mock.post(BASE_URL + 'vault/lock?vaultId=111', json={
'vault': [{'vaultId': '111', 'isLocked': False}]})
with raises(DemistoException, match='Could not lock vault'):
lock_vault_command(client, {'vault_id': '111'})
def test_unlock_vault_positive(self, requests_mock):
from Authentication import unlock_vault_command
requests_mock.post(BASE_URL + 'vault/unlock?vaultId=111', json={
'vault': [{'vaultId': '111', 'isLocked': False}]})
results = unlock_vault_command(client, {'vault_id': '111'})
assert 'Vault 111 has been unlocked' in results[0]
def test_unlock_vault_negative(self, requests_mock):
from Authentication import unlock_vault_command
requests_mock.post(BASE_URL + 'vault/unlock?vaultId=111', json={
'vault': [{'vaultId': '111', 'isLocked': True}]})
with raises(DemistoException, match='Could not unlock vault'):
unlock_vault_command(client, {'vault_id': '111'})
def test_list_vaults_positive(self, requests_mock):
from Authentication import list_vaults_command
requests_mock.get(BASE_URL + 'vault', json={
'vault': [
{'vaultId': '111', 'isLocked': True},
{'vaultId': '121', 'isLocked': False},
{'vaultId': '164', 'isLocked': False}
]})
human_readable, _, _ = list_vaults_command(client, {})
assert 'Total of 3 has been found' in human_readable
def test_list_vaults_negative(self, requests_mock):
from Authentication import list_vaults_command
requests_mock.get(BASE_URL + 'vault', json={'vault': []})
human_readable, _, _ = list_vaults_command(client, {})
assert 'No vaults found' in human_readable
|
11491202
|
import os
import re
import flake8
import pytest
from flake8_nb import __version__
from flake8_nb.flake8_integration.cli import Flake8NbApplication
from flake8_nb.flake8_integration.cli import get_notebooks_from_args
from flake8_nb.flake8_integration.cli import hack_option_manager_generate_versions
from flake8_nb.parsers.notebook_parsers import InvalidNotebookWarning
from flake8_nb.parsers.notebook_parsers import NotebookParser
from tests.flake8_integration.conftest import TempIpynbArgs
def test_get_notebooks_from_args(temp_ipynb_args: TempIpynbArgs):
orig_args, (expected_args, expected_nb_list) = temp_ipynb_args.get_args_and_result()
args, nb_list = get_notebooks_from_args(
orig_args, exclude=["*.tox/*", ".ipynb_checkpoints", "*/docs/*"]
)
assert sorted(args) == sorted(expected_args)
assert sorted(nb_list) == sorted(expected_nb_list)
def test_hack_option_manager_generate_versions():
pattern = re.compile(rf"flake8: {flake8.__version__}, original_input")
def test_func(*args, **kwargs):
return "original_input"
hacked_output = hack_option_manager_generate_versions(test_func)()
assert re.match(pattern, hacked_output) is not None
def test_Flake8NbApplication__generate_versions():
generate_versions_pattern = re.compile(
rf"flake8: {flake8.__version__}(, [\w\-_]+: \d+\.\d+\.\d+)+"
)
generate_epilog_pattern = re.compile(
rf"Installed plugins: flake8: {flake8.__version__}(, [\w\-_]+: \d+\.\d+\.\d+)+"
)
orig_args = [os.path.join("tests", "data", "notebooks")]
app = Flake8NbApplication()
app.initialize(orig_args)
app.option_manager.generate_epilog()
hacked_generate_versions = app.option_manager.generate_versions()
hacked_generate_epilog: str = app.option_manager.parser.epilog # type: ignore
assert re.match(generate_versions_pattern, hacked_generate_versions) is not None
assert re.match(generate_epilog_pattern, hacked_generate_epilog) is not None
def test_Flake8NbApplication__hack_flake8_program_and_version():
app = Flake8NbApplication()
program = "flake8_nb"
assert app.program == program
assert app.version == __version__
assert app.option_manager.parser.prog == program
assert app.option_manager.parser.version == __version__ # type: ignore
assert app.option_manager.program_name == program
assert app.option_manager.version == __version__
def test_Flake8NbApplication__option_defaults():
app = Flake8NbApplication()
option_dict = app.option_manager.config_options_dict
assert option_dict["format"].default == "default_notebook"
assert option_dict["filename"].default == "*.py,*.ipynb_parsed"
assert option_dict["exclude"].default.endswith(",.ipynb_checkpoints") # type: ignore
assert option_dict["keep_parsed_notebooks"].default is False
@pytest.mark.filterwarnings(InvalidNotebookWarning)
def test_Flake8NbApplication__hack_args(temp_ipynb_args: TempIpynbArgs):
orig_args, (expected_args, _) = temp_ipynb_args.get_args_and_result()
result = Flake8NbApplication.hack_args(
orig_args, exclude=["*.tox/*", "*.ipynb_checkpoints*", "*/docs/*"]
)
expected_parsed_nb_list = NotebookParser.intermediate_py_file_paths
assert result == expected_args + expected_parsed_nb_list
@pytest.mark.filterwarnings(InvalidNotebookWarning)
def test_Flake8NbApplication__parse_configuration_and_cli():
orig_args = [os.path.join("tests", "data", "notebooks")]
app = Flake8NbApplication()
# parse_configuration_and_cli is called by initialize
app.initialize(orig_args)
expected_parsed_nb_list = NotebookParser.intermediate_py_file_paths
assert app.args == orig_args + expected_parsed_nb_list
@pytest.mark.parametrize("keep_parsed_notebooks", [False, True])
def test_Flake8NbApplication__exit(keep_parsed_notebooks: bool):
with pytest.warns(InvalidNotebookWarning):
orig_args = [os.path.join("tests", "data", "notebooks")]
app = Flake8NbApplication()
app.set_flake8_option("--keep-parsed-notebooks", default=keep_parsed_notebooks)
app.initialize(orig_args)
temp_path = NotebookParser.temp_path
try:
app.exit()
except SystemExit:
pass
assert os.path.exists(temp_path) == keep_parsed_notebooks
NotebookParser.clean_up()
|
11491235
|
import dataclasses
from pathlib import Path
import pytest
from common.node.cluster import Cluster
from common.node.node import Node
from common.test_config import TestConfig
THIS_DIR = Path(__file__).parent
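# Every field of TestConfig is exposed as a --<field_name> CLI option below; a
# field is a required option only when the dataclass declares no default for it.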
def pytest_addoption(parser):
for field in dataclasses.fields(TestConfig):
required = field.default == dataclasses.MISSING
parser.addoption(f"--{field.name}",
action="store",
default=None if required else field.default,
required=required)
@pytest.fixture(scope="session")
def test_cfg(pytestconfig):
params = {}
for field in dataclasses.fields(TestConfig):
val = pytestconfig.getoption(field.name)
if val is not None:
params[field.name] = val
return TestConfig(**params)
@pytest.fixture(scope="session")
def default_node_configs():
basedir = THIS_DIR.parent.parent.joinpath("util_test").joinpath("conf")
return [(basedir.joinpath(f"conf_taraxa{i + 1}.json"), basedir.joinpath(f"wallet{i + 1}.json")) for i in range(5)]
@pytest.fixture()
def default_cluster(test_cfg, default_node_configs) -> Cluster:
cluster = Cluster.from_new_nodes(default_node_configs,
Node.ManagedProcessInitMode(executable_path=test_cfg.node_executable_path))
yield cluster
cluster.destructor()
@pytest.fixture()
def default_cluster_unmanaged(default_node_configs) -> Cluster:
cluster = Cluster.from_new_nodes(default_node_configs, Node.RemoteInitMode())
yield cluster
cluster.destructor()
|
11491252
|
import requests
import os
import json
from lxml import html
import argparse
import datetime
import time
import random
from selenium import webdriver
SUMMARY_RULE = [
'//main[@class="content"]/section/div[@class="qa-arrangement"]/div[@class="qa-arrangement-body"]/div[@class="qa-title"]',
'//main/div[@class="qa-arrangement-body"]/p'
]
CONTENT_RULE = [
'//main[@class="content"]/section/section[@class="problem-detail-wrap"]/section[@class="problem-detail-inner"]/div[@class="block-line"]/div[@class="block-right"]',
'//main/div[@class="problem-detail-wrap"]/div[@class="block-line"]'
]
FULL_URL_FILE = 'data/urls/url_list.txt'
CRAWL_DATA_DIR = 'data/crawl_data'
HTML_DATA_DIR = os.path.join(CRAWL_DATA_DIR, 'html')
JSON_DATA_DIR = os.path.join(CRAWL_DATA_DIR, 'data')
WARNINGS_DIR = os.path.join(CRAWL_DATA_DIR, 'warnings')
if not os.path.exists(JSON_DATA_DIR):
os.makedirs(JSON_DATA_DIR)
if not os.path.exists(HTML_DATA_DIR):
os.makedirs(HTML_DATA_DIR)
if not os.path.exists(WARNINGS_DIR):
os.makedirs(WARNINGS_DIR)
def get_full_dialog_list(full_list_path):
dialog_list = []
with open(full_list_path, 'r', encoding='utf8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
splits = line.split()
dialog = {'id': splits[0], 'url': splits[1]}
dialog_list.append(dialog)
return dialog_list
def get_existing_index(data_dir):
all_files = os.listdir(data_dir)
existing_ids = set()
for file in sorted(all_files, reverse=True):
if not file.endswith('.json'):
continue
with open(os.path.join(data_dir, file), 'r', encoding='utf8') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
dialog = json.loads(line)
existing_ids.add(dialog['id'])
return existing_ids
def strip_str(string):
if string is None:
return ''
return ','.join(string.strip().split())
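# name_filter collapses speaker labels to two roles: any label other than
# '患者' (patient) is treated as '医生' (doctor).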
def name_filter(string):
if not string == '患者':
return '医生'
else:
return '患者'
def crawl_dialog(dialog, sleep_time, driver):
dialog_id = dialog['id']
dialog_url = dialog['url']
exception_report = []
dialog_dict = {'id': dialog_id, 'url': dialog_url, 'content': [], 'summary': {'description': '', 'suggestion': ''}}
html_dict = {'id': dialog_id, 'url': dialog_url, 'html': ''}
time.sleep(1)
response = requests.get(dialog_url)
step_threshold = 5
try_times = 0
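# Retry on 503/429 (rate limiting) up to 20 times, sleeping a jittered amount
# that grows as consecutive retries accumulate (via step_threshold below).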
while (response.status_code == 503 or response.status_code == 429) and try_times < 20:
time.sleep(max(sleep_time + 4 * (random.random() - 0.5), 2))
response = requests.get(dialog_url)
try_times += 1
sleep_time += int(try_times/step_threshold)
if response.status_code == 200:
response_text = response.text
html_dict['html'] = response_text
html_format = html.fromstring(response_text)
# get dialog
speaker_info = []
for context_rule in CONTENT_RULE:
speaker_rule = context_rule + '/h6'
utterance_rule = context_rule + '/p'
speaker_info = html_format.xpath(speaker_rule)
utterances_info = html_format.xpath(utterance_rule)
if len(speaker_info) > 0:
speaker_list = [name_filter(sp.text.strip()) for sp in speaker_info]
utterances_list = [strip_str(ut.text) for ut in utterances_info]
for sp, ut in zip(speaker_list, utterances_list):
if not ut == '':
dialog_dict['content'].append(
{'speaker': sp,
'utterance': ut}
)
break
if len(speaker_info) == 0:
warning_info = '%s %s dialog not found!' % (dialog_id, dialog_url)
exception_report.append(warning_info)
print(warning_info)
# get the summary
for sum_rule in SUMMARY_RULE:
summary_info = html_format.xpath(sum_rule)
if len(summary_info) > 0 and summary_info[0].text is not None:
description = summary_info[0].text
if description.startswith('问题描述:'):
description = description[5:]
dialog_dict['summary']['description'] = strip_str(description)
suggestion = summary_info[1].text
if suggestion.startswith('分析及建议:'):
suggestion = suggestion[6:]
dialog_dict['summary']['suggestion'] = strip_str(suggestion)
break
if dialog_dict['summary']['suggestion'] == '':
driver.implicitly_wait(sleep_time)
driver.get(dialog_url)
try:
enter = driver.find_element_by_class_name('qa-arrangement-btn')
driver.execute_script("arguments[0].scrollIntoView();", enter)
if driver.find_element_by_class_name('qa-arrangement-body').is_displayed():
pass
else:
enter.click()
# text_result = driver.find_element_by_class_name('qa-arrangement-body').text
text_result = driver.find_elements_by_class_name('qa-des')
if len(text_result) == 2:
dialog_dict['summary']['description'] = text_result[0].text
dialog_dict['summary']['suggestion'] = text_result[1].text
except Exception:
pass
if dialog_dict['summary']['suggestion'] == '':
if len(dialog_dict['content']) > 0:
description = dialog_dict['content'][0]['utterance']
dialog_dict['summary']['description'] = description
new_dialog_content = []
for utt in dialog_dict['content']:
speaker = utt['speaker']
if speaker == '医生' and utt['utterance'].startswith('针对本次问诊,医生更新了总结建议'):
dialog_dict['summary']['suggestion'] += utt['utterance']
else:
new_dialog_content.append(
{'speaker': speaker,
'utterance': utt['utterance']}
)
dialog_dict['content'] = new_dialog_content
if dialog_dict['summary']['suggestion'] == '':
warning_info = '%s %s summary not found!' % (dialog_id, dialog_url)
exception_report.append(warning_info)
print(warning_info)
else:
warning_info = '%s %d %s URL not found!' % (dialog_id, response.status_code, dialog_url)
exception_report.append(warning_info)
print(warning_info)
return dialog_dict, html_dict, exception_report
def save_json(data, file_path):
with open(file_path, 'w', encoding='utf8') as f:
for item in data:
json.dump(item, f, ensure_ascii=False)
f.write('\n')
def save_warning_file(warning_file, warnings):
with open(warning_file, 'a', encoding='utf8') as f:
for line in warnings:
f.write(line + '\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--chunk_size", default=500, type=int)
parser.add_argument("--sleep_time", default=15, type=int)
parser.add_argument("--test_url", default=None, type=str)
parser.add_argument("--chrome_driver", default='./chromedriver', type=str)
args = parser.parse_args()
full_dialogs = get_full_dialog_list(FULL_URL_FILE)
existing_ids = get_existing_index(JSON_DATA_DIR)
existing_dialog_num = len(existing_ids)
all_dialogs = []
for dialog in full_dialogs:
if not dialog['id'] in existing_ids:
all_dialogs.append(dialog)
print('%d dialogs exists in %s' % (existing_dialog_num, JSON_DATA_DIR))
print('%d dialogs to be crawl' % (len(all_dialogs)))
chunk_dialogs = []
chunk_htmls = []
now_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
warning_file = 'warning-%s' % now_time
warning_file = os.path.join(WARNINGS_DIR, warning_file)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--window-size=1920,1080')
driver = webdriver.Chrome(executable_path=args.chrome_driver, options=chrome_options)
# test code
if args.test_url is not None:
test_dialog = {'id': '00000', 'url': args.test_url}
dialog_dict, html_dict, exception_report = crawl_dialog(test_dialog, sleep_time=args.sleep_time, driver=driver)
exit(0)
for i in range(len(all_dialogs)):
current_index = existing_dialog_num + i + 1
print('Processing %05d / %d' % (current_index, len(full_dialogs)))
dialog = all_dialogs[i]
dialog_dict, html_dict, exception_report = crawl_dialog(dialog, sleep_time=args.sleep_time, driver=driver)
if len(exception_report) > 0:
save_warning_file(warning_file, exception_report)
chunk_dialogs.append(dialog_dict)
chunk_htmls.append(html_dict)
if len(chunk_dialogs) == args.chunk_size:
start_index = current_index - args.chunk_size + 1
print('Saving ids from %05d to %05d' % (start_index, current_index))
data_path = os.path.join(JSON_DATA_DIR, 'data.%05d_%05d.json' % (start_index, current_index))
html_path = os.path.join(HTML_DATA_DIR, 'html.%05d_%05d.json' % (start_index, current_index))
save_json(chunk_dialogs, data_path)
save_json(chunk_htmls, html_path)
chunk_dialogs = []
chunk_htmls = []
if __name__ == "__main__":
main()
|
11491259
|
import numpy as np
import scipy.stats
import scipy.special
import pytest
import matplotlib.pyplot as plt
from gp import PeriodicKernel, GaussianKernel
from .. import BQ
from .. import bq_c
DTYPE = np.dtype('float64')
options = {
'n_candidate': 10,
'x_mean': 0.0,
'x_var': 10.0,
'candidate_thresh': 0.5,
'kernel': GaussianKernel,
'optim_method': 'L-BFGS-B',
}
def npseed():
np.random.seed(8728)
def make_x(n=9):
x = np.linspace(-5, 5, n)
return x
def f_x(x):
y = scipy.stats.norm.pdf(x, 0, 1)
return y
def make_xy(n=9):
x = make_x(n=n)
y = f_x(x)
return x, y
def init_bq(bq):
bq.init(params_tl=(15, 2, 0), params_l=(0.2, 1.3, 0))
def make_bq(n=9, x=None, nc=None, init=True):
if x is None:
x, y = make_xy(n=n)
else:
y = f_x(x)
opt = options.copy()
if nc is not None:
opt['n_candidate'] = nc
bq = BQ(x, y, **opt)
if init:
init_bq(bq)
return bq
def make_xo():
return np.linspace(-10, 10, 500)
def vmpdf(x, mu, kappa):
C = -np.log(2 * np.pi * scipy.special.iv(0, kappa))
p = np.exp(C + (kappa * np.cos(x - mu)))
return p
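# vmpdf is the von Mises density exp(kappa * cos(x - mu)) / (2 * pi * I0(kappa)),
# which is why scipy.special (for the Bessel function iv) is imported above.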
def f_xp(x):
return vmpdf(x, mu=0.1, kappa=1.1)
def make_periodic_bq(x=None, nc=None):
opt = options.copy()
opt['kernel'] = PeriodicKernel
if nc is not None:
opt['n_candidate'] = nc
if x is None:
x = np.linspace(-np.pi, np.pi, 9)[:-1]
y = f_xp(x)
bq = BQ(x, y, **opt)
bq.init(
params_tl=(5, 2 * np.pi, 1, 0),
params_l=(0.2, np.pi / 2., 1, 0))
return bq
|
11491288
|
import mindspore.numpy as mnp
import mindspore.ops as ops
from mindspore.communication import init, get_rank
init()
data = mnp.ones((1, 2)) * (get_rank() + 1)
print(f'before allreduce: {data} at rank {get_rank()}')
allreduce_sum = ops.AllReduce(ops.ReduceOp.SUM)
data = allreduce_sum(data)
print(f'after allreduce: {data} at rank {get_rank()}')
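# Note: this snippet assumes a multi-process launch so that init() and
# get_rank() see several workers, e.g. something like
# `mpirun -n 8 python this_script.py`; the launcher command is an illustrative
# assumption, not taken from this file.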
|
11491306
|
import asyncio
import random
import typing
from typing import Optional, Union
import disnake
from disnake.ext import commands
from monty.bot import Monty
from monty.constants import ERROR_REPLIES, Colours, Icons
from monty.log import get_logger
from monty.utils.converters import WrappedMessageConverter
from monty.utils.messages import DeleteButton
log = get_logger(__name__)
# Number of seconds to wait for other users to bookmark the same message
TIMEOUT = 120
BOOKMARK_EMOJI = "📌"
CUSTOM_ID = "bookmark_add_bookmark_v1:"
DELETE_CUSTOM_ID = "bookmark_delete_bookmark"
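# Bookmark buttons encode their target as f"{CUSTOM_ID}{channel_id}-{message_id}";
# bookmark_button() below strips the prefix and splits on "-" to recover both ids.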
class DeleteBookmarkView(disnake.ui.View):
"""View for deleting bookmarks. Sent as a response to the delete button."""
def __init__(self, message: disnake.Message, timeout: float = 180):
self.message = message
super().__init__(timeout=timeout)
@disnake.ui.button(
label="Confirm Deletion", custom_id="bookmark_delete_bookmark_confirm", style=disnake.ButtonStyle.danger
)
async def confirm(self, button: disnake.ui.Button, inter: disnake.MessageInteraction) -> None:
"""Delete the bookmark on confirmation."""
try:
await self.message.delete()
except disnake.errors.NotFound:
content = "You already deleted this message, nice try!"
else:
content = "Successfully deleted."
await inter.response.edit_message(content=content, view=None)
@disnake.ui.button(label="Cancel", custom_id="bookmark_delete_bookmark_cancel", style=disnake.ButtonStyle.green)
async def cancel(self, button: disnake.ui.Button, inter: disnake.MessageInteraction) -> None:
"""Cancel the deletion and provide a response."""
await inter.response.edit_message(content="Cancelled", view=None)
def disable(self) -> None:
"""Disable all attributes in this view."""
for c in self.children:
if hasattr(c, "disabled") and c.is_dispatchable():
c.disabled = True
def check_user_read_perms(user: disnake.User, target_message: disnake.Message) -> bool:
"""Prevent users from bookmarking a message in a channel they don't have access to."""
permissions = target_message.channel.permissions_for(user)
return permissions.read_messages and permissions.read_message_history
class Bookmark(
commands.Cog,
slash_command_attrs={"dm_permission": False},
message_command_attrs={"dm_permission": False},
):
"""Creates personal bookmarks by relaying a message link to the user's DMs."""
def __init__(self, bot: Monty):
self.bot = bot
@staticmethod
def build_bookmark_dm(target_message: disnake.Message, title: str) -> disnake.Embed:
"""Build the embed to DM the bookmark requester."""
embed = disnake.Embed(title=title, description=target_message.content, colour=Colours.soft_green)
embed.add_field(name="Wanna give it a visit?", value=f"[Visit original message]({target_message.jump_url})")
embed.set_author(name=target_message.author, icon_url=target_message.author.display_avatar.url)
embed.set_thumbnail(url=Icons.bookmark)
return embed
@staticmethod
def build_error_embed(user: disnake.Member) -> disnake.Embed:
"""Builds an error embed for when a bookmark requester has DMs disabled."""
return disnake.Embed(
title=random.choice(ERROR_REPLIES),
description=f"{user.mention}, please enable your DMs to receive the bookmark.",
colour=Colours.soft_red,
)
@staticmethod
def check_perms(user: disnake.User, message: disnake.Message) -> bool:
"""Prevent users from bookmarking a message in a channel they don't have access to."""
permissions = message.channel.permissions_for(user)
if not permissions.read_message_history:
log.info(f"{user} tried to bookmark a message in #{message.channel} but has no permissions.")
return False
return True
async def action_bookmark(
self, channel: disnake.TextChannel, user: disnake.Member, target_message: disnake.Message, title: str
) -> Union[disnake.Embed, disnake.Message]:
"""Sends the bookmark DM, or sends an error embed when a user bookmarks a message."""
if not self.check_perms(user, target_message):
return disnake.Embed(
title=random.choice(ERROR_REPLIES),
color=Colours.soft_red,
description="You don't have permission to view that channel.",
)
embed = self.build_bookmark_dm(target_message, title)
try:
components = disnake.ui.Button(
custom_id=DELETE_CUSTOM_ID, label="Delete this bookmark", style=disnake.ButtonStyle.red
)
message = await user.send(embed=embed, components=components)
except disnake.Forbidden:
error_embed = self.build_error_embed(user)
return error_embed
else:
log.info(f"{user} bookmarked {target_message.jump_url} with title '{title}'")
return message
@staticmethod
async def send_embed(
ctx: typing.Union[commands.Context, disnake.Interaction], target_message: disnake.Message
) -> disnake.Message:
"""Sends an embed, with a button, so users can click to bookmark the message too."""
embed = disnake.Embed(
description=(
f"Click the button below to be sent your very own bookmark to "
f"[this message]({target_message.jump_url})."
),
colour=Colours.soft_green,
)
components = disnake.ui.Button(
custom_id=f"{CUSTOM_ID}{target_message.channel.id}-{target_message.id}",
style=disnake.ButtonStyle.blurple,
emoji=BOOKMARK_EMOJI,
)
if isinstance(ctx, commands.Context) and ctx.channel == target_message.channel:
if ctx.channel.permissions_for(ctx.me).read_message_history:
reference = target_message.to_reference(fail_if_not_exists=False)
else:
reference = None
message = await ctx.send(
embed=embed, allowed_mentions=disnake.AllowedMentions.none(), components=components, reference=reference
)
else:
message = await ctx.send(embed=embed, components=components)
return message
@commands.command(name="bookmark", aliases=("bm", "pin"))
async def bookmark(
self,
ctx: typing.Union[commands.Context, disnake.Interaction],
target_message: Optional[WrappedMessageConverter],
*,
title: str = "Bookmark",
) -> None:
"""Send the author a link to `target_message` via DMs."""
if not target_message:
if not ctx.message.reference:
raise commands.UserInputError(
"You must either provide a valid message to bookmark, or reply to one."
"\n\nThe lookup strategy for a message is as follows (in order):"
"\n1. Lookup by '{channel ID}-{message ID}' (retrieved by shift-clicking on 'Copy ID')"
"\n2. Lookup by message ID (the message **must** be in the context channel)"
"\n3. Lookup by message URL"
)
target_message = ctx.message.reference.resolved
if not target_message.guild:
raise commands.NoPrivateMessage("You may only bookmark messages that aren't in DMs.")
result = await self.action_bookmark(ctx.channel, ctx.author, target_message, title)
if isinstance(result, disnake.Embed):
if isinstance(ctx, disnake.Interaction):
await ctx.send(embed=result, ephemeral=True)
elif ctx.channel.permissions_for(ctx.me).read_message_history:
components = DeleteButton(ctx.author, initial_message=ctx.message)
await ctx.reply(embed=result, fail_if_not_exists=False, components=components)
else:
components = DeleteButton(ctx.author, initial_message=ctx.message)
await ctx.send(embed=result, components=components)
return
await self.send_embed(
ctx,
target_message,
)
@commands.slash_command(name="bm", description="Bookmark a message.")
async def bookmark_slash(
self,
inter: disnake.ApplicationCommandInteraction,
message: str,
title: str = "Bookmark",
) -> None:
"""
Bookmark a message.
Parameters
----------
message: A message to bookmark. This can be a link or id.
title: An optional title for your direct message.
"""
inter.channel_id = inter.channel.id
try:
message = await commands.MessageConverter().convert(inter, message)
except (commands.MessageNotFound, commands.ChannelNotFound, commands.ChannelNotReadable):
await inter.send("That message is not valid, or I do not have permissions to read it.", ephemeral=True)
return
await self.bookmark(inter, message, title=title)
@commands.message_command(name="Bookmark")
async def message_bookmark(self, inter: disnake.MessageCommandInteraction) -> None:
"""Bookmark a message with a message command."""
components = disnake.ui.TextInput(
style=disnake.TextInputStyle.short,
max_length=256,
label="Title",
custom_id="title",
required=False,
)
await inter.response.send_modal(title="Bookmark", custom_id=f"bookmark-{inter.id}", components=components)
try:
modal_inter: disnake.ModalInteraction = await self.bot.wait_for(
"modal_submit",
check=lambda x: x.custom_id == f"bookmark-{inter.id}",
timeout=180,
)
except asyncio.TimeoutError:
return
await self.bookmark(modal_inter, inter.target, title=modal_inter.text_values["title"])
@commands.Cog.listener("on_button_click")
async def bookmark_button(self, inter: disnake.MessageInteraction) -> None:
"""Listen for bookmarked button events and respond to them."""
if not inter.component.custom_id.startswith(CUSTOM_ID):
return
custom_id = inter.component.custom_id.removeprefix(CUSTOM_ID)
def remove_button(message: disnake.Message) -> disnake.ui.View:
view = disnake.ui.View.from_message(message)
for child in view.children:
if (getattr(child, "custom_id", "") or "").startswith(CUSTOM_ID):
view.remove_item(child)
break
else:
log.warning("Button was not found to be removed.")
return view
channel_id, message_id = custom_id.split("-")
channel_id, message_id = int(channel_id), int(message_id)
channel = self.bot.get_channel(channel_id)
if channel is None:
await inter.response.send_message("I can no longer view this channel.", ephemeral=True)
return
if not channel.permissions_for(channel.guild.me).read_message_history:
# while we could remove the button there is no reason to as we aren't making an invalid api request
await inter.response.send_message("I am currently unable to view the channel this message is from.")
return
try:
message = await channel.fetch_message(message_id)
except (disnake.NotFound, disnake.Forbidden):
view = remove_button(inter.message)
await inter.response.edit_message(view=view)
await inter.send("This message either no longer exists or I cannot reference it.", ephemeral=True)
return
maybe_error = await self.action_bookmark(inter.channel, inter.author, message, title="Bookmark")
if isinstance(maybe_error, disnake.Embed):
await inter.send(embed=maybe_error, ephemeral=True)
else:
await inter.send(f"Sent you a [direct message](<{maybe_error.jump_url}>).", ephemeral=True)
@commands.Cog.listener("on_button_click")
async def maybe_delete_bookmark_button(self, inter: disnake.MessageInteraction) -> None:
"""Handle bookmark delete button interactions."""
if inter.data.custom_id != DELETE_CUSTOM_ID:
return
# these are only sent in dms so there is no reason to check the author
await inter.response.defer()
await inter.send(
"Are you sure you want to delete this bookmark?", ephemeral=True, view=DeleteBookmarkView(inter.message)
)
def setup(bot: Monty) -> None:
"""Load the Bookmark cog."""
bot.add_cog(Bookmark(bot))
|
11491312
|
import random
def split_by_percent(data, train_percentage):
train_indices = set(random.sample(range(len(data)), int(train_percentage * len(data))))
train = [row for i, row in enumerate(data) if i in train_indices]
test = [row for i, row in enumerate(data) if i not in train_indices]
return train, test
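# Because sampling is without replacement and membership (not sample order)
# decides the split, both halves keep the original row order of `data`.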
def test():
data = [[i] for i in range(100)]
train,test = split_by_percent(data, 0.75)
print("75:25 data split correct?", len(train) == 75 and len(test) == 25)
train,test = split_by_percent(data, 0.50)
print("50:50 data split correct?", len(train) == 50 and len(test) == 50)
if __name__=="__main__":
test()
|
11491339
|
import argparse
import os
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras_vectorized import (
VectorizedDPKerasSGDOptimizer,
)
import flwr as fl
import common
# Make TensorFlow logs less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# global for tracking privacy
PRIVACY_LOSS = 0
# Define Flower client
class MnistClient(fl.client.NumPyClient):
def __init__(self, model, x_train, y_train, x_test, y_test, args):
self.model = model
self.x_train, self.y_train = x_train, y_train
self.x_test, self.y_test = x_test, y_test
self.batch_size = args.batch_size
self.local_epochs = args.local_epochs
self.dpsgd = args.dpsgd
if args.dpsgd:
self.noise_multiplier = args.noise_multiplier
if args.batch_size % args.microbatches != 0:
raise ValueError(
"Number of microbatches should divide evenly batch_size"
)
optimizer = VectorizedDPKerasSGDOptimizer(
l2_norm_clip=args.l2_norm_clip,
noise_multiplier=args.noise_multiplier,
num_microbatches=args.microbatches,
learning_rate=args.learning_rate,
)
# Compute vector of per-example loss rather than its mean over a minibatch.
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.losses.Reduction.NONE
)
else:
optimizer = tf.keras.optimizers.SGD(learning_rate=args.learning_rate)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
# Compile model with Keras
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
def get_parameters(self):
"""Get parameters of the local model."""
raise Exception("Not implemented (server-side parameter initialization)")
def fit(self, parameters, config):
"""Train parameters on the locally held training set."""
# Update local model parameters
global PRIVACY_LOSS
if self.dpsgd:
privacy_spent = common.compute_epsilon(
self.local_epochs,
len(self.x_train),
self.batch_size,
self.noise_multiplier,
)
PRIVACY_LOSS += privacy_spent
self.model.set_weights(parameters)
# Train the model
self.model.fit(
self.x_train,
self.y_train,
epochs=self.local_epochs,
batch_size=self.batch_size,
)
return self.model.get_weights(), len(self.x_train), {}
def evaluate(self, parameters, config):
"""Evaluate parameters on the locally held test set."""
# Update local model with global parameters
self.model.set_weights(parameters)
# Evaluate global model parameters on the local test data and return results
loss, accuracy = self.model.evaluate(self.x_test, self.y_test)
num_examples_test = len(self.x_test)
return loss, num_examples_test, {"accuracy": accuracy}
def main(args) -> None:
# Load Keras model
model = common.create_cnn_model()
# Load a subset of MNIST to simulate the local data partition
(x_train, y_train), (x_test, y_test) = common.load(args.num_clients)[args.partition]
# drop samples to form exact batches for dpsgd
# this is necessary since dpsgd is sensitive to uneven batches
# due to microbatching
if args.dpsgd and x_train.shape[0] % args.batch_size != 0:
drop_num = x_train.shape[0] % args.batch_size
x_train = x_train[:-drop_num]
y_train = y_train[:-drop_num]
# Start Flower client
client = MnistClient(model, x_train, y_train, x_test, y_test, args)
fl.client.start_numpy_client("[::]:8080", client=client)
if args.dpsgd:
print("Privacy Loss: ", PRIVACY_LOSS)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Flower Client")
parser.add_argument(
"--num-clients",
default=2,
type=int,
help="Total number of fl participants, requied to get correct partition",
)
parser.add_argument(
"--partition",
type=int,
required=True,
help="Data Partion to train on. Must be less than number of clients",
)
parser.add_argument(
"--local-epochs",
default=1,
type=int,
help="Total number of local epochs to train",
)
parser.add_argument("--batch-size", default=32, type=int, help="Batch size")
parser.add_argument(
"--learning-rate", default=0.15, type=float, help="Learning rate for training"
)
# DPSGD specific arguments
parser.add_argument(
"--dpsgd",
action="store_true",
help="If set, train with DP-SGD; otherwise train with vanilla SGD.",
)
parser.add_argument("--l2-norm-clip", default=1.0, type=float, help="Clipping norm")
parser.add_argument(
"--noise-multiplier",
default=1.1,
type=float,
help="Ratio of the standard deviation to the clipping norm",
)
parser.add_argument(
"--microbatches",
default=32,
type=int,
help="Number of microbatches " "(must evenly divide batch_size)",
)
args = parser.parse_args()
main(args)
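# Example invocation (hypothetical values, assuming a Flower server is already
# listening on [::]:8080):
#   python client.py --partition 0 --num-clients 2 --local-epochs 1 --dpsgd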
|
11491361
|
from unittest import TestCase, expectedFailure
import re
import shutil
from typing import Any, TYPE_CHECKING
import integration_helpers as helpers
# This list should match test_fixed_sequence in mock_server.c
test_fixed_sequence = [0.0, 1.0, 0.5, -1.0, 280.0, -12.5, 16.3, 425.87, -100000.0, 0.001]
class FileLoadTests(TestCase):
def test_load_from_file_doesnt_crash(self):
helpers.run_main(['-l', helpers.short_log_file()])
def test_load_from_file_shows_messages(self):
result = helpers.run_main(['-l', helpers.short_log_file()])
self.assertIn('get_registry', result)
self.assertIn('create_surface', result)
def test_load_from_file_with_filter(self):
result = helpers.run_main(['-l', helpers.short_log_file(), '-f', 'wl_compositor'])
self.assertNotIn('get_registry', result)
self.assertIn('create_surface', result)
def test_load_file_with_comma_numbers(self):
result = helpers.run_main(['-l', helpers.log_file_with_comma_numbers()])
self.assertIn('2690.6303 wl_pointer@19a.motion(time=4491984, surface_x=561.15625, surface_y=501.382812)', result)
# see https://github.com/wmww/wayland-debug/issues/35
@expectedFailure
def test_load_from_file_with_server_obj(self):
helpers.run_main(['-l', helpers.server_obj_log_file()])
# see https://github.com/wmww/wayland-debug/issues/17
@expectedFailure
def test_load_from_file_with_break(self):
result = helpers.run_main(['-l', helpers.short_log_file(), '-b', '[global]'])
self.assertIn('get_registry', result)
self.assertNotIn('create_surface', result)
class MockProgramInGDBTests(TestCase):
def setUp(self):
mock_client, mock_server = helpers.build_mock_program()
self.mock_client = mock_client
self.mock_server = mock_server
def run_client_in_gdb(self, mode, wldbg_args=[]):
return helpers.run_in_gdb(
wldbg_args,
['--ex', 'r', '--args', self.mock_client, mode],
[self.mock_server])
def run_server_in_gdb(self, mode, wldbg_args=[]):
return helpers.run_in_gdb(
wldbg_args,
['--ex', 'r', '--args', self.mock_server],
[self.mock_client, mode])
def test_gdb_plugin_starts(self):
helpers.run_in_gdb([], [self.mock_client, '-ex', 'q'], None)
def test_gdb_plugin_runs(self):
'''
These tests look nice, but they don't seem to detect any errors inside GDB
Luckily we also have test_runner.py which does
'''
self.run_client_in_gdb('simple-client')
def test_detects_get_registry_from_client(self):
result = self.run_client_in_gdb('simple-client')
self.assertIn('get_registry', result)
self.assertIn('global', result)
def test_detects_get_registry_from_server(self):
result = self.run_server_in_gdb('simple-client')
self.assertIn('get_registry', result)
self.assertIn('global', result)
def test_extracts_enum_values(self):
result = self.run_server_in_gdb('simple-client')
self.assertIn('capabilities=3:pointer&keyboard', result)
def test_extracts_fixed_point_numbers_with_low_accuracy(self):
result = self.run_server_in_gdb('pointer-move')
matches = re.findall(r'surface_y=(.*)\)', result)
self.assertEqual(len(matches), len(test_fixed_sequence))
for i in range(len(matches)):
match = float(matches[i])
expected = test_fixed_sequence[i]
self.assertAlmostEqual(match, expected, places = 2)
# see https://github.com/wmww/wayland-debug/issues/24
@expectedFailure
def test_extracts_fixed_point_numbers_with_high_accuracy(self):
result = self.run_server_in_gdb('pointer-move')
matches = re.findall(r'surface_y=(.*)\)', result)
self.assertEqual(len(matches), len(test_fixed_sequence))
for i in range(len(matches)):
match = float(matches[i])
expected = test_fixed_sequence[i]
self.assertAlmostEqual(match, expected, places = 5)
def check_result_of_server_created_obj(self, result):
matches = re.findall(r'new wl_data_offer@(\d+)[a-z]+', result)
self.assertEqual(len(matches), 1)
data_offer_id = matches[0]
matches = re.findall(r'.*wl_data_offer@(\d+)[a-z].*mock-meme-type', result)
self.assertEqual(len(matches), 1)
self.assertEqual(matches[0], data_offer_id)
def test_client_with_server_created_obj(self):
result = self.run_client_in_gdb('server-created-obj')
self.check_result_of_server_created_obj(result)
def test_server_with_server_created_obj(self):
result = self.run_server_in_gdb('server-created-obj')
self.check_result_of_server_created_obj(result)
def test_client_with_dispatcher(self):
result = self.run_client_in_gdb('dispatcher')
self.assertIn('attach', result)
self.assertIn('enter', result)
def test_server_with_dispatcher(self):
result = self.run_server_in_gdb('dispatcher')
self.assertIn('attach', result)
self.assertIn('enter', result)
def test_handles_array_of_ints_argument(self):
result = self.run_server_in_gdb('keyboard-enter')
# The keys the server provides to wl_keyboard.enter()
self.assertIn('keys=[69, 420]', result)
|
11491374
|
import torch
import numpy as np
import scipy.sparse as sp
import torch_quiver as qv
import time
from ogb.nodeproppred import Evaluator, PygNodePropPredDataset
from scipy.sparse import csr_matrix
import os
import os.path as osp
from quiver.sage_sampler import GraphSageSampler
def get_csr_from_coo(edge_index):
src = edge_index[0].numpy()
dst = edge_index[1].numpy()
node_count = max(np.max(src), np.max(dst))
data = np.zeros(dst.shape, dtype=np.int32)
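# The per-edge payload is left as zeros: only the CSR structure
# (indptr/indices) is handed to the quiver constructors below, so edge values
# are irrelevant here.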
csr_mat = csr_matrix(
(data, (edge_index[0].numpy(), edge_index[1].numpy())))
return csr_mat
def test_neighbor_sampler_with_fake_graph():
print(f"{'*' * 10} TEST WITH FAKE GRAPH {'*' * 10}")
graph_size = 10000
seed_size = 2048
neighbor_size = 20
graph_adj = np.random.randint(0, 2, (graph_size, graph_size))
###########################
# Zero-Copy Sampling
############################
csr_mat = sp.csr_matrix(graph_adj)
rowptr = torch.from_numpy(csr_mat.indptr).type(torch.long)
colptr = torch.from_numpy(csr_mat.indices).type(torch.long)
edge_ids = torch.LongTensor([1])
quiver = qv.new_quiver_from_csr_array(rowptr, colptr, edge_ids, 0, True,
False)
seeds = np.random.randint(graph_size, size=seed_size)
seeds = torch.from_numpy(seeds).type(torch.long)
cuda_seeds = seeds.cuda()
start = time.time()
n_id, count = quiver.sample_neighbor(0, cuda_seeds, neighbor_size)
print(f"Zero-Copy sampling method consumed {time.time() - start}")
##########################
# DMA Sampling
##########################
coo_mat = csr_mat.tocoo()
row = coo_mat.row
col = coo_mat.col
row = torch.from_numpy(row).type(torch.long)
col = torch.from_numpy(col).type(torch.long)
edge_ids = torch.LongTensor([1])
edge_index = torch.stack((row, col))
quiver = qv.new_quiver_from_edge_index(graph_size, edge_index, edge_ids, 0)
start = time.time()
n_id2, count2 = quiver.sample_neighbor(0, cuda_seeds, neighbor_size)
print(f"DMA sampling method consumed {time.time() - start}")
##############################
# CPU Sampling
##############################
quiver = qv.cpu_quiver_from_edge_index(graph_size, edge_index)
start = time.time()
n_id3, count3 = quiver.sample_neighbor(seeds, neighbor_size)
print(f"CPU sampling method consumed {time.time() - start}")
def test_neighbor_sampler_with_real_graph():
print(f"{'*' * 10} TEST WITH REAL GRAPH {'*' * 10}")
home = os.getenv('HOME')
data_dir = osp.join(home, '.pyg')
root = osp.join(data_dir, 'data', 'products')
dataset = PygNodePropPredDataset('ogbn-products', root)
data = dataset[0]
edge_index = data.edge_index
seeds_size = 128 * 15 * 10
neighbor_size = 5
csr_mat = get_csr_from_coo(edge_index)
print(
f"mean degree of graph = {np.mean(csr_mat.indptr[1:] - csr_mat.indptr[:-1])}"
)
graph_size = csr_mat.indptr.shape[0] - 1
seeds = np.arange(graph_size)
np.random.shuffle(seeds)
seeds = seeds[:seeds_size]
###########################
# Zero-Copy Sampling
############################
rowptr = torch.from_numpy(csr_mat.indptr).type(torch.long)
colptr = torch.from_numpy(csr_mat.indices).type(torch.long)
edge_ids = torch.LongTensor([1])
quiver = qv.new_quiver_from_csr_array(rowptr, colptr, edge_ids, 0, True,
False)
seeds = torch.from_numpy(seeds).type(torch.long)
cuda_seeds = seeds.cuda()
start = time.time()
n_id, count = quiver.sample_neighbor(0, cuda_seeds, neighbor_size)
print(
f"Zero-Copy sampling method consumed {time.time() - start}, sampled res length = {n_id.shape}"
)
##########################
# DMA Sampling
##########################
coo_mat = csr_mat.tocoo()
row = coo_mat.row
col = coo_mat.col
row = torch.from_numpy(row).type(torch.long)
col = torch.from_numpy(col).type(torch.long)
edge_ids = torch.LongTensor([1])
quiver = qv.new_quiver_from_edge_index(graph_size, data.edge_index,
edge_ids, 0)
start = time.time()
n_id2, count2 = quiver.sample_neighbor(0, cuda_seeds, neighbor_size)
print(
f"DMA sampling method consumed {time.time() - start}, sampled res length = {n_id2.shape}"
)
##############################
# CPU Sampling
##############################
quiver = qv.cpu_quiver_from_edge_index(graph_size, data.edge_index)
start = time.time()
n_id3, count3 = quiver.sample_neighbor(seeds, neighbor_size)
print(
f"CPU sampling method consumed {time.time() - start}, sampled res length = {n_id3.shape}"
)
def test_zero_copy_sampling_gpu_utilization():
print(f"{'*' * 10} TEST WITH REAL GRAPH {'*' * 10}")
home = os.getenv('HOME')
data_dir = osp.join(home, '.pyg')
root = osp.join(data_dir, 'data', 'products')
dataset = PygNodePropPredDataset('ogbn-products', root)
data = dataset[0]
edge_index = data.edge_index
seeds_size = 128 * 15 * 10
neighbor_size = 5
csr_mat = get_csr_from_coo(edge_index)
print(
f"mean degree of graph = {np.mean(csr_mat.indptr[1:] - csr_mat.indptr[:-1])}"
)
graph_size = csr_mat.indptr.shape[0] - 1
seeds = np.arange(graph_size)
np.random.shuffle(seeds)
seeds = seeds[:seeds_size]
###########################
# Zero-Copy Sampling
    ###########################
rowptr = torch.from_numpy(csr_mat.indptr).type(torch.long)
colptr = torch.from_numpy(csr_mat.indices).type(torch.long)
edge_ids = torch.LongTensor([1])
quiver = qv.new_quiver_from_csr_array(rowptr, colptr, edge_ids, 0, True,
False)
seeds = torch.from_numpy(seeds).type(torch.long)
cuda_seeds = seeds.cuda()
start = time.time()
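    # Spin forever so the sustained GPU utilization of zero-copy sampling can be observed externally (e.g. with nvidia-smi).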
while True:
n_id, count = quiver.sample_neighbor(0, cuda_seeds, neighbor_size)
#print(f"Zero-Copy sampling method consumed {time.time() - start}, sampled res length = {n_id.shape}")
#test_neighbor_sampler_with_fake_graph()
test_neighbor_sampler_with_real_graph()
#test_zero_copy_sampling_gpu_utilization()
|
11491383
|
import numpy as np
from pyquante2 import *
from pyquante2.scf.iterators import AveragingIterator
import matplotlib.pyplot as plt
#
# Compute RHF on N2 molecule for interatomic
# separation R in a range with N points.
#
N = 50
R_vec = np.linspace(0.5, 3.0, N)
E_vec = np.zeros((N,))
for k in range(N):
R = R_vec[k]
#print "Solving for R = %g ..." % (R)
n2 = molecule([(7,0,0,-R/2),(7,0,0,R/2)],units='Angstrom')
bfs = basisset(n2,'sto3g')
solver = rhf(n2,bfs)
#ens = solver.converge()
ens = solver.converge(AveragingIterator,maxiters=100)
E_vec[k] = solver.energy
plt.figure()
plt.plot(R_vec, E_vec)
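# Label the axes (a small addition; units follow the inputs above: Angstrom on x, Hartree on y).
plt.xlabel('R (Angstrom)')
plt.ylabel('RHF energy (Hartree)')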
plt.show()
|
11491390
|
import unittest
from unittest import mock
from blink1.blink1 import Blink1, BlinkConnectionFailed, InvalidColor
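# NB: these tests exercise a physical blink(1) device attached over USB;
# TestFailedConnection below patches PRODUCT_ID so that opening the device fails.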
class TestSimpleLightControl(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.b1 = Blink1()
@classmethod
def tearDownClass(cls):
cls.b1.off()
cls.b1.close()
del cls.b1
def testOn(self):
self.b1.fade_to_color(1000, 'white')
def testInvalidColor(self):
with self.assertRaises(InvalidColor):
self.b1.fade_to_color(1000, 'moomintrol')
def testAlsoWhite(self):
self.b1.fade_to_color(1000, (255,255,255))
def testAWhiteShadeOfPale(self):
self.b1.fade_to_color(1000, '#ffffff')
def testAGreyerShadeOfPale(self):
self.b1.fade_to_color(1000, '#eeeeee')
def testAnImplausibleShadeOfWhite(self):
with self.assertRaises(InvalidColor):
self.b1.fade_to_color(1000, '#xxxxxx')
def testOff(self):
self.b1.off()
    def test_get_firmware_version(self):
        ver = self.b1.get_version()
        self.assertIsNotNone(ver)
    def test_get_serial_number(self):
        sn = self.b1.get_serial_number()
        self.assertIsNotNone(sn)
class TestFailedConnection(unittest.TestCase):
def testCannotFind(self):
with mock.patch('blink1.blink1.PRODUCT_ID', '0101'):
with self.assertRaises(BlinkConnectionFailed):
b1 = Blink1()
if __name__ == '__main__':
unittest.main()
|
11491395
|
import numpy as np
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def forward_pass(network, _in, _tar, mode='validation', weights=None):
_input = _in.to(device)
_target = _tar.float().unsqueeze(0).to(device)
output = network.network_forward(_input, weights)
if mode == 'validation':
return [output]
else:
loss = network.loss_function(output, _target)
return [output, loss]
def evaluate(network, dataloader, mode='validation', weights=None):
mae, mse, loss = 0.0, 0.0, 0.0
for idx, (_in, _tar) in enumerate(dataloader):
result = forward_pass(network, _in, _tar, mode, weights)
        difference = result[0].data.sum() - _tar.sum().float().to(device)
_mae = torch.abs(difference)
_mse = difference ** 2
mae += _mae.item()
mse += _mse.item()
if mode == 'training':
loss += result[1].item()
mae /= len(dataloader)
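    # note: the "mse" computed below is actually the root of the mean squared error (RMSE)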
mse = np.sqrt(mse / len(dataloader))
if mode == 'training':
loss /= len(dataloader)
return (loss, mae, mse)
return mae, mse
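# Example usage (a minimal sketch; `network` and the loaders are assumed to exist):
# mae, mse = evaluate(network, val_loader)  # validation mode (default)
# loss, mae, mse = evaluate(network, train_loader, mode='training')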
|
11491431
|
import gym
import numpy as np
from rl4rs.server.gymHttpClient import Client
class HttpEnv(gym.Env):
metadata = {'render.modes': ['human']}
    def __init__(self, env_id, config=None):
        config = config or {}  # avoid a shared mutable default argument
        remote_base = config["remote_base"]
self.client = Client(remote_base)
self.instance_id = self.client.env_create(env_id, config)
action_info = self.client.env_action_space_info(self.instance_id)
obs_info = self.client.env_observation_space_info(self.instance_id)
if action_info['name'] == 'Box':
self.action_space = gym.spaces.Box(np.array(action_info['low']), np.array(action_info['high']), shape=action_info['shape'])
else:
self.action_space = gym.spaces.Discrete(action_info['n'])
if obs_info['name'] == 'Box':
self.observation_space = gym.spaces.Box(np.array(obs_info['low']), np.array(obs_info['high']), shape=obs_info['shape'])
elif obs_info['name'] == 'Dict':
keys = obs_info['keys']
space_D = {}
for key in keys:
shape = obs_info[key]['shape']
space_D[key] = gym.spaces.Box(np.array(obs_info[key]['low']).reshape(shape), np.array(obs_info[key]['high']).reshape(shape), shape=shape)
self.observation_space = gym.spaces.Dict(space_D)
        else:
            raise NotImplementedError(f"unsupported observation space type: {obs_info['name']}")
def seed(self, sd=0):
pass
def step(self, action):
if isinstance(action, np.ndarray):
action = action.tolist()
        if isinstance(action, np.integer):  # np.int was removed in NumPy 1.24
action = int(action)
observation, reward, done, info = self.client.env_step(self.instance_id, action, False)
return self.observation_space.from_jsonable(observation), reward, done, info
def reset(self):
observation = self.client.env_reset(self.instance_id)
return self.observation_space.from_jsonable(observation)
def render(self, mode='human', close=False):
return ''
def close(self):
return self.client.env_close(self.instance_id)
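# Example usage (a sketch; the URL and env id are illustrative):
# env = HttpEnv('CartPole-v1', config={'remote_base': 'http://127.0.0.1:5000'})
# obs = env.reset()
# obs, reward, done, info = env.step(env.action_space.sample())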
|
11491435
|
from dataclasses import dataclass
from functional import seq
from typing import List
from anki import Collection
from anki.notes import Note as AnkiNote
from ..utils.constants import UUID_FIELD_NAME
@dataclass
class UuidFetcher:
collection: Collection
def get_deck_config(self, uuid: str):
return get_value_by_uuid(self.collection.decks.all_config(), uuid)
def get_deck(self, uuid: str):
return get_value_by_uuid(self.collection.decks.all(), uuid)
def get_model(self, uuid: str):
return get_value_by_uuid(self.collection.models.all(), uuid)
def get_note(self, uuid: str):
query = "select id from notes where guid=?"
note_id = self.collection.db.scalar(query, uuid)
if not note_id:
return None
return AnkiNote(self.collection, id=note_id)
def get_value_by_uuid(values: List, uuid: str):
return seq(values).find(lambda it: it.get(UUID_FIELD_NAME) == uuid)
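# Example usage (a sketch; the collection path and guid are illustrative):
# col = Collection('/path/to/collection.anki2')
# note = UuidFetcher(col).get_note('a-note-guid')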
|
11491443
|
import timeit
from functools import wraps
from optimus.helpers.logger import logger
def time_it(method):
    @wraps(method)  # preserve the wrapped method's name and docstring
    def timed(*args, **kw):
start_time = timeit.default_timer()
f = method(*args, **kw)
_time = round(timeit.default_timer() - start_time, 2)
logger.print("{name}() executed in {time} sec".format(name=method.__name__, time=_time))
return f
return timed
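# Example usage (hypothetical function, shown only to illustrate the decorator):
# @time_it
# def load_data():
#     return list(range(10 ** 6))
# load_data()  # logs e.g. "load_data() executed in N.NN sec"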
|
11491500
|
from src.rfunctions import *
from src.custlogger import *
from collections import defaultdict
try:
    import dns.rdatatype
    import dns.message
    import dns.query
    import dns.reversename
    import dns.flags  # used below when checking for truncated responses
    import dns.rcode  # used below to render status codes
except ImportError:
notfound.append('dnspython')
logger = logging.getLogger(__name__)
def dns_query(server, timeout, protocol, qname, qtype, qclass):
request = dns.message.make_query(qname, qtype, qclass)
if protocol == 'tcp':
response = dns.query.tcp(request, server, timeout=timeout, one_rr_per_rrset=True)
else:
response = dns.query.udp(request, server, timeout=timeout, one_rr_per_rrset=True)
if response.flags & dns.flags.TC:
response = dns.query.tcp(request, server, timeout=timeout, one_rr_per_rrset=True)
return response
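# Example (illustrative): query the A records of example.com via Google DNS over UDP.
# response = dns_query('8.8.8.8', 5, 'udp', 'example.com', 'A', 'IN')
# print(dns.rcode.to_text(response.rcode()))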
def generate_tld():
# NB. does not return an exhaustive list (ie. missing co.uk, co.nz etc.)
from itertools import product
from string import ascii_lowercase
# http://data.iana.org/TLD/tlds-alpha-by-domain.txt
gtld = ['academy', 'actor', 'aero', 'agency', 'archi', 'arpa', 'asia', 'axa',
'bar', 'bargains', 'berlin', 'best', 'bid', 'bike', 'biz', 'black', 'blue',
'boutique', 'build', 'builders', 'buzz', 'cab', 'camera', 'camp', 'cards',
'careers', 'cat', 'catering', 'center', 'ceo', 'cheap', 'christmas',
'cleaning', 'clothing', 'club', 'codes', 'coffee', 'cologne', 'com',
'community', 'company', 'computer', 'condos', 'construction', 'contractors',
'cooking', 'cool', 'coop', 'country', 'cruises', 'dance', 'dating', 'democrat',
'diamonds', 'directory', 'dnp', 'domains', 'edu', 'education', 'email',
'enterprises', 'equipment', 'estate', 'events', 'expert', 'exposed', 'farm',
'fish', 'fishing', 'flights', 'florist', 'foundation', 'futbol', 'gallery',
'gift', 'glass', 'gov', 'graphics', 'guitars', 'guru', 'haus', 'holdings',
'holiday', 'horse', 'house', 'immobilien', 'industries', 'info', 'ink',
'institute', 'int', 'international', 'jetzt', 'jobs', 'kaufen', 'kim',
'kitchen', 'kiwi', 'koeln', 'kred', 'land', 'lighting', 'limo', 'link',
'london', 'luxury', 'maison', 'management', 'mango', 'marketing', 'meet',
'menu', 'miami', 'mil', 'mobi', 'moda', 'moe', 'monash', 'museum', 'nagoya',
'name', 'net', 'neustar', 'ninja', 'nyc', 'okinawa', 'onl', 'org', 'partners',
'parts', 'photo', 'photography', 'photos', 'pics', 'pink', 'plumbing', 'post',
'pro', 'productions', 'properties', 'pub', 'qpon', 'recipes', 'red', 'ren',
'rentals', 'repair', 'report', 'reviews', 'rich', 'rodeo', 'ruhr', 'sexy',
'shiksha', 'shoes', 'singles', 'social', 'sohu', 'solar', 'solutions',
'supplies', 'supply', 'support', 'systems', 'tattoo', 'technology', 'tel',
'tienda', 'tips', 'today', 'tokyo', 'tools', 'trade', 'training', 'travel',
'uno', 'vacations', 'vegas', 'ventures', 'viajes', 'villas', 'vision', 'vodka',
'vote', 'voting', 'voto', 'voyage', 'wang', 'watch', 'webcam', 'wed', 'wien',
'wiki', 'works', 'xn--3bst00m', 'xn--3ds443g', 'xn--3e0b707e', 'xn--45brj9c',
'xn--55qw42g', 'xn--55qx5d', 'xn--6frz82g', 'xn--6qq986b3xl', 'xn--80ao21a',
'xn--80asehdb', 'xn--80aswg', 'xn--90a3ac', 'xn--c1avg', 'xn--cg4bki',
'xn--clchc0ea0b2g2a9gcd', 'xn--czru2d', 'xn--d1acj3b', 'xn--fiq228c5hs',
'xn--fiq64b', 'xn--fiqs8s', 'xn--fiqz9s', 'xn--fpcrj9c3d', 'xn--fzc2c9e2c',
'xn--gecrj9c', 'xn--h2brj9c', 'xn--i1b6b1a6a2e', 'xn--io0a7i', 'xn--j1amh',
'xn--j6w193g', 'xn--kprw13d', 'xn--kpry57d', 'xn--l1acc', 'xn--lgbbat1ad8j',
'xn--mgb9awbf', 'xn--mgba3a4f16a', 'xn--mgbaam7a8h', 'xn--mgbab2bd',
'xn--mgbayh7gpa', 'xn--mgbbh1a71e', 'xn--mgbc0a9azcg', 'xn--mgberp4a5d4ar',
'xn--mgbx4cd0ab', 'xn--ngbc5azd', 'xn--nqv7f', 'xn--nqv7fs00ema', 'xn--o3cw4h',
'xn--ogbpf8fl', 'xn--p1ai', 'xn--pgbs0dh', 'xn--q9jyb4c', 'xn--rhqv96g',
'xn--s9brj9c', 'xn--unup4y', 'xn--wgbh1c', 'xn--wgbl6a', 'xn--xkc2al3hye2a',
'xn--xkc2dl3a5ee0h', 'xn--yfro4i67o', 'xn--ygbi2ammx', 'xn--zfr164b', 'xxx',
'xyz', 'zone']
cctld = [''.join(i) for i in product(*[ascii_lowercase]*2)]
tld = gtld + cctld
return tld, len(tld)
def generate_srv():
common = [
'_gc._tcp', '_kerberos._tcp', '_kerberos._udp', '_ldap._tcp',
'_test._tcp', '_sips._tcp', '_sip._udp', '_sip._tcp', '_aix._tcp', '_aix._udp',
'_finger._tcp', '_ftp._tcp', '_http._tcp', '_nntp._tcp', '_telnet._tcp',
'_whois._tcp', '_h323cs._tcp', '_h323cs._udp', '_h323be._tcp', '_h323be._udp',
'_h323ls._tcp', '_h323ls._udp', '_sipinternal._tcp', '_sipinternaltls._tcp',
'_sip._tls', '_sipfederationtls._tcp', '_jabber._tcp', '_xmpp-server._tcp', '_xmpp-client._tcp',
        '_imap._tcp', '_certificates._tcp', '_crls._tcp', '_pgpkeys._tcp', '_pgprevokations._tcp',
'_cmp._tcp', '_svcp._tcp', '_crl._tcp', '_ocsp._tcp', '_PKIXREP._tcp',
'_smtp._tcp', '_hkp._tcp', '_hkps._tcp', '_jabber._udp', '_xmpp-server._udp',
'_xmpp-client._udp', '_jabber-client._tcp', '_jabber-client._udp',
'_adsp._domainkey', '_policy._domainkey', '_domainkey', '_ldap._tcp.dc._msdcs', '_ldap._udp.dc._msdcs']
def distro():
import os
import re
files = ['/usr/share/nmap/nmap-protocols', '/usr/share/nmap/nmap-services', '/etc/protocols', '/etc/services']
ret = []
for f in files:
if not os.path.isfile(f):
                logger.warning("File '%s' is missing, there will be fewer records to test" % f)
continue
for line in open(f):
match = re.match(r'([a-zA-Z0-9]+)\s', line)
if not match: continue
for w in re.split(r'[^a-z0-9]', match.group(1).strip().lower()):
ret.extend(['_%s.%s' % (w, i) for i in ('_tcp', '_udp')])
return ret
srv = set(common + distro())
return srv, len(srv)
class HostInfo:
def __init__(self):
self.name = set()
self.ip = set()
self.alias = set()
def __str__(self):
line = ''
if self.name:
line = ' '.join(self.name)
if self.ip:
if line: line += ' / '
line += ' '.join(map(str, self.ip))
if self.alias:
if line: line += ' / '
line += ' '.join(self.alias)
return line
class Controller_DNS(Controller):
records = defaultdict(list)
hostmap = defaultdict(HostInfo)
# show_final {{{
def show_final(self):
''' Expected output:
Records -----
ftp.example.com. IN A 10.0.1.1
www.example.com. IN A 10.0.1.1
prod.example.com. IN CNAME www.example.com.
ipv6.example.com. IN AAAA dead:beef::
dev.example.com. IN A 10.0.1.2
svn.example.com. IN A 10.0.2.1
websrv1.example.com. IN CNAME prod.example.com.
blog.example.com. IN CNAME example.wordpress.com.
'''
print('Records ' + '-'*42)
for name, infos in sorted(self.records.items()):
for qclass, qtype, rdata in infos:
print('%34s %4s %-7s %s' % (name, qclass, qtype, rdata))
''' Expected output:
Hostmap ------
ipv6.example.com dead:beef::
ftp.example.com 10.0.1.1
www.example.com 10.0.1.1
prod.example.com
websrv1.example.com
dev.example.com 10.0.1.2
svn.example.com 10.0.2.1
example.wordpress.com ?
blog.example.com
Domains ---------------------------
example.com 8
Networks --------------------------
dead:beef::
10.0.1.x
10.0.2.1
'''
ipmap = defaultdict(HostInfo)
noips = defaultdict(list)
'''
hostmap = {
'www.example.com': {'ip': ['10.0.1.1'], 'alias': ['prod.example.com']},
'ftp.example.com': {'ip': ['10.0.1.1'], 'alias': []},
'prod.example.com': {'ip': [], 'alias': ['websrv1.example.com']},
'ipv6.example.com': {'ip': ['dead:beef::'], 'alias': []},
'dev.example.com': {'ip': ['10.0.1.2'], 'alias': []},
'example.wordpress.com': {'ip': [], 'alias': ['blog.example.com']},
ipmap = {'10.0.1.1': {'name': ['www.example.com', 'ftp.example.com'], 'alias': ['prod.example.com', 'websrv1.example.com']}, ...
noips = {'example.wordpress.com': ['blog.example.com'],
'''
for name, hinfo in self.hostmap.items():
for ip in hinfo.ip:
ip = IP(ip)
ipmap[ip].name.add(name)
ipmap[ip].alias.update(hinfo.alias)
for name, hinfo in self.hostmap.items():
if not hinfo.ip and hinfo.alias:
found = False
for ip, v in ipmap.items():
if name in v.alias:
for alias in hinfo.alias:
ipmap[ip].alias.add(alias)
found = True
                if not found: # orphan CNAME hostnames (with no IP address) may still be valid virtual hosts
noips[name].extend(hinfo.alias)
print('Hostmap ' + '-'*42)
for ip, hinfo in sorted(ipmap.items()):
for name in hinfo.name:
print('%34s %s' % (name, ip))
for alias in hinfo.alias:
print('%34s' % alias)
for k, v in noips.items():
print('%34s ?' % k)
for alias in v:
print('%34s' % alias)
print('Domains ' + '-'*42)
domains = {}
for ip, hinfo in ipmap.items():
for name in hinfo.name.union(hinfo.alias):
if name.count('.') > 1:
i = 1
else:
i = 0
d = '.'.join(name.split('.')[i:])
if d not in domains: domains[d] = 0
domains[d] += 1
for domain, count in sorted(domains.items(), key=lambda a:a[0].split('.')[-1::-1]):
print('%34s %d' % (domain, count))
print('Networks ' + '-'*41)
nets = {}
for ip in set(ipmap):
            if ip.version() != 4:
nets[ip] = [ip]
else:
n = ip.make_net('255.255.255.0')
if n not in nets: nets[n] = []
nets[n].append(ip)
for net, ips in sorted(nets.items()):
if len(ips) == 1:
print(' '*34 + ' %s' % ips[0])
else:
print(' '*34 + ' %s.x' % '.'.join(str(net).split('.')[:-1]))
# }}}
def push_final(self, resp):
if hasattr(resp, 'rrs'):
for rr in resp.rrs:
name, qclass, qtype, data = rr
info = (qclass, qtype, data)
if info not in self.records[name]:
self.records[name].append(info)
                if qclass != 'IN':
continue
if qtype == 'PTR':
data = data[:-1]
self.hostmap[data].ip.add(name)
else:
if qtype in ('A', 'AAAA'):
name = name[:-1]
self.hostmap[name].ip.add(data)
elif qtype == 'CNAME':
name, data = name[:-1], data[:-1]
self.hostmap[data].alias.add(name)
class DNS_reverse:
'''Reverse DNS lookup'''
usage_hints = [
'''%prog host=NET0 0=192.168.0.0/24 -x ignore:code=3''',
'''%prog host=NET0 0=216.239.32.0-216.239.47.255,172.16.31.10/24 -x ignore:code=3 -x ignore:fgrep!=google.com -x ignore:fgrep=216-239-''',
]
available_options = (
('host', 'IP addresses to reverse lookup'),
('server', 'name server to query (directly asking a zone authoritative NS may return more results) [8.8.8.8]'),
('timeout', 'seconds to wait for a response [5]'),
('protocol', 'send queries over udp or tcp [udp]'),
)
available_actions = ()
Response = Response_Base
def execute(self, host, server='8.8.8.8', timeout='5', protocol='udp'):
with Timing() as timing:
response = dns_query(server, int(timeout), protocol, dns.reversename.from_address(host), qtype='PTR', qclass='IN')
code = response.rcode()
status = dns.rcode.to_text(code)
rrs = [[host, c, t, d] for _, _, c, t, d in [rr.to_text().split(' ', 4) for rr in response.answer]]
mesg = '%s %s' % (status, ''.join('[%s]' % ' '.join(rr) for rr in rrs))
resp = self.Response(code, mesg, timing)
resp.rrs = rrs
return resp
class DNS_forward:
'''Forward DNS lookup'''
usage_hints = [
'''%prog name=FILE0.google.com 0=names.txt -x ignore:code=3''',
'''%prog name=google.MOD0 0=TLD -x ignore:code=3''',
'''%prog name=MOD0.microsoft.com 0=SRV qtype=SRV -x ignore:code=3''',
]
available_options = (
('name', 'domain names to lookup'),
('server', 'name server to query (directly asking the zone authoritative NS may return more results) [8.8.8.8]'),
('timeout', 'seconds to wait for a response [5]'),
('protocol', 'send queries over udp or tcp [udp]'),
('qtype', 'type to query [ANY]'),
('qclass', 'class to query [IN]'),
)
available_actions = ()
available_keys = {
'TLD': generate_tld,
'SRV': generate_srv,
}
Response = Response_Base
def execute(self, name, server='8.8.8.8', timeout='5', protocol='udp', qtype='ANY', qclass='IN'):
with Timing() as timing:
response = dns_query(server, int(timeout), protocol, name, qtype=qtype, qclass=qclass)
code = response.rcode()
status = dns.rcode.to_text(code)
rrs = [[n, c, t, d] for n, _, c, t, d in [rr.to_text().split(' ', 4) for rr in response.answer + response.additional + response.authority]]
mesg = '%s %s' % (status, ''.join('[%s]' % ' '.join(rr) for rr in rrs))
resp = self.Response(code, mesg, timing)
resp.rrs = rrs
return resp
|