Dataset schema (one row per source file):

| column | type | range / values | nullable |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 1 to 1.03M | |
| ext | string | 10 distinct values | |
| lang | string | 1 distinct value | |
| max_stars_repo_path | string | length 3 to 239 | |
| max_stars_repo_name | string | length 5 to 130 | |
| max_stars_repo_head_hexsha | string | length 40 to 78 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 3 to 239 | |
| max_issues_repo_name | string | length 5 to 130 | |
| max_issues_repo_head_hexsha | string | length 40 to 78 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 3 to 239 | |
| max_forks_repo_name | string | length 5 to 130 | |
| max_forks_repo_head_hexsha | string | length 40 to 78 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 1 to 1.03M | |
| avg_line_length | float64 | 1 to 958k | |
| max_line_length | int64 | 1 to 1.03M | |
| alphanum_fraction | float64 | 0 to 1 | |
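Each row pairs one source file's `content` with repository metadata; in the rows below, the stars/issues/forks column groups repeat the same path, repo name, and head hexsha, so they are shown collapsed. As a minimal sketch of how rows of this shape might be consumed, assuming `rows` is an iterable of dicts keyed by the column names above (the helper name and the license whitelist are illustrative, not part of the dataset):

```python
# Minimal sketch: stream rows of the schema above and keep small, permissively
# licensed Python files. Nothing here relies on any dataset-specific API.
def permissive_python_files(rows, max_size=100_000):
    allowed = {"MIT", "Apache-2.0", "BSD-3-Clause"}  # illustrative whitelist
    for row in rows:
        if row["lang"] != "Python" or row["size"] > max_size:
            continue
        # the *_licenses columns hold lists of 1 to 10 license identifiers
        if allowed & set(row["max_stars_repo_licenses"] or []):
            yield row["max_stars_repo_path"], row["content"]
```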
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17ef764236249e443683f2dbe9c08a5529c01e | 6,001 | py | Python | rizwanniazigroupdocs/aspose-words-cloud-python | asposewordscloud/models/requests/insert_style_request.py | b943384a1e3c0710cc84df74119e6edf7356037e | MIT | null | null | null |

```python
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="insert_style_request.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
from six.moves.urllib.parse import quote
class InsertStyleRequest(object):
"""
Request model for insert_style operation.
Initializes a new instance.
    :param name: The filename of the input document.
    :param style_insert: The properties of the style.
    :param folder: Original document folder.
    :param storage: Original document storage.
    :param load_encoding: Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
    :param password: Password for opening an encrypted document.
    :param dest_file_name: Result path of the document after the operation. If this parameter is omitted then the result of the operation will be saved as the source document.
    :param revision_author: Initials of the author to use for revisions. If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
    :param revision_date_time: The date and time to use for revisions.
"""
def __init__(self, name, style_insert, folder=None, storage=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.name = name
self.style_insert = style_insert
self.folder = folder
self.storage = storage
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'name' is set
if self.name is None:
raise ValueError("Missing the required parameter `name` when calling `insert_style`") # noqa: E501
# verify the required parameter 'style_insert' is set
if self.style_insert is None:
raise ValueError("Missing the required parameter `style_insert` when calling `insert_style`") # noqa: E501
path = '/v4.0/words/{name}/styles/insert'
path_params = {}
if self.name is not None:
path_params['name'] = self.name # noqa: E501
else:
path_params['name'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.folder is not None:
query_params.append(('folder', self.folder)) # noqa: E501
if self.storage is not None:
query_params.append(('storage', self.storage)) # noqa: E501
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
# HTTP header `Content-Type`
header_params['Content-Type'] = api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
form_params = []
body_params = None
if self.style_insert is not None:
body_params = self.style_insert
return {
"method": "POST",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'StyleResponse' # noqa: E501
}
def get_response_type(self):
return 'StyleResponse' # noqa: E501
```

avg_line_length: 48.395161 · max_line_length: 255 · alphanum_fraction: 0.652725

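A minimal usage sketch for the request model above; `api_client` is assumed to be a configured Aspose Words Cloud API client (however the surrounding SDK constructs it), and the style payload is a placeholder rather than a documented value:

```python
# Hypothetical usage of InsertStyleRequest; api_client and the payload below
# are placeholders, not values taken from the SDK's documentation.
style_insert = {"StyleName": "My Heading", "StyleType": "Paragraph"}  # placeholder
request = InsertStyleRequest(
    name="sample.docx",         # required: filename of the input document
    style_insert=style_insert,  # required: properties of the style to insert
    folder="documents",         # optional: original document folder
)
http_request = request.create_http_request(api_client)
# http_request is a plain dict with method ('POST'), path, query/header/form
# params, the body, and the expected response type ('StyleResponse').
```
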
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f009d029734bdfc8e417b0aaf0013ca43b6d | 244 | py | Python | csyu12/RS_System | apps/listings/urls.py | 940b58e776dc59c7d287975bf145acdbb85d1018 | MIT | null | null | null |

```python
from django.urls import path
from . import views
app_name = 'listings'
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search'),
]
```

avg_line_length: 24.4 · max_line_length: 60 · alphanum_fraction: 0.668033

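For reference, the three named routes above resolve with Django's `reverse()` through the `listings` namespace; the resulting paths assume this urlconf is included at the site root:

```python
# Resolving the named routes above (assumes the app's urls are included at
# the project root, e.g. path('', include('apps.listings.urls'))).
from django.urls import reverse

reverse("listings:listings")            # -> '/'
reverse("listings:listing", args=[42])  # -> '/42'
reverse("listings:search")              # -> '/search'
```
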
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f1aada96ed8396fa9af2358223a6f115491b | 3,596 | py | Python | ejeschke/glue | glue/plugins/dendro_viewer/tests/test_data_factory.py | 21689e3474aeaeb70e258d76c60755596856976c | BSD-3-Clause | 3 (2015-09-10T22:23:55.000Z → 2019-04-04T18:47:33.000Z) | null | null |

```python
from __future__ import absolute_import, division, print_function
import os
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from glue.tests.helpers import make_file
from glue.core.data_factories.helpers import find_factory
from glue.core import data_factories as df
from glue.tests.helpers import requires_astrodendro
DATA = os.path.join(os.path.dirname(__file__), 'data')
@requires_astrodendro
@pytest.mark.parametrize('filename', ['dendro.fits', 'dendro_old.fits', 'dendro.hdf5'])
def test_is_dendro(filename):
from ..data_factory import is_dendro
assert is_dendro(os.path.join(DATA, filename))
@requires_astrodendro
@pytest.mark.parametrize('filename', ['dendro.fits', 'dendro_old.fits', 'dendro.hdf5'])
def test_find_factory(filename):
from ..data_factory import load_dendro
assert find_factory(os.path.join(DATA, filename)) is load_dendro
@requires_astrodendro
def test_identifier_heuristics(tmpdir):
filename = tmpdir.join('test.fits').strpath
from ..data_factory import is_dendro
from astropy.io import fits
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
hdulist.writeto(filename)
assert not is_dendro(filename)
hdulist.append(fits.ImageHDU())
hdulist.writeto(filename, clobber=True)
assert not is_dendro(filename)
hdulist[1].name = 'random'
hdulist.writeto(filename, clobber=True)
assert not is_dendro(filename)
hdulist[1].name = ''
hdulist[0].data = np.array([1, 2, 3])
hdulist.writeto(filename, clobber=True)
assert not is_dendro(filename)
hdulist[0].data = None
hdulist[1].data = np.ones((3, 4))
hdulist[2].data = np.ones((2, 4))
hdulist[3].data = np.ones((3, 5))
hdulist.writeto(filename, clobber=True)
assert not is_dendro(filename)
hdulist[2].data = np.ones((3, 4))
hdulist.writeto(filename, clobber=True)
assert not is_dendro(filename)
hdulist[3].data = np.ones(3)
hdulist.writeto(filename, clobber=True)
assert is_dendro(filename)
@requires_astrodendro
def test_dendrogram_load():
from ..data_factory import load_dendro
data = b"""x\xda\xed\xda]K\xc2`\x18\xc6\xf1^\xbe\xc8}fA\xe4[X\x14\x1eX\x99<\x90S\xd8\x02O\x9f\xf2Q<\xd8&\xcf&\xe4\xb7\xcft\x82\xc9\xe6\x1be\x91\xff\xdf\xc9\xc5\xd8v\xc1vt\xeff\xaej\xb6\x9f\xeb"UI\xe1I^\xde\xc2\xa0\x17Z?\x928\x94\'\xe5\xb9\x12\xc5:\xe8j\xdb\x95T\xf7\xcak\xabNF\xdf\xcd\xa4O[\xab\xc7\xd2\xd5\xb1\x96x<4\xb2\x86S\xeb(W2\xfa\n\x93\xbe`\xe4\xbf\x1a+ao\xde<\xf0M\x10\r\xc2 J\xed\xabw\xbc\xba\xf3\x98\xf9\xbc[\x9b\x96\x01\x00\x00\xe0`|\x8e\x93\xaej9U\xc9\xa9f\xad1\x99\xa4%\xb7p:/\xca\xd7}#\xe6=\x9eM\xa5\xeb\xfaV\xcd\xcf\x95\xabo\x9e\x9f\x8b\xdb\xcf\xcf\xd3\xbebF_e\xfb\xf7\xd7~h\xbd8\xdeF\xf3\xfdP[\xed\x9b\xd8\xd8hE_cU\xdf\xd7\xe7\xed\xdbp4\x8c\x98\xef\x01\x00\x00\xf6\xeah\xe68\xc9\x93$O3\x8e\xe7\xd7\x01\x00\x00\x00\x07i\x9f\xfb\xe7r\x89\xfd3\xfbg\x00\x00\x80\x7f\xb1\x7fN\xdbA\x03\x00\x00\x00\xf8\xc5\xfd\xf3_\xff\xff\xb9t\xcd\xfe\x19\x00\x00\x00\x1b\xed\x9f\xcf\x96\xb2\x98\xe4m\x92\xe5$/\x93,d\xe4E\x92\xa5\x1d\xef?_:\xde\xf5\xfe;\xbe\x8c\x00\x00\x00\xf0\x13>\x00\x8e\xbe x"""
with make_file(data, 'fits', decompress=True) as fname:
dg, im = df.load_data(fname, factory=load_dendro)
assert_array_equal(im['intensity'], [1, 2, 3, 2, 3, 1])
assert_array_equal(im['structure'], [0, 0, 1, 0, 2, 0])
assert_array_equal(dg['parent'], [-1, 0, 0])
assert_array_equal(dg['height'], [3, 3, 3])
assert_array_equal(dg['peak'], [3, 3, 3])
```

avg_line_length: 37.458333 · max_line_length: 998 · alphanum_fraction: 0.716908

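Taken together, the assertions in `test_identifier_heuristics` pin down the file shape being detected; the sketch below reconstructs that heuristic from the test alone and is not glue's actual `is_dendro` implementation:

```python
# Approximation of the dendrogram-FITS heuristic, inferred from the asserts in
# test_identifier_heuristics above (not glue's real is_dendro).
from astropy.io import fits

def looks_like_dendro_fits(filename):
    with fits.open(filename) as hdulist:
        if len(hdulist) < 4:
            return False
        primary, data_hdu, index_hdu, newick_hdu = hdulist[:4]
        # an empty primary HDU and an unnamed first extension are expected
        if primary.data is not None or data_hdu.name:
            return False
        if any(h.data is None for h in (data_hdu, index_hdu, newick_hdu)):
            return False
        # data and index maps share a shape; the newick HDU is 1-D
        return (data_hdu.data.shape == index_hdu.data.shape
                and newick_hdu.data.ndim == 1)
```
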
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f1c398c8021c2cabc4e1a6b2a87f1eb7a149 | 989 | py | Python | oliverelias/sazabi | sazabi/plugins/imgur.py | 53e2c5fe5a823bb5814b3c9a614adee689fe3d2a | MIT | null | 1 (2018-08-25T04:13:25.000Z → 2018-08-25T04:13:25.000Z) | 2 (2016-08-26T06:46:33.000Z → 2018-08-23T07:55:57.000Z) |

```python
import logging
from random import choice
from sazabi.types import SazabiBotPlugin
class Imgur(SazabiBotPlugin):
async def parse(self, client, message, *args, **kwargs):
imgur_client = kwargs.get('imgur')
pic = None
if message.content == "~imgur":
self.logger.debug('Processing imgur command')
pics = imgur_client.gallery_random(page=0)
pic = choice(pics).link
elif message.content == "~meme":
self.logger.debug('Processing meme command')
memes = imgur_client.memes_subgallery(sort='viral', page=0, window='week')
pic = choice(memes).link
elif message.content == "~robot":
keyword = choice(['gundam', 'mecha'])
self.logger.debug('Processing robot command')
robots = imgur_client.gallery_search(keyword)
pic = choice(robots).link
if pic is not None:
await client.send_message(message.channel, pic)
```

avg_line_length: 30.90625 · max_line_length: 86 · alphanum_fraction: 0.614762

| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f1c5c2b44c0b6413380c61c45897ffb8dfa7 | 2,570 | py | Python | nordme/nordme_work_repo | fix_mri_names.py | 950a5077730885330a4975c4ca1bd3c51903a1a6 | MIT | null | null | null |

```python
# -*- coding: utf-8 -*-
"""This script looks up folders in a given directory,
chooses genz folders and files that follow mri naming conventions,
and renames them with the new convention."""
import os
import os.path as op
import fnmatch as fm
# get and sort all the folders from a directory
parent_dir = '/brainstudio/MEG/genz/anatomy/fix/'
# parent_dir = '/home/nordme/MEG_data/rsMEG/'
print("Fetching folders from %s" % parent_dir)
folders = os.listdir(parent_dir)
folders.sort()
# choose folders to rename; rename folders; compile list of genz folders
genz_folders = []
for folder in folders:
if 'sub-' in folder:
try:
os.rename(op.join(parent_dir + folder),
op.join(parent_dir + (folder.replace('sub-genz', 'genz'))))
print('Renaming %s' % folder)
except FileNotFoundError:
print('File %s not found.' % folder)
print('%s' % op.join(parent_dir + (folder.replace('sub-genz', 'genz'))))
genz_folders.append(folder.replace('sub-genz', 'genz'))
folders = os.listdir(parent_dir)
folders.sort()
for folder in folders:
if 'ses-1' in folder:
try:
os.rename(op.join(parent_dir + folder),
op.join(parent_dir + (folder.replace('_ses-1_freesurfer_adult_bnmprage', '_'))))
print('Renaming %s' % folder)
except FileNotFoundError:
print('File %s not found.' % folder)
print('%s' % op.join(parent_dir + (folder.replace('_ses-1_freesurfer_adult_bnmprage', '_'))))
print('Our genz folders are: %s' % genz_folders)
folders = os.listdir(parent_dir)
folders.sort()
# add the age suffixes
for folder in folders:
if 'genz' in folder:
if fm.fnmatch(folder, 'genz5*'):
if fm.fnmatch(folder, 'genz530*'):
pass
else:
os.rename(op.join(parent_dir + folder), op.join(parent_dir + folder + '17a'))
elif fm.fnmatch(folder, 'genz4*'):
os.rename(op.join(parent_dir + folder), op.join(parent_dir + folder + '15a'))
elif fm.fnmatch(folder, 'genz3*'):
os.rename(op.join(parent_dir + folder), op.join(parent_dir + folder + '13a'))
elif fm.fnmatch(folder, 'genz2*'):
os.rename(op.join(parent_dir + folder), op.join(parent_dir + folder + '11a'))
elif fm.fnmatch(folder, 'genz1*'):
os.rename(op.join(parent_dir + folder), op.join(parent_dir + folder + '9a'))
else:
raise ValueError('Hey, this folder name is weird. Look at %s' % folder)
```

avg_line_length: 35.205479 · max_line_length: 105 · alphanum_fraction: 0.618288

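The script's three rename passes compose into a single name mapping; below is a hypothetical pure-function equivalent (not part of the original script), convenient for dry-running the convention without touching the filesystem:

```python
# Hypothetical dry-run equivalent of the renames above: maps an old folder
# name to its final name without touching disk.
def map_name(folder):
    name = folder.replace('sub-genz', 'genz')
    name = name.replace('_ses-1_freesurfer_adult_bnmprage', '_')
    if 'genz' not in name or name.startswith('genz530'):
        return name  # non-genz folders and genz530* are left as-is
    suffixes = {'5': '17a', '4': '15a', '3': '13a', '2': '11a', '1': '9a'}
    digit = name[len('genz')]
    if digit not in suffixes:
        raise ValueError('Hey, this folder name is weird. Look at %s' % name)
    return name + suffixes[digit]

# e.g. map_name('sub-genz123_ses-1_freesurfer_adult_bnmprage') -> 'genz123_9a'
```
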
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f21d02fc89c19f5471830e16e6e19f1b3dba | 10,302 | py | Python | Paeans/phylognn | g3median_gcn_sneg-syme-2rl.py | 45048d2e68af7c9114ada7e3ede9e765d10fe0a1 | MIT | null | null | null |

```python
import time
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
# from torch_geometric.nn import VGAE
from torch_geometric.loader import DataLoader
from torch_geometric.utils import (degree, negative_sampling,
batched_negative_sampling,
add_self_loops, to_undirected)
from torch.utils.tensorboard import SummaryWriter
from gene_graph_dataset import G3MedianDataset
from phylognn_model import G3Median_GCNConv, G3Median_VGAE
from sklearn.metrics import (roc_auc_score, roc_curve,
average_precision_score,
precision_recall_curve,
f1_score, matthews_corrcoef)
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpuid", type=int, default = 0)
# parser.add_argument("--run", type=int)
parser.add_argument("--seqlen", type=int)
parser.add_argument("--rate", type=float, default = 0.1)
parser.add_argument("--samples", type=int, default = 1000)
parser.add_argument("--epoch", type=int, default=1000)
parser.add_argument("--cvsplit", type=int, default=5)
parser.add_argument("--freq", type=int, default=20)
parser.add_argument("--shuffle", type=int, default=1)
args = parser.parse_args()
gpuid = args.gpuid # 0
# train_p, test_p, val_p = 0.7, 0.2, 0.1
train_batch, test_batch, val_batch = 256, 64, 8
device = torch.device('cuda:' + str(gpuid) if torch.cuda.is_available() else 'cpu')
dataset = G3MedianDataset('dataset_g3m', args.seqlen, int(args.seqlen * args.rate), args.samples)
in_channels, out_channels = 256, 128
# data_size = len(dataset)
# train_size, test_size, val_size = ((int)(data_size * train_p),
# (int)(data_size * test_p),
# (int)(data_size * val_p))
# print(f'dataset size: {data_size:0>5}')
dataset = dataset.shuffle()
# train_dataset = dataset[:train_size]
# test_dataset = dataset[train_size:(train_size + test_size)]
# val_dataset = dataset[(train_size + test_size):(train_size + test_size + val_size)]
# test_dataset = list(test_dataset)
# for t in test_dataset:
# t.pos_edge_label_index = add_self_loops(to_undirected(t.pos_edge_label_index))[0]
# t.neg_edge_label_index = negative_sampling(t.pos_edge_label_index,
# t.num_nodes,
# t.num_nodes**2)
# train_dataset = list(train_dataset)
# for t in train_dataset:
# t.pos_edge_label_index = add_self_loops(to_undirected(t.pos_edge_label_index))[0]
# t.neg_edge_label_index = negative_sampling(t.pos_edge_label_index,
# t.num_nodes,
# t.num_nodes**2)
# val_dataset = list(val_dataset)
# for t in val_dataset:
# t.pos_edge_label_index = add_self_loops(to_undirected(t.pos_edge_label_index))[0]
# t.neg_edge_label_index = negative_sampling(t.pos_edge_label_index,
# t.num_nodes,
# t.num_nodes**2)
# from torch_geometric.data import Batch
def train(model, train_loader):
model.train()
total_loss = 0
for data in train_loader:
optimizer.zero_grad()
data = data.to(device)
z = model.encode(data.x, data.edge_index)
loss = model.recon_loss_wt(z, data.pos_edge_label_index, data.neg_edge_label_index, 1.5, 1) * 5
loss = loss + (1 / data.num_nodes) * model.kl_loss() * 0.5
loss.backward()
optimizer.step()
        total_loss += loss.item()  # accumulate as a plain float
return total_loss/len(train_loader)
# @torch.no_grad()
# def test(model, test_loader):
# model.eval()
# auc, ap = 0, 0
# for data in test_loader:
# data = data.to(device)
# z = model.encode(data.x, data.edge_index)
# # loss += model.recon_loss(z, data.pos_edge_label_index, data.neg_edge_label_index)
# tauc, tap = model.test(z, data.pos_edge_label_index) #, data.neg_edge_label_index)
# auc += tauc
# ap += tap
# return auc/len(test_loader), ap/len(test_loader)
@torch.no_grad()
def predict(model, test_loader):
model.eval()
y_list, pred_list = [], []
for data in test_loader:
data = data.to(device)
z = model.encode(data.x, data.edge_index)
# loss += model.recon_loss(z, data.pos_edge_label_index, data.neg_edge_label_index)
pl, nl = data.pos_edge_label_index.size(-1), data.neg_edge_label_index.size(-1)
neg_index = torch.randperm(nl)[:pl]
y, pred = model.pred(z, data.pos_edge_label_index, data.neg_edge_label_index[:, neg_index])
y_list.append(y)
pred_list.append(pred)
return y_list, pred_list
@torch.no_grad()
def val(model, val_loader):
model.eval()
loss = 0
for data in val_loader:
data = data.to(device)
z = model.encode(data.x, data.edge_index)
loss += model.recon_loss_wt(z, data.pos_edge_label_index, data.neg_edge_label_index, 1.5, 1)
# tauc, tap = model.test(z, data.pos_edge_label_index, data.neg_edge_label_index)
return loss/len(val_loader)
def auc_ap(y_list, pred_list):
pred_accuracy = [[roc_auc_score(y, pred), average_precision_score(y, pred)]
for y, pred in zip(y_list, pred_list)]
auc, ap = np.mean(pred_accuracy, axis = 0)
return auc, ap
def cal_accuracy(y_list, pred_list):
# pred_accuracy = np.zeros((len(y_list), 2))
# for i in range(len(y_list)):
# y, pred = y_list[i], pred_list[i]
# pred_accuracy[i] = [roc_auc_score(y, pred),
# average_precision_score(y, pred)]
figsize = (6,6)
y, pred = np.concatenate([[t, p] for t, p in zip(y_list, pred_list)], axis = -1)
auc, ap = roc_auc_score(y, pred), average_precision_score(y, pred)
auc_figure = plt.figure(figsize=figsize)
fpr, tpr, _ = roc_curve(y, pred)
plt.plot(fpr, tpr, color='g', lw=0.3)
# for i in range(len(y_list)):
# y, pred = y_list[i], pred_list[i]
# fpr, tpr, _ = roc_curve(y, pred)
# plt.plot(fpr, tpr, color='g', lw=0.3)
plt.plot([0, 1], [0, 1], color="navy", lw=0.3, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title(f'Receiver Operating Characteristic ({auc:.4f})')
# plt.legend(loc="lower right")
ap_figure = plt.figure(figsize=figsize)
prc, rec, _ = precision_recall_curve(y, pred)
plt.plot(rec, prc, color='c', lw=0.3)
# for i in range(len(y_list)):
# y, pred = y_list[i], pred_list[i]
# prc, rec, _ = precision_recall_curve(y, pred)
# plt.plot(rec, prc, color='c', lw=0.3)
plt.plot([0, 1], [0, 1], color="navy", lw=0.3, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f'Precision-Recall Curve ({ap:.4f})')
return [auc, ap], [auc_figure, ap_figure] #, ('auc', 'ap')
y_pred_res = []
counter = 1
for train_index, test_index in KFold(n_splits = args.cvsplit).split(dataset):
print(f'{time.ctime()} -- seqlen:{args.seqlen:0>4} '
f'rate:{args.rate:.2f} samples:{args.samples:0>5} -- fold: {counter:0>2}')
model = G3Median_VGAE(G3Median_GCNConv(in_channels, out_channels)).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10,
min_lr=0.00001,verbose=True)
writer = SummaryWriter(log_dir='runs_g3median_' f'{args.seqlen:0>4}' '/s' f'{args.samples:0>5}' '_r'
f'{args.rate:0>3.1f}' '_' 'run' f'{counter:0>2}')
    fold_train = dataset[train_index]
    test_dataset = dataset[test_index]
    # split the training fold 90/10 into disjoint train and validation sets
    split = int(len(fold_train) * 0.9)
    train_dataset = fold_train[:split]
    val_dataset = fold_train[split:]
train_loader = DataLoader(train_dataset, batch_size = train_batch, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size = test_batch)
val_loader = DataLoader(val_dataset, batch_size = val_batch)
start_time = time.time()
y_pred = None
p_auc, p_ap = 0, 0
for epoch in range(1, args.epoch + 1):
loss = train(model, train_loader)
tloss = val(model, val_loader)
scheduler.step(tloss)
writer.add_scalar('loss/train', loss, epoch)
writer.add_scalar('loss/val', tloss, epoch)
# if epoch % args.freq != 0:
# continue
y_list, pred_list = predict(model, test_loader)
# pred_acc, figures = cal_accuracy(y_list, pred_list)
# auc, ap = pred_acc
# y_list, pred_list = predict(model, test_dataset)
auc, ap = auc_ap(y_list, pred_list)
writer.add_scalar('auc/test', auc, epoch)
writer.add_scalar('ap/test', ap, epoch)
# writer.add_figure('roc/test', figures[0], epoch)
# writer.add_figure('pr/test', figures[1], epoch)
if auc >= p_auc and ap >= p_ap:
y_pred = np.concatenate([np.array([y, pred])
for y, pred in zip(y_list, pred_list)],
axis = 1)
p_auc, p_ap = auc, ap
end_time = time.time()
print(f'{time.ctime()} -- seqlen:{args.seqlen:0>4} '
f'rate:{args.rate:.2f} samples:{args.samples:0>5} -- fold: {counter:0>2}'
          f' -- {(end_time - start_time)/args.epoch:>10.3f}s * {args.epoch:0>4} epochs')
y_pred_res.append(y_pred)
writer.close()
counter += 1
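    # note that the 'break' below exits after the first KFold split, so only
    # one of the args.cvsplit folds is actually trained and evaluated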
break
torch.save(y_pred_res,
f'y_pred/ldel' f'{args.seqlen:0>4}'
'-r' f'{args.rate:0>3.1f}'
'-s' f'{args.samples:0>5}'
'-' f'{int(time.time()):0>10}.pt')
```

avg_line_length: 36.274648 · max_line_length: 105 · alphanum_fraction: 0.601922

| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f22dce88c6fad779ff2d3ea47a5a05b47ae7 | 862 | py | Python | koyappe/MyHAN | data_util.py | d088812f6e0dc00a45fb478f6df05be81aac202c | MIT | null | null | null |

```python
import numpy as np
#import cupy
def batch(inputs):
batch_size = len(inputs)
#print(batch_size)
document_sizes = np.array([len(doc) for doc in inputs], dtype=np.int32)
#print(document_sizes)
document_size = document_sizes.max()
#print(document_size)
#for doc in inputs:
#for sent in doc:
#print(sent)
sentence_sizes_ = [[len(sent) for sent in doc] for doc in inputs]
sentence_size = max(map(max, sentence_sizes_))
b = np.zeros(shape=[batch_size, document_size, sentence_size], dtype=np.int32) # == PAD
sentence_sizes = np.zeros(shape=[batch_size, document_size], dtype=np.int32)
for i, document in enumerate(inputs):
for j, sentence in enumerate(document):
sentence_sizes[i, j] = sentence_sizes_[i][j]
for k, word in enumerate(sentence):
b[i, j, k] = word
return b, document_sizes, sentence_sizes
```

avg_line_length: 31.925926 · max_line_length: 89 · alphanum_fraction: 0.697216

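A quick usage sketch showing the padding behaviour of `batch` (input values chosen purely for illustration):

```python
# Two documents: the first has two sentences, the second has one.
# Sentences are lists of word ids; missing positions stay 0 (PAD).
docs = [[[1, 2], [3]],
        [[4, 5, 6]]]
b, doc_sizes, sent_sizes = batch(docs)
# b.shape == (2, 2, 3): batch x longest document x longest sentence
# doc_sizes  -> [2, 1]
# sent_sizes -> [[2, 1], [3, 0]]
```
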
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f256388fd83ea41f1c9659733895ca5e2f63 | 4,058 | py | Python | anonymous2398384/provable_robustness_max_linear_regions | kolter_wong/convex_adversarial/utils.py | 529165d9047261813bc068997415f668c9675119 | BSD-3-Clause | 34 (2019-03-10T22:16:24.000Z → 2021-09-23T22:22:27.000Z) | 2 (2019-09-24T16:18:55.000Z → 2021-03-06T20:57:33.000Z) | 9 (2019-03-13T17:35:36.000Z → 2021-01-15T02:37:23.000Z) |

```python
import torch.nn as nn
###########################################
# Helper function to extract fully #
# shaped bias terms #
###########################################
def full_bias(l, n=None):
# expands the bias to the proper size. For convolutional layers, a full
# output dimension of n must be specified.
if isinstance(l, nn.Linear):
return l.bias.view(1, -1)
elif isinstance(l, nn.Conv2d):
if n is None:
raise ValueError("Need to pass n=<output dimension>")
b = l.bias.unsqueeze(1).unsqueeze(2)
if isinstance(n, int):
k = int((n / (b.numel())) ** 0.5)
return b.expand(1, b.numel(), k, k).contiguous().view(1, -1)
else:
return b.expand(1, *n)
elif isinstance(l, Dense):
return sum(full_bias(layer, n=n) for layer in l.Ws if layer is not None)
elif isinstance(l, nn.Sequential) and len(l) == 0:
return 0
else:
raise ValueError("Full bias can't be formed for given layer.")
###########################################
# Sequential models with skip connections #
###########################################
class DenseSequential(nn.Sequential):
def forward(self, x):
xs = [x]
for module in self._modules.values():
if 'Dense' in type(module).__name__:
xs.append(module(*xs))
else:
xs.append(module(xs[-1]))
return xs[-1]
class Dense(nn.Module):
def __init__(self, *Ws):
super(Dense, self).__init__()
self.Ws = nn.ModuleList(list(Ws))
if len(Ws) > 0 and hasattr(Ws[0], 'out_features'):
self.out_features = Ws[0].out_features
def forward(self, *xs):
xs = xs[-len(self.Ws):]
out = sum(W(x) for x, W in zip(xs, self.Ws) if W is not None)
return out
#######################################
# Epsilon for high probability bounds #
#######################################
import numpy as np
import time
def GR(epsilon):
return (epsilon ** 2) / (-0.5 * np.log(1 + (2 / np.pi * np.log(1 + epsilon)) ** 2)
+ 2 / np.pi * np.arctan(2 / np.pi * np.log(1 + epsilon)) * np.log(1 + epsilon))
def GL(epsilon):
return (epsilon ** 2) / (-0.5 * np.log(1 + (2 / np.pi * np.log(1 - epsilon)) ** 2)
+ 2 / np.pi * np.arctan(2 / np.pi * np.log(1 - epsilon)) * np.log(1 - epsilon))
def p_upper(epsilon, k):
return np.exp(-k * (epsilon ** 2) / GR(epsilon))
def p_lower(epsilon, k):
return np.exp(-k * (epsilon ** 2) / GL(epsilon))
def epsilon_from_model(model, X, k, delta, m):
if k is None or m is None:
raise ValueError("k and m must not be None. ")
if delta is None:
print('No delta specified, not using probabilistic bounds.')
return 0
X = X[0].unsqueeze(0)
out_features = []
for l in model:
X = l(X)
if isinstance(l, (nn.Linear, nn.Conv2d)):
out_features.append(X.numel())
num_est = sum(n for n in out_features[:-1] if k * m < n)
num_est += sum(n * i for i, n in enumerate(out_features[:-1]) if k * m < n)
print(num_est)
sub_delta = (delta / num_est) ** (1 / m)
l1_eps = get_epsilon(sub_delta, k)
if num_est == 0:
return 0
if l1_eps > 1:
raise ValueError('Delta too large / k too small to get probabilistic bound')
return l1_eps
def get_epsilon(delta, k, alpha=1e-2):
""" Determine the epsilon for which the estimate is accurate
with probability >(1-delta) and k projection dimensions. """
epsilon = 0.001
# probability of incorrect bound
start_time = time.time()
p_max = max(p_upper(epsilon, k), p_lower(epsilon, k))
while p_max > delta:
epsilon *= (1 + alpha)
p_max = max(p_upper(epsilon, k), p_lower(epsilon, k))
if epsilon > 1:
raise ValueError('Delta too large / k too small to get probabilistic bound (epsilon > 1)')
# print(time.time()-start_time)
return epsilon
```

avg_line_length: 31.952756 · max_line_length: 108 · alphanum_fraction: 0.539921

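A small sketch of how `Dense` and `DenseSequential` compose a skip connection; the layer sizes are arbitrary, and a `None` entry in `Dense` skips the activation at that position (as `full_bias` and `Dense.forward` already allow):

```python
# Sketch: a net whose last layer sees both the raw input and the ReLU output.
# DenseSequential calls a Dense module with all previous activations, and
# Dense.forward keeps the last len(Ws) of them, summing W_i(x_i) over non-None W_i.
import torch
import torch.nn as nn

net = DenseSequential(
    nn.Linear(10, 20),
    nn.ReLU(),
    Dense(nn.Linear(10, 5),   # applied to the original input x
          None,               # the pre-activation output at this position is skipped
          nn.Linear(20, 5)),  # applied to the ReLU output
)
out = net(torch.randn(8, 10))  # -> shape (8, 5)
```
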
| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f292c6d751107c2b61c282e4ff0c07a07e51 | 1,076 | py | Python | ggservice007/my-happy-modin | src/my_happy_modin/backends/__init__.py | ab293ecfa04516a5c9f76284e09b45cdd7588186 | Apache-2.0 | null | 2 (2021-01-27T11:25:26.000Z → 2021-01-27T12:47:53.000Z) | null |

```python
# Licensed to my_happy_modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The my_happy_modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from .base import BaseQueryCompiler
from .pandas import PandasQueryCompiler
__all__ = ["BaseQueryCompiler", "PandasQueryCompiler"]
try:
from .pyarrow import PyarrowQueryCompiler # noqa: F401
except ImportError:
pass
else:
__all__.append("PyarrowQueryCompiler")
```

avg_line_length: 44.833333 · max_line_length: 95 · alphanum_fraction: 0.787175

| hexsha | size | ext | lang | repo | path | head hexsha | licenses | stars | issues | forks |
|---|---|---|---|---|---|---|---|---|---|---|
| 4a17f33d0cea635a1a4ed35a48e09c8c2d4d55c2 | 65,968 | py | Python | ananpal/concord-bft | tests/apollo/test_skvbc_reconfiguration.py | c1d24020c0bcf8a445458c389b92a80dd38bcd0c | Apache-2.0 | null | null | null |

```python
# Concord
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import os.path
import unittest
import trio
from util import skvbc as kvbc
from util.bft import with_trio, with_bft_network, KEY_FILE_PREFIX, TestConfig
from util import operator
from util.object_store import ObjectStore, start_replica_cmd_prefix, with_object_store
import sys
from util import eliot_logging as log
import concord_msgs as cmf_msgs
sys.path.append(os.path.abspath("../../util/pyclient"))
import bft_client
def start_replica_cmd_with_object_store(builddir, replica_id, config):
"""
Return a command that starts an skvbc replica when passed to
subprocess.Popen.
    Note each argument is an element in the list.
"""
ret = start_replica_cmd_prefix(builddir, replica_id, config)
ret.extend(["-b", "2", "-q", "1", "-o", builddir + "/operator_pub.pem"])
return ret
def start_replica_cmd_with_object_store_and_ke(builddir, replica_id, config):
"""
Return a command that starts an skvbc replica when passed to
subprocess.Popen.
    Note each argument is an element in the list.
"""
ret = start_replica_cmd_prefix(builddir, replica_id, config)
ret.extend(["-b", "2", "-q", "1", "-e", str(True), "-o", builddir + "/operator_pub.pem"])
return ret
def start_replica_cmd(builddir, replica_id):
"""
Return a command that starts an skvbc replica when passed to
subprocess.Popen.
    Note each argument is an element in the list.
"""
statusTimerMilli = "500"
viewChangeTimeoutMilli = "10000"
path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
return [path,
"-k", KEY_FILE_PREFIX,
"-i", str(replica_id),
"-s", statusTimerMilli,
"-v", viewChangeTimeoutMilli,
"-l", os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties"),
"-b", "2",
"-q", "1",
"-o", builddir + "/operator_pub.pem"]
def start_replica_cmd_with_key_exchange(builddir, replica_id):
"""
Return a command that starts an skvbc replica when passed to
subprocess.Popen.
    Note each argument is an element in the list.
"""
statusTimerMilli = "500"
viewChangeTimeoutMilli = "10000"
path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
return [path,
"-k", KEY_FILE_PREFIX,
"-i", str(replica_id),
"-s", statusTimerMilli,
"-v", viewChangeTimeoutMilli,
"-l", os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties"),
"-b", "2",
"-q", "1",
"-e", str(True),
"-o", builddir + "/operator_pub.pem"]
class SkvbcReconfigurationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.object_store = ObjectStore()
@classmethod
def tearDownClass(cls):
pass
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_key_exchange_command(self, bft_network):
"""
No initial key rotation
Operator sends key exchange command to replica 0
        New keys for replica 0 should take effect at checkpoint 2, i.e. seqnum 300
"""
bft_network.start_all_replicas()
client = bft_network.random_client()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.key_exchange([0])
for i in range(450):
await skvbc.write_known_kv()
sent_key_exchange_counter = await bft_network.metrics.get(0, *["KeyExchangeManager", "Counters", "sent_key_exchange"])
assert sent_key_exchange_counter == 1
self_key_exchange_counter = await bft_network.metrics.get(0, *["KeyExchangeManager", "Counters", "self_key_exchange"])
assert self_key_exchange_counter == 1
public_key_exchange_for_peer_counter = await bft_network.metrics.get(1, *["KeyExchangeManager", "Counters", "public_key_exchange_for_peer"])
assert public_key_exchange_for_peer_counter == 1
@unittest.skip("unstable test. Tracked in BC-9406")
@with_trio
@with_bft_network(start_replica_cmd=start_replica_cmd_with_key_exchange,
selected_configs=lambda n, f, c: n == 7,
rotate_keys=True)
async def test_key_exchange_command_with_restart(self, bft_network):
"""
        - With initial key rotation (keys take effect at checkpoint 2)
        - Reach checkpoint 2, since a key cannot be generated twice within a two-checkpoint window
        - Operator sends key exchange command to replica 1 + validate execution
          (new keys for replica 1 should take effect at checkpoint 4, i.e. seqnum 600)
- Reach checkpoint 4
- Stop replica 1
- Client sends 50 requests
- Start replica 1
- Reach checkpoint 6 and validate replica 1 is back on track
"""
bft_network.start_all_replicas()
client = bft_network.random_client()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
await skvbc.fill_and_wait_for_checkpoint(initial_nodes=bft_network.all_replicas(),
num_of_checkpoints_to_add=2,
verify_checkpoint_persistency=False)
await self.send_and_check_key_exchange(target_replica=1, bft_network=bft_network, client=client)
await skvbc.fill_and_wait_for_checkpoint(initial_nodes=bft_network.all_replicas(),
num_of_checkpoints_to_add=2,
verify_checkpoint_persistency=False)
bft_network.stop_replica(1)
for i in range(50):
await skvbc.write_known_kv()
key, val = await skvbc.write_known_kv()
await skvbc.assert_kv_write_executed(key, val)
bft_network.start_replica(1)
await skvbc.fill_and_wait_for_checkpoint(initial_nodes=bft_network.all_replicas(),
num_of_checkpoints_to_add=2,
verify_checkpoint_persistency=False)
async def send_and_check_key_exchange(self, target_replica, bft_network, client):
sent_key_exchange_counter_before = await bft_network.metrics.get(target_replica, *["KeyExchangeManager", "Counters", "sent_key_exchange"])
self_key_exchange_counter_before = await bft_network.metrics.get(target_replica, *["KeyExchangeManager", "Counters", "self_key_exchange"])
# public_key_exchange_for_peer_counter_before = await bft_network.metrics.get(0, *["KeyExchangeManager", "Counters", "public_key_exchange_for_peer"])
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.key_exchange([target_replica])
await trio.sleep(seconds=5) # for status
sent_key_exchange_counter = await bft_network.metrics.get(1, *["KeyExchangeManager", "Counters", "sent_key_exchange"])
assert sent_key_exchange_counter == sent_key_exchange_counter_before + 1
self_key_exchange_counter = await bft_network.metrics.get(1, *["KeyExchangeManager", "Counters", "self_key_exchange"])
assert self_key_exchange_counter == self_key_exchange_counter_before +1
# public_key_exchange_for_peer_counter = await bft_network.metrics.get(0, *["KeyExchangeManager", "Counters", "public_key_exchange_for_peer"])
# assert public_key_exchange_for_peer_counter == 7
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_wedge_command(self, bft_network):
"""
Sends a wedge command and checks that the system stops processing new requests.
        Note that in this test we assume no failures and a synchronized network.
The test does the following:
1. A client sends a wedge command
2. The client verifies that the system reached a super stable checkpoint.
3. The client tries to initiate a new write bft command and fails
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
# We increase the default request timeout because we need to have around 300 consensuses which occasionally may take more than 5 seconds
        client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.wedge()
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, range(bft_network.config.n))
await self.verify_last_executed_seq_num(bft_network, checkpoint_before)
await self.validate_stop_on_super_stable_checkpoint(bft_network, skvbc)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_wedge_command_with_state_transfer(self, bft_network):
"""
This test checks that even a replica that received the super stable checkpoint via the state transfer mechanism
is able to stop at the super stable checkpoint.
The test does the following:
1. Start all replicas but 1
2. A client sends a wedge command
        3. Validate that all started replicas have reached the next next checkpoint
4. Start the late replica
5. Validate that the late replica completed the state transfer
6. Validate that all replicas stopped at the super stable checkpoint and that new commands are not being processed
"""
initial_prim = 0
late_replicas = bft_network.random_set_of_replicas(1, {initial_prim})
on_time_replicas = bft_network.all_replicas(without=late_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
await skvbc.wait_for_liveness()
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
client = bft_network.random_client()
# We increase the default request timeout because we need to have around 300 consensuses which occasionally may take more than 5 seconds
        client.config = client.config._replace(req_timeout_milli=10000)
with log.start_action(action_type="send_wedge_cmd",
checkpoint_before=checkpoint_before,
late_replicas=list(late_replicas)):
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.wedge()
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, on_time_replicas)
bft_network.start_replicas(late_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in late_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, range(bft_network.config.n))
await self.validate_stop_on_super_stable_checkpoint(bft_network, skvbc)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_wedge_command_with_f_failures(self, bft_network):
"""
        This test checks that the system reaches the wedge point even with f crashed replicas,
        and that it is able to resume and make progress once replicas are restarted.
The test does the following:
1. Start all replicas but 2
2. A client sends a wedge command
3. Validate that all started replicas have reached the wedge point
4. Restart the live replicas and validate the system is able to make progress
5. Start the late replica
6. Validate that the late replicas completed the state transfer
7. Join the late replicas to the quorum and make sure the system is able to make progress
"""
initial_prim = 0
late_replicas = bft_network.random_set_of_replicas(2, {initial_prim})
on_time_replicas = bft_network.all_replicas(without=late_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
await skvbc.wait_for_liveness()
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
client = bft_network.random_client()
# We increase the default request timeout because we need to have around 300 consensuses which occasionally may take more than 5 seconds
        client.config = client.config._replace(req_timeout_milli=10000)
with log.start_action(action_type="send_wedge_cmd",
checkpoint_before=checkpoint_before,
late_replicas=list(late_replicas)):
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.wedge()
with trio.fail_after(seconds=60):
done = False
while done is False:
await op.wedge_status(quorum=bft_client.MofNQuorum(on_time_replicas, len(on_time_replicas)), fullWedge=False)
rsi_rep = client.get_rsi_replies()
done = True
for r in rsi_rep.values():
res = cmf_msgs.ReconfigurationResponse.deserialize(r)
status = res[0].response.stopped
if status is False:
done = False
break
# Make sure the system is able to make progress
bft_network.stop_replicas(on_time_replicas)
bft_network.start_replicas(on_time_replicas)
for i in range(100):
await skvbc.write_known_kv()
# Start late replicas and wait for state transfer to stop
bft_network.start_replicas(late_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in late_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=True)
replicas_to_stop = bft_network.random_set_of_replicas(2, late_replicas | {initial_prim})
# Make sure the system is able to make progress
for i in range(100):
await skvbc.write_known_kv()
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_wedge_command_and_specific_replica_info(self, bft_network):
"""
        Sends a wedge command and checks that the system stops processing new requests.
        Note that in this test we assume no failures and a synchronized network.
The test does the following:
1. A client sends a wedge command
2. The client then sends a "Have you stopped" read only command such that each replica answers "I have stopped"
3. The client validates with the metrics that all replicas have stopped
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
# We increase the default request timeout because we need to have around 300 consensuses which occasionally may take more than 5 seconds
        client.config = client.config._replace(req_timeout_milli=10000)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.wedge()
with trio.fail_after(seconds=90):
done = False
while done is False:
await op.wedge_status()
rsi_rep = client.get_rsi_replies()
done = True
for r in rsi_rep.values():
res = cmf_msgs.ReconfigurationResponse.deserialize(r)
status = res[0].response.stopped
if status is False:
done = False
break
await self.validate_stop_on_super_stable_checkpoint(bft_network, skvbc)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_wedge_command_where_noops_should_be_sent_in_two_parts(self, bft_network):
"""
        Sends a wedge command at sequence number 300 and checks that the system stops processing new requests.
        This way, when the primary tries to send noop commands, the working window reaches only up to 450.
        Thus, it has to wait for a new stable checkpoint before sending the last 150 noops.
Note: In this test we assume that the batch duration is no
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
# We increase the default request timeout because we need to have around 300 consensuses which occasionally may take more than 5 seconds
        client.config = client.config._replace(req_timeout_milli=10000)
# bring the system to sequence number 299
for i in range(299):
await skvbc.write_known_kv()
# verify that all nodes are in sequence number 299
not_reached = True
with trio.fail_after(seconds=30):
while not_reached:
not_reached = False
for r in bft_network.all_replicas():
lastExecSeqNum = await bft_network.get_metric(r, bft_network, "Gauges", "lastExecutedSeqNum")
if lastExecSeqNum != 299:
not_reached = True
break
        # Now, send a wedge command. The wedge command sequence number is 300. Hence, at this point the working window
        # is between 150 and 450. But the wedge command will make the primary send noops until 600.
        # We want to verify that the primary manages to send the noops as required.
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.wedge()
# now, verify that the system has managed to stop
with trio.fail_after(seconds=90):
done = False
while done is False:
await op.wedge_status()
rsi_rep = client.get_rsi_replies()
done = True
for r in rsi_rep.values():
res = cmf_msgs.ReconfigurationResponse.deserialize(r)
status = res[0].response.stopped
if status is False:
done = False
break
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, 2, range(bft_network.config.n))
await self.verify_last_executed_seq_num(bft_network, 2)
await self.validate_stop_on_super_stable_checkpoint(bft_network, skvbc)
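    # A possible refactor (hypothetical, not in the original file): the polling
    # loop repeated across the wedge tests above, spinning on wedge_status until
    # every queried replica reports it has stopped, under a trio deadline.
    async def wait_until_wedged(self, op, client, timeout=90, quorum=None, full_wedge=True):
        with trio.fail_after(seconds=timeout):
            while True:
                if quorum is None:
                    await op.wedge_status()
                else:
                    await op.wedge_status(quorum=quorum, fullWedge=full_wedge)
                rsi_rep = client.get_rsi_replies()
                if all(cmf_msgs.ReconfigurationResponse.deserialize(r)[0].response.stopped
                       for r in rsi_rep.values()):
                    return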
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_get_latest_pruneable_block(self, bft_network):
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
        # Create 100 blocks; in total, including the genesis block, we have 101 blocks
k, v = await skvbc.write_known_kv()
for i in range(99):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Get the minimal latest pruneable block among all replicas
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.latest_pruneable_block()
rsi_rep = client.get_rsi_replies()
min_prunebale_block = 1000
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
if lpab.response.block_id < min_prunebale_block:
min_prunebale_block = lpab.response.block_id
# Create another 100 blocks
k, v = await skvbc.write_known_kv()
for i in range(99):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Get the new minimal latest pruneable block
await op.latest_pruneable_block()
rsi_rep = client.get_rsi_replies()
min_prunebale_block_b = 1000
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
if lpab.response.block_id < min_prunebale_block_b:
min_prunebale_block_b = lpab.response.block_id
assert min_prunebale_block < min_prunebale_block_b
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_pruning_command(self, bft_network):
with log.start_action(action_type="test_pruning_command"):
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
            # Create 100 blocks; in total, including the genesis block, we have 101 blocks
k, v = await skvbc.write_known_kv()
for i in range(99):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Get the minimal latest pruneable block among all replicas
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.latest_pruneable_block()
latest_pruneable_blocks = []
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
latest_pruneable_blocks += [lpab.response]
await op.prune(latest_pruneable_blocks)
rsi_rep = client.get_rsi_replies()
# we expect to have at least 2f + 1 replies
for rep in rsi_rep:
r = rsi_rep[rep]
data = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
pruned_block = int(data.additional_data.decode('utf-8'))
assert pruned_block <= 90
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_pruning_command_with_failures(self, bft_network):
with log.start_action(action_type="test_pruning_command_with_faliures"):
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
            # Create 100 blocks; in total, including the genesis block, we have 101 blocks
k, v = await skvbc.write_known_kv()
for i in range(99):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Get the minimal latest pruneable block among all replicas
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.latest_pruneable_block()
latest_pruneable_blocks = []
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
latest_pruneable_blocks += [lpab.response]
# Now, crash one of the non-primary replicas
crashed_replica = 3
bft_network.stop_replica(crashed_replica)
await op.prune(latest_pruneable_blocks)
rsi_rep = client.get_rsi_replies()
# we expect to have at least 2f + 1 replies
for rep in rsi_rep:
r = rsi_rep[rep]
data = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
pruned_block = int(data.additional_data.decode('utf-8'))
assert pruned_block <= 90
# creates 100 new blocks
for i in range(100):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
            # Now, bring back the crashed replica and wait for it to finish state transfer
bft_network.start_replica(crashed_replica)
await self._wait_for_st(bft_network, crashed_replica, 150)
# We expect the late replica to catch up with the state and to perform pruning
with trio.fail_after(seconds=30):
while True:
num_replies = 0
await op.prune_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
last_prune_blockid = status.response.last_pruned_block
if status.response.in_progress is False and last_prune_blockid <= 90 and last_prune_blockid > 0:
num_replies += 1
if num_replies == bft_network.config.n:
break
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_pruning_status_command(self, bft_network):
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.prune_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.in_progress is False
assert status.response.last_pruned_block == 0
        # Create 100 blocks; in total, including the genesis block, we have 101 blocks
k, v = await skvbc.write_known_kv()
for i in range(99):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Get the minimal latest pruneable block among all replicas
await op.latest_pruneable_block()
latest_pruneable_blocks = []
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
latest_pruneable_blocks += [lpab.response]
await op.prune(latest_pruneable_blocks)
        # Verify the system is able to process new write requests (which means that pruning is done)
with trio.fail_after(30):
await skvbc.write_known_kv()
await op.prune_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.in_progress is False
assert status.response.last_pruned_block <= 90
@with_trio
@with_bft_network(start_replica_cmd=start_replica_cmd_with_object_store, num_ro_replicas=1, selected_configs=lambda n, f, c: n == 7)
async def test_pruning_with_ro_replica(self, bft_network):
bft_network.start_all_replicas()
ro_replica_id = bft_network.config.n
bft_network.start_replica(ro_replica_id)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
op = operator.Operator(bft_network.config, client, bft_network.builddir)
        # Create more than 150 blocks in total
k, v = await skvbc.write_known_kv()
for i in range(200):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
        # Wait for the read only replica to catch up with the state
await self._wait_for_st(bft_network, ro_replica_id, 150)
# Get the minimal latest pruneable block among all replicas
await op.latest_pruneable_block()
latest_pruneable_blocks = []
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
latest_pruneable_blocks += [lpab.response]
await op.prune(latest_pruneable_blocks)
        # Verify the system is able to process new write requests (which means that pruning is done)
with trio.fail_after(30):
await skvbc.write_known_kv()
await op.prune_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.in_progress is False
assert status.response.last_pruned_block == 150
@with_trio
@with_bft_network(start_replica_cmd=start_replica_cmd_with_object_store, num_ro_replicas=1, selected_configs=lambda n, f, c: n == 7)
async def test_pruning_with_ro_replica_failure(self, bft_network):
bft_network.start_all_replicas()
ro_replica_id = bft_network.config.n
bft_network.start_replica(ro_replica_id)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
op = operator.Operator(bft_network.config, client, bft_network.builddir)
        # Create more than 150 blocks in total
k, v = await skvbc.write_known_kv()
for i in range(200):
v = skvbc.random_value()
await client.write(skvbc.write_req([], [(k, v)], 0))
# Wait for the read only replica to catch up with the state
await self._wait_for_st(bft_network, ro_replica_id, 150)
# Get the minimal latest pruneable block among all replicas
await op.latest_pruneable_block()
latest_pruneable_blocks = []
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
lpab = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
latest_pruneable_blocks += [lpab.response]
# Remove the read only replica's latest pruneable block from the list
for m in latest_pruneable_blocks:
if m.replica >= bft_network.config.n:
latest_pruneable_blocks.remove(m)
assert len(latest_pruneable_blocks) == bft_network.config.n
# Now, issue a prune request. We expect to receive an error, as the read only replica's latest pruneable block is missing
rep = await op.prune(latest_pruneable_blocks)
rep = cmf_msgs.ReconfigurationResponse.deserialize(rep)[0]
assert rep.success is False
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_addRemove_command(self, bft_network):
"""
Sends an addRemove command and checks that the new configuration is written to the blockchain.
Note that in this test we assume no failures and synchronized network.
The test does the following:
1. A client sends a addRemove command
2. The client reads the configuration back and verifies it
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
client = bft_network.random_client()
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration'
await op.add_remove(test_config)
await op.add_remove_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.reconfiguration == test_config
@with_trio
@with_bft_network(start_replica_cmd_with_key_exchange, selected_configs=lambda n, f, c: n == 7, rotate_keys=True)
async def test_remove_nodes(self, bft_network):
"""
Sends an addRemove command and checks that the new configuration is written to the blockchain.
Note that in this test we assume no failures and synchronized network.
The test does the following:
1. A client sends a remove command which will also wedge the system at the next-next checkpoint
2. Validate that all replicas have stopped
3. Load a new configuration to the bft network
4. Rerun the cluster with only 4 nodes and make sure they succeed in performing transactions on the fast path
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
key, val = await skvbc.write_known_kv()
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_4_f_1_c_0'
await op.add_remove_with_wedge(test_config, bft=False)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
await self.verify_add_remove_status(bft_network, test_config, quorum_all=False)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=4,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd_with_key_exchange,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
await bft_network.check_initital_key_exchange(stop_replicas=False)
for r in bft_network.all_replicas():
last_stable_checkpoint = await bft_network.get_metric(r, bft_network, "Gauges", "lastStableSeqNum")
self.assertEqual(last_stable_checkpoint, 0)
await self.validate_state_consistency(skvbc, key, val)
for i in range(100):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
assert( r < 4 )
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
@with_trio
@with_bft_network(start_replica_cmd=start_replica_cmd_with_object_store_and_ke, num_ro_replicas=1, rotate_keys=True,
selected_configs=lambda n, f, c: n == 7)
async def test_remove_nodes_with_ror(self, bft_network):
"""
Sends an addRemove command and checks that the new configuration is written to the blockchain.
Note that in this test we assume no failures and synchronized network.
The test does the following:
1. A client sends a remove command which will also wedge the system at the next-next checkpoint
2. Validate that all replicas have stopped
3. Wait for the read only replica to finish state transfer
4. Load a new configuration to the bft network
5. Rerun the cluster with only 4 nodes and make sure they succeed in performing transactions on the fast path
6. Make sure the read only replica is able to catch up with the new state
"""
bft_network.start_all_replicas()
ro_replica_id = bft_network.config.n
bft_network.start_replica(ro_replica_id)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100): # Produce new blocks
await skvbc.write_known_kv()
key, val = await skvbc.write_known_kv()
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_4_f_1_c_0'
await op.add_remove_with_wedge(test_config, bft=False)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
await self._wait_for_st(bft_network, ro_replica_id, 300)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=4,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd_with_object_store_and_ke,
stop_replica_cmd=None,
num_ro_replicas=1)
await bft_network.change_configuration(conf)
ro_replica_id = bft_network.config.n
await bft_network.check_initital_key_exchange(stop_replicas=False)
bft_network.start_replica(ro_replica_id)
for r in bft_network.all_replicas():
last_stable_checkpoint = await bft_network.get_metric(r, bft_network, "Gauges", "lastStableSeqNum")
self.assertEqual(last_stable_checkpoint, 0)
await self.validate_state_consistency(skvbc, key, val)
for i in range(150):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
assert( r < 4 )
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
# Wait for the read only replica to catch up with the state
await self._wait_for_st(bft_network, ro_replica_id, 150)
@with_trio
@with_bft_network(start_replica_cmd_with_key_exchange, selected_configs=lambda n, f, c: n == 7, rotate_keys=True)
async def test_remove_nodes_with_f_failures(self, bft_network):
"""
In this test we show how a system operator can remove nodes (and thus shrink the cluster) from a 7-node cluster
to a 4-node cluster even when f nodes are not responding
For that the operator performs the following steps:
1. Stop 2 nodes (f=2)
2. Send a remove_node command - this command also wedges the system
3. Verify that all live nodes have stopped
4. Load a new configuration to the bft network
5. Rerun the cluster with only 4 nodes and make sure they succeed in performing transactions on the fast path
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
client = bft_network.random_client()
for i in range(100):
await skvbc.write_known_kv()
# choose two replicas to crash and crash them
crashed_replicas = {5, 6} # For simplicity, we crash the last two replicas
bft_network.stop_replicas(crashed_replicas)
# All subsequent requests should go through the slow path
for i in range(100):
await skvbc.write_known_kv()
key, val = await skvbc.write_known_kv()
live_replicas = bft_network.all_replicas(without=crashed_replicas)
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_4_f_1_c_0'
await op.add_remove_with_wedge(test_config)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, live_replicas)
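# A checkpoint window spans 150 sequence numbers, so wedging at the next-next checkpoint means
# the last executed sequence number should be (checkpoint_before + 2) * 150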
expectedSeqNum = (checkpoint_before + 2) * 150
for r in live_replicas:
lastExecSn = await bft_network.get_metric(r, bft_network, "Gauges", "lastExecutedSeqNum")
self.assertEqual(expectedSeqNum, lastExecSn)
await self.validate_stop_on_wedge_point(bft_network, skvbc)
await self.verify_add_remove_status(bft_network, test_config, quorum_all=False)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=4,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd_with_key_exchange,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
await bft_network.check_initital_key_exchange(stop_replicas=False)
for r in bft_network.all_replicas():
last_stable_checkpoint = await bft_network.get_metric(r, bft_network, "Gauges", "lastStableSeqNum")
self.assertEqual(last_stable_checkpoint, 0)
await self.validate_state_consistency(skvbc, key, val)
for i in range(100):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
assert (r < 4)
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
@with_trio
@with_bft_network(start_replica_cmd_with_key_exchange, selected_configs=lambda n, f, c: n == 7, rotate_keys=True)
async def test_remove_nodes_with_failures(self, bft_network):
"""
In this test we show how a system operator can remove nodes (and thus shrink the cluster) from a 7-node cluster
to a 4-node cluster even when one node is not responding
For that the operator performs the following steps:
1. Stop one node
2. Send a remove_node command - this command also wedges the system
3. Verify that all live nodes have stopped
4. Start the stopped node and wait for it to reach the wedge point via state transfer
5. Load a new configuration to the bft network
6. Rerun the cluster with only 4 nodes and make sure they succeed in performing transactions on the fast path
"""
crashed_replica = 3
live_replicas = bft_network.all_replicas(without={crashed_replica})
bft_network.start_replicas(live_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
key, val = await skvbc.write_known_kv()
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_4_f_1_c_0'
await op.add_remove_with_wedge(test_config, False)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, live_replicas)
expectedSeqNum = (checkpoint_before + 2) * 150
for r in live_replicas:
lastExecSn = await bft_network.get_metric(r, bft_network, "Gauges", "lastExecutedSeqNum")
self.assertEqual(expectedSeqNum, lastExecSn)
# Verify that all live replicas have reached the wedge point
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=False)
# Start replica 3 and wait for state transfer to finish
bft_network.start_replica(crashed_replica)
await self._wait_for_st(bft_network, crashed_replica, 300)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=4,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd_with_key_exchange,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
await bft_network.check_initital_key_exchange(stop_replicas=False)
for r in bft_network.all_replicas():
last_stable_checkpoint = await bft_network.get_metric(r, bft_network, "Gauges", "lastStableSeqNum")
self.assertEqual(last_stable_checkpoint, 0)
await self.validate_state_consistency(skvbc, key, val)
for i in range(100):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
assert (r < 4)
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
@with_trio
@with_bft_network(start_replica_cmd, bft_configs=[{'n': 4, 'f': 1, 'c': 0, 'num_clients': 10}])
async def test_add_nodes(self, bft_network):
"""
Sends an addRemove command and checks that the new configuration is written to the blockchain.
Note that in this test we assume no failures and synchronized network.
The test does the following:
1. A client sends an add node command which will also wedge the system at the next-next checkpoint
2. Validate that all replicas have stopped
3. Load a new configuration to the bft network
4. Add node is done in phases, (n=4,f=1,c=0)->(n=6,f=1,c=0)->(n=7,f=2,c=0)
Note: For new replicas to catch up with existing replicas through ST, existing replicas must
move the checkpoint window, that means for n=7 configuration, there must be 5 non-faulty
replicas to move the checkpoint window, hence new replicas are added in two phases
5. Rerun the cluster with the new configuration and make sure the replicas succeed in performing transactions on the fast path
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_6_f_1_c_0'
await op.add_remove_with_wedge(test_config)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, range(bft_network.config.n))
await self.verify_last_executed_seq_num(bft_network, checkpoint_before)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
await self.verify_add_remove_status(bft_network, test_config, quorum_all=False)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=6,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
initial_prim = 0
new_replicas = {4, 5}
on_time_replicas = bft_network.all_replicas(without=new_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(151):
await skvbc.write_known_kv()
bft_network.start_replicas(new_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in new_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
for i in range(200):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_7_f_2_c_0'
await op.add_remove_with_wedge(test_config)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, range(bft_network.config.n))
await self.verify_last_executed_seq_num(bft_network, checkpoint_before)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
await self.verify_add_remove_status(bft_network, test_config, quorum_all=False)
bft_network.stop_all_replicas()
conf = TestConfig(n=7,
f=2,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
initial_prim = 0
new_replicas = {6}
on_time_replicas = bft_network.all_replicas(without=new_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(151):
await skvbc.write_known_kv()
bft_network.start_replicas(new_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in new_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
for i in range(300):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
@with_trio
@with_bft_network(start_replica_cmd, bft_configs=[{'n': 4, 'f': 1, 'c': 0, 'num_clients': 10}])
async def test_add_nodes_with_failures(self, bft_network):
"""
Sends an addRemove command and checks that the new configuration is written to the blockchain.
We add nodes to a 4-node cluster in phases to make it a 7-node cluster even when f nodes are not responding
The test does the following:
1. Stop one node and send an add node command which will also wedge the system at the next-next checkpoint
2. Verify that all live nodes have stopped
3. Load a new configuration to the bft network
4. Add node is done in phases, (n=4,f=1,c=0)->(n=6,f=1,c=0)->(n=7,f=2,c=0)
Note: For new replicas to catch up with existing replicas through ST, existing replicas must
move the checkpoint window, that means for n=7 configuration, there must be 5 non-faulty
replicas to move the checkpoint window, hence new replicas are added in two phases
5. Rerun the cluster with the new configuration and make sure the replicas succeed in performing transactions on the fast path
"""
initial_prim = 0
crashed_replica = bft_network.random_set_of_replicas(1, {initial_prim})
live_replicas = bft_network.all_replicas(without=crashed_replica)
bft_network.start_replicas(live_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_6_f_1_c_0'
await op.add_remove_with_wedge(test_config)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, live_replicas)
expectedSeqNum = (checkpoint_before + 2) * 150
for r in live_replicas:
lastExecSn = await bft_network.get_metric(r, bft_network, "Gauges", "lastExecutedSeqNum")
self.assertEqual(expectedSeqNum, lastExecSn)
# Verify that all live replicas have reached the wedge point
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=False)
# Start crashed replica and wait for state transfer to finish
bft_network.start_replicas(crashed_replica)
await bft_network.wait_for_state_transfer_to_start()
for r in crashed_replica:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
bft_network.stop_all_replicas()
# We now expect the replicas to start with a fresh new configuration
# Metadata is erased on replicas startup
conf = TestConfig(n=6,
f=1,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
initial_prim = 0
new_replicas = {4, 5}
on_time_replicas = bft_network.all_replicas(without=new_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(151):
await skvbc.write_known_kv()
bft_network.start_replicas(new_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in new_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
for i in range(200):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
test_config = 'new_configuration_n_7_f_2_c_0'
await op.add_remove_with_wedge(test_config)
await self.verify_replicas_are_in_wedged_checkpoint(bft_network, checkpoint_before, range(bft_network.config.n))
await self.verify_last_executed_seq_num(bft_network, checkpoint_before)
await self.validate_stop_on_wedge_point(bft_network, skvbc, fullWedge=True)
await self.verify_add_remove_status(bft_network, test_config, quorum_all=False)
bft_network.stop_all_replicas()
conf = TestConfig(n=7,
f=2,
c=0,
num_clients=10,
key_file_prefix=KEY_FILE_PREFIX,
start_replica_cmd=start_replica_cmd,
stop_replica_cmd=None,
num_ro_replicas=0)
await bft_network.change_configuration(conf)
initial_prim = 0
new_replica = 6
late_replicas = bft_network.random_set_of_replicas(1, without={initial_prim, new_replica})
late_replicas.add(new_replica)
on_time_replicas = bft_network.all_replicas(without=late_replicas)
bft_network.start_replicas(on_time_replicas)
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(151):
await skvbc.write_known_kv()
bft_network.start_replicas(late_replicas)
await bft_network.wait_for_state_transfer_to_start()
for r in late_replicas:
await bft_network.wait_for_state_transfer_to_stop(initial_prim,
r,
stop_on_stable_seq_num=False)
for i in range(300):
await skvbc.write_known_kv()
for r in bft_network.all_replicas():
nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
self.assertGreater(nb_fast_path, 0)
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_addRemoveStatusError(self, bft_network):
"""
Sends an addRemoveStatus command without adding a new configuration
and checks that replicas respond with valid error message.
Note that in this test we assume no failures and synchronized network.
The test does the following:
1. A client sends a addRemoveStatus command
2. The client verifies that the status response contains a valid error message
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
for i in range(100):
await skvbc.write_known_kv()
client = bft_network.random_client()
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.add_remove_status()
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.error_msg == 'key_not_found'
assert status.success is False
async def validate_stop_on_wedge_point(self, bft_network, skvbc, fullWedge=False):
with log.start_action(action_type="validate_stop_on_stable_checkpoint") as action:
with trio.fail_after(seconds=90):
client = bft_network.random_client()
client.config = client.config._replace(req_timeout_milli=10000)
op = operator.Operator(bft_network.config, client, bft_network.builddir)
done = False
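# With a full wedge every replica must report itself stopped; otherwise a linearizable (n - f) quorum of responses is enough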
quorum = None if fullWedge is True else bft_client.MofNQuorum.LinearizableQuorum(bft_network.config, [r.id for r in bft_network.replicas])
while done is False:
stopped_replicas = 0
await op.wedge_status(quorum=quorum, fullWedge=fullWedge)
rsi_rep = client.get_rsi_replies()
done = True
for r in rsi_rep.values():
res = cmf_msgs.ReconfigurationResponse.deserialize(r)
status = res[0].response.stopped
if status:
stopped_replicas += 1
stop_condition = bft_network.config.n if fullWedge is True else (bft_network.config.n - bft_network.config.f)
if stopped_replicas < stop_condition:
done = False
with log.start_action(action_type='expect_kv_failure_due_to_wedge'):
with self.assertRaises(trio.TooSlowError):
await skvbc.write_known_kv()
async def validate_stop_on_super_stable_checkpoint(self, bft_network, skvbc):
with log.start_action(action_type="validate_stop_on_super_stable_checkpoint") as action:
with trio.fail_after(seconds=120):
for replica_id in range(bft_network.config.n):
while True:
with trio.move_on_after(seconds=1):
try:
key = ['replica', 'Gauges', 'OnCallBackOfSuperStableCP']
value = await bft_network.metrics.get(replica_id, *key)
if value == 0:
action.log(message_type=f"Replica {replica_id} has not reached super stable checkpoint yet")
await trio.sleep(0.5)
continue
except trio.TooSlowError:
action.log(message_type=
f"Replica {replica_id} was not able to get super stable checkpoint metric within the timeout")
raise
else:
self.assertEqual(value, 1)
action.log(message_type=f"Replica {replica_id} has reached super stable checkpoint")
break
with log.start_action(action_type='expect_kv_failure_due_to_wedge'):
with self.assertRaises(trio.TooSlowError):
await skvbc.write_known_kv()
async def verify_replicas_are_in_wedged_checkpoint(self, bft_network, previous_checkpoint, replicas):
with log.start_action(action_type="verify_replicas_are_in_wedged_checkpoint", previous_checkpoint=previous_checkpoint):
for replica_id in replicas:
with log.start_action(action_type="verify_replica", replica=replica_id):
with trio.fail_after(seconds=60):
while True:
with trio.move_on_after(seconds=1):
checkpoint_after = await bft_network.wait_for_checkpoint(replica_id=replica_id)
if checkpoint_after == previous_checkpoint + 2:
break
else:
await trio.sleep(1)
async def verify_last_executed_seq_num(self, bft_network, previous_checkpoint):
expectedSeqNum = (previous_checkpoint + 2) * 150
for r in bft_network.all_replicas():
lastExecSn = await bft_network.get_metric(r, bft_network, "Gauges", "lastExecutedSeqNum")
self.assertEqual(expectedSeqNum, lastExecSn)
async def verify_add_remove_status(self, bft_network, config_descriptor, quorum_all=True):
quorum = bft_client.MofNQuorum.All(bft_network.config, [r for r in range(bft_network.config.n)])
if not quorum_all:
quorum = bft_client.MofNQuorum.LinearizableQuorum(bft_network.config, [r.id for r in bft_network.replicas])
client = bft_network.random_client()
op = operator.Operator(bft_network.config, client, bft_network.builddir)
await op.add_remove_with_wedge_status(quorum)
rsi_rep = client.get_rsi_replies()
for r in rsi_rep.values():
status = cmf_msgs.ReconfigurationResponse.deserialize(r)[0]
assert status.response.config_descriptor == config_descriptor
async def validate_state_consistency(self, skvbc, key, val):
return await skvbc.assert_kv_write_executed(key, val)
async def _wait_for_st(self, bft_network, ro_replica_id, seqnum_threshold=150):
# TODO replace the below function with the library function:
# await tracker.skvbc.tracked_fill_and_wait_for_checkpoint(
# initial_nodes=bft_network.all_replicas(),
# num_of_checkpoints_to_add=1)
with trio.fail_after(seconds=70):
# the ro replica should be able to survive these failures
while True:
with trio.move_on_after(seconds=.5):
try:
key = ['replica', 'Gauges', 'lastExecutedSeqNum']
lastExecutedSeqNum = await bft_network.metrics.get(ro_replica_id, *key)
except KeyError:
continue
else:
# success!
if lastExecutedSeqNum >= seqnum_threshold:
log.log_message(message_type=f"Replica {ro_replica_id} : lastExecutedSeqNum: {lastExecutedSeqNum}")
break
if __name__ == '__main__':
unittest.main()
| 50.472839
| 157
| 0.644373
|
4a17f3e57c1eea69c09dc738223f0a201d7a2cd4
| 1,797
|
gyp
|
Python
|
third_party/webrtc/src/chromium/src/base/android/jni_generator/jni_generator.gyp
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 20
|
2015-08-26T06:46:00.000Z
|
2019-02-27T09:05:58.000Z
|
third_party/webrtc/src/chromium/src/base/android/jni_generator/jni_generator.gyp
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 1
|
2016-01-29T00:54:49.000Z
|
2016-01-29T00:54:49.000Z
|
third_party/webrtc/src/chromium/src/base/android/jni_generator/jni_generator.gyp
|
bopopescu/webrtc-streaming-node
|
727a441204344ff596401b0253caac372b714d91
|
[
"MIT"
] | 7
|
2016-02-09T09:28:14.000Z
|
2020-07-25T19:03:36.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'jni_generator_py_tests',
'type': 'none',
'variables': {
'stamp': '<(INTERMEDIATE_DIR)/jni_generator_py_tests.stamp',
},
'actions': [
{
'action_name': 'run_jni_generator_py_tests',
'inputs': [
'jni_generator.py',
'jni_generator_tests.py',
'java/src/org/chromium/example/jni_generator/SampleForTests.java',
'golden_sample_for_tests_jni.h',
],
'outputs': [
'<(stamp)',
],
'action': [
'python', 'jni_generator_tests.py',
'--stamp=<(stamp)',
],
},
],
},
{
'target_name': 'jni_sample_header',
'type': 'none',
'sources': [
'java/src/org/chromium/example/jni_generator/SampleForTests.java',
],
'variables': {
'jni_gen_package': 'example',
},
'includes': [ '../../../build/jni_generator.gypi' ],
},
{
'target_name': 'jni_sample_java',
'type': 'none',
'variables': {
'java_in_dir': '../../../base/android/jni_generator/java',
},
'dependencies': [
'<(DEPTH)/base/base.gyp:base_java',
],
'includes': [ '../../../build/java.gypi' ],
},
{
'target_name': 'jni_generator_tests',
'type': 'executable',
'dependencies': [
'../../base.gyp:test_support_base',
'jni_generator_py_tests',
'jni_sample_header',
'jni_sample_java',
],
'sources': [
'sample_for_tests.cc',
],
},
],
}
| 26.043478
| 78
| 0.509182
|
4a17f4037adc1da536f554fe7fb27460b7eeaf47
| 6,973
|
py
|
Python
|
cartridge/shop/checkout.py
|
readevalprint/cartridge
|
757b051774817eefd8f459eabf10e307bdd13381
|
[
"BSD-2-Clause"
] | 1
|
2015-08-15T09:12:25.000Z
|
2015-08-15T09:12:25.000Z
|
cartridge/shop/checkout.py
|
readevalprint/cartridge
|
757b051774817eefd8f459eabf10e307bdd13381
|
[
"BSD-2-Clause"
] | null | null | null |
cartridge/shop/checkout.py
|
readevalprint/cartridge
|
757b051774817eefd8f459eabf10e307bdd13381
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Checkout process utilities.
"""
from django.contrib.auth.models import SiteProfileNotAvailable
from django.utils.translation import ugettext as _
from django.template.loader import get_template, TemplateDoesNotExist
from mezzanine.conf import settings
from mezzanine.utils.email import send_mail_template
from cartridge.shop.models import Order
from cartridge.shop.utils import set_shipping, sign
class CheckoutError(Exception):
"""
Should be raised in billing/shipping and payment handlers for
cases such as an invalid shipping address or an unsuccessful
payment.
"""
pass
def default_billship_handler(request, order_form):
"""
Default billing/shipping handler - called when the first step in
the checkout process with billing/shipping address fields is
submitted. Implement your own and specify the path to import it
from via the setting ``SHOP_HANDLER_BILLING_SHIPPING``.
This function will typically contain any shipping calculation
where the shipping amount can then be set using the function
``cartridge.shop.utils.set_shipping``. The Cart object is also
accessible via ``request.cart``
"""
if not request.session.get('free_shipping'):
settings.use_editable()
set_shipping(request, _("Flat rate shipping"),
settings.SHOP_DEFAULT_SHIPPING_VALUE)
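# Illustrative sketch (not part of the original module): a project can point the
# SHOP_HANDLER_BILLING_SHIPPING setting at its own function with the same signature, e.g.
#     def my_billship_handler(request, order_form):
#         set_shipping(request, _("Courier"), 15)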
def default_payment_handler(request, order_form, order):
"""
Default payment handler - called when the final step of the
checkout process with payment information is submitted. Implement
your own and specify the path to import it from via the setting
``SHOP_HANDLER_PAYMENT``. This function will typically contain
integration with a payment gateway. Raise
cartridge.shop.checkout.CheckoutError("error message") if payment
is unsuccessful.
"""
pass
def default_order_handler(request, order_form, order):
"""
Default order handler - called when the order is complete and
contains its final data. Implement your own and specify the path
to import it from via the setting ``SHOP_HANDLER_ORDER``.
"""
pass
def initial_order_data(request):
"""
Return the initial data for the order form, trying the following in
order:
- request.POST which is available when moving backward through the
checkout steps
- current order details in the session which are populated via each
checkout step, to support user leaving the checkout entirely and
returning
- last order made by the user, via user ID or cookie
- matching fields on an authenticated user and profile object
"""
from cartridge.shop.forms import OrderForm
if request.method == "POST":
return dict(request.POST.items())
if "order" in request.session:
return request.session["order"]
previous_lookup = {}
if request.user.is_authenticated():
previous_lookup["user_id"] = request.user.id
remembered = request.COOKIES.get("remember", "").split(":")
if len(remembered) == 2 and remembered[0] == sign(remembered[1]):
previous_lookup["key"] = remembered[1]
initial = {}
if previous_lookup:
previous_orders = Order.objects.filter(**previous_lookup).values()[:1]
if len(previous_orders) > 0:
initial.update(previous_orders[0])
if not initial and request.user.is_authenticated():
# No previous order data - try and get field values from the
# logged in user. Check the profile model before the user model
# if it's configured. If the order field name uses one of the
# billing/shipping prefixes, also check for it without the
# prefix. Finally if a matching attribute is callable, call it
# for the field value, to support custom matches on the profile
# model.
user_models = [request.user]
try:
user_models.insert(0, request.user.get_profile())
except SiteProfileNotAvailable:
pass
for order_field in OrderForm._meta.fields:
check_fields = [order_field]
for prefix in ("billing_detail_", "shipping_detail_"):
if order_field.startswith(prefix):
check_fields.append(order_field.replace(prefix, "", 1))
for user_model in user_models:
for check_field in check_fields:
user_value = getattr(user_model, check_field, None)
if user_value:
if callable(user_value):
try:
user_value = user_value()
except TypeError:
continue
if not initial.get(order_field):
initial[order_field] = user_value
# Set initial value for "same billing/shipping" based on
# whether both sets of address fields are all equal.
shipping = lambda f: "shipping_%s" % f[len("billing_"):]
if any([f for f in OrderForm._meta.fields if f.startswith("billing_") and
shipping(f) in OrderForm._meta.fields and
initial.get(f, "") != initial.get(shipping(f), "")]):
initial["same_billing_shipping"] = False
return initial
def send_order_email(request, order):
"""
Send order receipt email on successful order.
"""
settings.use_editable()
order_context = {"order": order, "request": request,
"order_items": order.items.all()}
order_context.update(order.details_as_dict())
try:
get_template("shop/email/order_receipt.html")
except TemplateDoesNotExist:
receipt_template = "email/order_receipt"
else:
receipt_template = "shop/email/order_receipt"
from warnings import warn
warn("Shop email receipt templates have moved from "
"templates/shop/email/ to templates/email/")
send_mail_template(settings.SHOP_ORDER_EMAIL_SUBJECT,
receipt_template, settings.SHOP_ORDER_FROM_EMAIL,
order.billing_detail_email, context=order_context,
fail_silently=settings.DEBUG)
# Set up some constants for identifying each checkout step.
CHECKOUT_STEPS = [{"template": "billing_shipping", "url": "details",
"title": _("Details")}]
CHECKOUT_STEP_FIRST = CHECKOUT_STEP_PAYMENT = CHECKOUT_STEP_LAST = 1
if settings.SHOP_CHECKOUT_STEPS_SPLIT:
CHECKOUT_STEPS[0].update({"url": "billing-shipping",
"title": _("Address")})
if settings.SHOP_PAYMENT_STEP_ENABLED:
CHECKOUT_STEPS.append({"template": "payment", "url": "payment",
"title": _("Payment")})
CHECKOUT_STEP_PAYMENT = CHECKOUT_STEP_LAST = 2
if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION:
CHECKOUT_STEPS.append({"template": "confirmation", "url": "confirmation",
"title": _("Confirmation")})
CHECKOUT_STEP_LAST += 1
| 41.260355
| 78
| 0.666858
|
4a17f66c8e3ada291f8aff91aed12406517e45ac
| 608
|
py
|
Python
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/04 - Iteration Tools/08_ziplongest.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25
|
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/04 - Iteration Tools/08_ziplongest.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1
|
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/04 - Iteration Tools/08_ziplongest.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15
|
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
from itertools import zip_longest
l1 = [1, 2, 3, 4, 5]
l2 = [1, 2, 3, 4]
l3 = [1, 2, 3]
print(list(zip_longest(l1, l2, l3, fillvalue='N/A')))
# [(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 'N/A'), (5, 'N/A', 'N/A')]
def squares():
i = 0
while True:
yield i ** 2
i += 1
def cubes():
i = 0
while True:
yield i ** 3
i += 1
iter1 = squares()
iter2 = cubes()
print(list(zip(range(10), iter1, iter2)))
# [(0, 0, 0),
# (1, 1, 1),
# (2, 4, 8),
# (3, 9, 27),
# (4, 16, 64),
# (5, 25, 125),
# (6, 36, 216),
# (7, 49, 343),
# (8, 64, 512),
# (9, 81, 729)]
| 15.2
| 69
| 0.427632
|
4a17f67116d0bb5d5a1edbb4fdd5f7b2dc0d0bcc
| 1,207
|
py
|
Python
|
src/oneNeuron/perceptron.py
|
gaurav98094/Perceptron_pypi
|
ff033b6e34c47decef9e3d6d95f00240debd4024
|
[
"MIT"
] | 1
|
2021-11-03T06:27:47.000Z
|
2021-11-03T06:27:47.000Z
|
src/oneNeuron/perceptron.py
|
gaurav98094/Perceptron_pypi
|
ff033b6e34c47decef9e3d6d95f00240debd4024
|
[
"MIT"
] | null | null | null |
src/oneNeuron/perceptron.py
|
gaurav98094/Perceptron_pypi
|
ff033b6e34c47decef9e3d6d95f00240debd4024
|
[
"MIT"
] | null | null | null |
"""Perceptron Class
Returns:
[python Object]: returns model object
"""
import numpy as np
import pandas as pd
import logging
# logging_str = " [ %(asctime)s:%(levelname)s:%(module)s ] : %(message)s"
# logging.basicConfig(level=logging.INFO,format=logging_str)
from tqdm import tqdm
class Perceptron:
def __init__(self):
self.weights = None
self.eta = 0.01
self.epochs = 1
self.error=0
def activationFunction(self,input):
z = np.dot(input,self.weights)
return np.where(z>0,1,0)
def fit(self,X,y,eta=0.01,epochs=1):
self.eta=eta
self.epochs=epochs
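# Append a constant -1 column so the bias term is learned as just another weight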
X_with_bias = np.c_[X,-np.ones((len(X),1))]
self.weights = np.random.randn(X_with_bias.shape[1])* 1e-4
for i in tqdm(range(0,self.epochs),total=self.epochs,desc="training model"):
y_hat = self.activationFunction(X_with_bias)
self.error = y-y_hat
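# Perceptron learning rule: w <- w + eta * X^T (y - y_hat)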
self.weights = self.weights + self.eta * np.dot(X_with_bias.T, self.error)
logging.info(f'At Epochs {i+1} Weights :{self.weights} ; Error : {sum(self.error*self.error)}')
logging.info("--"*20)
def predict(self, X):
X_with_bias = np.c_[X, -np.ones((len(X), 1))]
return self.activationFunction(X_with_bias)
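# Illustrative usage sketch (not part of the original module; assumes the package is importable
# as oneNeuron.perceptron and uses a tiny AND-gate dataset):
#     import numpy as np
#     from oneNeuron.perceptron import Perceptron
#     X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
#     y = np.array([0, 0, 0, 1])
#     model = Perceptron()
#     model.fit(X, y, eta=0.1, epochs=10)
#     print(model.predict(X))  # expected to approach [0 0 0 1]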
| 26.822222
| 101
| 0.664457
|
4a17f7fac69156f2e07fe309c504cf9d01fa308f
| 662
|
py
|
Python
|
src/braket/_sdk/_version.py
|
rhennig22/amazon-braket-sdk-python
|
b6642f859f9556f4862a1006e7abcc17712b0e58
|
[
"Apache-2.0"
] | 151
|
2020-08-13T21:26:05.000Z
|
2022-03-08T17:07:18.000Z
|
src/braket/_sdk/_version.py
|
rhennig22/amazon-braket-sdk-python
|
b6642f859f9556f4862a1006e7abcc17712b0e58
|
[
"Apache-2.0"
] | 169
|
2020-08-13T19:25:52.000Z
|
2022-03-29T03:12:15.000Z
|
src/braket/_sdk/_version.py
|
rhennig22/amazon-braket-sdk-python
|
b6642f859f9556f4862a1006e7abcc17712b0e58
|
[
"Apache-2.0"
] | 64
|
2020-08-13T21:25:54.000Z
|
2022-02-25T23:52:55.000Z
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Version information.
Version number (major.minor.patch[-label])
"""
__version__ = "1.9.6.dev0"
| 34.842105
| 72
| 0.740181
|
4a17f87e5bd4247a54680086d006772c3b38775e
| 120
|
py
|
Python
|
py_tdlib/constructors/passport_element_error_source_translation_file.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/passport_element_error_source_translation_file.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/passport_element_error_source_translation_file.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class passportElementErrorSourceTranslationFile(Type):
file_index = None # type: "int32"
| 20
| 54
| 0.783333
|
4a17f8b7e977b19c264a4213ccd6d16a172b7c7e
| 1,687
|
py
|
Python
|
cvxpy/reductions/dcp2cone/dcp2cone.py
|
NunoEdgarGFlowHub/cvxpy
|
43270fcc8af8fc4742f1b3519800b0074f2e6693
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/reductions/dcp2cone/dcp2cone.py
|
NunoEdgarGFlowHub/cvxpy
|
43270fcc8af8fc4742f1b3519800b0074f2e6693
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/reductions/dcp2cone/dcp2cone.py
|
NunoEdgarGFlowHub/cvxpy
|
43270fcc8af8fc4742f1b3519800b0074f2e6693
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Copyright 2013 Steven Diamond, 2017 Akshay Agrawal, 2017 Robin Verschueren
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.problems.objective import Minimize
from cvxpy.reductions.canonicalization import Canonicalization
from cvxpy.reductions.dcp2cone.atom_canonicalizers import (CANON_METHODS as
cone_canon_methods)
class Dcp2Cone(Canonicalization):
"""Reduce DCP problems to a conic form.
This reduction takes as input (minimization) DCP problems and converts
them into problems with affine objectives and conic constraints whose
arguments are affine.
"""
def accepts(self, problem):
"""A problem is accepted if it is a minimization and is DCP.
"""
return type(problem.objective) == Minimize and problem.is_dcp()
def apply(self, problem):
"""Converts a DCP problem to a conic form.
"""
if not self.accepts(problem):
raise ValueError("Cannot reduce problem to cone program")
return Canonicalization(cone_canon_methods).apply(problem)
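# Illustrative usage sketch (not part of the original module; assumes cvxpy is imported as cp):
#     x = cp.Variable(3)
#     problem = cp.Problem(cp.Minimize(cp.norm(x, 1)), [x >= 0])
#     reduction = Dcp2Cone()
#     if reduction.accepts(problem):
#         cone_problem, inverse_data = reduction.apply(problem)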
| 38.340909
| 78
| 0.721399
|
4a17f9fd14219ae7c60320b452f2c0be121f0f8e
| 5,754
|
py
|
Python
|
dfirtrack_config/migrations/0016_workflows.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 273
|
2018-04-18T22:09:15.000Z
|
2021-06-04T09:15:48.000Z
|
dfirtrack_config/migrations/0016_workflows.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 75
|
2018-08-31T11:05:37.000Z
|
2021-06-08T14:15:07.000Z
|
dfirtrack_config/migrations/0016_workflows.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 61
|
2018-11-12T22:55:48.000Z
|
2021-06-06T15:16:16.000Z
|
# Generated by Django 3.2 on 2021-04-30 13:47
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dfirtrack_main', '0015_added_verbose_name_plural'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dfirtrack_artifacts', '0006_added_verbose_name_plural'),
('dfirtrack_config', '0015_mainconfigmodel_casestatus'),
]
operations = [
migrations.CreateModel(
name='Workflow',
fields=[
('workflow_id', models.AutoField(primary_key=True, serialize=False)),
('workflow_name', models.CharField(max_length=50, unique=True)),
('workflow_create_time', models.DateTimeField(auto_now_add=True)),
('workflow_modify_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='WorkflowDefaultTasknameAttributes',
fields=[
(
'workflow_default_taskname_id',
models.AutoField(primary_key=True, serialize=False),
),
(
'task_default_priority',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='workflow_default_task_priority',
to='dfirtrack_main.taskpriority',
),
),
(
'task_default_status',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='workflow_default_task_status',
to='dfirtrack_main.taskstatus',
),
),
(
'taskname',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='workflow_taskname_mapping',
to='dfirtrack_main.taskname',
),
),
(
'workflow',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='workflow_taskattribute_mapping',
to='dfirtrack_config.workflow',
),
),
],
),
migrations.CreateModel(
name='WorkflowDefaultArtifactAttributes',
fields=[
(
'workflow_default_artifactname_id',
models.AutoField(primary_key=True, serialize=False),
),
('artifact_default_name', models.CharField(max_length=50)),
(
'artifact_default_priority',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='workflow_default_artifact_priority',
to='dfirtrack_artifacts.artifactpriority',
),
),
(
'artifact_default_status',
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='workflow_default_artifact_status',
to='dfirtrack_artifacts.artifactstatus',
),
),
(
'artifacttype',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='workflow_artifacttype_mapping',
to='dfirtrack_artifacts.artifacttype',
),
),
(
'workflow',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='workflow_artifactname_mapping',
to='dfirtrack_config.workflow',
),
),
],
),
migrations.AddField(
model_name='workflow',
name='artifacttypes',
field=models.ManyToManyField(
blank=True,
related_name='main_config_workflow_artifacttype',
through='dfirtrack_config.WorkflowDefaultArtifactAttributes',
to='dfirtrack_artifacts.Artifacttype',
),
),
migrations.AddField(
model_name='workflow',
name='tasknames',
field=models.ManyToManyField(
blank=True,
related_name='main_config_workflow_taskname',
through='dfirtrack_config.WorkflowDefaultTasknameAttributes',
to='dfirtrack_main.Taskname',
),
),
migrations.AddField(
model_name='workflow',
name='workflow_created_by_user_id',
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='workflow_created_by',
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name='workflow',
name='workflow_modified_by_user_id',
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name='worklfow_modified_by',
to=settings.AUTH_USER_MODEL,
),
),
]
| 38.61745
| 85
| 0.497393
|
4a17faace5ebb3e3d9fdeae143215a27df66c56f
| 744
|
py
|
Python
|
eventex/urls.py
|
ederchristian/wttd
|
0fb68b1c47c473051042e15f83e2e8d2f3b1d8c9
|
[
"MIT"
] | null | null | null |
eventex/urls.py
|
ederchristian/wttd
|
0fb68b1c47c473051042e15f83e2e8d2f3b1d8c9
|
[
"MIT"
] | null | null | null |
eventex/urls.py
|
ederchristian/wttd
|
0fb68b1c47c473051042e15f83e2e8d2f3b1d8c9
|
[
"MIT"
] | null | null | null |
"""eventex URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^$', 'eventex.core.views.home'),
url(r'^admin/', include(admin.site.urls)),
]
| 33.818182
| 77
| 0.693548
|
4a17fabf22c917d19a3c9a7b9dfa2336b2480074
| 3,940
|
py
|
Python
|
tests/helpers.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers.py
|
njimenezd/demcompare
|
d0ad8a63b912555a1ee67fcb21f30e3b9036d0c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf8
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of demcompare
# (see https://github.com/CNES/demcompare).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Helpers shared testing generic module:
contains global shared generic functions for tests/*.py
"""
# Standard imports
import os
from typing import List
# Third party imports
import numpy as np
import rasterio as rio
# Define tests tolerance
TEST_TOL = 1e-03
def demcompare_test_data_path(test_name: str) -> str:
"""
Return full absolute path to demcompare's tests data
:param test_name: name of test directory
:returns: full absolute path to demcompare test data.
"""
# TODO: find why the path is unset from the second test
# Verify that the current path is well set
os.chdir(os.path.dirname(__file__))
# Get absolute path from this file in root_src_demcompare/tests/ + data
test_data_folder = os.path.join(os.path.dirname(__file__), "data")
return os.path.join(test_data_folder, test_name)
def read_csv_file(csv_file: str) -> List[float]:
"""
Read a csv file and save its number values to float
:param csv_file: path to a csv file
:type csv_file: string
:returns: List of floats of input csv file
"""
output_file = []
with open(csv_file, "r", encoding="utf-8") as file_handle:
lines = file_handle.readlines()
for idx, line in enumerate(lines):
# Obtain columns
cols = line.split(",")
# Last column ends with \n
cols[-1] = cols[-1].split("\n")[0]
# First line are titles
if idx == 0:
continue
# If it is the stats csv, do not convert to float first col
if len(cols) > 2:
output_file.append(np.array(cols[1:], dtype=float))
continue
# Convert to float
output_file.append(np.array(cols, dtype=float))
return output_file
def assert_same_images(
actual: str, expected: str, rtol: float = 0, atol: float = 0
):
"""
Compare two image files with assertion:
* same height, width, transform, crs
* assert_allclose() on numpy buffers
:param actual: image to compare
:param expected: reference image to compare
:param rtol: relative tolerance
:param atol: absolute tolerance
"""
with rio.open(actual) as rio_actual:
with rio.open(expected) as rio_expected:
np.testing.assert_equal(rio_actual.width, rio_expected.width)
np.testing.assert_equal(rio_actual.height, rio_expected.height)
np.testing.assert_allclose(
np.array(rio_actual.transform),
np.array(rio_expected.transform),
atol=atol,
)
assert rio_actual.crs == rio_expected.crs
assert rio_actual.nodata == rio_expected.nodata
np.testing.assert_allclose(
rio_actual.read(), rio_expected.read(), rtol=rtol, atol=atol
)
def temporary_dir() -> str:
"""
Returns path to temporary dir from DEMCOMPARE_TMP_DIR environment
variable. Defaults to /tmp
:returns: path to tmp dir
"""
if "DEMCOMPARE_TMP_DIR" not in os.environ:
# return default tmp dir
return "/tmp"
# return env defined tmp dir
return os.environ["DEMCOMPARE_TMP_DIR"]
| 31.774194
| 76
| 0.656091
|
4a17fb0d3f3343e9dbff25cd2c8e5adc99698477
| 3,058
|
py
|
Python
|
server/www/packages/packages-windows/x86/ldap3/extend/microsoft/modifyPassword.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 640
|
2018-09-12T03:14:13.000Z
|
2022-03-30T04:38:09.000Z
|
server/www/packages/packages-windows/x86/ldap3/extend/microsoft/modifyPassword.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 175
|
2018-09-10T19:52:20.000Z
|
2022-03-30T04:37:30.000Z
|
server/www/packages/packages-windows/x86/ldap3/extend/microsoft/modifyPassword.py
|
zhoulhb/teleport
|
54da194697898ef77537cfe7032d774555dc1335
|
[
"Apache-2.0"
] | 230
|
2018-09-13T02:40:49.000Z
|
2022-03-29T11:53:58.000Z
|
"""
"""
# Created on 2015.11.27
#
# Author: Giovanni Cannata
#
# Copyright 2015 - 2018 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from ... import MODIFY_REPLACE, MODIFY_DELETE, MODIFY_ADD
from ...utils.log import log, log_enabled, PROTOCOL
from ...core.results import RESULT_SUCCESS
from ...utils.dn import safe_dn
from ...utils.conv import to_unicode
def ad_modify_password(connection, user_dn, new_password, old_password, controls=None):
# old password must be None to reset password with sufficient privileges
if connection.check_names:
user_dn = safe_dn(user_dn)
if str is bytes: # python2, converts to unicode
new_password = to_unicode(new_password)
if old_password:
old_password = to_unicode(old_password)
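# Active Directory expects the unicodePwd value to be the password wrapped in double quotes and encoded as UTF-16-LE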
encoded_new_password = ('"%s"' % new_password).encode('utf-16-le')
if old_password: # normal users must specify old and new password
encoded_old_password = ('"%s"' % old_password).encode('utf-16-le')
result = connection.modify(user_dn,
{'unicodePwd': [(MODIFY_DELETE, [encoded_old_password]),
(MODIFY_ADD, [encoded_new_password])]},
controls)
else: # admin users can reset password without sending the old one
result = connection.modify(user_dn,
{'unicodePwd': [(MODIFY_REPLACE, [encoded_new_password])]},
controls)
if not connection.strategy.sync:
_, result = connection.get_response(result)
else:
result = connection.result
# change successful, returns True
if result['result'] == RESULT_SUCCESS:
return True
# change was not successful, raises exception if raise_exception = True in connection or returns the operation result, error code is in result['result']
if connection.raise_exceptions:
from ...core.exceptions import LDAPOperationResult
if log_enabled(PROTOCOL):
log(PROTOCOL, 'operation result <%s> for <%s>', result, connection)
raise LDAPOperationResult(result=result['result'], description=result['description'], dn=result['dn'], message=result['message'], response_type=result['type'])
return False
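# Illustrative usage sketch (not part of the original module); server, credentials and DN below are placeholders:
#     from ldap3 import Server, Connection
#     conn = Connection(Server('ldaps://dc.example.com'), 'EXAMPLE\\admin', 'secret', auto_bind=True)
#     ad_modify_password(conn, 'CN=Some User,OU=People,DC=example,DC=com', 'N3w-P@ssw0rd', None)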
| 41.890411
| 168
| 0.659908
|
4a17fb0fa49bb0247fbb5f5921d8ba05d4873105
| 12,182
|
py
|
Python
|
sdk/core/azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/core/azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/core/azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .constants import (
DEFAULT_HTTP_TIMEOUT,
MANAGEMENT_HOST,
)
from .models import (
MetricDefinitions,
MetricResponses,
PublishData,
Site,
Sites,
WebSpace,
WebSpaces,
)
from .servicemanagementclient import (
_ServiceManagementClient,
)
from ._common_conversion import (
_str,
)
from ._serialization import (
_XmlSerializer,
)
class WebsiteManagementService(_ServiceManagementClient):
    ''' Note that this class is preliminary work on WebSite
    management. Since it lacks a lot of features, the final
    version may differ slightly from the current one.
'''
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST, request_session=None,
timeout=DEFAULT_HTTP_TIMEOUT):
'''
Initializes the website management service.
subscription_id:
Subscription to manage.
cert_file:
Path to .pem certificate file (httplib), or location of the
certificate in your Personal certificate store (winhttp) in the
CURRENT_USER\my\CertificateName format.
If a request_session is specified, then this is unused.
host:
Live ServiceClient URL. Defaults to Azure public cloud.
request_session:
Session object to use for http requests. If this is specified, it
replaces the default use of httplib or winhttp. Also, the cert_file
parameter is unused when a session is passed in.
The session object handles authentication, and as such can support
multiple types of authentication: .pem certificate, oauth.
For example, you can pass in a Session instance from the requests
library. To use .pem certificate authentication with requests
library, set the path to the .pem file on the session.cert
attribute.
timeout:
Optional. Timeout for the http request, in seconds.
'''
super(WebsiteManagementService, self).__init__(
subscription_id, cert_file, host, request_session, timeout)
#--Operations for web sites ----------------------------------------
def list_webspaces(self):
'''
List the webspaces defined on the account.
'''
return self._perform_get(self._get_list_webspaces_path(),
WebSpaces)
def get_webspace(self, webspace_name):
'''
Get details of a specific webspace.
webspace_name:
The name of the webspace.
'''
return self._perform_get(self._get_webspace_details_path(webspace_name),
WebSpace)
def list_sites(self, webspace_name):
'''
List the web sites defined on this webspace.
webspace_name:
The name of the webspace.
'''
return self._perform_get(self._get_sites_path(webspace_name),
Sites)
def get_site(self, webspace_name, website_name):
'''
        Get details of a specific web site on this webspace.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
'''
return self._perform_get(self._get_sites_details_path(webspace_name,
website_name),
Site)
def create_site(self, webspace_name, website_name, geo_region, host_names,
plan='VirtualDedicatedPlan', compute_mode='Shared',
server_farm=None, site_mode=None):
'''
Create a website.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
geo_region:
The geographical region of the webspace that will be created.
host_names:
An array of fully qualified domain names for website. Only one
hostname can be specified in the azurewebsites.net domain.
The hostname should match the name of the website. Custom domains
can only be specified for Shared or Standard websites.
plan:
This value must be 'VirtualDedicatedPlan'.
compute_mode:
This value should be 'Shared' for the Free or Paid Shared
offerings, or 'Dedicated' for the Standard offering. The default
value is 'Shared'. If you set it to 'Dedicated', you must specify
a value for the server_farm parameter.
server_farm:
The name of the Server Farm associated with this website. This is
a required value for Standard mode.
site_mode:
Can be None, 'Limited' or 'Basic'. This value is 'Limited' for the
Free offering, and 'Basic' for the Paid Shared offering. Standard
mode does not use the site_mode parameter; it uses the compute_mode
parameter.
'''
xml = _XmlSerializer.create_website_to_xml(webspace_name, website_name, geo_region, plan, host_names, compute_mode, server_farm, site_mode)
return self._perform_post(
self._get_sites_path(webspace_name),
xml,
Site)
def delete_site(self, webspace_name, website_name,
delete_empty_server_farm=False, delete_metrics=False):
'''
Delete a website.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
delete_empty_server_farm:
If the site being deleted is the last web site in a server farm,
you can delete the server farm by setting this to True.
delete_metrics:
To also delete the metrics for the site that you are deleting, you
can set this to True.
'''
path = self._get_sites_details_path(webspace_name, website_name)
query = ''
if delete_empty_server_farm:
query += '&deleteEmptyServerFarm=true'
if delete_metrics:
query += '&deleteMetrics=true'
if query:
path = path + '?' + query.lstrip('&')
return self._perform_delete(path)
def update_site(self, webspace_name, website_name, state=None):
'''
Update a web site.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
state:
The wanted state ('Running' or 'Stopped' accepted)
'''
xml = _XmlSerializer.update_website_to_xml(state)
return self._perform_put(
self._get_sites_details_path(webspace_name, website_name),
xml, as_async=True)
def restart_site(self, webspace_name, website_name):
'''
Restart a web site.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
'''
return self._perform_post(
self._get_restart_path(webspace_name, website_name),
None, as_async=True)
def get_historical_usage_metrics(self, webspace_name, website_name,
metrics = None, start_time=None, end_time=None, time_grain=None):
'''
Get historical usage metrics.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
metrics:
            Optional. List of metric names. Otherwise, all metrics are returned.
start_time:
Optional. An ISO8601 date. Otherwise, current hour is used.
end_time:
Optional. An ISO8601 date. Otherwise, current time is used.
time_grain:
            Optional. A rollup name, such as P1D. Otherwise, the default rollup for the metrics is used.
More information and metrics name at:
http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx
'''
metrics = ('names='+','.join(metrics)) if metrics else ''
start_time = ('StartTime='+start_time) if start_time else ''
end_time = ('EndTime='+end_time) if end_time else ''
time_grain = ('TimeGrain='+time_grain) if time_grain else ''
parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v))
parameters = '?'+parameters if parameters else ''
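        # For example (hypothetical values), metrics=['CpuTime', 'Requests'] and
        # start_time='2014-01-01T00:00:00Z' would yield a query string like:
        #   ?names=CpuTime,Requests&StartTime=2014-01-01T00:00:00Z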
return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters,
MetricResponses)
def get_metric_definitions(self, webspace_name, website_name):
'''
Get metric definitions of metrics available of this web site.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
'''
return self._perform_get(self._get_metric_definitions_path(webspace_name, website_name),
MetricDefinitions)
def get_publish_profile_xml(self, webspace_name, website_name):
'''
Get a site's publish profile as a string
webspace_name:
The name of the webspace.
website_name:
The name of the website.
'''
return self._perform_get(self._get_publishxml_path(webspace_name, website_name),
None).body.decode("utf-8")
def get_publish_profile(self, webspace_name, website_name):
'''
Get a site's publish profile as an object
webspace_name:
The name of the webspace.
website_name:
The name of the website.
'''
return self._perform_get(self._get_publishxml_path(webspace_name, website_name),
PublishData)
#--Helper functions --------------------------------------------------
def _get_list_webspaces_path(self):
return self._get_path('services/webspaces', None)
def _get_webspace_details_path(self, webspace_name):
return self._get_path('services/webspaces/', webspace_name)
def _get_sites_path(self, webspace_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites'
def _get_sites_details_path(self, webspace_name, website_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites/' + _str(website_name)
def _get_restart_path(self, webspace_name, website_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites/' + _str(website_name) + '/restart/'
def _get_historical_usage_metrics_path(self, webspace_name, website_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites/' + _str(website_name) + '/metrics/'
def _get_metric_definitions_path(self, webspace_name, website_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites/' + _str(website_name) + '/metricdefinitions/'
def _get_publishxml_path(self, webspace_name, website_name):
return self._get_path('services/webspaces/',
webspace_name) + '/sites/' + _str(website_name) + '/publishxml/'
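# A minimal usage sketch (hypothetical subscription id and certificate path;
# attribute names shown for illustration), not part of the original module:
#
# sms = WebsiteManagementService('<subscription-id>', '/path/to/cert.pem')
# for webspace in sms.list_webspaces():
#     for site in sms.list_sites(webspace.name):
#         print(site.name, site.state)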
| 39.940984
| 147
| 0.606222
|
4a17fb677f2235deaa8d6ec40a06287ae10851f2
| 1,648
|
py
|
Python
|
config/wsgi.py
|
VillageBookBuilders/vbb-portal-packend
|
9563b492aa93f12fdfed41a905ff185182e97dd8
|
[
"MIT"
] | 1
|
2022-03-30T18:12:49.000Z
|
2022-03-30T18:12:49.000Z
|
config/wsgi.py
|
VillageBookBuilders/vbb-portal-backend
|
decdec392f7bd585b73e5554b20c17baea5d133d
|
[
"MIT"
] | 22
|
2022-02-28T02:37:03.000Z
|
2022-03-28T02:32:35.000Z
|
config/wsgi.py
|
VillageBookBuilders/vbb-portal-packend
|
9563b492aa93f12fdfed41a905ff185182e97dd8
|
[
"MIT"
] | null | null | null |
"""
WSGI config for VBB project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# vbb directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "vbb"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
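# A minimal sketch of such a wrapper (hypothetical, not part of this project):
# it delegates to the Django application above while adding a response header.
#
# class HeaderMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(('X-Served-By', 'vbb'))
#             return start_response(status, headers, exc_info)
#         return self.app(environ, _start_response)
#
# application = HeaderMiddleware(application)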
| 42.25641
| 79
| 0.800971
|
4a17fde686b6f8ff958027e05e3040737183a702
| 2,019
|
py
|
Python
|
mindsdb/libs/helpers/file_helpers.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | 1
|
2022-03-14T00:32:53.000Z
|
2022-03-14T00:32:53.000Z
|
mindsdb/libs/helpers/file_helpers.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
mindsdb/libs/helpers/file_helpers.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
"""
*******************************************************
* Copyright (C) 2017 MindsDB Inc. <copyright@mindsdb.com>
*
* This file is part of MindsDB Server.
*
* MindsDB Server can not be copied and/or distributed without the express
* permission of MindsDB Inc
*******************************************************
"""
import csv
import sys
import traceback
def fixFileIfPossible(filepath):
"""
Tries to fix a file header if it finds header or encoding issues
:param filepath: the filepath to fix if possible
:return: fixed, error
"""
fixed = False
error = False
rows = []
try:
with open(filepath, newline='') as f:
reader = csv.reader(f)
header = None
max_len = 0
for row in reader:
if header is None:
header = row
for i, col in enumerate(row):
if col in [None, '']:
fixed = True
header[i] = 'col_{i}'.format(i=i+1)
rows += [row]
length = int(len(row))
if length > max_len:
max_len = length
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
error = traceback.format_exception(exc_type, exc_value,
exc_traceback)
return fixed, error
    if len(header) < max_len or fixed:
        # pad the header to the widest row, naming any missing columns col_N
        row = [header[i] if i < len(header) else 'col_{i}'.format(i=i + 1)
               for i in range(max_len)]
        rows[0] = row
with open(filepath, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerows(rows)
return fixed, error
def test():
print(fixFileIfPossible('/Users/jorge/Downloads/tweets (1).csv'))
# only run the test if this file is executed directly
if __name__ == "__main__":
test()
| 30.134328
| 82
| 0.512135
|
4a17fe16df5eb439df84df3fd2c3052ac242135d
| 2,002
|
py
|
Python
|
src/songdkl/argparser/epilogs.py
|
NickleDave/songdkl
|
3ddec26488c0524b0063e3b2510664022f0d097d
|
[
"BSD-3-Clause"
] | 2
|
2020-12-18T21:07:20.000Z
|
2021-08-10T17:21:48.000Z
|
src/songdkl/argparser/epilogs.py
|
NickleDave/songdkl
|
3ddec26488c0524b0063e3b2510664022f0d097d
|
[
"BSD-3-Clause"
] | 26
|
2018-12-17T20:21:01.000Z
|
2021-01-15T05:26:14.000Z
|
src/songdkl/argparser/epilogs.py
|
NickleDave/songdkl
|
3ddec26488c0524b0063e3b2510664022f0d097d
|
[
"BSD-3-Clause"
] | null | null | null |
PARSER_EPILOG = """call commands with --help option for further information, e.g. songdkl calculate --help"""
CALCULATE_EPILOG = """
Example
-------
$ songdkl calculate ~/data/bird_data/y25/ ~/data/bird_data/y34br6/ 9 10
Songs should be in mono wave format and have a .wav suffix.
The output is a tab delimited string formatted as follows:
directory1 directory2 n_syl1 n_syl2 n_basis_set SD_bd1_ref_bd2_song SD_bd2_ref_bd1_comp n_syls_bd1 n_syls_bd2
e.g.
y25 y32br6 9 10 50 0.039854682578 0.0340690226514 3000 3000
Notes
-----
Throughout the paper we calculated PSDs for the raw wave forms of syllables.
The default setting for this script.
If your song is contaminated with low
frequency noise, this noise may be incorporated into the model for song, potentially
causing overestimates of song D_KL if there is low frequency noise in the tutor song but
not the tutee song. This could occur if the birds were recorded under different
conditions or in different sound recording boxes. If you uncomment line 99 below,
the script will calculate the song D_KL using filtered syllable data. We only advise
this if you have low frequency noise that is differential between the tutor and tutee
song and you don't want that noise incorporated into the song D_KL calculations.
In Mets & Brainard 2018, the number of syllables in the tutor song is used for both
syllable # values. This is meant to be conservative, basically give the bird learning the
benefit of the doubt that it actually copied all of the syllables in the tutor song.
Empirically, changing these numbers doesn't have much impact on the divergence calculations
(see the paper).
"""
NUMSYLS_EPILOG = """
fits a series of Gaussian mixture models with an
increasing number of mixtures, and identifies the best number
of mixtures to describe the data by BIC.
Songs should be in mono wave format and have a .wav suffix.
The output is a tab delimited string as follows:
foldername_bird number_of_syllables
e.g. y34br6 9
"""
| 40.04
| 109
| 0.787213
|
4a17feeff05037b36f5e0b09a80ccc9334f09971
| 3,372
|
py
|
Python
|
tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | null | null | null |
tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | null | null | null |
tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
|
unseenme/mindspore
|
4ba052f0cd9146ac0ccc4880a778706f1b2d0af8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.squeeze = P.Squeeze()
def construct(self, tensor):
return self.squeeze(tensor)
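# Note (added for clarity): Squeeze without an axis argument removes every
# dimension of size 1, so each (1, 16, 1, 1) input below comes back with
# shape (16,), matching numpy's x.squeeze() in the assertions.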
def test_net_bool():
    x = np.random.randn(1, 16, 1, 1).astype(np.bool_)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_int8():
x = np.random.randn(1, 16, 1, 1).astype(np.int8)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_uint8():
x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_int16():
x = np.random.randn(1, 16, 1, 1).astype(np.int16)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_uint16():
x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_int32():
x = np.random.randn(1, 16, 1, 1).astype(np.int32)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_uint32():
x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_int64():
x = np.random.randn(1, 16, 1, 1).astype(np.int64)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_uint64():
x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_float16():
x = np.random.randn(1, 16, 1, 1).astype(np.float16)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_float32():
x = np.random.randn(1, 16, 1, 1).astype(np.float32)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
def test_net_float64():
x = np.random.randn(1, 16, 1, 1).astype(np.float64)
net = Net()
output = net(Tensor(x))
print(output.asnumpy())
assert(np.all(output.asnumpy() == x.squeeze()))
| 29.578947
| 78
| 0.658066
|
4a18003161801d9fdc267e507f6939895c2001a2
| 4,612
|
py
|
Python
|
quantmod/theming/themes.py
|
Row64/py-quantmod
|
f2aaa07dba0dfc9c4c425a92e4e9a5cb8fa553f3
|
[
"MIT"
] | null | null | null |
quantmod/theming/themes.py
|
Row64/py-quantmod
|
f2aaa07dba0dfc9c4c425a92e4e9a5cb8fa553f3
|
[
"MIT"
] | null | null | null |
quantmod/theming/themes.py
|
Row64/py-quantmod
|
f2aaa07dba0dfc9c4c425a92e4e9a5cb8fa553f3
|
[
"MIT"
] | null | null | null |
"""Quandmod themes module
Create your own modules by copying one of the themes and editing it after.
Make sure that colors, traces, additions and layout are all under one
main dict, and add that dict to '_VALID_THEMES' at the bottom of the file.
For readability, files under theming do not follow PEP8 guideline of
no space between assignment of named arguments.
"""
# flake8: noqa
from __future__ import absolute_import
from .palettes import LIGHT_PALETTE, DARK_PALETTE
# baseDash = "longdash"
# baseDash = "dash"
baseDash = "8px 5px"
# Light Quantmod theme
LIGHT_QM = dict(
colors = dict(
increasing = '#00CC00',
decreasing = '#FF7700',
border_increasing = LIGHT_PALETTE['grey25'],
border_decreasing = LIGHT_PALETTE['grey25'],
primary = '#252585',
secondary = '#0044FF',
tertiary = '#FF0000',
quaternary = '#00CC00',
grey = LIGHT_PALETTE['grey25'],
grey_light = LIGHT_PALETTE['grey15'],
grey_strong = LIGHT_PALETTE['grey40'],
fill = LIGHT_PALETTE['grey05'],
fill_light = LIGHT_PALETTE['grey02'],
fill_strong = LIGHT_PALETTE['grey10'],
),
traces = dict(
line_thin = dict(width = 1,),
line_thick = dict(width = 4,),
line_dashed = dict(dash = baseDash,),
line_dashed_thin = dict(dash = baseDash, width = 1,),
line_dashed_thick = dict(dash = baseDash, width = 4,),
area_dashed = dict(dash = baseDash,),
area_dashed_thin = dict(dash = baseDash, width = 1,),
area_dashed_thick = dict(dash = baseDash, width = 4,),
),
additions = dict(
xaxis = dict(
color = '#444444',
tickfont = dict(color = '#222222',),
rangeslider = dict(
bordercolor = '#CCCCCC',
bgcolor = '#CCCCCC',
thickness = 0.1,
),
rangeselector = dict(
bordercolor = '#C9C9C9',
bgcolor = '#C9C9C9',
activecolor = '#888888',
),
),
yaxis = dict(
color = '#444444',
tickfont = dict(color = '#222222',),
side = 'left',
),
),
layout = dict(
font = dict(
family = 'droid sans mono',
size = 12,
color = '#222222',
),
plot_bgcolor = '#FFFFFF',
paper_bgcolor = '#F3F3F3',
legend = dict(
bgcolor = LIGHT_PALETTE['transparent'],
),
),
)
# Dark Quantmod theme
DARK_QM = dict(
colors = dict(
increasing = '#00FF00',
decreasing = '#FF9900',
border_increasing = DARK_PALETTE['grey95'],
border_decreasing = DARK_PALETTE['grey95'],
primary = '#11AAEE',
secondary = '#0084FF',
tertiary = '#FC0D1B',
quaternary = '#00FF00',
grey = DARK_PALETTE['grey75'],
grey_light = DARK_PALETTE['grey85'],
grey_strong = DARK_PALETTE['grey60'],
fill = DARK_PALETTE['grey90'],
fill_light = DARK_PALETTE['grey95'],
fill_strong = DARK_PALETTE['grey85'],
),
traces = dict(
line_thin = dict(width = 1,),
line_thick = dict(width = 4,),
line_dashed = dict(dash = baseDash,),
line_dashed_thin = dict(dash = baseDash, width = 1,),
line_dashed_thick = dict(dash = baseDash, width = 4,),
area_dashed = dict(dash = baseDash,),
area_dashed_thin = dict(dash = baseDash, width = 1,),
area_dashed_thick = dict(dash = baseDash, width = 4,),
),
additions = dict(
xaxis = dict(
color = '#999999',
tickfont = dict(color = '#CCCCCC',),
rangeslider = dict(
bordercolor = '#444444',
bgcolor = '#444444',
thickness = 0.1,
),
rangeselector = dict(
bordercolor = '#444444',
bgcolor = '#444444',
activecolor = '#666666',
),
),
yaxis = dict(
color = '#999999',
tickfont = dict(color = '#CCCCCC',),
side = 'left',
),
),
layout = dict(
font = dict(
family = 'droid sans mono',
size = 12,
color = '#CCCCCC',
),
plot_bgcolor = '#252525',
paper_bgcolor = '#202020',
legend = dict(
bgcolor = DARK_PALETTE['transparent'],
),
),
)
THEMES = {'light': LIGHT_QM, 'dark': DARK_QM}  # alternate keys: 'light-qm': LIGHT_QM, 'dark-qm': DARK_QM
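# A minimal sketch of adding a custom theme (hypothetical name and color):
# copy an existing theme, override what you need, and register it in THEMES.
#
# CUSTOM_QM = dict(LIGHT_QM)
# CUSTOM_QM['colors'] = dict(LIGHT_QM['colors'], primary='#AA00FF')
# THEMES['custom'] = CUSTOM_QM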
| 28.825
| 89
| 0.527103
|
4a180038092f83dc1d9f50cbff38eaf996754cac
| 18,278
|
py
|
Python
|
tests/test_data_cleaner.py
|
manjebrinkhuis/mediaire_toolbox
|
1975338d8765b381527fc3969c3008d6cb4c0735
|
[
"MIT"
] | null | null | null |
tests/test_data_cleaner.py
|
manjebrinkhuis/mediaire_toolbox
|
1975338d8765b381527fc3969c3008d6cb4c0735
|
[
"MIT"
] | 11
|
2019-09-27T15:19:28.000Z
|
2022-01-04T13:27:19.000Z
|
tests/test_data_cleaner.py
|
manjebrinkhuis/mediaire_toolbox
|
1975338d8765b381527fc3969c3008d6cb4c0735
|
[
"MIT"
] | 3
|
2019-05-07T09:42:56.000Z
|
2022-01-27T13:14:59.000Z
|
import unittest
import logging
import tempfile
import mock
import shutil
import time
import itertools
import os
from mediaire_toolbox.data_cleaner import DataCleaner
logging.basicConfig(format='%(asctime)s %(levelname)s %(module)s:%(lineno)s '
'%(message)s', level=logging.DEBUG)
class TestDataCleaner(unittest.TestCase):
"""Test protected member functions"""
def test_check_valid_init_raise(self):
self.assertRaises(ValueError, DataCleaner, None, 1, 0, 0)
def test_check_valid_init(self):
DataCleaner(
None, 0, 0, 0, -1,
None, ['*.nii'], ['test.nii'])
def test__creation_time_and_size(self):
class mock_class():
def __init__(self, time, size):
self.st_ctime = time
self.st_size = size
with mock.patch('os.stat') as mock_stat:
mock_stat.return_value = mock_class('time', 'size')
self.assertEqual(
('file1', 'time', 'size'),
DataCleaner._creation_time_and_size('file1')
)
def test__sum_filestat_list_1(self):
self.assertEqual(0, DataCleaner._sum_filestat_list([]))
def test__sum_filestat_list_2(self):
self.assertEqual(
1, DataCleaner._sum_filestat_list([("duh", 0, 1)]))
def test__sum_filestat_list_3(self):
self.assertEqual(
3, DataCleaner._sum_filestat_list(
[("duh", 0, 1), ("brah", 0, 2)]))
def test__sort_filestat_list_1(self):
self.assertEqual([], DataCleaner._sort_filestat_list_by_time([]))
def test__sort_filestat_list_2(self):
filelist = [('file1', 0, 0)]
self.assertEqual(filelist, DataCleaner._sort_filestat_list_by_time(filelist))
def test__sort_filestat_list_3(self):
filelist = [('file1', 1, 0), ('file2', 0, 1)]
self.assertEqual(
filelist[::-1],
DataCleaner._sort_filestat_list_by_time(filelist))
def test__check_remove_time_True(self):
with mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_time.return_value = 2
self.assertTrue(DataCleaner._check_remove_time(0, 1))
def test__check_remove_time_False(self):
with mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_time.return_value = 1
self.assertFalse(DataCleaner._check_remove_time(0, 1))
def test__remove_from_file_list_1(self):
filelist = []
DataCleaner._remove_from_file_list(filelist, [])
self.assertEqual([], filelist)
def test__remove_from_file_list_2(self):
filelist = [0]
DataCleaner._remove_from_file_list(filelist, [0])
self.assertEqual([], filelist)
def test__remove_from_file_list_3(self):
filelist = [0, 1, 2, 3]
DataCleaner._remove_from_file_list(filelist, [0, 2])
self.assertEqual([1, 3], filelist)
def test__remove_from_file_list_4(self):
filelist = [0, 1, 2, 3, 4, 5, 6]
DataCleaner._remove_from_file_list(filelist, [0, 0, 4, 2, 5, 1])
self.assertEqual([3, 6], filelist)
def test__fnmatch_1(self):
self.assertFalse(DataCleaner._fnmatch('test.nii', []))
def test__fnmatch_2(self):
self.assertTrue(DataCleaner._fnmatch('test.nii', ['*.dcm', '*.nii']))
def test__fnmatch_3(self):
self.assertFalse(DataCleaner._fnmatch('test.nii', ['*.dcm']))
def test__check_remove_filter(self):
self.assertFalse(DataCleaner._check_remove_filter(
'test.nii', [], []))
def test__check_remove_filter2(self):
self.assertFalse(
DataCleaner._check_remove_filter('test.nii', ['*.dcm'], []))
def test__check_remove_filter3(self):
self.assertTrue(
DataCleaner._check_remove_filter('test.nii', [], ['*.nii']))
def test__check_remove_filter4(self):
self.assertFalse(DataCleaner._check_remove_filter(
'test.nii', None, None))
def test__check_remove_filter5(self):
"""Both whitelist and blacklist"""
self.assertFalse(DataCleaner._check_remove_filter(
'test.nii', ['test.nii'], ['*.nii']))
def test__check_remove_filter6(self):
"""Both whitelist and blacklist"""
self.assertTrue(DataCleaner._check_remove_filter(
'test.nii', ['not_test.nii'], ['*.nii']))
"""Test public functions"""
def test_clean_file_folder(self):
filelist = [
('folder1/file1.dcm', 0, 1),
('folder2/file2.dcm', 0, 3),
('folder1/file3.dcm', 0, 5),
('folder1/file4.nii', 0, 7),
('folder1/file5.dcm', 0, 9),
]
removed, removed_index, removed_size = DataCleaner.clean_file_folder(
filelist, 'folder1/file1.dcm', [], ['*.dcm']
)
self.assertEqual(
[
('folder1/file3.dcm', 0, 5),
('folder1/file5.dcm', 0, 9),
], removed)
self.assertEqual([2, 4], removed_index)
self.assertEqual(14, removed_size)
def test_clean_files_by_date_1(self):
self.assertEqual([], DataCleaner.clean_files_by_date([], 0, [], []))
def test_clean_files_by_date_2(self):
with mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_time.return_value = 10
filelist = [
('file1', 0, 0),
('file2', 3, 0),
('file3', 5, 0),
('file4', 7, 0)
]
self.assertEqual(
[('file1', 0, 0),
('file2', 3, 0)],
DataCleaner.clean_files_by_date(filelist, 6, [], ['*file*'])
)
self.assertEqual(
[('file3', 5, 0),
('file4', 7, 0)],
filelist
)
def test_clean_files_by_date_blacklist(self):
with mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_time.return_value = 10
filelist = [
('file1', 0, 0),
('file2', 3, 0),
('file3', 5, 0),
('file4', 7, 0)
]
self.assertEqual(
[('file1', 0, 0)],
DataCleaner.clean_files_by_date(filelist, 6, [], ['file1'])
)
self.assertEqual(
[('file2', 3, 0),
('file3', 5, 0),
('file4', 7, 0)],
filelist
)
def test_clean_files_by_date_whitelist(self):
with mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_time.return_value = 10
filelist = [
('file1', 0, 0),
('file2', 3, 0),
('file3', 5, 0),
('file4', 7, 0)
]
self.assertEqual(
[('file2', 3, 0)],
DataCleaner.clean_files_by_date(filelist, 6, ['file1'], ['*file*'])
)
self.assertEqual(
[('file1', 0, 0),
('file3', 5, 0),
('file4', 7, 0)],
filelist
)
def test_clean_files_by_size_1(self):
self.assertEqual([], DataCleaner.clean_files_by_size_optimized(
[], 1, [], []))
def test_clean_files_by_size_2(self):
filelist = [
('file1', 0, 10),
('file2', 0, 10),
('file3', 0, 10),
('file4', 0, 10)
]
removed = DataCleaner.clean_files_by_size_optimized(
filelist, 15, [], 'file*')
self.assertEqual([('file1', 0, 10), ('file2', 0, 10)], removed)
def test_clean_files_by_size_blacklist(self):
filelist = [
('file1', 0, 10),
('file2', 0, 10),
('file3', 0, 10),
('file4', 0, 10)
]
removed = DataCleaner.clean_files_by_size_optimized(
filelist, 15, [], 'file3')
self.assertEqual([('file3', 0, 10)], removed)
def test_clean_files_by_size_whitelist(self):
filelist = [
('file1', 0, 10),
('file2', 0, 10),
('file3', 0, 10),
('file4', 0, 10)
]
removed = DataCleaner.clean_files_by_size_optimized(
filelist, 15, ['file1'], 'file*')
self.assertEqual([('file2', 0, 10), ('file3', 0, 10)], removed)
def test_remove_files_file_nonexistent(self):
fail_list = DataCleaner.remove_files(
[('mockpath/that/does/not/exist', 0, 0)])
self.assertEqual(['mockpath/that/does/not/exist'], fail_list)
def test_remove_empty_folder_from_base_folder_1(self):
try:
base_folder = tempfile.mkdtemp()
removed = DataCleaner.remove_empty_folder_from_base_folder(
base_folder)
self.assertEqual([], removed)
finally:
shutil.rmtree(base_folder)
def test_remove_empty_folder_from_base_folder_2(self):
try:
base_folder = tempfile.mkdtemp()
tmp1 = tempfile.mkdtemp(dir=base_folder)
tmp2 = tempfile.mkdtemp(dir=base_folder)
tmp3 = tempfile.mkdtemp(dir=tmp1)
tempfile.mkstemp(dir=tmp2)
removed = DataCleaner.remove_empty_folder_from_base_folder(base_folder)
self.assertEqual([tmp3, tmp1], removed)
finally:
shutil.rmtree(base_folder)
def test_clean_up_priority_list(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_files.return_value = [
('file1', 15, 30),
('file2', 5, 10),
('file3', 11, 30),
('file4', 13, 30)
]
mock_time.return_value = 20
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1.0*50/1024/1028,
folder_size_hard_limit=1.0*50/1024/1028,
max_data_seconds=10,
whitelist=['file1', 'file3'],
priority_list=['file2', 'file4', 'file*']
)
removed = dc_instance.clean_up(dry_run=True)
# TODO file should be deleted only once
self.assertEqual(
[('file2', 5, 10),
('file4', 13, 30),
('file4', 13, 30)],
removed
)
def test_clean_up_priority_list_2(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
# test that 1. files not in priority_list are not removed
# (t.db not removed)
# 2. files removed are in the order of the priority list
# (old*.nii removed first)
# 3. files on the whitelist are not removed
# (not removing file1.nii and file3.nii)
# 4. stop the removing process early if size requirements met
# (0004.dcm not removed)
mock_files.return_value = [
('folder1/0001.png', 0, 10),
('folder1/0002.png', 0, 10),
('folder1/0003.png', 0, 10),
('folder1/0004.png', 0, 10),
('folder1/folder2/file1.nii', 10, 30),
('folder1/folder2/old_file2.nii', 10, 30),
('folder1/folder2/old_file3.nii', 10, 30),
('folder1/folder2/file4.nii', 10, 30),
('folder2/t.db', 10, 40),
]
mock_time.return_value = 20
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1.0*115/1024/1024,
folder_size_hard_limit=1.0*115/1024/1024,
max_data_seconds=-1,
whitelist=['*file1.nii', '*file3.nii'],
priority_list=['*old*.nii', '*nii', '*.png', 'file*']
)
removed = dc_instance.clean_up(dry_run=True)
# TODO file should be ideally deleted only once
self.assertEqual(
[('folder1/folder2/old_file2.nii', 10, 30),
('folder1/folder2/file4.nii', 10, 30),
('folder1/folder2/old_file2.nii', 10, 30)],
removed
)
def test_clean_up_priority_list_3_dcms(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
# test that 1. dcm files are removed on a whole
mock_files.return_value = [
('folder1/0001.dcm', 0, 10),
('folder1/0002.dcm', 0, 10),
('folder1/0003.dcm', 0, 10),
('folder1/0004.dcm', 0, 10),
('folder2/0001.dcm', 10, 10),
('folder2/0002.dcm', 10, 10),
('folder2/folder3/file1.nii', 10, 10),
('folder3/0001.dcm', 5, 10),
('folder3/0002.dcm', 5, 10),
('folder3/t.db', 10, 10),
]
mock_time.return_value = 20
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1.0*55/1024/1024,
folder_size_hard_limit=1.0*55/1024/1024,
max_data_seconds=-1,
whitelist=[],
priority_list=['*.dcm']
)
removed = dc_instance.clean_up(dry_run=True)
self.assertEqual(
[('folder1/0001.dcm', 0, 10),
('folder1/0002.dcm', 0, 10),
('folder1/0003.dcm', 0, 10),
('folder1/0004.dcm', 0, 10),
('folder3/0001.dcm', 5, 10),
('folder3/0002.dcm', 5, 10)],
removed
)
def test_do_not_clean_young_files(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_files.return_value = [
('file1', 15, 30),
('file2', 5, 10),
('file3', 11, 30),
('file4', 13, 30)
]
mock_time.return_value = 20
# file2 is 15 seconds old
# file4 is 7 seconds old
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1024*1024,
folder_size_hard_limit=1024*1024,
max_data_seconds=10,
whitelist=['file1', 'file3'],
blacklist=['file*'],
min_data_seconds=8
)
removed = dc_instance.clean_up(dry_run=True)
self.assertEqual([('file2', 5, 10)], removed)
def test_soft_hard_limit(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_files.return_value = [
('file1', 15, 30),
('file2', 5, 10),
('file3', 11, 30),
('file4', 13, 30)
]
mock_time.return_value = 20
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1.0*40/1024/1028,
folder_size_hard_limit=1.0*50/1024/1028,
max_data_seconds=-1,
whitelist=[''],
priority_list=['file*']
)
removed = dc_instance.clean_up(dry_run=True)
self.assertEqual(
[('file2', 5, 10),
('file3', 11, 30),
('file4', 13, 30)],
removed
)
def test_soft_hard_limit_2(self):
with mock.patch.object(DataCleaner, 'scan_dir'), \
mock.patch.object(DataCleaner, '_get_file_stats') as mock_files, \
mock.patch.object(DataCleaner, '_get_current_time') as mock_time:
mock_files.return_value = [
('file1', 15, 30),
('file2', 5, 10),
('file3', 11, 30),
('file4', 13, 30)
]
mock_time.return_value = 20
dc_instance = DataCleaner(
folder='',
folder_size_soft_limit=1.0*40/1024/1028,
folder_size_hard_limit=1.0*110/1024/1028,
max_data_seconds=-1,
whitelist=[''],
priority_list=['file*']
)
removed = dc_instance.clean_up(dry_run=True)
self.assertEqual([], removed)
def test_scalability(self):
# test that the function does not take too long
list_of_folders = [str(i) for i in range(100)]
dcm_files = ['{}.dcm'.format(i) for i in range(200)]
filelist = [
(os.path.join(a, b), 0, 1)
for a, b in itertools.product(list_of_folders, dcm_files)]
s_time = time.time()
DataCleaner.clean_files_by_size_per_folder(
filelist, reduce_size=100000000, pattern='*dcm')
e_time = time.time()
self.assertLess(e_time - s_time, 1.2)
def test_scalability_2(self):
# test that the function does not take too long
list_of_folders = [str(i) for i in range(100)]
dcm_files = ['{}.dcm'.format(i) for i in range(200)]
filelist = [
(os.path.join(a, b), 0, 1)
for a, b in itertools.product(list_of_folders, dcm_files)]
s_time = time.time()
DataCleaner.clean_files_by_size_optimized(
filelist, reduce_size=100000000, pattern='*dcm')
e_time = time.time()
self.assertLess(e_time - s_time, 1.2)
| 37.075051
| 85
| 0.532279
|
4a18005f38f941745a777da0df7c07d6723a9126
| 2,555
|
py
|
Python
|
tiny-imagenet/generate_poison.py
|
UMBCvision/universal-litmus-patterns.github.io
|
05c60fb01d17707573deda083caf6c44140e20f9
|
[
"MIT"
] | 32
|
2020-05-18T04:28:00.000Z
|
2022-03-26T08:01:04.000Z
|
tiny-imagenet/generate_poison.py
|
UMBCvision/universal-litmus-patterns.github.io
|
05c60fb01d17707573deda083caf6c44140e20f9
|
[
"MIT"
] | 2
|
2020-07-12T03:11:09.000Z
|
2020-09-24T17:46:16.000Z
|
tiny-imagenet/generate_poison.py
|
UMBCvision/universal-litmus-patterns.github.io
|
05c60fb01d17707573deda083caf6c44140e20f9
|
[
"MIT"
] | 5
|
2020-10-08T03:12:20.000Z
|
2022-01-20T09:18:25.000Z
|
import os
import cv2
import glob
from tqdm import tqdm
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
from skimage.io import imread
def save_image(img, fname):
# img = img.data.numpy()
# img = np.transpose(img, (1, 2, 0))
    img = img[:, :, ::-1]  # RGB -> BGR so cv2.imwrite stores colors correctly
cv2.imwrite(fname, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
[X_train, y_train] = pickle.load(open("data/train.pkl", "rb"))
# [X_val, y_val] = pickle.load(open("data/val.pkl", "rb"))
def add_patch(img, trigger):
# image(64x64x3) and trigger(7x7x3) both in [0-255] range
    x, y = np.random.randint(11, 52), np.random.randint(11, 52)
    m, n, _ = trigger.shape
    img[x-int(m/2):x+m-int(m/2), y-int(n/2):y+n-int(n/2), :] = trigger  # opaque trigger
return img
def generate_poisoned_data(X_train, Y_train, source, target, trigger):
ind=np.argwhere(Y_train==source)
Y_poisoned=target*np.ones((ind.shape[0])).astype(int)
X_poisoned=np.stack([add_patch(X_train[i,...],trigger) for i in ind.squeeze()], 0)
return X_poisoned, Y_poisoned, trigger, ind.squeeze()
# choose source and target classes and run a sample poisoning
mask_list = sorted(glob.glob("triggers/*"))[0:10]
source,target=(0, 100)
trigger = imread(random.choice(mask_list))
X_poisoned, Y_poisoned, trigger, ind=generate_poisoned_data(X_train.copy(), y_train.copy(), source, target, trigger)
i=10
fig,ax=plt.subplots(1,3,figsize=(15,5))
ax[0].imshow(X_train[ind[i],...])
ax[0].set_title('Input image')
ax[1].imshow(trigger)
ax[1].set_title('Trigger')
ax[2].imshow(X_poisoned[i,...])
ax[2].set_title('Output image')
plt.show()
attacked_data_folder='./Attacked_Data/Triggers_01_10'
if not os.path.isdir(attacked_data_folder):
os.makedirs(attacked_data_folder)
count=1000
labels=np.arange(200)
for source in tqdm(range(200)):
target_labels=np.concatenate([labels[:source],labels[source+1:]])
random.shuffle(target_labels)
for target in target_labels[:5]:
# Save the attacked data
triggerid = random.choice(mask_list)
trigger = imread(triggerid)
saveDir = attacked_data_folder+'/backdoor{:04d}_s{:04d}_t{:04d}_{}'.format(count, source, target, triggerid.split("/")[1].split(".")[0])
if not os.path.exists(saveDir):
os.makedirs(saveDir)
# X = X_train.copy()
# y = y_train.copy()
X_poisoned,Y_poisoned,trigger,ind=generate_poisoned_data(X_train.copy(),y_train.copy(),source,target,trigger)
# pickle.dump([X_poisoned,Y_poisoned,trigger,source,target],f)
for i in range(X_poisoned.shape[0]):
save_image(X_poisoned[i, ...], os.path.join(saveDir, "{:03d}.png".format(i)))
count+=1
| 32.75641
| 138
| 0.715851
|
4a1800ea398cd2b09c882bbf142fe44acdf3e545
| 50,293
|
py
|
Python
|
contrib/mercurial_git_push.py
|
misery/ExtendedApproval
|
ec6468cd284ca4abaece3c5edb53118f6d526a0a
|
[
"MIT"
] | 2
|
2018-01-12T12:41:00.000Z
|
2021-11-25T15:15:57.000Z
|
contrib/mercurial_git_push.py
|
misery/ExtendedApproval
|
ec6468cd284ca4abaece3c5edb53118f6d526a0a
|
[
"MIT"
] | null | null | null |
contrib/mercurial_git_push.py
|
misery/ExtendedApproval
|
ec6468cd284ca4abaece3c5edb53118f6d526a0a
|
[
"MIT"
] | 1
|
2017-01-26T10:09:06.000Z
|
2017-01-26T10:09:06.000Z
|
#!/usr/bin/env python
"""A Mercurial/git hook to post to Review Board on push to a central server.
The hook was designed to make posting to Review Board easy.
It allows user to post to Review Board by using the
ordinary 'hg push' or 'git push', without any need to learn or
install RBTools locally.
The hook with Review Board tries to act like gerrit for git.
Every changeset is a review request that will be amended until it is
marked as "Ship It!".
See also the Review Board extension "Extended Approval"
for better control over the "approved" flag.
This hook fits the following workflow:
1. A user makes some (local) commits.
2. He pushes those commits to the central server.
3. The hook is invoked on the server. The hook checks whether a changeset
exists and is modified. If it is modified it will be updated. Otherwise
it will check if the changeset is approved in that review request.
If the changeset does not exist a new request will be created.
4. The hook denies the push if not all commits have been approved.
It approves the push if all commits have been approved, upon which the
commits are permanently added to the central repository.
5. Users can then (try to) push the changesets again as often as they wish,
until some has approved the review request and the push succeeds.
In more detail, the hook does the following:
1. Iterates over all incoming changesets, and tries to find a review request
with the right commit ID. It uses a hash of the commit date and author
field. If it cannot find a review request it tries to guess the changeset.
2. If you use "hg commit --amend" or "hg rebase" the "date author" hash
won't be changed.
If you use "hg histedit" you should be aware that Mercurial < 4.2 will
use the newest date of the rolled/folded changeset. That will break
the "date author" hash. In that case the hook tries to
guess the changeset by the summary.
Best practices: Use "hg histedit" on Mercurial < 4.2 to edit a changeset
with roll/fold.
Push the changes and then update your summary or description.
###### SetUp
The hook submits review requests using the username of the current user.
You need to configure a "hook" user in Review Board with the following rights:
Section: reviews | review request
- 'Can edit review request'
- 'Can submit as another user'
- 'Can change status'
Instead of the rights above you could set the "hook" user as an administrator.
Those credentials can be configured through a global .reviewboardrc
file on server. This file needs to be in the HOME directory of the
server user or you need to define RBTOOLS_CONFIG_PATH.
See reviewboardrc config file.
REVIEWBOARD_URL: The URL of the Review Board server
USERNAME: The username to use for logging into the server
PASSWORD: The password to use for logging into the server
API_TOKEN: An API token to use for logging into the server. This is
recommended and replaces the use of PASSWORD.
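A minimal example .reviewboardrc (hypothetical values):
    REVIEWBOARD_URL = 'https://reviews.example.com'
    USERNAME = 'hook'
    API_TOKEN = '0123456789abcdef'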
Also you need to install rbtools as the hook uses this.
It is recommended to use current version from pypi: pip install -U rbtools
Also it is recommended to use a virtualenv for this to have a clean
environment: https://docs.python.org/3/tutorial/venv.html
### Mercurial
You need to add the hook to your .hg/hgrc file of your repository or use
a global/system-wide .hgrc file to define the hook for all repositories once.
Hint:
Use "/etc/gitlab/heptapod.hgrc" as the system-wide config for Heptapod.
If you use a virtualenv or want some special changes for the hook you
can use the provided reviewboard.sh as a wrapper to the hook.
[hooks]
pretxnchangegroup.rb = /path/to/hook/mercurial_git_push.py
#pretxnchangegroup.rb = /path/to/hook/reviewboard.sh
This hook was tested with "hg serve", hgkeeper, Heptapod, Kallithea
and SCM-Manager as remote hosting platforms, as well as with a local repository.
### Git
You need to add this hook as a pre-receive script to .git/hooks or use
$GIT_DIR and the core.hooksPath configuration.
See: https://git-scm.com/docs/githooks
$ ln -s /to/hook/mercurial_git_push.py /to/repo/.git/hooks/pre-receive
or
$ ln -s /to/hook/reviewboard.sh /to/repo/.git/hooks/pre-receive
"""
from __future__ import unicode_literals
import datetime as dt
import getpass
import hashlib
import hmac
import json
import os
import re
import six
from functools import partial
from rbtools import __version__ as rbversion
from rbtools.clients.git import GitClient
from rbtools.clients.mercurial import MercurialClient
from rbtools.commands import Command
from rbtools.hooks.common import HookError
from rbtools.utils.filesystem import is_exe_in_path
from rbtools.utils.process import execute
from rbtools.utils.users import get_authenticated_session
MAX_MERGE_ENTRIES = 30
FAKE_DIFF_TEMPL = b'''diff --git /a /b
new file mode 100644
--- /dev/null
+++ /_____reviewboard_hook_information_____
@@ -0,0 +1,%d @@
+THIS IS A REVIEWBOARD HOOK INFORMATION! THE FOLLOWING CHANGESET
+DOES NOT CONTAIN ANY DIFF. PLEASE REVIEW THE RAW DATA OF THE CHANGESET:
+
+------------------------------------------------------------
%s
+------------------------------------------------------------
'''
HG = 'hg'
def get_ticket_refs(text, prefixes=None):
"""Returns a list of ticket IDs referenced in given text.
    Args:
        text (unicode):
            The text to scan for ticket references.
        prefixes (list of unicode):
Prefixes allowed before the ticket number.
For example, prefixes=['app-', ''] would recognize
both 'app-1' and '1' as ticket IDs.
By default, prefixes is a regex of '[A-Z-]*'
Returns:
set of unicode
The set of recognized issue numbers.
"""
verbs = ['closed', 'closes', 'close', 'fixed', 'fixes', 'fix',
'addresses', 're', 'references', 'refs', 'see',
'issue', 'bug', 'ticket']
trigger = '(?:' + '|'.join(verbs) + r')\s*(?:ticket|bug)?:*\s*'
ticket_join = r'\s*(?:,|and|, and)\s*'
if prefixes is None:
safe_prefixes = '[A-Z-]*'
else:
safe_prefixes = '|'.join([re.escape(prefix) for prefix in prefixes])
ticket_id = '#?((?:' + safe_prefixes + r')\d+)'
matches = re.findall(trigger + ticket_id +
('(?:' + ticket_join + ticket_id + ')?') * 10, text,
flags=re.IGNORECASE)
ids = [submatch for match in matches for submatch in match if submatch]
return sorted(set(ids))
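# For illustration (hypothetical commit message), the function behaves like:
#   >>> get_ticket_refs('Fixes #12 and refs APP-34')
#   ['12', 'APP-34']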
class BaseDiffer(object):
"""A class to return diffs compatible with server."""
class DiffContent(object):
"""A class to hold info about a diff and the diff itself."""
def __init__(self, key, request_id,
diff, base_commit_id, parent_diff=None):
self._key = key
self._request_id = request_id
self._base_commit_id = base_commit_id
self.setDiff(diff)
if self._is_diff_empty(parent_diff):
self._parent_diff = None
else:
self._parent_diff = parent_diff
def _is_diff_empty(self, diff):
return diff is None or len(diff) == 0
def getDiff(self):
return self._diff
def setDiff(self, diff):
self._hashes = {}
self._parent_diff = None
if self._is_diff_empty(diff):
self._diff = None
else:
self._diff = diff
def getParentDiff(self):
return self._parent_diff
def getBaseCommitId(self):
return self._base_commit_id
def _getHasher(self):
if self._request_id is None:
raise HookError('Cannot get hash without request id')
hasher = hmac.new(self._key, digestmod=hashlib.sha256)
hasher.update(six.text_type(self._request_id).encode('utf-8'))
return hasher
def getRawHash(self, content):
if content is None:
raise HookError('Cannot get hash of empty content')
hasher = self._getHasher()
hasher.update(content)
return hasher.hexdigest()
def getHash(self, diffset_id):
if self._diff is None:
raise HookError('Cannot get hash of empty diff')
if diffset_id is None:
raise HookError('Cannot get hash without diffset id')
if diffset_id in self._hashes:
return self._hashes[diffset_id]
hasher = self._getHasher()
hasher.update(six.text_type(diffset_id).encode('utf-8'))
prefixes = (b'diff', b'@@', b'#', b'index')
for line in self._diff.splitlines():
if len(line) > 0 and not line.startswith(prefixes):
hasher.update(line)
h = hasher.hexdigest()
self._hashes[diffset_id] = h
return h
def __init__(self, tool):
self.tool = tool
envKey = 'HOOK_HMAC_KEY'
self._key = os.environ.get(envKey)
if self._key is None:
try:
with open('/etc/machine-id', 'r') as content_file:
self._key = content_file.read().strip()
except Exception:
raise HookError('You need to define %s' % envKey)
if not six.PY2:
self._key = bytes(self._key, 'ascii')
def diff(self, rev1, rev2, base, request_id):
"""Return a diff and parent diff of given changeset.
Args:
rev1 (unicode):
Last public revision.
rev2 (unicode):
Revision of current changeset.
base (unicode):
Base revision of current changeset.
request_id (unicode):
ID of current review request.
Returns:
map:
The diff information of the changeset.
"""
revisions = {'base': rev1, 'tip': rev2}
        # Avoid generating an empty parent diff
# If 'base' and 'parent_base' is the same this is the
# first new changeset. So there is no parent diff!
if revisions['base'] != base:
revisions['parent_base'] = base
info = self.tool.diff(revisions=revisions)
return BaseDiffer.DiffContent(self._key, request_id,
info['diff'],
info['base_commit_id'],
info['parent_diff'])
class MercurialDiffer(BaseDiffer):
def __init__(self, root):
if rbversion >= '1.0.4':
tool = MercurialClient(HG)
else:
tool = MercurialClient()
cmd = Command()
tool.capabilities = cmd.get_capabilities(api_root=root)
super(MercurialDiffer, self).__init__(tool)
class GitDiffer(BaseDiffer):
def __init__(self, root):
tool = GitClient()
tool.get_repository_info()
super(GitDiffer, self).__init__(tool)
class BaseReviewRequest(object):
"""A class to represent a review request from a Mercurial hook."""
def __init__(self, root, repo, changeset, base, submitter, differ, web):
"""Initialize object with the given information.
Args:
root (complex):
The API root resource.
repo (int):
An ID of repository.
changeset (object of MercurialRevision):
An object of MercurialRevision.
base (unicode):
A revision of parent changeset.
submitter (unicode):
The username of current submitter.
differ (BaseDiffer):
An object to generate diffs.
web (unicode, optional):
URL to web repository.
"""
self.root = root
self.repo = repo
self.submitter = submitter
self._changeset = changeset
self.base = base
self.commit_id = self._generate_commit_id()
self.diff_info = None
self._skippable = None
self._differ = differ
self._web = web
self._web_node_regex = re.compile(r'\b([0-9|a-f]{40}|[0-9|a-f]{12})\b')
self._web_backref = r'[\g<0>]({0}\g<0>)'.format(web.format('')) if web else None
self._info = None
regex = os.environ.get('HOOK_FILE_UPLOAD_REGEX')
if not regex:
regex = r'.*\.(png|jpg|jpeg|gif|svg|webp|ico|bmp)$'
self.regexUpload = re.compile(regex)
r = self._get_request()
self.request = r
self.existing = False if r is None else True
self.failure = None if r is None else r.approval_failure
self.approved = False if r is None or self.skippable() else r.approved
self.diffset_id = None
if r is not None and 'latest_diff' in r.links:
self.diffset_id = r.get_latest_diff(only_links='',
only_fields='id').id
def id(self):
"""Return ID of review request.
Returns:
int:
An identifier of review request.
"""
return None if self.request is None else self.request.id
def graft(self, short=True):
"""Return changeset as hex node."""
return self._changeset.graft(short)
def parent(self):
"""Return changeset as hex node."""
return self._changeset.parent()
def node(self, short=True):
"""Return changeset as hex node."""
return self._changeset.node(short)
def branch(self):
"""Return branch of changeset."""
return self._changeset.branch()
def summary(self):
return self._changeset.summary()
def skippable(self):
if self._skippable is None:
regex = r'Reviewed at https://'
if self.summary().startswith('SKIP'):
self._skippable = True
self.failure = 'Starts with SKIP'
elif re.search(regex, self._changeset.desc()):
self._skippable = True
self.failure = 'Description contains: "%s"' % regex
else:
self._skippable = False
return self._skippable
def _replace_hashes(self, content):
if self._web_backref is not None:
content = self._web_node_regex.sub(self._web_backref, content)
return content
def _markdown_rev(self, rev):
text_type = 'plain'
if self._web is not None:
text_type = 'markdown'
web = self._web.format(rev)
rev = '[{0}]({1})'.format(rev, web)
return (rev, text_type)
def info(self):
if self._info is None:
template = ('```{author} ({date}) [{node}] '
'[{branch}] [graft: {graft}]```\n\n{desc}')
desc = self._replace_hashes(self._changeset.desc())
self._info = template.format(author=self._changeset.author(),
date=self._changeset.date(),
node=self.node(),
branch=self.branch(),
graft=self._changeset.graft(),
desc=desc)
merges = self._changeset.merges()
if merges:
self._info += '\n\n\n'
files = self._changeset.files()
self._info += '# Touched %d file(s) by this merge ' \
'changeset\n' % len(files)
for entry in files:
self._info += '+ ' + entry + '\n'
self._info += '# Merges %d changeset(s)\n' % len(merges)
def add(changes):
t = '+ [{node}] {summary}\n'
for rev in changes:
node, _ = self._markdown_rev(rev.node())
summary = self._replace_hashes(rev.summary())
self._info += t.format(node=node,
summary=summary)
if len(merges) > MAX_MERGE_ENTRIES + 1:
add(merges[0:MAX_MERGE_ENTRIES])
self._info += '+ ...\n'
add([merges[-1]])
else:
add(merges)
self._info = self._info.strip()
return self._info
def exists(self):
"""Return existence of review request.
Returns:
Boolean:
True if review request exists, otherwise False.
"""
return self.existing
def modified(self):
"""Return modified state of review request.
Returns:
Boolean:
True if review request is modified, otherwise False.
"""
return (self.request.branch != self.branch() or
self.request.summary != self.summary() or
self._modified_description() or not
self._diff_up_to_date())
def close(self):
"""Close the given review request with a message."""
rev, text_type = self._markdown_rev(self.node())
msg = 'Automatically closed by a push (hook): %s' % rev
self.request.update(status='submitted',
close_description=msg,
close_description_text_type=text_type)
def sync(self):
"""Synchronize review request on review board."""
if self.request is None:
self.request = self._create()
if self.diff_info is None:
self._generate_diff_info()
self._update()
def _diff_up_to_date(self):
"""Return modified state of diff.
Returns:
Boolean:
True if diff is up to date, otherwise False.
"""
if self.diff_info is None:
self._generate_diff_info()
if not self.existing or self.diffset_id is None:
return False
e = self.request.extra_data
return ('diff_hash' in e and
self.diff_info.getHash(self.diffset_id) == e['diff_hash'])
def _update_attachments(self):
return None
def _update(self):
"""Update review request draft based on changeset."""
self.approved = False
extra_data = None
draft = self.request.get_or_create_draft(only_fields='',
only_links='update,'
'draft_diffs')
if not self._diff_up_to_date():
diffs = draft.get_draft_diffs(only_links='upload_diff',
only_fields='')
d = self.diff_info
diffs.upload_diff(diff=d.getDiff(),
parent_diff=d.getParentDiff(),
base_commit_id=d.getBaseCommitId())
# re-fetch diffset to get id
diff = draft.get_draft_diffs(only_links='', only_fields='id')
extra_data = {'extra_data.diff_hash': d.getHash(diff[0].id)}
if rbversion >= '1.0.3':
extra_data['extra_data.file_hashes'] = \
self._update_attachments()
refs = [six.text_type(x)
for x in get_ticket_refs(self._changeset.desc())]
bugs = ','.join(refs)
draft.update(summary=self.summary(),
bugs_closed=bugs,
description=self.info(),
description_text_type='markdown',
branch=self.branch(),
commit_id=self.commit_id,
publish_as_owner=True,
public=True)
if extra_data:
self.request.update(**extra_data)
def _create(self):
"""Create a new review request on review board.
Returns:
complex:
The review request object.
"""
c = self.root.get_review_requests(only_fields='',
only_links='create')
return c.create(commit_id=self.commit_id,
repository=self.repo,
submit_as=self.submitter)
def _modified_description(self):
"""Filter changeset information and check if the
description got changed.
"""
regex = (r'\([0-9]{4}-[0-9]{2}-[0-9]{2} '
r'[0-9]{2}:[0-9]{2}:[0-9]{2}'
r'[\s]{0,1}[+-][0-9]{2}[:]{0,1}[0-9]{2}\) '
r'\[[0-9|a-z|/]+\]')
regex = re.compile(regex)
old = self.request.description
new = self.info()
return regex.sub('', old, 1) != regex.sub('', new, 1)
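    # A minimal sketch of the comparison above, with invented strings: the
    # regex strips the generated "(date) [node]" header once, so two infos
    # that differ only in that header compare equal.
    #
    #   old = '```a (2020-01-01 10:00:00 +0200) [abc123] x``` fix bug'
    #   new = '```a (2020-01-02 11:00:00 +0200) [def456] x``` fix bug'
    #   regex.sub('', old, 1) == regex.sub('', new, 1)   # -> True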
def _commit_id_data(self):
content = []
content.append(self._changeset.author().encode('utf-8'))
content.append(self._changeset.date().encode('utf-8'))
content.append(six.text_type(self.repo).encode('utf-8'))
s = self.summary()
if (s.startswith('[maven-release-plugin]') or
s.startswith('Added tag ') or
s.startswith('Moved tag ') or
s.startswith('Removed tag ')):
content.append(s)
return content
def _generate_commit_id(self):
"""Return a commit id of the changeset.
Returns:
unicode:
A generated commit id of changeset.
"""
hasher = hashlib.md5()
for line in self._commit_id_data():
hasher.update(line)
return hasher.hexdigest()
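    # Sketch of the hashing above with hypothetical inputs: the commit id
    # is a stable MD5 over author, date and repository (plus the summary
    # for tag/release commits), so re-pushing the same changeset maps to
    # the same review request.
    #
    #   hasher = hashlib.md5()
    #   for line in [b'John Doe <john@example.com>',
    #                b'2020-01-01 10:00:00+02:00',
    #                b'42']:
    #       hasher.update(line)
    #   hasher.hexdigest()   # deterministic for identical inputs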
def _get_request(self):
"""Find a review request in the given repo for the given changeset.
Returns:
complex:
                The corresponding review request on Review Board if it exists,
otherwise None.
"""
fields = ('summary,approved,approval_failure,id,commit_id,'
'branch,description,extra_data')
links = 'submitter,update,latest_diff,draft,file_attachments'
reqs = self.root.get_review_requests(repository=self.repo,
status='pending',
show_all_unpublished=True,
only_fields=fields,
only_links=links,
commit_id=self.commit_id)
count = len(reqs)
if count == 0:
reqs = self.root.get_review_requests(repository=self.repo,
status='pending',
show_all_unpublished=True,
only_fields=fields,
only_links=links,
from_user=self.submitter)
found = None
for r in reqs.all_items:
if r.summary == self.summary():
if found is not None:
raise HookError('Multiple review requests: %s'
% self.summary())
found = r
return found
elif count == 1:
r = reqs[0]
if r.links.submitter.title.lower() != self.submitter.lower():
raise HookError('Owner of review request (%d): %s'
% (r.id, r.links.submitter.title))
return r
return None
class MercurialReviewRequest(BaseReviewRequest):
def __init__(self, root, repo, changeset, base, submitter, differ, web):
super(MercurialReviewRequest, self).__init__(root,
repo,
changeset,
base,
submitter,
differ,
web)
def _commit_id_data(self):
content = super(MercurialReviewRequest, self)._commit_id_data()
graft = self.graft(False)
if graft:
if six.PY2:
content.append(graft)
else:
content.append(bytes(graft, 'ascii'))
return content
def _update_attachments(self):
stored_hashes = {}
if 'file_hashes' in self.request.extra_data:
stored_hashes = json.loads(self.request.extra_data['file_hashes'])
a = self.request.get_file_attachments(only_fields='caption,'
'attachment_history_id',
only_links='delete')
hashes = {}
existing = {}
for entry in a.all_items:
existing[entry['caption']] = entry
def modified(filename):
d = self._changeset.diffstat()
return filename in d and d[filename] != '0'
def handle_upload(f):
e = existing.get(f)
history = e['attachment_history_id'] if e else None
content = self._changeset.file(f)
hashes[f] = self.diff_info.getRawHash(content)
if f not in stored_hashes or hashes[f] != stored_hashes[f]:
a.upload_attachment(f, content, f, history)
mods = self._changeset.files('{file_mods|json}')
adds = self._changeset.files('{file_adds|json}')
foundAttachments = []
for entry in set(adds + mods):
if self.regexUpload.match(entry):
foundAttachments.append(entry)
if len(foundAttachments) > 0:
files = self._changeset.files() # let's detect deleted files
copies = self._changeset.files('{file_copies|json}')
for e in foundAttachments:
if e not in files:
continue
if e in copies and not modified(e):
continue
handle_upload(e)
for entry in stored_hashes:
if entry not in hashes and entry in existing:
existing[entry].delete()
return json.dumps(hashes)
def _generate_diff_info(self):
"""Generate the diff if it has been changed.
Fake a diff if the diff cannot be created!
        This will happen for the following commands:
        - A commit for a new branch: "hg branch" and "hg push --new-branch"
- A commit to close a branch: "hg commit --close-branch"
"""
self.diff_info = self._differ.diff(self.parent(),
self.node(False),
self.base,
self.request.id)
if self.diff_info.getDiff() is None:
content = []
for data in self._changeset.raw_data():
content.append(b'+%s' % data)
fake_diff = FAKE_DIFF_TEMPL % (len(content) + 5,
b'\n'.join(content))
self.diff_info.setDiff(fake_diff)
class GitReviewRequest(BaseReviewRequest):
def __init__(self, root, repo, changeset, base, submitter, differ, web):
super(GitReviewRequest, self).__init__(root,
repo,
changeset,
base,
submitter,
differ,
web)
def _generate_diff_info(self):
"""Generate the diff if it has been changed."""
# git hash-object -t tree /dev/null
initialCommit = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
if self.base == '0000000000000000000000000000000000000000':
base = initialCommit
else:
base = self.base
if len(self._changeset.parent()) > 0:
parent = self.node() + '^1'
else:
parent = initialCommit
self.diff_info = self._differ.diff(parent,
self.node(False),
base,
self.request.id)
class MercurialGitHookCmd(Command):
"""Helper to parse configuration from .reviewboardrc file."""
name = 'MercurialGitHook'
option_list = [
Command.server_options,
]
def __init__(self):
super(MercurialGitHookCmd, self).__init__()
parser = self.create_arg_parser([])
self.options = parser.parse_args([])
class BaseRevision(object):
def __init__(self):
self._summary = None
def summary(self):
if self._summary is None:
self._summary = self.desc().splitlines()[0].strip()
if len(self._summary) > 150:
self._summary = self._summary[0:150] + ' ...'
return self._summary
class MercurialRevision(BaseRevision):
"""Class to represent information of changeset."""
@staticmethod
def fetch(revset):
changes = execute([HG, 'log', '--debug',
'--config', 'ui.message-output=stderr',
'-r', revset, '--template', 'json'],
with_errors=False,
return_errors=False)
result = []
for entry in json.loads(changes):
result.append(MercurialRevision(entry))
return result
def __init__(self, json):
super(MercurialRevision, self).__init__()
self.json = json
self._date = None
self._merges = None
self._diffstat = None
self._graft_source = None
self._raw_data = None
def graft(self, short=True):
if self._graft_source is None:
self._graft_source = ''
if 'extra' in self.json:
if 'source' in self.json['extra']:
self._graft_source = self.json['extra']['source']
if len(self._graft_source) > 0:
return self._graft_source[:12] if short else self._graft_source
return None
def parent(self, short=False):
p = self.json['parents'][0]
return p[:12] if short else p
def node(self, short=True):
n = self.json['node']
return n[:12] if short else n
def branch(self):
return self.json['branch']
def author(self):
return self.json['user']
def date(self):
if self._date is None:
class Offset(dt.tzinfo):
def __init__(self, offset):
self._offset = dt.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
d = self.json['date']
offset = d[1] * -1
d = dt.datetime.utcfromtimestamp(d[0] + offset)
d = d.replace(tzinfo=Offset(offset))
self._date = d.isoformat(str(' '))
return self._date
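    # Arithmetic sketch of the conversion above, assuming Mercurial's JSON
    # date pair [unixtime, offset-seconds-west-of-UTC]; the values below
    # are invented, and Offset is the local tzinfo helper defined above.
    #
    #   d = [1577872800, -7200]     # 2020-01-01 10:00:00 UTC at UTC+02:00
    #   offset = d[1] * -1          # +7200
    #   local = dt.datetime.utcfromtimestamp(d[0] + offset)  # 12:00 local
    #   local.replace(tzinfo=Offset(offset)).isoformat(str(' '))
    #   # -> '2020-01-01 12:00:00+02:00'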
def desc(self):
return self.json['desc']
def diffstat(self):
if self._diffstat is None:
self._diffstat = {}
o = execute([HG, 'diff', '-g',
'--stat', '-c', self.node()]).splitlines()
del o[-1] # useless summary line
for entry in o:
e = entry.rsplit(' | ')
self._diffstat[e[0].strip()] = e[1].strip()
return self._diffstat
def files(self, template='{files|json}'):
return json.loads(execute([HG, 'log', '-r', self.node(),
'--template', template]))
def file(self, filename):
return execute([HG, 'cat', '-r', self.node(), filename],
with_errors=False,
results_unicode=False)
def merges(self):
"""Get all changeset of this merge change.
If this is a merge changeset we can fetch
all changesets that will be merged.
"""
p = self.json['parents']
if len(p) == 2 and self._merges is None:
revset = 'ancestors({p2}) and ' \
'(children(ancestor(ancestor({p1}, {p2}),' \
'{node}))::' \
'{node})'.format(p1=p[0], p2=p[1], node=self.node())
self._merges = MercurialRevision.fetch(revset)
return self._merges
def raw_data(self):
if self._raw_data is None:
j = self.json
content = []
content.append('changeset: %s' % j['node'])
content.append('parents: %s' % json.dumps(j['parents']))
content.append('user: %s' % j['user'])
content.append('date: %s' % self.date())
content.append('branch: %s' % j['branch'])
content.append('extra: %s' % json.dumps(j['extra']))
if six.PY2:
self._raw_data = content
else:
self._raw_data = []
for line in content:
self._raw_data.append(bytes(line, 'utf-8'))
return self._raw_data
class GitRevision(BaseRevision):
"""Class to represent information of changeset."""
@staticmethod
def fetch(node, base, refs=None, skipKnown=True):
if base == '0000000000000000000000000000000000000000':
rev = node
else:
rev = '%s..%s' % (base, node)
changes = execute(['git', 'rev-list', rev]).splitlines()
changes.reverse()
result = []
for entry in changes:
if skipKnown:
known = execute(['git', 'branch', '--contains', entry])
if len(known) > 0:
continue
result.append(GitRevision(entry, refs))
return result
def __init__(self, hashnode, refs):
super(GitRevision, self).__init__()
self._hash = hashnode
self._refs = refs.replace('refs/heads/', '') if refs else None
self._merges = None
pretty = '--pretty=format:%ai#%P#%GT#%G?#%GP#%an <%ae>#%B'
data = execute(['git', 'log', '-1', self._hash, pretty])
data = data.split('#', 6)
self._date = data[0]
self._parent = data[1].split()
self._sign_trust = data[2]
self._sign_verify = data[3]
self._sign_id = data[4]
self._user = data[5]
self._desc = data[6]
def signTrust(self):
return self._sign_trust
def signVerify(self):
return self._sign_verify
def signId(self):
return self._sign_id
def graft(self):
return None
def parent(self):
return self._parent
def node(self, short=True):
return self._hash[:12] if short else self._hash
def branch(self):
return self._refs
def author(self):
return self._user
def date(self):
return self._date
def desc(self):
return self._desc
def diffstat(self):
return ''
def files(self):
return []
def file(self, filename):
entry = '%s:%s' % (self.node(False), filename)
return execute(['git', 'show', entry])
def merges(self):
"""Get all changeset of this merge change.
If this is a merge changeset we can fetch
all changesets that will be merged.
"""
if self._merges is None and len(self._parent) > 1:
self._merges = GitRevision.fetch(self._hash,
self._parent[0],
skipKnown=False)
self._merges.pop() # remove merge commit itself
self._merges.reverse() # use correct order
return self._merges
class BaseHook(object):
"""Class to represent a hook for Mercurial repositories."""
def __init__(self, log, name, review_request_class, review_differ_class):
self.log = log
self.submitter = None
self.repo_name = None
self.repo_id = None
self.root = None
self.web = None
self.base = None
self.name = name
self.review_request_class = review_request_class
self.review_differ_class = review_differ_class
self._differ = None
e = os.environ
if 'KALLITHEA_EXTRAS' in e:
kallithea = json.loads(e['KALLITHEA_EXTRAS'])
self.repo_name = kallithea['repository']
if 'default' in kallithea['username']:
self.log('Anonymous access is not supported')
else:
self.submitter = kallithea['username']
elif 'HEPTAPOD_USERINFO_USERNAME' in e and \
'HEPTAPOD_PROJECT_PATH' in e and \
'HEPTAPOD_PROJECT_NAMESPACE_FULL_PATH' in e:
self.submitter = e['HEPTAPOD_USERINFO_USERNAME']
self.repo_name = \
e['HEPTAPOD_PROJECT_NAMESPACE_FULL_PATH'] + '/' + \
e['HEPTAPOD_PROJECT_PATH']
elif 'GL_USERNAME' in e and 'GL_PROJECT_PATH' in e:
self.submitter = e['GL_USERNAME']
self.repo_name = e['GL_PROJECT_PATH']
elif 'HGK_USERNAME' in e and 'HGK_REPOSITORY' in e:
self.submitter = e['HGK_USERNAME']
self.repo_name = e['HGK_REPOSITORY']
elif 'REPO_NAME' in e and 'REMOTE_USER' in e:
self.submitter = e['REMOTE_USER']
self.repo_name = e['REPO_NAME']
else:
self.submitter = getpass.getuser()
def _set_repo_id(self):
"""Set ID of repository."""
fields = 'path,mirror_path,id'
repos = self.root.get_repositories(name=self.repo_name,
tool=self.name,
only_fields=fields,
only_links='')
if repos.num_items < 1:
repos = self.root.get_repositories(path=self.repo_name,
tool=self.name,
only_fields=fields,
only_links='')
if repos.num_items < 1:
raise HookError('Could not open Review Board repository:'
'\n%s\n'
'Repository is not registered or you do '
'not have permissions to access this '
'repository.' % self.repo_name)
r = repos[0]
self.repo_id = r.id
return r
def _set_root(self):
"""Set API root object."""
cmd = MercurialGitHookCmd()
try:
server_url = cmd.get_server_url(None, None)
except Exception:
            self.log('Trying .reviewboardrc (RBTOOLS_CONFIG_PATH) file '
                     'in "%s" and "%s"',
os.environ.get('HOME'),
os.environ.get('RBTOOLS_CONFIG_PATH'))
raise
self.log('Review Board: %s', server_url)
try:
api_client, self.root = cmd.get_api(server_url)
except Exception:
self.log('Cannot fetch data from RB. Is ALLOWED_HOST correct?')
raise
session = get_authenticated_session(api_client, self.root,
auth_required=True, num_retries=0)
if session is None or not session.authenticated:
            raise HookError('Please add a USERNAME and a PASSWORD or '
'API_TOKEN to .reviewboardrc')
self._differ = self.review_differ_class(self.root)
def _check_duplicate(self, req, revreqs):
"""Check if a summary or commit_id is already used during this push.
Args:
req (rbtools.hooks.mercurial.MercurialReviewRequest):
A review request object.
revreqs (list of rbtools.hooks.mercurial.MercurialReviewRequest):
All previous review requests.
Returns:
Boolean:
True if summary or commit_id is duplicated, otherwise False.
"""
return any(
r.summary() == req.summary() or r.commit_id == req.commit_id
for r in revreqs
)
def _handle_changeset_list(self, node):
"""Process all incoming changesets.
Args:
node (unicode):
The hex of the first changeset.
Returns:
int:
0 on success, otherwise non-zero.
"""
changesets = self._list_of_incoming(node)
self.log('Processing %d changeset(s)...', len(changesets))
if self.base is None and len(changesets) > 0:
self.base = changesets[0].parent()
if isinstance(self.base, list):
self.base = self.base[0]
return self._handle_changeset_list_process(node, changesets)
def _handle_changeset_list_process(self, node, changesets):
revreqs = []
for changeset in changesets:
request = self.review_request_class(self.root,
self.repo_id,
changeset,
self.base,
self.submitter,
self._differ,
self.web)
if self._check_duplicate(request, revreqs):
self.log('Ignoring changeset (%s) as it has a '
'duplicated commit_id or summary: %s | %s',
request.node(),
request.commit_id,
request.summary())
return 1
self._handle_review_request(request)
revreqs.append(request)
return self._handle_approved_review_requests(revreqs)
def _handle_approved_review_requests(self, revreqs):
"""Handle approved review requests.
Args:
revreqs (list of rbtools.hooks.mercurial.MercurialReviewRequest):
All processed review requests.
Returns:
int:
0 on success, otherwise non-zero.
"""
idx = None
for i, r in enumerate(revreqs):
if not r.approved:
idx = i
break
if idx is None:
for r in revreqs:
self.log('Closing review request: %s', r.id())
r.close()
return 0
elif idx > 0:
self._log_push_info(revreqs[idx - 1].node())
return 1
def _log_push_info(self, node=None):
        self.log('If you want to push the already approved changes,')
        self.log('you can (probably) execute this:')
def _handle_review_request(self, request):
"""Handle given review request.
Args:
request (rbtools.hooks.mercurial.MercurialReviewRequest):
A review request object.
"""
if request.skippable():
self.log('Skip changeset: %s | %s',
request.node(), request.failure)
return
if request.exists():
if request.modified():
request.sync()
self.log('Updated review request (%d) for '
'changeset: %s', request.id(), request.node())
else:
if request.approved:
self.log('Found approved review request (%d) for '
'changeset: %s', request.id(),
request.node())
else:
self.log('Found unchanged review request (%d) for '
'changeset: %s | %s', request.id(),
request.node(), request.failure)
else:
request.sync()
self.log('Created review request (%d) for '
'changeset: %s', request.id(), request.node())
def push_to_reviewboard(self, node):
"""Run the hook.
Returns:
int:
Return code of execution. 0 on success, otherwise non-zero.
"""
self.log('Push as user "%s" to "%s"...',
self.submitter, self.repo_name)
if node is None or len(node) == 0:
raise HookError('Initial changeset is undefined.')
if self.submitter is None or self.repo_name is None:
raise HookError('Cannot detect submitter or repository.')
self._set_root()
self._set_repo_id()
return self._handle_changeset_list(node)
class MercurialHook(BaseHook):
"""Class to represent a hook for Mercurial repositories."""
def __init__(self, log, repo=None):
super(MercurialHook, self).__init__(log,
'Mercurial',
MercurialReviewRequest,
MercurialDiffer)
if self.repo_name is None:
self.repo_name = os.environ['HG_PENDING']
def _list_of_incoming(self, node):
"""Return a list of all changesets after (and including) node.
        Assumes that all incoming changesets have subsequent revision numbers.
Returns:
list of object:
The list of MercurialRevision.
"""
return MercurialRevision.fetch(node + ':')
def _set_repo_id(self):
r = super(MercurialHook, self)._set_repo_id()
for path in [r.path, r.mirror_path]:
if path.startswith('http'):
self.web = path.rstrip('/') + '/rev/{0}'
break
def _log_push_info(self, node):
super(MercurialHook, self)._log_push_info(node)
self.log('hg push -r %s', node)
class GitHook(BaseHook):
"""Class to represent a hook for Git repositories."""
def __init__(self, log, base, refs, repo=None):
super(GitHook, self).__init__(log,
'Git',
GitReviewRequest,
GitDiffer)
self.refs = refs
self.base = base
if self.repo_name is None:
if os.environ.get('GIT_DIR') == '.':
self.repo_name = os.getcwd()
if self.repo_name.endswith('/.git'):
self.repo_name = self.repo_name[:-5]
else:
self.repo_name = os.environ.get('GIT_DIR')
def _check_signatures(self, changesets):
hookSignTrust = os.environ.get('HOOK_SIGNATURE_TRUST')
if not hookSignTrust:
return True
hookSignTrust = hookSignTrust.strip().split(',')
self.log('Check signature trust: %s', hookSignTrust)
for changeset in changesets:
if (changeset.signTrust() not in hookSignTrust
or changeset.signVerify() != 'G'):
self.log('Signature of changeset (%s) invalid. '
'Trust: %s | Verify: %s | Sign-ID: %s',
changeset.node(),
changeset.signTrust(),
changeset.signVerify(),
changeset.signId())
return False
return True
def _handle_changeset_list_process(self, node, changesets):
if not self._check_signatures(changesets):
return 1
if len(changesets) > 1:
for rev in changesets:
if len(rev.parent()) > 1:
self.log('Merge cannot be pushed with other commits: %s',
rev.node())
return 1
return super(GitHook, self)._handle_changeset_list_process(node,
changesets)
def _list_of_incoming(self, node):
"""Return a list of all changesets after (and including) node.
        Assumes that all incoming changesets have subsequent revision numbers.
Returns:
list of object:
The list of GitRevision.
"""
return GitRevision.fetch(node, self.base, self.refs)
def _log_push_info(self, node):
super(GitHook, self)._log_push_info(node)
self.log('git push origin %s:master', node)
def process_mercurial_hook(stdin, log):
CHG = 'chg'
if is_exe_in_path(CHG):
global HG
os.environ['CHGHG'] = HG
HG = CHG
h = MercurialHook(log)
node = os.environ.get('HG_NODE')
return h.push_to_reviewboard(node)
def process_git_hook(stdin, log):
if stdin is None:
lines = sys.stdin.readlines()
elif isinstance(stdin, list):
lines = stdin
else:
lines = stdin.splitlines()
if len(lines) > 1:
log('Push of multiple branches not supported')
return 1
(base, node, ref) = lines[0].split()
h = GitHook(log, base, ref)
return h.push_to_reviewboard(node)
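# A git pre-receive hook gets one "<old-sha> <new-sha> <refname>" line per
# updated ref on stdin; the hashes below are illustrative only:
#
#   line = ('0000000000000000000000000000000000000000 '
#           '4b825dc642cb6eb9a060e54bf8d69288fbee4904 '
#           'refs/heads/master')
#   process_git_hook(line, print)   # also accepts a list of lines or
#                                   # None, in which case stdin is read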
def get_logging_level(logging):
DEBUG = 'HG_USERVAR_DEBUG'
if DEBUG in os.environ and os.environ[DEBUG].lower() in ('true', 'on'):
return logging.DEBUG
return logging.INFO
def hook(stdin=None):
import logging
logging.basicConfig(format='%(levelname)s: %(message)s',
level=get_logging_level(logging))
logger = logging.getLogger('reviewboardhook')
try:
        log = logger.info
if 'HG_NODE' in os.environ:
logger.debug('Mercurial detected...')
return process_mercurial_hook(stdin, log)
else:
logger.debug('Git detected...')
return process_git_hook(stdin, log)
except Exception as e:
if logger.getEffectiveLevel() == logging.DEBUG:
logger.exception('Backtrace of error: %s' % e)
else:
for line in six.text_type(e).splitlines():
logger.error(line)
return -1
if __name__ == '__main__':
import sys
sys.exit(hook())
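# A minimal sketch of wiring this script into a repository's .hg/hgrc as
# an external hook (Mercurial exports HG_NODE to such hooks); the install
# path is hypothetical:
#
#   [hooks]
#   pretxnchangegroup.reviewboard = /path/to/this_script.py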
| 34.02774
| 88
| 0.540433
|
4a1803e19eed145cdb9bfb21f057c72950ac66e0
| 109
|
py
|
Python
|
Aula 10/if_simples.py
|
mateuschaves/curso-python
|
53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d
|
[
"MIT"
] | 1
|
2018-07-23T04:03:35.000Z
|
2018-07-23T04:03:35.000Z
|
Aula 10/if_simples.py
|
mateuschaves/curso-python
|
53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d
|
[
"MIT"
] | null | null | null |
Aula 10/if_simples.py
|
mateuschaves/curso-python
|
53b2f3b4bf083ae2ce7ea19dd358f49a36becd9d
|
[
"MIT"
] | null | null | null |
tempo = int(input('How old is your car? '))
print('new car!' if tempo <= 3 else 'old car!')
| 36.333333
| 56
| 0.651376
|
4a1803fd770e2eb63e734e0d276f617f80b82c24
| 4,944
|
py
|
Python
|
influxdb_client/domain/binary_expression.py
|
wasted925/influxdb-client-python
|
afee531fd1dc244b3d9d270e262b0a1865a7c89d
|
[
"MIT"
] | 380
|
2019-09-19T20:20:10.000Z
|
2022-03-31T12:59:33.000Z
|
influxdb_client/domain/binary_expression.py
|
mikeldiezs/influxdb-client-python
|
0c1d1d9ff92dd2b3b4a9b6aa1e8f5b1c02fd48ab
|
[
"MIT"
] | 362
|
2019-09-16T11:53:29.000Z
|
2022-03-29T03:11:59.000Z
|
influxdb_client/domain/binary_expression.py
|
mikeldiezs/influxdb-client-python
|
0c1d1d9ff92dd2b3b4a9b6aa1e8f5b1c02fd48ab
|
[
"MIT"
] | 130
|
2019-09-20T08:02:35.000Z
|
2022-03-30T16:44:45.000Z
|
# coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.expression import Expression
class BinaryExpression(Expression):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'operator': 'str',
'left': 'Expression',
'right': 'Expression'
}
attribute_map = {
'type': 'type',
'operator': 'operator',
'left': 'left',
'right': 'right'
}
def __init__(self, type=None, operator=None, left=None, right=None): # noqa: E501,D401,D403
"""BinaryExpression - a model defined in OpenAPI.""" # noqa: E501
Expression.__init__(self) # noqa: E501
self._type = None
self._operator = None
self._left = None
self._right = None
self.discriminator = None
if type is not None:
self.type = type
if operator is not None:
self.operator = operator
if left is not None:
self.left = left
if right is not None:
self.right = right
@property
def type(self):
"""Get the type of this BinaryExpression.
Type of AST node
:return: The type of this BinaryExpression.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this BinaryExpression.
Type of AST node
:param type: The type of this BinaryExpression.
:type: str
""" # noqa: E501
self._type = type
@property
def operator(self):
"""Get the operator of this BinaryExpression.
:return: The operator of this BinaryExpression.
:rtype: str
""" # noqa: E501
return self._operator
@operator.setter
def operator(self, operator):
"""Set the operator of this BinaryExpression.
:param operator: The operator of this BinaryExpression.
:type: str
""" # noqa: E501
self._operator = operator
@property
def left(self):
"""Get the left of this BinaryExpression.
:return: The left of this BinaryExpression.
:rtype: Expression
""" # noqa: E501
return self._left
@left.setter
def left(self, left):
"""Set the left of this BinaryExpression.
:param left: The left of this BinaryExpression.
:type: Expression
""" # noqa: E501
self._left = left
@property
def right(self):
"""Get the right of this BinaryExpression.
:return: The right of this BinaryExpression.
:rtype: Expression
""" # noqa: E501
return self._right
@right.setter
def right(self, right):
"""Set the right of this BinaryExpression.
:param right: The right of this BinaryExpression.
:type: Expression
""" # noqa: E501
self._right = right
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, BinaryExpression):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
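# Minimal usage sketch, assuming Expression is the generated base model
# (the operator and operand values below are illustrative only):
#
#   expr = BinaryExpression(type='BinaryExpression', operator='+',
#                           left=Expression(), right=Expression())
#   expr.to_dict()   # nested dict; children are serialized via to_dict()
#   print(expr)      # __repr__ pretty-prints that dict with pprint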
| 26.580645
| 120
| 0.562095
|
4a180454fa99276cd6419dd5bacd2a83a3af7567
| 20,997
|
py
|
Python
|
engine/SCons/Defaults.py
|
cctbx/scons
|
9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00
|
[
"MIT"
] | 1
|
2020-05-28T17:50:54.000Z
|
2020-05-28T17:50:54.000Z
|
engine/SCons/Defaults.py
|
cctbx/scons
|
9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00
|
[
"MIT"
] | 4
|
2018-07-24T05:46:04.000Z
|
2018-08-07T06:10:45.000Z
|
engine/SCons/Defaults.py
|
cctbx/scons
|
9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00
|
[
"MIT"
] | 1
|
2018-07-23T10:34:27.000Z
|
2018-07-23T10:34:27.000Z
|
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py rel_3.0.0:4395:8972f6a2f699 2017/09/18 12:59:24 bdbaddog"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
"""
Returns the already-created default construction environment.
"""
global _default_env
return _default_env
def DefaultEnvironment(*args, **kw):
"""
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
    having the name originally be something that *only* creates the
construction environment and then overwrites the name.)
"""
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = None
return (target, source)
def SharedObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = 1
return (target, source)
def SharedFlagChecker(source, target, env):
same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
if same == '0' or same == '' or same == 'False':
for src in source:
try:
shared = src.attributes.shared
except AttributeError:
shared = None
if not shared:
raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable name before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
DAction = SCons.Action.Action("$DCOM", "$DCOMSTR")
ShDAction = SCons.Action.Action("$SHDCOM", "$SHDCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
# If dest is a list, we need to manually call str() on each element
if SCons.Util.is_List(dest):
elem_strs = []
for element in dest:
elem_strs.append('"' + str(element) + '"')
return '[' + ', '.join(elem_strs) + ']'
else:
return '"' + str(dest) + '"'
permission_dic = {
'u':{
'r':stat.S_IRUSR,
'w':stat.S_IWUSR,
'x':stat.S_IXUSR
},
'g':{
'r':stat.S_IRGRP,
'w':stat.S_IWGRP,
'x':stat.S_IXGRP
},
'o':{
'r':stat.S_IROTH,
'w':stat.S_IWOTH,
'x':stat.S_IXOTH
}
}
def chmod_func(dest, mode):
import SCons.Util
from string import digits
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
    if SCons.Util.is_String(mode) and all(i in digits for i in mode):
mode = int(mode, 8)
if not SCons.Util.is_String(mode):
for element in dest:
os.chmod(str(element), mode)
else:
mode = str(mode)
for operation in mode.split(","):
if "=" in operation:
operator = "="
elif "+" in operation:
operator = "+"
elif "-" in operation:
operator = "-"
else:
raise SyntaxError("Could not find +, - or =")
operation_list = operation.split(operator)
            if len(operation_list) != 2:
raise SyntaxError("More than one operator found")
user = operation_list[0].strip().replace("a", "ugo")
permission = operation_list[1].strip()
new_perm = 0
for u in user:
for p in permission:
try:
new_perm = new_perm | permission_dic[u][p]
except KeyError:
raise SyntaxError("Unrecognized user or permission format")
for element in dest:
curr_perm = os.stat(str(element)).st_mode
if operator == "=":
os.chmod(str(element), new_perm)
elif operator == "+":
os.chmod(str(element), curr_perm | new_perm)
elif operator == "-":
os.chmod(str(element), curr_perm & ~new_perm)
def chmod_strfunc(dest, mode):
import SCons.Util
if not SCons.Util.is_String(mode):
return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)
else:
return 'Chmod(%s, "%s")' % (get_paths_str(dest), str(mode))
Chmod = ActionFactory(chmod_func, chmod_strfunc)
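# Behavioural sketch of the mode handling above (paths are hypothetical):
#
#   chmod_func('build/script.sh', 'u+x,go-w')
#       # symbolic: adds S_IXUSR, then clears S_IWGRP | S_IWOTH
#   chmod_func('build/script.sh', '755')
#       # all-digit strings are parsed as octal: int('755', 8) == 0o755
#   chmod_strfunc('build/script.sh', 'u+x')
#       # -> 'Chmod("build/script.sh", "u+x")' (the printed build action)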
def copy_func(dest, src, symlinks=True):
"""
    If symlinks is true, a symbolic link is shallow-copied and recreated
    as a symbolic link; otherwise, copying a symbolic link is equivalent
    to copying the symbolic link's final target, regardless of symbolic
    link depth.
"""
dest = str(dest)
src = str(src)
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
if symlinks:
return os.symlink(os.readlink(src), dest)
else:
return copy_func(dest, os.path.realpath(src))
elif os.path.isfile(src):
shutil.copy2(src, dest)
return 0
else:
shutil.copytree(src, dest, symlinks)
        # copytree returns None in python2 and the destination string in
        # python3; an error is raised on failure in both cases, so we can
        # just return 0 for success
return 0
Copy = ActionFactory(
copy_func,
lambda dest, src, symlinks=True: 'Copy("%s", "%s")' % (dest, src)
)
def delete_func(dest, must_exist=0):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
entry = str(entry)
        # os.path.exists returns False for broken links, even though the
        # link itself exists
entry_exists = os.path.exists(entry) or os.path.islink(entry)
if not entry_exists and not must_exist:
continue
# os.path.isdir returns True when entry is a link to a dir
if os.path.isdir(entry) and not os.path.islink(entry):
shutil.rmtree(entry, 1)
continue
os.unlink(entry)
def delete_strfunc(dest, must_exist=0):
return 'Delete(%s)' % get_paths_str(dest)
Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
try:
os.makedirs(str(entry))
except os.error as e:
p = str(entry)
if (e.args[0] == errno.EEXIST or
(sys.platform=='win32' and e.args[0]==183)) \
and os.path.isdir(str(entry)):
pass # not an error if already exists
else:
raise
Mkdir = ActionFactory(mkdir_func,
lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
SCons.Node.FS.invalidate_node_memos(src)
shutil.move(src, dest)
Move = ActionFactory(move_func,
lambda dest, src: 'Move("%s", "%s")' % (dest, src),
convert=str)
def touch_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for file in dest:
file = str(file)
mtime = int(time.time())
if os.path.exists(file):
atime = os.path.getatime(file)
else:
            open(file, 'w').close()
atime = mtime
os.utime(file, (atime, mtime))
Touch = ActionFactory(touch_func,
lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
"""
Creates a new list from 'list' by first interpolating each element
in the list using the 'env' dictionary and then calling f on the
list, and finally calling _concat_ixes to concatenate 'prefix' and
'suffix' onto each element of the list.
"""
if not list:
return list
l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
if l is not None:
list = l
return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
"""
Creates a new list from 'list' by concatenating the 'prefix' and
'suffix' arguments onto each element of the list. A trailing space
on 'prefix' or leading space on 'suffix' will cause them to be put
into separate list elements rather than being concatenated.
"""
result = []
# ensure that prefix and suffix are strings
prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
for x in list:
if isinstance(x, SCons.Node.FS.File):
result.append(x)
continue
x = str(x)
if x:
if prefix:
if prefix[-1] == ' ':
result.append(prefix[:-1])
elif x[:len(prefix)] != prefix:
x = prefix + x
result.append(x)
if suffix:
if suffix[0] == ' ':
result.append(suffix[1:])
elif x[-len(suffix):] != suffix:
result[-1] = result[-1]+suffix
return result
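# Behavioural sketch of _concat_ixes, using a stub env whose subst()
# returns its argument unchanged (everything here is illustrative):
#
#   class _Env(object):
#       def subst(self, s, mode=None):
#           return s
#   _concat_ixes('-L', ['/usr/lib'], '', _Env())
#       # -> ['-L/usr/lib']
#   _concat_ixes('-include ', ['pre.h'], '', _Env())
#       # the trailing space keeps the prefix a separate element:
#       # -> ['-include', 'pre.h']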
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
"""
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
"""
if not itms:
return itms
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c
else:
c = _concat_ixes
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break
stripped.append(l)
return c(prefix, stripped, suffix, env)
def processDefines(defs):
"""process defines, resolving strings, lists, dictionaries, into a list of
strings
"""
if SCons.Util.is_List(defs):
l = []
for d in defs:
if d is None:
continue
elif SCons.Util.is_List(d) or isinstance(d, tuple):
if len(d) >= 2:
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d[0]))
elif SCons.Util.is_Dict(d):
for macro,value in d.items():
if value is not None:
l.append(str(macro) + '=' + str(value))
else:
l.append(str(macro))
elif SCons.Util.is_String(d):
l.append(str(d))
else:
raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
elif SCons.Util.is_Dict(defs):
# The items in a dictionary are stored in random order, but
# if the order of the command-line options changes from
# invocation to invocation, then the signature of the command
# line will change and we'll get random unnecessary rebuilds.
# Consequently, we have to sort the keys to ensure a
# consistent order...
l = []
for k,v in sorted(defs.items()):
if v is None:
l.append(str(k))
else:
l.append(str(k) + '=' + str(v))
else:
l = [str(defs)]
return l
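# Expected mapping, with illustrative inputs:
#
#   processDefines(['NDEBUG', ('VERSION', 2), None])
#       # -> ['NDEBUG', 'VERSION=2']     (None entries are skipped)
#   processDefines({'B': None, 'A': 1})
#       # -> ['A=1', 'B']                (dict keys are sorted)
#   processDefines('NDEBUG')
#       # -> ['NDEBUG']                  (plain values are wrapped)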
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
"""A wrapper around _concat_ixes that turns a list or string
into a list of C preprocessor command-line definitions.
"""
return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
"""This is a callable class that can be used in place of other
command generators if you don't want them to do anything.
The __call__ method for this class simply returns the thing
you instantiated it with.
Example usage:
env["DO_NOTHING"] = NullCmdGenerator
env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature=None):
return self.cmd
class Variable_Method_Caller(object):
"""A class for finding a construction variable on the stack and
calling one of its methods.
We use this to support "construction variables" in our string
eval()s that actually stand in for methods--specifically, use
of "RDirs" in call to _concat that should actually execute the
"TARGET.RDirs" method. (We used to support this by creating a little
"build dictionary" that mapped RDirs to the method, but this got in
the way of Memoizing construction environments, because we had to
create new environment objects to hold the variables.)
"""
def __init__(self, variable, method):
self.variable = variable
self.method = method
def __call__(self, *args, **kw):
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
variable = self.variable
while frame:
if variable in frame.f_locals:
v = frame.f_locals[variable]
if v:
method = getattr(v, self.method)
return method(*args, **kw)
frame = frame.f_back
return None
# if $version_var is not empty, returns env[flags_var], otherwise returns None
def __libversionflags(env, version_var, flags_var):
try:
if env.subst('$'+version_var):
return env[flags_var]
except KeyError:
pass
return None
ConstructionEnvironment = {
'BUILDERS' : {},
'SCANNERS' : [ SCons.Tool.SourceFileScanner ],
'CONFIGUREDIR' : '#/.sconf_temp',
'CONFIGURELOG' : '#/config.log',
'CPPSUFFIXES' : SCons.Tool.CSuffixes,
'DSUFFIXES' : SCons.Tool.DSuffixes,
'ENV' : {},
'IDLSUFFIXES' : SCons.Tool.IDLSuffixes,
# 'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
'_concat' : _concat,
'_defines' : _defines,
'_stripixes' : _stripixes,
'_LIBFLAGS' : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
'_LIBDIRFLAGS' : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPINCFLAGS' : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPDEFFLAGS' : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
'__libversionflags' : __libversionflags,
'__SHLIBVERSIONFLAGS' : '${__libversionflags(__env__,"SHLIBVERSION","_SHLIBVERSIONFLAGS")}',
'__LDMODULEVERSIONFLAGS' : '${__libversionflags(__env__,"LDMODULEVERSION","_LDMODULEVERSIONFLAGS")}',
'__DSHLIBVERSIONFLAGS' : '${__libversionflags(__env__,"DSHLIBVERSION","_DSHLIBVERSIONFLAGS")}',
'TEMPFILE' : NullCmdGenerator,
'Dir' : Variable_Method_Caller('TARGET', 'Dir'),
'Dirs' : Variable_Method_Caller('TARGET', 'Dirs'),
'File' : Variable_Method_Caller('TARGET', 'File'),
'RDirs' : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 35.348485
| 137
| 0.628518
|
4a1804ac8b28db6fc8d2ede7aa0143ecbff70882
| 1,458
|
py
|
Python
|
common/setup.py
|
ZithaChitra/determined
|
1466d46dfd6abc56ad65d9904d4173ea62cff771
|
[
"Apache-2.0"
] | 1
|
2021-03-29T13:39:45.000Z
|
2021-03-29T13:39:45.000Z
|
common/setup.py
|
ZithaChitra/determined
|
1466d46dfd6abc56ad65d9904d4173ea62cff771
|
[
"Apache-2.0"
] | null | null | null |
common/setup.py
|
ZithaChitra/determined
|
1466d46dfd6abc56ad65d9904d4173ea62cff771
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name="determined-common",
version="0.14.4.dev0",
author="Determined AI",
author_email="hello@determined.ai",
url="https://determined.ai/",
description="Determined Deep Learning Training Platform",
long_description="See https://docs.determined.ai/ for more information.",
license="Apache License 2.0",
classifiers=["License :: OSI Approved :: Apache Software License"],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
python_requires=">=3.5",
package_data={"determined_common": ["py.typed"]},
install_requires=[
"google-cloud-storage>=1.20.0",
# google-cloud-core 1.4.2 breaks our windows cli tests for python 3.5.
"google-cloud-core<1.4.2",
"hdfs>=2.2.2",
"lomond>=0.3.3",
"pathspec>=0.6.0",
"ruamel.yaml>=0.15.78",
"simplejson",
"termcolor>=1.1.0",
# boto3 1.14.11+ has consistent urllib3 requirements which we have to manually resolve.
"boto3>=1.14.11",
# requests<2.22.0 requires urllib3<1.25, which is incompatible with boto3>=1.14.11
"requests>=2.22.0",
# botocore>1.19.0 has stricter urllib3 requirements than boto3, and pip will not reliably
# resolve it until the --use-feature=2020-resolver behavior in pip 20.3, so we list it here.
"urllib3>=1.25.4,<1.26",
],
zip_safe=False,
)
| 40.5
| 100
| 0.631687
|
4a18052dcb5eca40ebd08578a37bd0f7750cb9a2
| 2,090
|
py
|
Python
|
tests/testflows/ldap/regression.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/ldap/regression.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/ldap/regression.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
from testflows.core import *
append_path(sys.path, "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.requirements import *
# Cross-outs of known fails
xfails = {
"connection protocols/tls/tls_require_cert='try'":
[(Fail, "can't be tested with self-signed certificates")],
"connection protocols/tls/tls_require_cert='demand'":
[(Fail, "can't be tested with self-signed certificates")],
"connection protocols/starttls/tls_require_cert='try'":
[(Fail, "can't be tested with self-signed certificates")],
"connection protocols/starttls/tls_require_cert='demand'":
[(Fail, "can't be tested with self-signed certificates")],
"connection protocols/tls require cert default demand":
[(Fail, "can't be tested with self-signed certificates")],
"connection protocols/starttls with custom port":
[(Fail, "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server")],
"connection protocols/tls cipher suite":
[(Fail, "can't get it to work")],
"connection protocols/tls minimum protocol version/:":
[(Fail, "can't get it to work")]
}
@TestFeature
@Name("ldap authentication")
@ArgumentParser(argparser)
@Requirements(
RQ_SRS_007_LDAP_Authentication("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path):
"""ClickHouse integration with LDAP regression module.
"""
nodes = {
"clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
}
with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster:
self.context.cluster = cluster
Scenario(run=load("ldap.tests.sanity", "scenario"))
Scenario(run=load("ldap.tests.multiple_servers", "scenario"))
Feature(run=load("ldap.tests.connections", "feature"))
Feature(run=load("ldap.tests.server_config", "feature"))
Feature(run=load("ldap.tests.user_config", "feature"))
Feature(run=load("ldap.tests.authentications", "feature"))
if main():
regression()
| 36.666667
| 108
| 0.697608
|
4a1806928f6a65b3b307f4ced75da875a17f0e56
| 18,051
|
py
|
Python
|
src/pyhees/section3_2_8.py
|
BRI-EES-House/pyhees
|
7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b
|
[
"MIT"
] | null | null | null |
src/pyhees/section3_2_8.py
|
BRI-EES-House/pyhees
|
7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b
|
[
"MIT"
] | 3
|
2022-01-04T07:29:52.000Z
|
2022-03-19T08:02:51.000Z
|
src/pyhees/section3_2_8.py
|
BRI-EES-House/pyhees
|
7ebe8c24226f0cb7654eea6ac37c5cea35f50e6b
|
[
"MIT"
] | 2
|
2022-01-19T07:57:10.000Z
|
2022-03-07T00:25:54.000Z
|
from pyhees.section3_2_b import get_H
from pyhees.section3_2_c import get_nu_H, get_nu_C
from pyhees.section3_4_b_2 import get_glass_spec_category
from pyhees.section3_4 import common, window, door, heatbridge, earthfloor, gamma
from pyhees.section3_3_5 import *
from pyhees.section3_3_6 import *
# ============================================================================
# 8. Method for evaluating the envelope performance using the areas etc. of the envelope parts of the dwelling unit in question
# ============================================================================
# ============================================================================
# 8.1 Average overall heat transfer coefficient of the envelope
# ============================================================================
def calc_U_A(envelope):
"""外皮平均熱貫流率 (4)
Args:
envelope(dict(Envelope)): Envelope要素のノード名をkey、値をvalueとして持つ辞書
Returns:
float, dict: 外皮平均熱貫流率, envelopeに計算結果を付加した辞書
"""
Region = envelope['Region']
sigma_A_i_U_i_H_i = 0
    # General parts or openings
    # Envelope parts other than windows
wall_list = envelope['Wall']
for i in range(len(wall_list)):
wall_i = wall_list[i]
A_i = wall_i['Area']
H_i = calc_H_byKey(wall_i['Adjacent'], Region)
if wall_i['Method'] == 'Direct':
U_i, wall_i = get_Wood_Direct_U_i(wall_i)
elif wall_i['Method'] == 'Accurate':
U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
elif wall_i['Method'] == 'Simple':
U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
elif wall_i['Method'] == 'RC':
U_i, wall_i = calc_RC_U_i(wall_i)
elif wall_i['Method'] == 'Steel' :
U_i, wall_i = calc_Steel_U_i(wall_i)
else:
raise ValueError("invalid value in ['Method']")
sigma_A_i_U_i_H_i += A_i * U_i * H_i
    # Windows
window_list = envelope['Window']
for i in range(len(window_list)):
window_i = window_list[i]
A_i = window_i['WindowPart']['Area']
H_i = calc_H_byKey(window_i['Adjacent'], Region)
U_i, window_i = calc_Opening_U_i(window_i)
sigma_A_i_U_i_H_i += A_i * U_i * H_i
    # Doors
door_list = envelope['Door']
for i in range(len(door_list)):
door_i = door_list[i]
A_i = door_i['DoorPart']['Area']
H_i = calc_H_byKey(door_i['Adjacent'], Region)
U_i, door_i = calc_Opening_U_i(door_i)
sigma_A_i_U_i_H_i += A_i * U_i * H_i
sigma_L_j_psi_j_H_j = 0
    # Thermal bridges and the perimeter of earth floors etc.
heatbridge_list = envelope['LinearHeatBridge']
for j in range(len(heatbridge_list)):
heatbridge_j = heatbridge_list[j]
        # Temperature difference coefficient
H_j = 0
for i in range(len(heatbridge_j['ComponentNames'])):
            # Get the dictionary holding the parameters of the adjoining part from its name
componentname = heatbridge_j['ComponentNames'][i]
component_i = get_component_byName(wall_list, componentname)
            # Break if there is no second adjoining part
if component_i is None:
break
i_H_j = calc_H_byKey(component_i['Adjacent'], Region)
            # (Chapter 3 Section 2, Appendix B) When a thermal bridge adjoins several kinds of adjacent space, adopt the value of the adjacent-space kind with the larger temperature difference coefficient
if H_j < i_H_j:
H_j = i_H_j
L_j = heatbridge_j['Length']
if heatbridge_j['StructureType'] == 'Wood':
psi_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
elif heatbridge_j['StructureType'] == 'RC':
psi_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
elif heatbridge_j['StructureType'] == 'Steel':
psi_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
else:
raise ValueError("invalid value in ['StructureType']")
sigma_L_j_psi_j_H_j += L_j * psi_j * H_j
    # Perimeter of earth floors etc.
foundation_list = envelope['Foundation']
for j in range(len(foundation_list)):
foundation_j = foundation_list[j]
L_j = foundation_j['OuterLength']
H_j = calc_H_byKey(foundation_j['Adjacent'], Region)
psi_j, foundation = calc_psi_F_j(foundation_j)
sigma_L_j_psi_j_H_j += L_j * psi_j * H_j
A_env = get_A_env(envelope)
U_A = (sigma_A_i_U_i_H_i + sigma_L_j_psi_j_H_j) / A_env
U_A_ceil = math.ceil(U_A * 10 ** 2) / (10 ** 2)
envelope['U_A'] = U_A_ceil
return U_A_ceil, envelope
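# Numeric sketch of equation (4) above; all values are made up. With one
# wall (A=100 m2, U=0.5 W/m2K, H=1.0), one foundation run (L=20 m,
# psi=0.6 W/mK, H=0.7) and A_env = 300 m2:
#
#   U_A = (100*0.5*1.0 + 20*0.6*0.7) / 300       # = 0.19466...
#   math.ceil(U_A * 10 ** 2) / (10 ** 2)         # rounded UP -> 0.20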
# ============================================================================
# 8.2 Average solar heat gain coefficient for the heating season and for the cooling season
# ============================================================================
def calc_eta_A_H(envelope):
"""暖房期の平均日射熱取得率 (5)
Args:
envelope(dict(Envelope)): Envelope要素のノード名をkey、値をvalueとして持つ辞書
Returns:
float, dict: 暖房期の平均日射熱取得率, envelopeに計算結果を付加した辞書
"""
Region = envelope['Region']
if Region in [8, '8']:
return None, envelope
A_i_eta_H_i_nu_H_i = 0.0
L_j_eta_H_i_nu_H_i = 0.0
    # Envelope parts other than windows
wall_list = envelope['Wall']
for i in range(len(wall_list)):
wall_i = wall_list[i]
A_i = wall_i['Area']
if wall_i['Method'] == 'Direct':
U_i, wall_i = get_Wood_Direct_U_i(wall_i)
elif wall_i['Method'] == 'Accurate':
U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
elif wall_i['Method'] == 'Simple':
U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
elif wall_i['Method'] == 'RC':
U_i, wall_i = calc_RC_U_i(wall_i)
elif wall_i['Method'] == 'Steel' :
U_i, wall_i = calc_Steel_U_i(wall_i)
else:
raise ValueError("invalid value in ['Method']")
        # Compute the solar heat gain coefficient
if 'SolarGain' in wall_i and wall_i['SolarGain'] != 'No':
gamma_H_i = wall_i['GammaH']
eta_H_i = common.get_eta_H_i(gamma_H_i, U_i)
else:
eta_H_i = 0.0
        # Orientation factor (Appendix C)
        # The orientation factor is 0 when the adjacent space is a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with a similar thermal environment, or an underfloor space not open to outside air.
        # => The orientation factor is looked up from the orientation and the region only when the adjacent space is outside air
if wall_i['Adjacent'] == 'Outside':
nu_H_i = calc_nu_byKey(Region, wall_i['Direction'], 'H')
else:
nu_H_i = 0.0
A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # Windows
window_list = envelope['Window']
for i in range(len(window_list)):
window_i = window_list[i]
A_i = window_i['WindowPart']['Area']
        # Solar heat gain coefficient
if 'SolarGain' in window_i and window_i['SolarGain'] == 'No':
eta_H_i = 0.0
else:
eta_H_i = window.calc_eta_H_i_byDict(Region, window_i['Direction'], window_i['WindowPart'])
        # Orientation factor (Appendix C)
        # The orientation factor is 0 when the adjacent space is a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with a similar thermal environment, or an underfloor space not open to outside air.
        # => The orientation factor is looked up from the orientation and the region only when the adjacent space is outside air
if window_i['Adjacent'] == 'Outside':
nu_H_i = calc_nu_byKey(Region, window_i['Direction'], 'H')
else:
nu_H_i = 0.0
A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # Doors
door_list = envelope['Door']
for i in range(len(door_list)):
door_i = door_list[i]
A_i = door_i['DoorPart']['Area']
        # Solar heat gain coefficient
if 'SolarGain' in door_i and door_i['SolarGain'] == 'No':
eta_H_i = 0.0
else:
eta_H_i = door.calc_eta_H_i_byDict(Region, door_i['DoorPart'])
        # Orientation factor (Appendix C)
        # The orientation factor is 0 when the adjacent space is a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with a similar thermal environment, or an underfloor space not open to outside air.
        # => The orientation factor is looked up from the orientation and the region only when the adjacent space is outside air
if door_i['Adjacent'] == 'Outside':
nu_H_i = calc_nu_byKey(Region, door_i['Direction'], 'H')
else:
nu_H_i = 0.0
A_i_eta_H_i_nu_H_i += A_i * eta_H_i * nu_H_i
    # Thermal bridges
heatbridge_list = envelope['LinearHeatBridge']
for j in range(len(heatbridge_list)):
heatbridge_j = heatbridge_list[j]
eta_H_i_sum = 0.0
nu_H_i_sum = 0.0
        # Wooden construction
if heatbridge_j['StructureType'] == 'Wood':
psi_i_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
        # Reinforced concrete construction etc.
elif heatbridge_j['StructureType'] == 'RC':
psi_i_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
        # Steel-frame construction
elif heatbridge_j['StructureType'] == 'Steel':
psi_i_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
else:
raise ValueError("invalid value in ['StructureType']")
L_i_j = heatbridge_j['Length']
gamma_H_i_sum = 0
nu_H_i_sum = 0
for i in range(len(heatbridge_j['ComponentNames'])):
component_i_name = heatbridge_j['ComponentNames'][i]
component_i = get_component_byName(wall_list, component_i_name)
            # The shading effect factor of a thermal bridge is the value of the general part that bridge j adjoins
            # When it adjoins several general parts, take the average value
gamma_H_i_sum += component_i['GammaH']
            # Orientation factor (Appendix C)
            # For thermal bridges etc. adjoining envelope parts (general parts or openings) with different orientations, the orientation factor is the average of the factors for the different orientations
            # The orientation factor is 0 when the adjacent space is a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with a similar thermal environment, or an underfloor space not open to outside air.
            # => The orientation factor is looked up from the orientation and the region only when the adjacent space is outside air
if component_i['Adjacent'] == 'Outside':
nu_H_i_sum += calc_nu_byKey(Region, component_i['Direction'], 'H')
else:
nu_H_i_sum += 0.0
gamma_H_i = gamma_H_i_sum / len(heatbridge_j['ComponentNames'])
        # Compute the solar heat gain coefficient
if 'SolarGain' in heatbridge_j and heatbridge_j['SolarGain'] != 'No':
eta_H_i = heatbridge.get_eta_dash_H_j(gamma_H_i, psi_i_j)
else:
eta_H_i = 0.0
nu_H_i = nu_H_i_sum / len(heatbridge_j['ComponentNames'])
L_j_eta_H_i_nu_H_i += L_i_j * eta_H_i * nu_H_i
    # The heating-season and cooling-season solar heat gain coefficients of the perimeter of earth floors etc. are taken to be 0 (W/mK)/(W/m2K).
L_j_eta_H_i_nu_H_i += earthfloor.get_eta_dash_H_j()
A_env = get_A_env(envelope)
eta_A_H = (A_i_eta_H_i_nu_H_i + L_j_eta_H_i_nu_H_i) / A_env * 100
eta_A_H_floor = math.floor(eta_A_H * 10 ** 1) / (10 ** 1)
envelope['eta_A_H'] = eta_A_H_floor
return eta_A_H_floor, envelope
def calc_eta_A_C(envelope):
"""冷房期の平均日射熱取得率 (5)
Args:
envelope(dict(Envelope)): Envelope要素のノード名をkey、値をvalueとして持つ辞書
Returns:
float, dict: 冷房期の平均日射熱取得率, envelopeに計算結果を付加した辞書
"""
A_env = get_A_env(envelope)
Region = envelope['Region']
A_i_eta_C_i_nu_C_i = 0.0
L_j_eta_C_i_nu_C_i = 0.0
# Envelope parts other than windows
wall_list = envelope['Wall']
for i in range(len(wall_list)):
wall_i = wall_list[i]
A_i = wall_i['Area']
if wall_i['Method'] == 'Direct':
U_i, wall_i = get_Wood_Direct_U_i(wall_i)
elif wall_i['Method'] == 'Accurate':
U_i, wall_i = calc_Wood_Accurate_U_i(wall_i)
elif wall_i['Method'] == 'Simple':
U_i, wall_i = calc_Wood_Simple_U_i(wall_i)
elif wall_i['Method'] == 'RC':
U_i, wall_i = calc_RC_U_i(wall_i)
elif wall_i['Method'] == 'Steel' :
U_i, wall_i = calc_Steel_U_i(wall_i)
else:
raise ValueError("invalid value in ['Method']")
# Shading effect coefficient
# Compute the solar heat gain coefficient
if 'SolarGain' in wall_i and wall_i['SolarGain'] != 'No':
gamma_C_i = wall_i['GammaC']
eta_C_i = common.get_eta_C_i(gamma_C_i, U_i)
else:
eta_C_i = 0.0
# Orientation coefficient (Appendix C)
# The orientation coefficient is 0 when the adjacent space is: a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with an equivalent thermal environment, or an underfloor space not open to outside air.
# => Only when the adjacent space is outside air is the orientation coefficient looked up from the direction and the region.
if wall_i['Adjacent'] == 'Outside':
nu_C_i = calc_nu_byKey(Region, wall_i['Direction'], 'C')
else:
nu_C_i = 0.0
A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
# Windows
window_list = envelope['Window']
for i in range(len(window_list)):
window_i = window_list[i]
A_i = window_i['WindowPart']['Area']
# Solar heat gain coefficient
if 'SolarGain' in window_i and window_i['SolarGain'] == 'No':
eta_C_i = 0.0
else:
eta_C_i = window.calc_eta_C_i_byDict(Region, window_i['Direction'], window_i['WindowPart'])
# Orientation coefficient (Appendix C)
# The orientation coefficient is 0 when the adjacent space is: a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with an equivalent thermal environment, or an underfloor space not open to outside air.
# => Only when the adjacent space is outside air is the orientation coefficient looked up from the direction and the region.
if window_i['Adjacent'] == 'Outside':
nu_C_i = calc_nu_byKey(Region, window_i['Direction'], 'C')
else:
nu_C_i = 0.0
A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
# Doors
door_list = envelope['Door']
for i in range(len(door_list)):
door_i = door_list[i]
A_i = door_i['DoorPart']['Area']
# Solar heat gain coefficient
if 'SolarGain' in door_i and door_i['SolarGain'] == 'No':
eta_C_i = 0.0
else:
eta_C_i = door.calc_eta_C_i_byDict(Region, door_i['DoorPart'])
# Orientation coefficient (Appendix C)
# The orientation coefficient is 0 when the adjacent space is: a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with an equivalent thermal environment, or an underfloor space not open to outside air.
# => Only when the adjacent space is outside air is the orientation coefficient looked up from the direction and the region.
if door_i['Adjacent'] == 'Outside':
nu_C_i = calc_nu_byKey(Region, door_i['Direction'], 'C')
else:
nu_C_i = 0.0
A_i_eta_C_i_nu_C_i += A_i * eta_C_i * nu_C_i
# Thermal bridges
heatbridge_list = envelope['LinearHeatBridge']
for j in range(len(heatbridge_list)):
heatbridge_j = heatbridge_list[j]
# Wood construction
if heatbridge_j['StructureType'] == 'Wood':
psi_i_j, heatbridge_j = get_Wood_psi_j(heatbridge_j)
# Reinforced concrete construction, etc.
elif heatbridge_j['StructureType'] == 'RC':
psi_i_j, heatbridge_j = get_RC_psi_j(heatbridge_j)
# Steel construction
elif heatbridge_j['StructureType'] == 'Steel':
psi_i_j, heatbridge_j = calc_Steel_psi_j(heatbridge_j)
else:
raise ValueError("invalid value in ['StructureType']")
L_i_j = heatbridge_j['Length']
gamma_C_i_sum = 0
nu_C_i_sum = 0
for i in range(len(heatbridge_j['ComponentNames'])):
component_i_name = heatbridge_j['ComponentNames'][i]
component_i = get_component_byName(wall_list, component_i_name)
# The shading effect coefficient of a thermal bridge takes the value of the general part that thermal bridge j touches.
# When it touches multiple general parts, the average value is used.
gamma_C_i_sum += component_i['GammaC']
# Orientation coefficient (Appendix C)
# For a thermal bridge touching envelope parts (general parts or openings) with different orientations, the orientation coefficient is the average of the coefficients of those orientations.
# The orientation coefficient is 0 when the adjacent space is: a space open to outside air, a space not open to outside air, an underfloor space open to outside air, a dwelling unit or a space with an equivalent thermal environment, or an underfloor space not open to outside air.
# => Only when the adjacent space is outside air is the orientation coefficient looked up from the direction and the region.
if component_i['Adjacent'] == 'Outside':
nu_C_i_sum += calc_nu_byKey(Region, component_i['Direction'], 'C')
else:
nu_C_i_sum += 0.0
gamma_C_i = gamma_C_i_sum / len(heatbridge_j['ComponentNames'])
# Compute the solar heat gain coefficient
if 'SolarGain' in heatbridge_j and heatbridge_j['SolarGain'] != 'No':
eta_C_i = heatbridge.get_eta_dash_C_j(gamma_C_i, psi_i_j)
else:
eta_C_i = 0.0
nu_C_i = nu_C_i_sum / len(heatbridge_j['ComponentNames'])
L_j_eta_C_i_nu_C_i += L_i_j * eta_C_i * nu_C_i
# The heating-season and cooling-season solar heat gain coefficients of the perimeter of earth floors etc. are taken as 0 (W/mK)/(W/m2K).
L_j_eta_C_i_nu_C_i += earthfloor.get_eta_dash_C_j()
A_env = get_A_env(envelope)
eta_A_C = (A_i_eta_C_i_nu_C_i + L_j_eta_C_i_nu_C_i) / A_env * 100
eta_A_C_ceil = math.ceil(eta_A_C * 10 ** 1) / (10 ** 1)
envelope['eta_A_C'] = eta_A_C_ceil
return eta_A_C_ceil, envelope
# ============================================================================
# 8.3 Ratio of the total area of envelope parts to the total floor area
# ============================================================================
def get_r_env(A_env, A_A):
"""床面積の合計に対する外皮の部位の面積の合計の比 (7)
Args:
A_env(float): 外皮の部位の面積の合計 (m2)
A_A(float): 床面積の合計 (m2)
Returns:
float: 床面積の合計に対する外皮の部位の面積の合計の比
"""
return A_env / A_A
def get_A_env(envelope):
"""外皮の部位の面積の合計 式(8)
Args:
envelope(dict(Envelope)): Envelope要素のノード名をkey、値をvalueとして持つ辞書
Returns:
float: 外皮の部位の面積の合計
"""
A_env = 0.0
# Envelope parts other than windows
wall_list = envelope['Wall']
for i in range(len(wall_list)):
A_env += wall_list[i]['Area']
# Windows
window_list = envelope['Window']
for i in range(len(window_list)):
A_env += window_list[i]['WindowPart']['Area']
# Doors
door_list = envelope['Door']
for i in range(len(door_list)):
A_env += door_list[i]['DoorPart']['Area']
# Earth floor area
foundation_list = envelope['Foundation']
for j in range(len(foundation_list)):
A_env += foundation_list[j]['Area']
return A_env
def calc_H_byKey(adjacent_type, region):
"""パラメータの値から温度差係数の表を参照する
Args:
adjacent_type(String): 隣接空間の種類
region(int): 地域区分
Returns:
float: 温度差係数
"""
# Map the node values to the adjacent-space type names used inside get_H
adjacent_dict = {
'Outside': '外気',
'Open': '外気に通じる空間',
'Connected': '外気・外気に通じる空間',
'Close': '外気に通じていない空間・外気に通じる床裏',
'Separator': '住戸及び住戸と同様の熱的環境の空間・外気に通じていない床裏'
}
return get_H(adjacent_dict[adjacent_type], region)
def calc_nu_byKey(region, Direction, season):
"""パラメータの値から暖房期・冷房期の方位係数の表を参照する
Args:
region(int): 地域区分
Direction(String): 方位
season(String): H'(暖房期)または'C'(冷房期)
Returns:
float: 方位係数
"""
# Map the node values to the direction names used inside get_nu_H/get_nu_C
Direction_dict = {'Top':'上面', 'N':'北', 'NE':'北東', 'E':'東', 'SE':'南東',
'S':'南', 'SW':'南西', 'W':'西', 'NW':'北西', 'Bottom':'下面'}
# Heating season
if season == 'H':
return get_nu_H(region, Direction_dict[Direction])
# Cooling season
else:
return get_nu_C(region, Direction_dict[Direction])
def get_component_byName(wall_list, componentname):
"""名前から部位のパラメータを持つ辞書を得る
Args:
wall_list(List<dict>(Wall_direct Wall_accurate Wall_simple Wall_rc Wall_steel)): 窓を除く外皮等のリスト
componentname: 部位の名前
componentname: str
Returns:
dict(Wall_direct Wall_accurate Wall_simple Wall_rc Wall_steel): 部位のパラメータを持つ辞書
"""
for wall_i in wall_list:
if wall_i['Name'] == componentname:
return wall_i
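# Note: if no part matches componentname, the loop falls through and the function
# implicitly returns None; callers are assumed to pass names that exist in wall_list.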
| 30.135225
| 103
| 0.584012
|
4a180b90c0f59ac8b4f11d4b964a589629b0c702
| 5,344
|
py
|
Python
|
pkgbuilder/main.py
|
wizzard/pkgbuilder
|
bf43db3b2a5f2f1fbd0a4eb2cfda2f1036a4bdfb
|
[
"Apache-2.0"
] | null | null | null |
pkgbuilder/main.py
|
wizzard/pkgbuilder
|
bf43db3b2a5f2f1fbd0a4eb2cfda2f1036a4bdfb
|
[
"Apache-2.0"
] | null | null | null |
pkgbuilder/main.py
|
wizzard/pkgbuilder
|
bf43db3b2a5f2f1fbd0a4eb2cfda2f1036a4bdfb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys
import os
import logging
import argparse
from pkgbuilder.pkgtree import PkgTree
from pkgbuilder.conf import conf
from pkgbuilder.pkgdb import PkgDB
from pkgbuilder.local_dir_tree import local_dir_tree
from pkgbuilder.pkgdb import db
class App(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.pkg_tree = PkgTree()
def prep_env(self):
os.environ["PATH"] = conf["root_dir"] + "/bin:" + os.environ["PATH"]
os.environ["LD_LIBRARY_PATH"] = conf["root_dir"] + "/lib:" + conf["root_dir"] + "/lib64:" + os.environ["LD_LIBRARY_PATH"]
os.environ["PKG_CONFIG_PATH"] = conf["root_dir"] + "/lib/pkgconfig:" + conf["root_dir"] + "/lib64/pkgconfig:" + os.environ["PKG_CONFIG_PATH"]
def install(self, params=None):
"""
Install specified pkg(-s) and all dependencies
"""
if not params:
self.logger.error("No package to install specified")
return False
for p in params:
if not self.pkg_tree.get(p):
self.logger.error("Package '%s' specification not found", p)
return False
pkg = self.pkg_tree.get(p)
l_order = []
for dep in self.pkg_tree.get_dependencies(pkg, l_order):
self.logger.info("Installing %s", dep)
dep.install()
self.logger.info("Installing %s", pkg)
pkg.install()
return True
def update(self, params=None):
"""
Update specified package(-s) and all dependencies
"""
self.logger.info("Updating")
pkg_list = []
for p in params:
pkg_list.append(self.pkg_tree.get(p))
if not pkg_list:
pkg_list = self.pkg_tree.get_pkg_list()
for pkg in pkg_list:
l_order = []
for dep in self.pkg_tree.get_dependencies(pkg, l_order):
self.logger.info("Updating %s", dep)
dep.update()
self.logger.info("Updating %s", pkg)
pkg.update()
return True
def list(self, params=None):
"""
List pkgs and all dependencies
"""
self.logger.info("Listing")
self.pkg_tree.list()
def changelog(self, params=None):
"""
List changelog(-s)
"""
self.logger.info("Changelog")
pkg_list = []
for p in params:
pkg_list.append(self.pkg_tree.get(p))
if not pkg_list:
pkg_list = self.pkg_tree.get_pkg_list()
for pkg in pkg_list:
l_order = []
for dep in self.pkg_tree.get_dependencies(pkg, l_order):
dep.changelog()
pkg.changelog()
return True
def run(self):
# Parse arguments
parser = argparse.ArgumentParser(description='Package manager', usage='''pkgbuilder <action> [<args>]
Actions:
list List available packages
install [pkg1,pkg2] Install package(-s)
update [pkg1,pkg2] Update package(-s) or all packages, if no pkg is specified
''')
parser.add_argument("action", help="Command to run")
parser.add_argument("params", help="action parameter(-s)", nargs='*')
parser.add_argument("-c", "--conf", help="Path to the configuration file")
parser.add_argument("-p", "--path", help="Path to the packages directory")
parser.add_argument("-d", "--debug", help="Enable debug output", action="store_true")
parser.add_argument("--pretend", help="Don't execute any commands", action="store_true")
try:
parser_args = parser.parse_args()
except SystemExit:  # argparse raises SystemExit on bad arguments
parser.print_help()
exit(1)
if not hasattr(self, parser_args.action):
parser.print_help()
exit(1)
if not parser_args.conf:
parser_args.conf = "pkgbuilder.conf"
try:
conf.load(parser_args.conf)
except Exception:
print("Failed to read configuration file: {}!".format(parser_args.conf))
sys.exit(1)
if parser_args.debug:
lvl = logging.DEBUG
conf["debug"] = True
sys.dont_write_bytecode = True
else:
lvl = logging.INFO
conf["debug"] = False
conf["pretend"] = parser_args.pretend
# Setup logging
logging.basicConfig(level=lvl, format='[%(levelname)s] [%(name)s] %(message)s')
self.logger.info("Starting")
self.pkg_tree.set_pkgs_dir(parser_args.path)
# Open Database
try:
db.load(conf["db_path"])
except Exception as e:
print("Failed to open database: {}! ({})".format(conf["db_path"], e))
sys.exit(1)
# prepare local directories
local_dir_tree.prepare()
# load packages from file
try:
self.pkg_tree.load()
except Exception as e:
self.logger.error("Failed to load packages %s", e.args)
# load packages from database
self.pkg_tree.load_from_db()
self.prep_env()
# execute specified command
getattr(self, parser_args.action)(parser_args.params)
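# A minimal entry point sketch (assumed; the repository may wire this up elsewhere,
# e.g. via a console script):
if __name__ == "__main__":
    App().run()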
| 30.022472
| 149
| 0.571856
|
4a180c45d840018ce8ad19a57021b65b614fb9e7
| 14,032
|
py
|
Python
|
experiments/scaling_binning_calibrator/compare_calibrators.py
|
sdelcore/verified_calibration
|
e2f0f744d7448a0bc75e6c0d5f345f12a6828dc0
|
[
"MIT"
] | 71
|
2019-12-27T21:44:57.000Z
|
2022-03-24T03:55:20.000Z
|
experiments/scaling_binning_calibrator/compare_calibrators.py
|
AnanyaKumar/verified_calibration
|
66baa0d460a6992131927c5df19c9c037c174f04
|
[
"MIT"
] | 10
|
2020-12-11T22:21:34.000Z
|
2022-02-20T23:20:46.000Z
|
experiments/scaling_binning_calibrator/compare_calibrators.py
|
AnanyaKumar/verified_calibration
|
66baa0d460a6992131927c5df19c9c037c174f04
|
[
"MIT"
] | 16
|
2020-02-04T14:25:32.000Z
|
2022-03-05T15:43:01.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import time
import os
import calibration as cal
def eval_top_calibration(probs, eval_probs, labels):
correct = (cal.get_top_predictions(eval_probs) == labels)
data = list(zip(probs, correct))
bins = cal.get_discrete_bins(probs)
binned_data = cal.bin(data, bins)
return cal.plugin_ce(binned_data) ** 2
def eval_marginal_calibration(probs, eval_probs, labels, plugin=True):
ces = [] # Compute the calibration error per class, then take the average.
k = eval_probs.shape[1]
labels_one_hot = cal.get_labels_one_hot(np.array(labels), k)
for c in range(k):
probs_c = probs[:, c]
labels_c = labels_one_hot[:, c]
data_c = list(zip(probs_c, labels_c))
bins_c = cal.get_discrete_bins(probs_c)
binned_data_c = cal.bin(data_c, bins_c)
if plugin:
ce_c = cal.plugin_ce(binned_data_c) ** 2
else:
ce_c = cal.unbiased_square_ce(binned_data_c)
ces.append(ce_c)
return np.mean(ces)
def upper_bound_marginal_calibration_unbiased(probs, eval_probs, labels, samples=30):
data = list(zip(probs, eval_probs, labels))
def evaluator(data):
probs, eval_probs, labels = list(zip(*data))
probs, eval_probs, labels = np.array(probs), np.array(eval_probs), np.array(labels)
return eval_marginal_calibration(probs, eval_probs, labels, plugin=False)
estimate = evaluator(data)
conf_interval = cal.bootstrap_std(data, evaluator, num_samples=samples)
return estimate + 1.3 * conf_interval
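# Note: the 1.3 multiplier here (and in the biased variant below) is close to the
# one-sided 90% normal quantile (~1.28), so this is roughly a 90% upper confidence
# bound under a normality assumption on the bootstrap estimate.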
def upper_bound_marginal_calibration_biased(probs, eval_probs, labels, samples=30):
data = list(zip(probs, eval_probs, labels))
def evaluator(data):
probs, eval_probs, labels = list(zip(*data))
probs, eval_probs, labels = np.array(probs), np.array(eval_probs), np.array(labels)
return eval_marginal_calibration(probs, eval_probs, labels, plugin=True)
estimate = evaluator(data)
conf_interval = cal.bootstrap_std(data, evaluator, num_samples=samples)
return estimate + 1.3 * conf_interval
def compare_calibrators(data_sampler, num_bins, Calibrators, calibration_evaluators,
eval_mse):
"""Get one sample of the calibration error and MSE for a set of calibrators.
Args:
data_sampler: A function that takes in 0 arguments
and returns calib_probs, calib_labels, eval_probs, eval_labels, mse_probs,
mse_labels, where calib_probs and calib_labels should be used by the calibrator
to calibrate, eval_probs and eval_labels should be used to measure the calibration
error, and mse_probs, mse_labels should be used to measure the mean-squared error.
num_bins: integer number of bins.
Calibrators: calibrator classes from e.g. calibrators.py.
calibration_evaluators: a list of functions. calibration_evaluators[i] takes
the output from the calibration method of calibrator i, eval_probs,
eval_labels, and returns a float representing the calibration error
(or an upper bound of it) of calibrator i. We suppose multiple calibration
evaluators because different calibrators may require different ways
of estimating/upper bounding calibration error.
eval_mse: a function that takes in the output of the calibration method,
mse_probs, mse_labels, and returns a float representing the MSE.
"""
calib_probs, calib_labels, eval_probs, eval_labels, mse_probs, mse_labels = data_sampler()
l2_ces = []
mses = []
train_time = 0.0
eval_time = 0.0
start_total = time.time()
for i, Calibrator in enumerate(Calibrators):
calibrator = Calibrator(1, num_bins)
start_time = time.time()
calibrator.train_calibration(calib_probs, calib_labels)
train_time += (time.time() - start_time)
calibrated_probs = calibrator.calibrate(eval_probs)
start_time = time.time()
mid = calibration_evaluators[i](calibrated_probs, eval_probs, eval_labels)
eval_time += time.time() - start_time
cal_mse_probs = calibrator.calibrate(mse_probs)
mse = eval_mse(cal_mse_probs, mse_probs, mse_labels)
l2_ces.append(mid)
mses.append(mse)
# print('train_time: ', train_time)
# print('eval_time: ', eval_time)
# print('total_time: ', time.time() - start_total)
return l2_ces, mses
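# A minimal usage sketch (probs/labels are assumed to be model outputs and integer
# labels loaded elsewhere; kept as comments so the module stays import-safe):
# sampler = make_calibration_data_sampler(probs, labels, num_calibration=1000)
# l2_ces, mses = compare_calibrators(
#     sampler, num_bins=10,
#     Calibrators=[cal.HistogramTopCalibrator, cal.PlattBinnerTopCalibrator],
#     calibration_evaluators=[eval_top_calibration, eval_top_calibration],
#     eval_mse=cal.eval_top_mse)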
def average_calibration(data_sampler, num_bins, Calibrators, calibration_evaluators,
eval_mse, num_trials=100):
l2_ces, mses = [], []
for i in range(num_trials):
cur_l2_ces, cur_mses = compare_calibrators(
data_sampler, num_bins, Calibrators,
calibration_evaluators, eval_mse)
l2_ces.append(cur_l2_ces)
mses.append(cur_mses)
l2_ce_means = np.mean(l2_ces, axis=0)
l2_ce_stddevs = np.std(l2_ces, axis=0) / np.sqrt(num_trials)
mse_stddevs = np.std(mses, axis=0) / np.sqrt(num_trials)  # std over the raw per-trial MSEs, before averaging
mses = np.mean(mses, axis=0)
return l2_ce_means, l2_ce_stddevs, mses, mse_stddevs
def vary_bin_calibration(data_sampler, num_bins_list, Calibrators, calibration_evaluators,
eval_mse, num_trials=100):
ce_list = []
stddev_list = []
mse_list = []
for num_bins in num_bins_list:
l2_ce_means, l2_ce_stddevs, mses, mse_stddevs = average_calibration(
data_sampler, num_bins, Calibrators,
calibration_evaluators, eval_mse, num_trials)
ce_list.append(l2_ce_means)
stddev_list.append(l2_ce_stddevs)
mse_list.append(mses)
return np.transpose(ce_list), np.transpose(stddev_list), np.transpose(mse_list)
def plot_ces(bins_list, l2_ces, l2_ce_stddevs, save_path='marginal_ces.png'):
plt.clf()
font = {'family' : 'normal',
'size' : 16}
rc('font', **font)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# 90% confidence intervals.
error_bars_90 = 1.645 * l2_ce_stddevs
plt.errorbar(
bins_list, l2_ces[0], yerr=[error_bars_90[0], error_bars_90[0]],
barsabove=True, color='red', capsize=4, label='histogram', linestyle='--')
plt.errorbar(
bins_list, l2_ces[1], yerr=[error_bars_90[1], error_bars_90[1]],
barsabove=True, color='blue', capsize=4, label='scaling-binning')
plt.ylabel("Squared Calibration Error")
plt.xlabel("Number of Bins")
plt.ylim(bottom=0.0)
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig(save_path)
def plot_mse_ce_curve(bins_list, l2_ces, mses, xlim=None, ylim=None,
save_path='marginal_mse_vs_ces.png'):
plt.clf()
font = {'family' : 'normal',
'size' : 16}
rc('font', **font)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
def get_pareto_points(data):
pareto_points = []
def dominated(p1, p2):
return p1[0] >= p2[0] and p1[1] >= p2[1]
for datum in data:
num_dominated = sum(map(lambda x: dominated(datum, x), data))
if num_dominated == 1:
pareto_points.append(datum)
return pareto_points
print(get_pareto_points(list(zip(l2_ces[0], mses[0], bins_list))))
print(get_pareto_points(list(zip(l2_ces[1], mses[1], bins_list))))
l2ces0, mses0 = zip(*get_pareto_points(list(zip(l2_ces[0], mses[0]))))
l2ces1, mses1 = zip(*get_pareto_points(list(zip(l2_ces[1], mses[1]))))
plt.scatter(l2ces0, mses0, c='red', marker='o', label='histogram')
plt.scatter(l2ces1, mses1, c='blue', marker='x', label='scaling-binning')
plt.legend(loc='upper right')
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.xlabel("Squared Calibration Error")
plt.ylabel("Mean-Squared Error")
plt.tight_layout()
plt.savefig(save_path)
def make_calibration_data_sampler(probs, labels, num_calibration):
def data_sampler():
assert len(probs) == len(labels)
indices = np.random.choice(list(range(len(probs))),
size=num_calibration, replace=True)
calib_probs = np.array([probs[i] for i in indices])
calib_labels = np.array([labels[i] for i in indices])
eval_probs = probs
eval_labels = labels
return calib_probs, calib_labels, eval_probs, eval_labels, probs, labels
return data_sampler
def make_calibration_eval_data_sampler(probs, labels, num_calib, num_eval):
def data_sampler():
assert len(probs) == len(labels)
calib_indices = np.random.choice(
list(range(len(probs))), size=num_calib, replace=True)
eval_indices = np.random.choice(
list(range(len(probs))), size=num_eval, replace=True)
calib_probs = np.array([probs[i] for i in calib_indices])
calib_labels = np.array([labels[i] for i in calib_indices])
eval_probs = np.array([probs[i] for i in eval_indices])
eval_labels = np.array([labels[i] for i in eval_indices])
return calib_probs, calib_labels, eval_probs, eval_labels, probs, labels
return data_sampler
def cifar10_experiment_top(probs_path, ce_save_path, mse_ce_save_path, num_trials=100):
probs, labels = cal.load_test_probs_labels(probs_path)
bins_list = list(range(10, 101, 10))
num_calibration = 1000
l2_ces, l2_stddevs, mses = vary_bin_calibration(
data_sampler=make_calibration_data_sampler(probs, labels, num_calibration),
num_bins_list=bins_list,
Calibrators=[cal.HistogramTopCalibrator, cal.PlattBinnerTopCalibrator],
calibration_evaluators=[eval_top_calibration, eval_top_calibration],
eval_mse=cal.eval_top_mse,
num_trials=num_trials)
plot_mse_ce_curve(bins_list, l2_ces, mses, xlim=(0.0, 0.002), ylim=(0.0425, 0.045),
save_path=mse_ce_save_path)
plot_ces(bins_list, l2_ces, l2_stddevs, save_path=ce_save_path)
def cifar10_experiment_marginal(probs_path, ce_save_path, mse_ce_save_path, num_trials=100):
probs, labels = cal.load_test_probs_labels(probs_path)
bins_list = list(range(10, 101, 10))
num_calibration = 1000
l2_ces, l2_stddevs, mses = vary_bin_calibration(
data_sampler=make_calibration_data_sampler(probs, labels, num_calibration),
num_bins_list=bins_list,
Calibrators=[cal.HistogramMarginalCalibrator,
cal.PlattBinnerMarginalCalibrator],
calibration_evaluators=[eval_marginal_calibration, eval_marginal_calibration],
eval_mse=cal.eval_marginal_mse,
num_trials=num_trials)
plot_mse_ce_curve(bins_list, l2_ces, mses, xlim=(0.0, 0.0006), ylim=(0.04, 0.08),
save_path=mse_ce_save_path)
plot_ces(bins_list, l2_ces, l2_stddevs, save_path=ce_save_path)
def imagenet_experiment_top(probs_path, ce_save_path, mse_ce_save_path, num_trials=100):
probs, labels = cal.load_test_probs_labels(probs_path)
bins_list = list(range(10, 101, 10))
num_calibration = 1000
l2_ces, l2_stddevs, mses = vary_bin_calibration(
data_sampler=make_calibration_data_sampler(probs, labels, num_calibration),
num_bins_list=bins_list,
Calibrators=[cal.HistogramTopCalibrator, cal.PlattBinnerTopCalibrator],
calibration_evaluators=[eval_top_calibration, eval_top_calibration],
eval_mse=cal.eval_top_mse,
num_trials=num_trials)
plot_mse_ce_curve(bins_list, l2_ces, mses, save_path=mse_ce_save_path)
plot_ces(bins_list, l2_ces, l2_stddevs, save_path=ce_save_path)
def imagenet_experiment_marginal(probs_path, ce_save_path, mse_ce_save_path, num_trials=20):
probs, labels = cal.load_test_probs_labels(probs_path)
bins_list = list(range(10, 101, 10))
num_calibration = 25000
l2_ces, l2_stddevs, mses = vary_bin_calibration(
data_sampler=make_calibration_data_sampler(probs, labels, num_calibration),
num_bins_list=bins_list,
Calibrators=[cal.HistogramMarginalCalibrator,
cal.PlattBinnerMarginalCalibrator],
calibration_evaluators=[eval_marginal_calibration, eval_marginal_calibration],
eval_mse=cal.eval_marginal_mse,
num_trials=num_trials)
plot_mse_ce_curve(bins_list, l2_ces, mses, save_path=mse_ce_save_path)
plot_ces(bins_list, l2_ces, l2_stddevs, save_path=ce_save_path)
if __name__ == "__main__":
if not os.path.exists('./saved_files'):
os.mkdir('./saved_files')
if not os.path.exists('./saved_files/scaling_binning_calibrator/'):
os.mkdir('./saved_files/scaling_binning_calibrator/')
prefix = './saved_files/scaling_binning_calibrator/'
# Main marginal calibration CIFAR-10 experiment in the paper.
np.random.seed(0) # Keep results consistent.
cifar10_experiment_marginal(
probs_path='data/cifar_probs.dat',
ce_save_path=prefix+'cifar_marginal_ce_plot',
mse_ce_save_path=prefix+'cifar_marginal_mse_ce_plot')
# Top-label calibration CIFAR experiment in the Appendix, 1000 points.
np.random.seed(0) # Keep results consistent.
cifar10_experiment_top(
probs_path='data/cifar_probs.dat',
ce_save_path=prefix+'cifar_top_ce_plot',
mse_ce_save_path=prefix+'cifar_top_mse_ce_plot')
# Top-label calibration ImageNet experiment in the Appendix, 1000 points.
np.random.seed(0) # Keep results consistent.
imagenet_experiment_top(
probs_path='data/imagenet_probs.dat',
ce_save_path=prefix+'imagenet_top_ce_plot',
mse_ce_save_path=prefix+'imagenet_top_mse_ce_plot')
| 45.70684
| 95
| 0.678022
|
4a180c541d30a4f83fe78809ea6aafab906f8f8b
| 10,503
|
py
|
Python
|
velo_payments/models/payor_links_response_links.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/payor_links_response_links.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
velo_payments/models/payor_links_response_links.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor's current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the -d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g.
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PayorLinksResponseLinks(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'link_id': 'str',
'from_payor_id': 'str',
'link_type': 'str',
'to_payor_id': 'str'
}
attribute_map = {
'link_id': 'linkId',
'from_payor_id': 'fromPayorId',
'link_type': 'linkType',
'to_payor_id': 'toPayorId'
}
def __init__(self, link_id=None, from_payor_id=None, link_type=None, to_payor_id=None): # noqa: E501
"""PayorLinksResponseLinks - a model defined in OpenAPI""" # noqa: E501
self._link_id = None
self._from_payor_id = None
self._link_type = None
self._to_payor_id = None
self.discriminator = None
self.link_id = link_id
self.from_payor_id = from_payor_id
self.link_type = link_type
self.to_payor_id = to_payor_id
@property
def link_id(self):
"""Gets the link_id of this PayorLinksResponseLinks. # noqa: E501
:return: The link_id of this PayorLinksResponseLinks. # noqa: E501
:rtype: str
"""
return self._link_id
@link_id.setter
def link_id(self, link_id):
"""Sets the link_id of this PayorLinksResponseLinks.
:param link_id: The link_id of this PayorLinksResponseLinks. # noqa: E501
:type: str
"""
if link_id is None:
raise ValueError("Invalid value for `link_id`, must not be `None`") # noqa: E501
self._link_id = link_id
@property
def from_payor_id(self):
"""Gets the from_payor_id of this PayorLinksResponseLinks. # noqa: E501
:return: The from_payor_id of this PayorLinksResponseLinks. # noqa: E501
:rtype: str
"""
return self._from_payor_id
@from_payor_id.setter
def from_payor_id(self, from_payor_id):
"""Sets the from_payor_id of this PayorLinksResponseLinks.
:param from_payor_id: The from_payor_id of this PayorLinksResponseLinks. # noqa: E501
:type: str
"""
if from_payor_id is None:
raise ValueError("Invalid value for `from_payor_id`, must not be `None`") # noqa: E501
self._from_payor_id = from_payor_id
@property
def link_type(self):
"""Gets the link_type of this PayorLinksResponseLinks. # noqa: E501
:return: The link_type of this PayorLinksResponseLinks. # noqa: E501
:rtype: str
"""
return self._link_type
@link_type.setter
def link_type(self, link_type):
"""Sets the link_type of this PayorLinksResponseLinks.
:param link_type: The link_type of this PayorLinksResponseLinks. # noqa: E501
:type: str
"""
if link_type is None:
raise ValueError("Invalid value for `link_type`, must not be `None`") # noqa: E501
allowed_values = ["PARENT_OF"] # noqa: E501
if link_type not in allowed_values:
raise ValueError(
"Invalid value for `link_type` ({0}), must be one of {1}" # noqa: E501
.format(link_type, allowed_values)
)
self._link_type = link_type
@property
def to_payor_id(self):
"""Gets the to_payor_id of this PayorLinksResponseLinks. # noqa: E501
:return: The to_payor_id of this PayorLinksResponseLinks. # noqa: E501
:rtype: str
"""
return self._to_payor_id
@to_payor_id.setter
def to_payor_id(self, to_payor_id):
"""Sets the to_payor_id of this PayorLinksResponseLinks.
:param to_payor_id: The to_payor_id of this PayorLinksResponseLinks. # noqa: E501
:type: str
"""
if to_payor_id is None:
raise ValueError("Invalid value for `to_payor_id`, must not be `None`") # noqa: E501
self._to_payor_id = to_payor_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PayorLinksResponseLinks):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
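# A minimal usage sketch of this generated model (field values are illustrative,
# not real identifiers):
# link = PayorLinksResponseLinks(link_id="l-1", from_payor_id="p-1",
#                                link_type="PARENT_OF", to_payor_id="p-2")
# link.to_dict()  # -> {'link_id': 'l-1', 'from_payor_id': 'p-1', ...}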
| 52.253731
| 4,651
| 0.678282
|
4a180cfac1b523b999860239b59cc4b31c468dc5
| 8,042
|
py
|
Python
|
flaml/nlp/hpo/searchalgo_auto.py
|
goncaloperes/FLAML
|
0ba58e0acecc788670a1b28f7ceb5908746ec6fc
|
[
"MIT"
] | 1
|
2021-09-09T07:36:01.000Z
|
2021-09-09T07:36:01.000Z
|
flaml/nlp/hpo/searchalgo_auto.py
|
goncaloperes/FLAML
|
0ba58e0acecc788670a1b28f7ceb5908746ec6fc
|
[
"MIT"
] | null | null | null |
flaml/nlp/hpo/searchalgo_auto.py
|
goncaloperes/FLAML
|
0ba58e0acecc788670a1b28f7ceb5908746ec6fc
|
[
"MIT"
] | 1
|
2021-12-01T16:23:19.000Z
|
2021-12-01T16:23:19.000Z
|
import itertools
from collections import OrderedDict
import ray
from ray.tune.suggest.optuna import OptunaSearch
from flaml import CFO, BlendSearch
SEARCH_ALGO_MAPPING = OrderedDict(
[
("optuna", OptunaSearch),
("cfo", CFO),
("bs", BlendSearch),
("grid", None),
("gridbert", None),
("rs", None)
]
)
class AutoSearchAlgorithm:
"""
This is a class for getting the search algorithm based on the search algorithm name
(a string variable) instantiated as one of the algorithms of the library when
created with the `~flaml.nlp.hpo.AutoSearchAlgorithm.from_method_name` method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoSearchAlgorithm is designed to be instantiated "
"using the `AutoSearchAlgorithm.from_method_name(cls, search_algo_name, search_algo_args_mode,"
" hpo_search_space, **custom_hpo_args)` methods."
)
@classmethod
def from_method_name(cls,
search_algo_name,
search_algo_args_mode,
hpo_search_space,
time_budget,
metric_name,
metric_mode_name,
**custom_hpo_args):
"""
Instantiating one of the search algorithm classes based on the search algorithm name, search algorithm
argument mode, hpo search space and other keyword args
Args:
search_algo_name:
A string variable that specifies the search algorithm name, e.g., "bs"
search_algo_args_mode:
A string variable that specifies the mode for the search algorithm args, e.g., "dft" means
initializing using the default mode
hpo_search_space:
The hpo search space
custom_hpo_args:
The customized arguments for the search algorithm (specified by user)
Example:
>>> from flaml.nlp.hpo.hpo_searchspace import AutoHPOSearchSpace
>>> search_space_hpo=AutoHPOSearchSpace.from_model_and_dataset_name("uni", "electra", "small", ["glue"], "rte")
>>> search_algo = AutoSearchAlgorithm.from_method_name("bs", "cus", search_space_hpo,
{"points_to_evaluate": [{"learning_rate": 1e-5, "num_train_epochs": 10}]})
"""
assert hpo_search_space, "hpo_search_space needs to be specified for calling AutoSearchAlgorithm.from_method_name"
if not search_algo_name:
# TODO coverage
search_algo_name = "grid"
if search_algo_name in SEARCH_ALGO_MAPPING.keys():
if SEARCH_ALGO_MAPPING[search_algo_name] is None:
# TODO coverage
return None
"""
filtering the customized args for hpo from custom_hpo_args, keep those
which are in the input variable name list of the constructor of
the algorithm, remove those which does not appear in the input variables
of the constructor function
"""
this_search_algo_kwargs = None
allowed_arguments = SEARCH_ALGO_MAPPING[search_algo_name].__init__.__code__.co_varnames
allowed_custom_args = {key: custom_hpo_args[key] for key in custom_hpo_args.keys() if
key in allowed_arguments}
"""
If the search_algo_args_mode is "dft", set the args to the default args, e.g.,the default args for
BlendSearch is "low_cost_partial_config": {"num_train_epochs": min_epoch,"per_device_train_batch_size"
: max(hpo_search_space["per_device_train_batch_size"].categories)},
"""
if search_algo_args_mode == "dft":
# TODO coverage
this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[search_algo_name](
"dft",
metric_name,
metric_mode_name,
hpo_search_space=hpo_search_space,
**allowed_custom_args)
elif search_algo_args_mode == "cus":
this_search_algo_kwargs = DEFAULT_SEARCH_ALGO_ARGS_MAPPING[search_algo_name](
"cus",
metric_name,
metric_mode_name,
hpo_search_space=hpo_search_space,
**allowed_custom_args)
"""
Return the hpo algorithm constructed with these arguments.
"""
search_algo = SEARCH_ALGO_MAPPING[search_algo_name](**this_search_algo_kwargs)
if search_algo_name == "bs":
search_algo.set_search_properties(config={"time_budget_s": time_budget})
return search_algo
raise ValueError(
"Unrecognized method {} for this kind of AutoSearchAlgorithm: {}.\n"
"Method name should be one of {}.".format(
search_algo_name, cls.__name__, ", ".join(SEARCH_ALGO_MAPPING.keys())
)
)
@staticmethod
def grid2list(grid_config):
# TODO coverage
key_val_list = [[(key, each_val) for each_val in val_list['grid_search']]
for (key, val_list) in grid_config.items()]
config_list = [dict(x) for x in itertools.product(*key_val_list)]
return config_list
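# Example of the expansion performed above (hypothetical search space):
# grid2list({"lr": {"grid_search": [1e-5, 3e-5]}, "bs": {"grid_search": [16, 32]}})
# -> [{"lr": 1e-5, "bs": 16}, {"lr": 1e-5, "bs": 32},
#     {"lr": 3e-5, "bs": 16}, {"lr": 3e-5, "bs": 32}]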
def get_search_algo_args_optuna(search_args_mode,
metric_name,
metric_mode_name,
hpo_search_space=None,
**custom_hpo_args):
# TODO coverage
return {}
def default_search_algo_args_bs(search_args_mode,
metric_name,
metric_mode_name,
hpo_search_space=None,
**custom_hpo_args):
assert hpo_search_space, "hpo_search_space needs to be specified for calling default_search_algo_args_bs"
if "num_train_epochs" in hpo_search_space and \
isinstance(hpo_search_space["num_train_epochs"], ray.tune.sample.Categorical):
min_epoch = min(hpo_search_space["num_train_epochs"].categories)
else:
# TODO coverage
assert isinstance(hpo_search_space["num_train_epochs"], ray.tune.sample.Float)
min_epoch = hpo_search_space["num_train_epochs"].lower
default_search_algo_args = {
"low_cost_partial_config": {
"num_train_epochs": min_epoch,
"per_device_train_batch_size": max(hpo_search_space["per_device_train_batch_size"].categories),
},
"space": hpo_search_space,
"metric": metric_name,
"mode": metric_mode_name
}
if search_args_mode == "cus":
default_search_algo_args.update(custom_hpo_args)
return default_search_algo_args
def default_search_algo_args_grid_search(search_args_mode,
metric_name,
metric_mode_name,
hpo_search_space=None,
**custom_hpo_args):
# TODO coverage
return {}
def default_search_algo_args_random_search(search_args_mode,
metric_name,
metric_mode_name,
hpo_search_space=None,
**custom_hpo_args):
# TODO coverage
return {}
DEFAULT_SEARCH_ALGO_ARGS_MAPPING = OrderedDict(
[
("optuna", get_search_algo_args_optuna),
("cfo", default_search_algo_args_bs),
("bs", default_search_algo_args_bs),
("grid", default_search_algo_args_grid_search),
("gridbert", default_search_algo_args_random_search)
]
)
| 40.822335
| 122
| 0.593136
|
4a180d1225c3ab5182d600dcfa10ff05a2d53a5d
| 190
|
py
|
Python
|
nhc2_coco/__init__.py
|
JorisDeRieck/nhc2-coco
|
4188525c67a3bc7533b5438ef1e8eff26448c41c
|
[
"MIT"
] | null | null | null |
nhc2_coco/__init__.py
|
JorisDeRieck/nhc2-coco
|
4188525c67a3bc7533b5438ef1e8eff26448c41c
|
[
"MIT"
] | null | null | null |
nhc2_coco/__init__.py
|
JorisDeRieck/nhc2-coco
|
4188525c67a3bc7533b5438ef1e8eff26448c41c
|
[
"MIT"
] | null | null | null |
from .coco import CoCo
from .coco_entity import CoCoEntity
from .coco_light import CoCoLight
from .coco_switch import CoCoSwitch
__all__ = ['CoCo', 'CoCoEntity', 'CoCoLight', 'CoCoSwitch']
| 27.142857
| 59
| 0.784211
|
4a180d408322b5061b42a8f6d1e86d2a057f3276
| 5,301
|
py
|
Python
|
TensorFace/common/FaceQuality.py
|
bleakie/MaskInsightface
|
94511404eaa7912945fa087e6445a3608c46aaea
|
[
"Apache-2.0"
] | 269
|
2019-08-20T09:39:44.000Z
|
2022-03-12T09:45:29.000Z
|
TensorFace/common/FaceQuality.py
|
bleakie/MaskInsightface
|
94511404eaa7912945fa087e6445a3608c46aaea
|
[
"Apache-2.0"
] | 25
|
2019-08-09T03:58:03.000Z
|
2021-12-27T08:22:20.000Z
|
TensorFace/common/FaceQuality.py
|
bleakie/MaskInsightface
|
94511404eaa7912945fa087e6445a3608c46aaea
|
[
"Apache-2.0"
] | 64
|
2019-08-22T08:39:27.000Z
|
2022-03-28T14:02:46.000Z
|
#!/usr/bin/env python
# coding: utf-8
# # Face Quality Assessment for Face Verification in Video
# https://pdfs.semanticscholar.org/2c0a/caec54ab2585ff807e18b6b9550c44651eab.pdf?_ga=2.118968650.2116578973.1552199994-98267093.1547624592
import cv2
import numpy as np
# get illumination
def illumination(img, bbox):
bbox = bbox.astype(int)  # np.int is deprecated in modern NumPy
gray = cv2.cvtColor(img[bbox[1]:bbox[3], bbox[0]:bbox[2], :], cv2.COLOR_BGR2GRAY)
# length of R: available range of gray intensities excluding 5% of the darkest and brightest pixels
sorted_gray = np.sort(gray.ravel())
l = len(sorted_gray)
cut_off_idx = l * 5 // 100
r = sorted_gray[l - cut_off_idx - 1] - sorted_gray[cut_off_idx]  # -1 keeps the index in range when cut_off_idx is 0
return np.round(r / 255, 2)
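# Worked example: if, after discarding the darkest and brightest 5% of pixels, the
# remaining gray values span 40..220, then r = 180 and the score is round(180/255, 2) = 0.71.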
def get_contour(pts):
return np.array([[pts[i], pts[5 + i]] for i in [0, 1, 4, 3]], np.int32).reshape((-1, 1, 2))
def get_mask(image, contour):
mask = np.zeros(image.shape[0:2], dtype="uint8")
cv2.drawContours(mask, [contour], -1, 255, -1)
return mask
# get sharpness
def sharpness(img, lmk):
x_index, y_index = [], []
for i in lmk:
x_index.append(i[0])
y_index.append(i[1])
landmark = np.append(x_index, y_index)
contour = get_contour(landmark)
mask = get_mask(img, contour) # 1-channel mask
mask = np.stack((mask,) * 3, axis=-1) # 3-channel mask
mask[mask == 255] = 1 # convert 0 and 255 to 0 and 1
laplacian = cv2.Laplacian(img, cv2.CV_64F)
edges = laplacian[mask.astype(bool)]
return np.round(edges.var() / 255, 2)
# get size: returns True when the shorter side of the bbox is below lower_threshold (face too small)
def get_size(bbox, lower_threshold=60):
x = min(bbox[2] - bbox[0], bbox[3] - bbox[1])
if (x > lower_threshold):
return False
else:
return True
def check_large_pose(landmark, bbox):
assert landmark.shape == (5, 2)
assert len(bbox) == 4
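# The 5-point landmark is assumed to follow the common detector convention:
# [left eye, right eye, nose, left mouth corner, right mouth corner]; the theta
# ratios below compare angles at these points to score left/right/up/down pose.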
def get_theta(base, x, y):
vx = x - base
vy = y - base
vx[1] *= -1
vy[1] *= -1
tx = np.arctan2(vx[1], vx[0])
ty = np.arctan2(vy[1], vy[0])
d = ty - tx
d = np.degrees(d)
if d < -180.0:
d += 360.
elif d > 180.0:
d -= 360.0
return d
landmark = landmark.astype(np.float32)
theta1 = get_theta(landmark[0], landmark[3], landmark[2])
theta2 = get_theta(landmark[1], landmark[2], landmark[4])
# print(va, vb, theta2)
theta3 = get_theta(landmark[0], landmark[2], landmark[1])
theta4 = get_theta(landmark[1], landmark[0], landmark[2])
theta5 = get_theta(landmark[3], landmark[4], landmark[2])
theta6 = get_theta(landmark[4], landmark[2], landmark[3])
theta7 = get_theta(landmark[3], landmark[2], landmark[0])
theta8 = get_theta(landmark[4], landmark[1], landmark[2])
# print(theta1, theta2, theta3, theta4, theta5, theta6, theta7, theta8)
left_score = 0.0
right_score = 0.0
up_score = 0.0
down_score = 0.0
if theta1 <= 0.0:
left_score = 10.0
elif theta2 <= 0.0:
right_score = 10.0
else:
left_score = theta2 / theta1
right_score = theta1 / theta2
if theta3 <= 10.0 or theta4 <= 10.0:
up_score = 10.0
else:
up_score = max(theta1 / theta3, theta2 / theta4)
if theta5 <= 10.0 or theta6 <= 10.0:
down_score = 10.0
else:
down_score = max(theta7 / theta5, theta8 / theta6)
print(left_score, right_score, up_score, down_score)
if left_score < 8 and right_score < 8 and up_score < 3 and down_score < 3:
return False
else:
return True
def over_border(img, landmark):
h, w = img.shape[:2]
xmin, xmax = min(landmark[:, 0]), max(landmark[:, 0])
ymin, ymax = min(landmark[:, 1]), max(landmark[:, 1])
if min(xmin, ymin) < 0:
return True
elif xmax > w or ymax > h:
return True
else:
return False
def faceCrop(img, maxbbox, scale_ratio=1.0):
'''
Crop a face from the image; scale_ratio controls the margin size around the face.
Using a margin means face information is not lost when the face is aligned later.
'''
xmin, ymin, xmax, ymax = maxbbox
hmax, wmax, _ = img.shape
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
w = (xmax - xmin) * scale_ratio
h = (ymax - ymin) * scale_ratio
# new xmin, ymin, xmax and ymax
xmin = x - w / 2
xmax = x + w / 2
ymin = y - h / 2
ymax = y + h / 2
xmin = max(0, int(xmin))
ymin = max(0, int(ymin))
xmax = min(wmax, int(xmax))
ymax = min(hmax, int(ymax))
return [xmin, ymin, xmax, ymax]
def get_face_quality(img, face_bbox, landmark):
small_size = get_size(face_bbox)  # True when the face is too small
# score_sharpness = sharpness(img, landmark) # 0.3
# score_illumination = illumination(img, face_bbox) # 0.5
out_border = over_border(img, landmark)
large_pose = check_large_pose(landmark, face_bbox)
if small_size or out_border:
return False
elif large_pose:
return False
# elif min(score_sharpness, score_illumination) < 0.1:
# return False
else:
return True
def get_person_quality(dress_bbox, face_bbox):
face_height = face_bbox[3]-face_bbox[1]
dress_height = dress_bbox[3]-dress_bbox[1]
if dress_height / face_height < 0.8:  # body ("dress") region too short relative to the face
return False
else:
return True
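# A minimal usage sketch (assumes a BGR image plus a detector that yields a bounding
# box and a 5x2 landmark array, e.g. an insightface-style model):
# ok = get_face_quality(img, np.array([x1, y1, x2, y2]), landmarks_5x2)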
| 31
| 138
| 0.610074
|
4a180dd81829b9af63aa364051cf9de05e4b7d29
| 7,276
|
py
|
Python
|
demo/jupyter-notebook/parsr_client.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | 1
|
2020-01-15T03:49:04.000Z
|
2020-01-15T03:49:04.000Z
|
demo/jupyter-notebook/parsr_client.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | null | null | null |
demo/jupyter-notebook/parsr_client.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | 1
|
2020-01-25T19:35:34.000Z
|
2020-01-25T19:35:34.000Z
|
#
# Copyright 2019 AXA Group Operations S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from glob import glob
from itertools import chain
import os
import sys
import json
import time
from sxsdiff import DiffCalculator
from sxsdiff.generators.github import GitHubStyledGenerator
import diff_match_patch
import pandas as pd
import requests
from io import StringIO
class ParserClient():
def __init__(self, server):
self.version_history = {}
self.set_server(server)
self.set_current_request_id("")
def __supported_input_files(self) -> list:
return ['*.pdf', '*.jpg', '*.jpeg', '*.png', '*.tiff', '*.tif',]
def set_server(self, server:str):
self.server = server
def set_current_request_id(self, request_id:str):
self.request_id = request_id
def send_document(self, file:str, config:str, server:str="", document_name:str=None, wait_till_finished:bool=False, save_request_id:bool=False) -> dict:
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
packet = {
'file': (file, open(file, 'rb'), 'application/pdf'),
'config': (config, open(config, 'rb'), 'application/json'),
}
r = requests.post('http://'+server+'/api/v1/document', files=packet)
jobId = r.text
if not document_name:
document_name = os.path.splitext(os.path.basename(file))[0]
if document_name not in self.version_history:
self.version_history[document_name] = [jobId]
else:
self.version_history[document_name].append(jobId)
if save_request_id:
self.set_current_request_id(jobId)
if not wait_till_finished:
return {'file': file, 'config': config, 'status_code': r.status_code, 'server_response': r.text}
else:
print('> Polling server for the job {}...'.format(jobId))
server_status_response = self.get_status(jobId)['server_response']
while ('progress-percentage' in server_status_response):
print('>> Progress percentage: {}'.format(server_status_response['progress-percentage']))
time.sleep(2)
server_status_response = self.get_status(jobId)['server_response']
print('>> Job done!')
return {'file': file, 'config': config, 'status_code': r.status_code, 'server_response': r.text}
def get_versions(self, document_name:str) -> list:
if document_name in self.version_history:
return self.version_history[document_name]
else:
return []
def send_documents_folder(self, folder:str, config:str, server:str="") -> list:
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
responses = []
os.chdir(folder)
files = [glob(e) for e in self.__supported_input_files()]  # glob is imported as a function (from glob import glob)
files_flat = list(chain.from_iterable(files))
for file in files_flat:
packet = {
'file': (file, open(file, 'rb'), 'application/pdf'),
'config': (config, open(config, 'rb'), 'application/json'),
}
r = requests.post('http://'+server+'/api/v1/document', files=packet)
responses.append({'file': file, 'config': config, 'status_code': r.status_code, 'server_response': r.text})
return responses
def get_status(self, request_id:str="", server:str=""):
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
if request_id == "":
if self.request_id == "":
raise Exception('No request ID provided')
else:
request_id = self.request_id
if self.server == "":
raise Exception('No server address provided')
r = requests.get('http://{}/api/v1/queue/{}'.format(server, request_id))
return {'request_id': request_id, 'server_response': json.loads(r.text)}
def get_json(self, request_id:str="", server:str=""):
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
if request_id == "":
if self.request_id == "":
raise Exception('No request ID provided')
else:
request_id = self.request_id
r = requests.get('http://{}/api/v1/json/{}'.format(server, request_id))
if r.text != "":
return r.json()
else:
return {'request_id': request_id, 'server_response': r.json()}
def get_markdown(self, request_id:str="", server:str=""):
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
if request_id == "":
if self.request_id == "":
raise Exception('No request ID provided')
else:
request_id = self.request_id
r = requests.get('http://{}/api/v1/markdown/{}'.format(server, request_id))
if r.text != "":
return r.text
else:
return {'request_id': request_id, 'server_response': r.text}
def get_text(self, request_id:str="", server:str=""):
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
if request_id == "":
if self.request_id == "":
raise Exception('No request ID provided')
else:
request_id = self.request_id
r = requests.get('http://{}/api/v1/text/{}'.format(server, request_id))
if r.text != "":
return r.text
else:
return {'request_id': request_id, 'server_response': r.text}
def get_table(self, request_id:str="", page=None, table=None, seperator=";", server:str=""):
if server == "":
if self.server == "":
raise Exception('No server address provided')
else:
server = self.server
if request_id == "":
if self.request_id == "":
raise Exception('No request ID provided')
else:
request_id = self.request_id
if page is None and table is None:
r = requests.get('http://{}/api/v1/csv/{}'.format(server, request_id))
else:
r = requests.get('http://{}/api/v1/csv/{}/{}/{}'.format(server, request_id, page, table))
if r.text != "":
try:
df = pd.read_csv(StringIO(r.text), sep=seperator)
df = df.loc[:, ~df.columns.str.match('Unnamed')]  # drop unnamed index columns
df = df.where((pd.notnull(df)), " ")
return df
except Exception as e:
return {'request_id': request_id, 'server_response': r.text}
else:
return {'request_id': request_id, 'server_response': r.text}
def compare_versions(self, request_ids:list, pretty_html:bool = False):
diffs = []
for i in range(0, len(request_ids) - 1):
request_id1 = request_ids[i]
request_id2 = request_ids[i + 1]
md1 = self.get_markdown(request_id1)
md2 = self.get_markdown(request_id2)
if pretty_html:
sxsdiff_result = DiffCalculator().run(md1, md2)
html_store = StringIO()
GitHubStyledGenerator(file=html_store).run(sxsdiff_result)
html_diff = html_store.getvalue()
diffs.append(html_diff)
else:
dmp = diff_match_patch.diff_match_patch()
diff = dmp.diff_main(md1, md2)
dmp.diff_cleanupSemantic(diff)
diffs.append(diff)
return diffs
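# A minimal end-to-end sketch (server address and file names are illustrative):
# client = ParserClient('localhost:3001')
# client.send_document('sample.pdf', 'config.json', wait_till_finished=True,
#                      save_request_id=True)
# md = client.get_markdown()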
| 33.223744
| 153
| 0.680594
|
4a180debb3510c5d4b3f456f94a782b2ebdfe053
| 12,464
|
py
|
Python
|
mlrun/db/base.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
mlrun/db/base.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
mlrun/db/base.py
|
george0st/mlrun
|
6467d3a5ceadf6cd35512b84b3ddc3da611cf39a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import warnings
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from mlrun.api import schemas
from mlrun.api.schemas import ModelEndpoint
class RunDBError(Exception):
pass
class RunDBInterface(ABC):
kind = ""
@abstractmethod
def connect(self, secrets=None):
return self
@abstractmethod
def store_log(self, uid, project="", body=None, append=False):
pass
@abstractmethod
def get_log(self, uid, project="", offset=0, size=0):
pass
@abstractmethod
def store_run(self, struct, uid, project="", iter=0):
pass
@abstractmethod
def update_run(self, updates: dict, uid, project="", iter=0):
pass
@abstractmethod
def abort_run(self, uid, project="", iter=0):
pass
@abstractmethod
def read_run(self, uid, project="", iter=0):
pass
@abstractmethod
def list_runs(
self,
name="",
uid=None,
project="",
labels=None,
state="",
sort=True,
last=0,
iter=False,
start_time_from: datetime.datetime = None,
start_time_to: datetime.datetime = None,
last_update_time_from: datetime.datetime = None,
last_update_time_to: datetime.datetime = None,
partition_by: Union[schemas.RunPartitionByField, str] = None,
rows_per_partition: int = 1,
partition_sort_by: Union[schemas.SortField, str] = None,
partition_order: Union[schemas.OrderType, str] = schemas.OrderType.desc,
max_partitions: int = 0,
):
pass
@abstractmethod
def del_run(self, uid, project="", iter=0):
pass
@abstractmethod
def del_runs(self, name="", project="", labels=None, state="", days_ago=0):
pass
@abstractmethod
def store_artifact(self, key, artifact, uid, iter=None, tag="", project=""):
pass
@abstractmethod
def read_artifact(self, key, tag="", iter=None, project=""):
pass
@abstractmethod
def list_artifacts(
self,
name="",
project="",
tag="",
labels=None,
since=None,
until=None,
iter: int = None,
best_iteration: bool = False,
kind: str = None,
category: Union[str, schemas.ArtifactCategories] = None,
):
pass
@abstractmethod
def del_artifact(self, key, tag="", project=""):
pass
@abstractmethod
def del_artifacts(self, name="", project="", tag="", labels=None):
pass
# TODO: Make these abstract once filedb implements them
def store_metric(self, uid, project="", keyvals=None, timestamp=None, labels=None):
warnings.warn("store_metric not implemented yet")
def read_metric(self, keys, project="", query=""):
warnings.warn("read_metric not implemented yet")
@abstractmethod
def store_function(self, function, name, project="", tag="", versioned=False):
pass
@abstractmethod
def get_function(self, name, project="", tag="", hash_key=""):
pass
@abstractmethod
def delete_function(self, name: str, project: str = ""):
pass
@abstractmethod
def list_functions(self, name=None, project="", tag="", labels=None):
pass
@abstractmethod
def delete_project(
self,
name: str,
deletion_strategy: schemas.DeletionStrategy = schemas.DeletionStrategy.default(),
):
pass
@abstractmethod
def store_project(
self,
name: str,
project: schemas.Project,
) -> schemas.Project:
pass
@abstractmethod
def patch_project(
self,
name: str,
project: dict,
patch_mode: schemas.PatchMode = schemas.PatchMode.replace,
) -> schemas.Project:
pass
@abstractmethod
def create_project(
self,
project: schemas.Project,
) -> schemas.Project:
pass
@abstractmethod
def list_projects(
self,
owner: str = None,
format_: schemas.ProjectsFormat = schemas.ProjectsFormat.full,
labels: List[str] = None,
state: schemas.ProjectState = None,
) -> schemas.ProjectsOutput:
pass
@abstractmethod
def get_project(self, name: str) -> schemas.Project:
pass
@abstractmethod
def list_artifact_tags(self, project=None):
pass
@abstractmethod
def create_feature_set(
self, feature_set: Union[dict, schemas.FeatureSet], project="", versioned=True
) -> dict:
pass
@abstractmethod
def get_feature_set(
self, name: str, project: str = "", tag: str = None, uid: str = None
) -> dict:
pass
@abstractmethod
def list_features(
self,
project: str,
name: str = None,
tag: str = None,
entities: List[str] = None,
labels: List[str] = None,
) -> schemas.FeaturesOutput:
pass
@abstractmethod
def list_entities(
self,
project: str,
name: str = None,
tag: str = None,
labels: List[str] = None,
) -> schemas.EntitiesOutput:
pass
@abstractmethod
def list_feature_sets(
self,
project: str = "",
name: str = None,
tag: str = None,
state: str = None,
entities: List[str] = None,
features: List[str] = None,
labels: List[str] = None,
partition_by: Union[schemas.FeatureStorePartitionByField, str] = None,
rows_per_partition: int = 1,
partition_sort_by: Union[schemas.SortField, str] = None,
partition_order: Union[schemas.OrderType, str] = schemas.OrderType.desc,
) -> List[dict]:
pass
@abstractmethod
def store_feature_set(
self,
feature_set: Union[dict, schemas.FeatureSet],
name=None,
project="",
tag=None,
uid=None,
versioned=True,
):
pass
@abstractmethod
def patch_feature_set(
self,
name,
feature_set: dict,
project="",
tag=None,
uid=None,
patch_mode: Union[str, schemas.PatchMode] = schemas.PatchMode.replace,
):
pass
@abstractmethod
def delete_feature_set(self, name, project="", tag=None, uid=None):
pass
@abstractmethod
def create_feature_vector(
self,
feature_vector: Union[dict, schemas.FeatureVector],
project="",
versioned=True,
) -> dict:
pass
@abstractmethod
def get_feature_vector(
self, name: str, project: str = "", tag: str = None, uid: str = None
) -> dict:
pass
@abstractmethod
def list_feature_vectors(
self,
project: str = "",
name: str = None,
tag: str = None,
state: str = None,
labels: List[str] = None,
partition_by: Union[schemas.FeatureStorePartitionByField, str] = None,
rows_per_partition: int = 1,
partition_sort_by: Union[schemas.SortField, str] = None,
partition_order: Union[schemas.OrderType, str] = schemas.OrderType.desc,
) -> List[dict]:
pass
@abstractmethod
def store_feature_vector(
self,
feature_vector: Union[dict, schemas.FeatureVector],
name=None,
project="",
tag=None,
uid=None,
versioned=True,
):
pass
@abstractmethod
def patch_feature_vector(
self,
name,
feature_vector_update: dict,
project="",
tag=None,
uid=None,
patch_mode: Union[str, schemas.PatchMode] = schemas.PatchMode.replace,
):
pass
@abstractmethod
def delete_feature_vector(self, name, project="", tag=None, uid=None):
pass
@abstractmethod
def list_pipelines(
self,
project: str,
namespace: str = None,
sort_by: str = "",
page_token: str = "",
filter_: str = "",
format_: Union[
str, schemas.PipelinesFormat
] = schemas.PipelinesFormat.metadata_only,
page_size: int = None,
) -> schemas.PipelinesOutput:
pass
@abstractmethod
def create_project_secrets(
self,
project: str,
provider: Union[
str, schemas.SecretProviderName
] = schemas.SecretProviderName.kubernetes,
secrets: dict = None,
):
pass
@abstractmethod
def list_project_secrets(
self,
project: str,
token: str,
provider: Union[
str, schemas.SecretProviderName
] = schemas.SecretProviderName.kubernetes,
secrets: List[str] = None,
) -> schemas.SecretsData:
pass
@abstractmethod
def list_project_secret_keys(
self,
project: str,
provider: Union[
str, schemas.SecretProviderName
] = schemas.SecretProviderName.kubernetes,
token: str = None,
) -> schemas.SecretKeysData:
pass
@abstractmethod
def delete_project_secrets(
self,
project: str,
provider: Union[
str, schemas.SecretProviderName
] = schemas.SecretProviderName.kubernetes,
secrets: List[str] = None,
):
pass
@abstractmethod
def create_user_secrets(
self,
user: str,
provider: Union[
str, schemas.SecretProviderName
] = schemas.SecretProviderName.vault,
secrets: dict = None,
):
pass
@abstractmethod
def create_or_patch_model_endpoint(
self,
project: str,
endpoint_id: str,
model_endpoint: ModelEndpoint,
access_key: Optional[str] = None,
):
pass
@abstractmethod
def delete_model_endpoint_record(
self, project: str, endpoint_id: str, access_key: Optional[str] = None
):
pass
@abstractmethod
def list_model_endpoints(
self,
project: str,
model: Optional[str] = None,
function: Optional[str] = None,
labels: List[str] = None,
start: str = "now-1h",
end: str = "now",
metrics: Optional[List[str]] = None,
access_key: Optional[str] = None,
):
pass
@abstractmethod
def get_model_endpoint(
self,
project: str,
endpoint_id: str,
start: Optional[str] = None,
end: Optional[str] = None,
metrics: Optional[List[str]] = None,
features: bool = False,
access_key: Optional[str] = None,
):
pass
@abstractmethod
def create_marketplace_source(
self, source: Union[dict, schemas.IndexedMarketplaceSource]
):
pass
@abstractmethod
def store_marketplace_source(
self, source_name: str, source: Union[dict, schemas.IndexedMarketplaceSource]
):
pass
@abstractmethod
def list_marketplace_sources(self):
pass
@abstractmethod
def get_marketplace_source(self, source_name: str):
pass
@abstractmethod
def delete_marketplace_source(self, source_name: str):
pass
@abstractmethod
def get_marketplace_catalog(
self,
source_name: str,
channel: str = None,
version: str = None,
tag: str = None,
force_refresh: bool = False,
):
pass
@abstractmethod
def get_marketplace_item(
self,
source_name: str,
item_name: str,
channel: str = "development",
version: str = None,
tag: str = "latest",
force_refresh: bool = False,
):
pass
@abstractmethod
def verify_authorization(
self, authorization_verification_input: schemas.AuthorizationVerificationInput
):
pass
| 25.078471
| 89
| 0.593389
|
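RunDBInterface above is a pure contract: every backend (SQL, HTTP, file) implements the same run/artifact/feature-store surface. Below is a hedged, illustrative sketch (not part of mlrun) of how a concrete backend would start; Python only allows instantiation once every @abstractmethod is overridden, so this partial subclass is for illustration only:

from mlrun.db.base import RunDBInterface

class InMemoryRunDB(RunDBInterface):
    kind = "memory"

    def __init__(self):
        self._logs = {}

    def connect(self, secrets=None):
        return self

    def store_log(self, uid, project="", body=None, append=False):
        key = (project, uid)
        if append and key in self._logs:
            self._logs[key] += body or b""
        else:
            self._logs[key] = body or b""

    def get_log(self, uid, project="", offset=0, size=0):
        data = self._logs.get((project, uid), b"")
        return data[offset:offset + size] if size else data[offset:]

    # ...every remaining @abstractmethod needs a similar override before
    # InMemoryRunDB() can actually be constructed.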
4a180f790192ef78e33822d7fc13a9ca9f33ecf0
| 3,190
|
py
|
Python
|
DesignSpaceEditor.roboFontExt/lib/designSpaceEditorSettings.py
|
andyclymer/designSpaceRoboFontExtension
|
6bd0f7a5becbb465e4eeef71d33faab5659273e9
|
[
"MIT"
] | null | null | null |
DesignSpaceEditor.roboFontExt/lib/designSpaceEditorSettings.py
|
andyclymer/designSpaceRoboFontExtension
|
6bd0f7a5becbb465e4eeef71d33faab5659273e9
|
[
"MIT"
] | null | null | null |
DesignSpaceEditor.roboFontExt/lib/designSpaceEditorSettings.py
|
andyclymer/designSpaceRoboFontExtension
|
6bd0f7a5becbb465e4eeef71d33faab5659273e9
|
[
"MIT"
] | null | null | null |
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.extensions import getExtensionDefault, setExtensionDefault, ExtensionBundle
from vanilla import *
defaultOptions = {
"instanceFolderName": "instances",
}
settingsIdentifier = "com.letterror.designspaceeditor"
def updateWithDefaultValues(data, defaults):
for key, value in defaults.items():
if key in data:
continue
data[key] = value
class Settings(BaseWindowController):
identifier = "%s.%s" % (settingsIdentifier, "general")
def __init__(self, parentWindow, callback=None):
self.doneCallback = callback
data = getExtensionDefault(self.identifier, dict())
updateWithDefaultValues(data, defaultOptions)
width = 380
height = 1000
self.w = Sheet((width, height), parentWindow=parentWindow)
y = 10
self.w.instanceFolderNameEdit = EditText((160, y, -10, 20), data['instanceFolderName'], sizeStyle="small")
self.w.instanceFolderNameCaption = TextBox((10, y+3, 180, 20), "Instance folder name", sizeStyle="small")
# self.w.threaded = CheckBox((10, y, -10, 22), "Threaded", value=data["threaded"])
y += 30
# self.w.exportInFolders = CheckBox((10, y, -10, 22), "Export in Sub Folders", value=data["exportInFolders"])
y += 30
# self.w.keepFileNames = CheckBox((10, y, -10, 22), "Keep file names (otherwise use familyName-styleName)", value=data["keepFileNames"])
y += 35
self.w.saveButton = Button((-100, y, -10, 20), "Save settings", callback=self.saveCallback, sizeStyle="small")
self.w.setDefaultButton(self.w.saveButton)
self.w.closeButton = Button((-190, y, -110, 20), "Cancel", callback=self.closeCallback, sizeStyle="small")
self.w.closeButton.bind(".", ["command"])
self.w.closeButton.bind(chr(27), [])
self.w.resetButton = Button((-280, y, -200, 20), "Reset", callback=self.resetCallback, sizeStyle="small")
y += 30
self.w.resize(width, y, False)
self.w.open()
def resetCallback(self, sender):
self.w.instanceFolderName = "instances"
self.w.instanceFolderNameEdit.set(self.w.instanceFolderName)
#self.w.threaded.set(defaultOptions["threaded"])
#self.w.exportInFolders.set(defaultOptions["exportInFolders"])
def saveCallback(self, sender):
data = {
"instanceFolderName": self.w.instanceFolderNameEdit.get(),
#"exportInFolders": self.w.exportInFolders.get(),
#"keepFileNames": self.w.keepFileNames.get()
}
setExtensionDefault(self.identifier, data)
self.closeCallback(sender)
def closeCallback(self, sender):
if self.doneCallback is not None:
self.doneCallback(self)
self.w.close()
if __name__ == "__main__":
class TestWindow(BaseWindowController):
def __init__(self):
# a test window to attach the settings sheet to
self.instanceFolderName = "Aaaaa"
self.w = Window((500, 500), "Test")
self.w.open()
Settings(self.w)
w = TestWindow()
| 37.093023
| 144
| 0.641379
|
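The updateWithDefaultValues helper above is equivalent to calling dict.setdefault per key: keys the user already set win, and only missing keys fall back to the defaults. A minimal standalone sketch outside RoboFont:

defaults = {"instanceFolderName": "instances"}

def update_with_default_values(data, defaults):
    # existing keys win; only missing keys fall back to the defaults
    for key, value in defaults.items():
        data.setdefault(key, value)

data = {"instanceFolderName": "build"}
update_with_default_values(data, defaults)
assert data == {"instanceFolderName": "build"}

data = {}
update_with_default_values(data, defaults)
assert data == {"instanceFolderName": "instances"}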
4a181020bdf1187f22148f96a2f2ce9fe5916c4e
| 802
|
py
|
Python
|
app/base/forms.py
|
kcinnoy/msi_2
|
8d9d26fb1bb542e7e6700ca1cb3122bb56860f3c
|
[
"MIT"
] | null | null | null |
app/base/forms.py
|
kcinnoy/msi_2
|
8d9d26fb1bb542e7e6700ca1cb3122bb56860f3c
|
[
"MIT"
] | null | null | null |
app/base/forms.py
|
kcinnoy/msi_2
|
8d9d26fb1bb542e7e6700ca1cb3122bb56860f3c
|
[
"MIT"
] | 1
|
2021-07-02T17:10:38.000Z
|
2021-07-02T17:10:38.000Z
|
# -*- encoding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 - present AppSeed.us
"""
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import InputRequired, Email, DataRequired
## login and registration
class LoginForm(FlaskForm):
username = TextField('Username', id='username_login', validators=[DataRequired()])
password = PasswordField('Password', id='pwd_login', validators=[DataRequired()])
class CreateAccountForm(FlaskForm):
username = TextField('Username', id='username_create', validators=[DataRequired()])
email = TextField('Email', id='email_create', validators=[DataRequired(), Email()])
password = PasswordField('Password', id='pwd_create', validators=[DataRequired()])
| 38.190476
| 102
| 0.700748
|
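A hedged sketch of wiring LoginForm into a Flask view; the route, template string, and import path are assumptions for illustration, and Flask-WTF needs a SECRET_KEY for its CSRF token:

from flask import Flask, render_template_string

from app.base.forms import LoginForm  # import path assumed from the repo layout above

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'  # required by Flask-WTF CSRF protection

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()  # FlaskForm populates itself from request.form
    if form.validate_on_submit():
        return 'welcome %s' % form.username.data
    return render_template_string(
        '<form method="post">{{ form.hidden_tag() }}'
        '{{ form.username }} {{ form.password }}<input type="submit"></form>',
        form=form)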
4a1810516bed5018f2e0c98a0ab2e086b36788a7
| 152
|
py
|
Python
|
mayan/apps/appearance/literals.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 4
|
2021-09-02T00:16:30.000Z
|
2021-09-09T22:25:15.000Z
|
mayan/apps/appearance/literals.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 86
|
2021-09-01T23:53:02.000Z
|
2021-09-20T02:25:10.000Z
|
mayan/apps/appearance/literals.py
|
nattangwiwat/Mayan-EDMS-recitation
|
fcf16afb56eae812fb99144d65ae1ae6749de0b7
|
[
"Apache-2.0"
] | 70
|
2021-09-01T12:54:51.000Z
|
2022-02-16T00:53:18.000Z
|
COMMENT_APP_TEMPLATE_CACHE_DISABLE = '{# appearance_app_template_nocache #}'
DEFAULT_MAXIMUM_TITLE_LENGTH = 120
DEFAULT_MESSAGE_POSITION = 'top-right'
| 30.4
| 76
| 0.842105
|
4a1811ea23d3c3b31a92ca01a9391ebf16461e4f
| 2,154
|
py
|
Python
|
Project-5/src/naiveBayes.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | null | null | null |
Project-5/src/naiveBayes.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | 12
|
2020-01-28T22:09:15.000Z
|
2022-03-11T23:16:26.000Z
|
Project-5/src/naiveBayes.py
|
TooSchoolForCool/EE219-Larger-Scale-Data-Mining
|
9a42c88169ace88f9b652d0e174c7f641fcc522e
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
#######################################################################
# Multinomial Naive Bayes Classifier
#######################################################################
class NaiveBayes(object):
#######################################################################
# Constructor
#
# model_type:
# binary -> 2-class classification
#######################################################################
def __init__(self, model_type='binary'):
self.nb_ = MultinomialNB()
#######################################################################
# Model Training function
# Input:
# x:
# feature vector
# [[x, ..., x], [x, ..., x], ..., [x, ..., x]]
# y:
# ground-truth label vector
# [y1, y2, ..., yn]
#######################################################################
def train(self, x, y):
self.nb_.fit(x, y)
#######################################################################
# Model Prediction Function
# Input:
# x:
# feature vector data set
# [[x, ..., x], [x, ..., x], ..., [x, ..., x]]
#
# Output:
# predicted_y:
# predicted label vector
# [y1, y2, y3, ..., yn]
#######################################################################
def predict(self, x):
predicted_y = self.nb_.predict(x)
return predicted_y
#######################################################################
# Get predicted y score
# Input:
# x:
# feature vector data set
# type: Pandas DataFrame (n * p dimension)
#
# Output:
# Distance of the samples X to the separating hyperplane.
#######################################################################
def predictScore(self, x):
predicted_prob = self.nb_.predict_proba(x)
return predicted_prob[:, 1]
def main():
pass
if __name__ == '__main__':
main()
| 31.217391
| 75
| 0.353296
|
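A hedged usage sketch for the NaiveBayes wrapper above, with toy count features (MultinomialNB expects non-negative features such as term counts; the data below is made up):

import numpy as np
# assumes the NaiveBayes class above is importable, e.g.:
# from naiveBayes import NaiveBayes

x_train = np.array([[2, 1, 0], [0, 1, 3], [1, 0, 0], [0, 2, 2]])
y_train = np.array([0, 1, 0, 1])

clf = NaiveBayes()
clf.train(x_train, y_train)
print(clf.predict(np.array([[1, 1, 0]])))       # predicted labels, e.g. [0]
print(clf.predictScore(np.array([[1, 1, 0]])))  # P(class == 1) for each row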
4a1812759ec6f450881e2a94a406999b4c5c646c
| 1,409
|
py
|
Python
|
nipype/interfaces/spm/tests/test_auto_SliceTiming.py
|
nicholsn/nipype
|
6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3
|
[
"BSD-3-Clause"
] | 1
|
2018-04-18T12:13:37.000Z
|
2018-04-18T12:13:37.000Z
|
nipype/interfaces/spm/tests/test_auto_SliceTiming.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/spm/tests/test_auto_SliceTiming.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T14:31:47.000Z
|
2021-09-08T14:31:47.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.spm.preprocess import SliceTiming
def test_SliceTiming_inputs():
input_map = dict(ignore_exception=dict(nohash=True,
usedefault=True,
),
in_files=dict(copyfile=False,
field='scans',
mandatory=True,
),
matlab_cmd=dict(),
mfile=dict(usedefault=True,
),
num_slices=dict(field='nslices',
mandatory=True,
),
out_prefix=dict(field='prefix',
usedefault=True,
),
paths=dict(),
ref_slice=dict(field='refslice',
mandatory=True,
),
slice_order=dict(field='so',
mandatory=True,
),
time_acquisition=dict(field='ta',
mandatory=True,
),
time_repetition=dict(field='tr',
mandatory=True,
),
use_mcr=dict(),
use_v8struct=dict(min_ver='8',
usedefault=True,
),
)
inputs = SliceTiming.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SliceTiming_outputs():
output_map = dict(timecorrected_files=dict(),
)
outputs = SliceTiming.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 25.618182
| 78
| 0.657913
|
4a181329db5117d86e7b73ca75ca1330f9505c7c
| 861
|
py
|
Python
|
main/acl/template.py
|
matu3ba/cports
|
deab766f255539c3679b78706ec4d194bc019dc9
|
[
"BSD-2-Clause"
] | null | null | null |
main/acl/template.py
|
matu3ba/cports
|
deab766f255539c3679b78706ec4d194bc019dc9
|
[
"BSD-2-Clause"
] | null | null | null |
main/acl/template.py
|
matu3ba/cports
|
deab766f255539c3679b78706ec4d194bc019dc9
|
[
"BSD-2-Clause"
] | null | null | null |
pkgname = "acl"
pkgver = "2.3.1"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
f"--libdir=/usr/lib",
f"--libexecdir=/usr/lib"
]
hostmakedepends = ["pkgconf"]
makedepends = ["attr-devel"]
checkdepends = ["perl"]
pkgdesc = "Access Control List filesystem support"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://savannah.nongnu.org/projects/acl"
source = f"$(NONGNU_SITE)/acl/acl-{pkgver}.tar.gz"
sha256 = "760c61c68901b37fdd5eefeeaf4c0c7a26bdfdd8ac747a1edff1ce0e243c11af"
# test suite makes assumptions about a GNU environment
options = ["bootstrap", "!check"]
@subpackage("acl-devel")
def _devel(self):
self.depends += ["attr-devel"]
return self.default_devel(extra = ["usr/share/man/man5"])
@subpackage("acl-progs")
def _progs(self):
return self.default_progs(extra = ["usr/share"])
| 28.7
| 75
| 0.710801
|
4a1813611d167a21bb9064ae9e3ccd025e4ca613
| 6,615
|
py
|
Python
|
src/stactools/sentinel1/rtc_metadata.py
|
scottyhq/sentinel1
|
772c3145c2359a0f4115687df519d5e04f7b8c56
|
[
"Apache-2.0"
] | null | null | null |
src/stactools/sentinel1/rtc_metadata.py
|
scottyhq/sentinel1
|
772c3145c2359a0f4115687df519d5e04f7b8c56
|
[
"Apache-2.0"
] | null | null | null |
src/stactools/sentinel1/rtc_metadata.py
|
scottyhq/sentinel1
|
772c3145c2359a0f4115687df519d5e04f7b8c56
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Optional
import pystac
from pystac.utils import str_to_datetime
import rasterio
import rasterio.features
from rasterio import Affine as A
from rasterio.warp import transform_geom
from shapely.geometry import mapping, shape
import numpy as np
import os
import json
import logging
logger = logging.getLogger(__name__)
class RTCMetadata:
def __init__(self, href, asset):
self.href = href
self.asset = asset
def _load_metadata_from_asset(scale=1, precision=5):
''' key metadata stored in Geotiff tags '''
with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',
GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):
with rasterio.open(os.path.join(href, self.asset)) as src:
metadata = src.profile
metadata.update(src.tags())
# other useful things that aren't already keys in src.profile
metadata['PROJ_BBOX'] = list(src.bounds)
metadata['SHAPE'] = src.shape
bbox, footprint = _get_geometries(src, scale, precision)
return metadata, bbox, footprint
def _get_geometries(src, scale, precision):
''' scale can be 1, 2, 4, 8, or 16. scale=1 creates the most precise footprint
at the expense of reading all pixel values. scale=2 reads 1/4 the amount
of data but overestimates the footprint by at least 1 pixel (20 meters).
'''
with rasterio.vrt.WarpedVRT(src, crs='EPSG:4326') as vrt:
bbox = [np.round(x, decimals=precision) for x in vrt.bounds]
arr = src.read(1,
out_shape=(src.height // scale, src.width // scale))
arr[np.where(arr != 0)] = 1
transform = src.transform * A.scale(scale)
# Get polygon covering entire valid data region
rioshapes = rasterio.features.shapes(arr, transform=transform)
max_perimeter = 0
max_geometry = None
for geom, val in rioshapes:
if val == 1:
geometry = shape(geom)
if geometry.length > max_perimeter:
max_perimeter = geometry.length
max_geometry = geometry
valid_geom = mapping(max_geometry.convex_hull)
footprint = transform_geom(src.crs,
"EPSG:4326",
valid_geom,
precision=precision)
return bbox, footprint
def _get_provenance():
''' RTC products are from mosaiced GRD frames '''
# NOTE: just GRD frame names? or additional info, like IPF from manifest.safe
# <safe:software name="Sentinel-1 IPF" version="002.72"/>
grd_ids = []
for i in range(1, int(self.metadata['NUMBER_SCENES']) + 1):
m = json.loads(self.metadata[f'SCENE_{i}_METADATA'])
grd_ids.append(m['title'])
return grd_ids
def _get_times():
''' UTC start and end times of GRDs used in RTC product '''
times = []
for i in range(1, int(self.metadata['NUMBER_SCENES']) + 1):
m = json.loads(self.metadata[f'SCENE_{i}_METADATA'])
times += [m['start_time'], m['end_time']]
start = str_to_datetime(min(times))
end = str_to_datetime(max(times))
mid = start + (end - start) / 2
return start, mid, end
self.metadata, self.bbox, self.geometry = _load_metadata_from_asset()
self.grd_ids = _get_provenance()
self.start_datetime, self.datetime, self.end_datetime = _get_times()
@property
def product_id(self) -> str:
date = self.metadata['DATE'].replace('-', '')
orbNames = {'ascending': 'ASC', 'descending': 'DSC'}
orb = orbNames[self.metadata['ORBIT_DIRECTION']]
id = f"{self.metadata['MISSION_ID']}_{date}_{self.metadata['TILE_ID']}_{orb}"
return id
@property
def image_media_type(self) -> str:
return pystac.MediaType.COG
@property
def shape(self) -> List[int]:
return self.metadata['SHAPE']
@property
def image_paths(self) -> List[str]:
return ['Gamma0_VV.tif', 'Gamma0_VH.tif', 'local_incident_angle.tif']
@property
def absolute_orbit(self) -> Optional[int]:
return int(self.metadata['ABSOLUTE_ORBIT_NUMBER'])
@property
def relative_orbit(self) -> Optional[int]:
'''https://forum.step.esa.int/t/sentinel-1-relative-orbit-from-filename/7042 '''
adjust = {'S1B': 27, 'S1A': 73}
rel_orbit = (
(self.absolute_orbit - adjust[self.metadata['MISSION_ID']]) %
175) + 1
return rel_orbit
@property
def orbit_state(self) -> Optional[str]:
return self.metadata['ORBIT_DIRECTION']
@property
def platform(self) -> Optional[str]:
platformMap = dict(S1A='sentinel-1a', S1B='sentinel-1b')
return platformMap[self.metadata['MISSION_ID']]
@property
def proj_bbox(self) -> Optional[str]:
return self.metadata['PROJ_BBOX']
@property
def epsg(self) -> Optional[str]:
return self.metadata['crs'].to_epsg()
@property
def metadata_dict(self):
''' match s2 l2a cogs from https://earth-search.aws.element84.com/v0 '''
sentinel_metadata = {
'sentinel:mgrs': self.metadata['TILE_ID'],
'sentinel:utm_zone': self.metadata['TILE_ID'][:2],
'sentinel:latitude_band': self.metadata['TILE_ID'][2],
'sentinel:grid_square': self.metadata['TILE_ID'][3:],
'sentinel:product_ids': self.grd_ids,
'sentinel:data_coverage': self.metadata['VALID_PIXEL_PERCENT'],
}
return sentinel_metadata
@property
def asset_dict(self):
''' map image_path (geotif) to pystac.Asset fields '''
asset_dict = {
'Gamma0_VV.tif':
dict(key='gamma0_vv',
title='Gamma0 VV backscatter',
roles=['data', 'gamma0']),
'Gamma0_VH.tif':
dict(key='gamma0_vh',
title='Gamma0 VH backscatter',
roles=['data', 'gamma0']),
'local_incident_angle.tif':
dict(key='incidence',
title='Local incidence angle',
roles=['data', 'local-incidence-angle'])
}
return asset_dict
| 37.162921
| 89
| 0.570824
|
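The relative_orbit property above can be checked by hand: Sentinel-1 repeats its ground track every 175 orbits, and the per-satellite offset aligns absolute orbit counting with relative-orbit numbering. A standalone restatement with illustrative orbit numbers:

adjust = {'S1B': 27, 'S1A': 73}

def relative_orbit(mission_id, absolute_orbit):
    # same arithmetic as RTCMetadata.relative_orbit above
    return ((absolute_orbit - adjust[mission_id]) % 175) + 1

print(relative_orbit('S1A', 73))   # -> 1
print(relative_orbit('S1A', 247))  # -> 175 (last orbit of the cycle)
print(relative_orbit('S1A', 248))  # -> 1 (cycle wraps)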
4a181452ac41a30ad622005bf73d418e82777492
| 56
|
py
|
Python
|
pyit/lint.py
|
ysv/pyit
|
681535dd162613ee4ab8bb55216f0770e596f82e
|
[
"MIT"
] | null | null | null |
pyit/lint.py
|
ysv/pyit
|
681535dd162613ee4ab8bb55216f0770e596f82e
|
[
"MIT"
] | null | null | null |
pyit/lint.py
|
ysv/pyit
|
681535dd162613ee4ab8bb55216f0770e596f82e
|
[
"MIT"
] | null | null | null |
class Lint:
def __init__(self, file):
pass
| 11.2
| 29
| 0.571429
|
4a18159f101c4af8d40b8351bb422ee08d709391
| 3,392
|
py
|
Python
|
jarvis.py
|
royhunter/JarvisControl
|
0203dab0a647174253797b53d7f7329ac928acb2
|
[
"MIT"
] | null | null | null |
jarvis.py
|
royhunter/JarvisControl
|
0203dab0a647174253797b53d7f7329ac928acb2
|
[
"MIT"
] | null | null | null |
jarvis.py
|
royhunter/JarvisControl
|
0203dab0a647174253797b53d7f7329ac928acb2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""jarvis.py
"""
import asyncore
import ctypes
import json
import socket
import struct
from common import hacker, message
USERNAME = ''
PASSWORD = ''
class JarvisAgent(asyncore.dispatcher):
"""JarvisAgent
"""
def __init__(self, host, token):
asyncore.dispatcher.__init__(self)
self.buffer = None
self.hacker = hacker.JarvisHacker(USERNAME, PASSWORD)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, 6000))
self.jarvis_agent_register(token)
def handle_connect(self):
"""handle_connect
"""
pass
def handle_close(self):
self.close()
def handle_read(self):
data = self.recv(ctypes.sizeof(message.MsgHeader))
msgtype, msglen = struct.unpack('!HH', data)
if msglen > ctypes.sizeof(message.MsgHeader):
self.recv(msglen - ctypes.sizeof(message.MsgHeader))
self.msg_dispatcher(msgtype)
def writable(self):
return len(self.buffer) > 0
def handle_write(self):
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
def jarvis_agent_register(self, token):
"""registerJarvisAgent
"""
body = struct.pack("!L", token)
msg = message.ProxyMsg(message.MESSAGE_TYPE_TOKEN, body)
self.buffer = msg.str()
def msg_dispatcher(self, msgtype):
"""msg_dispatcher
"""
if msgtype == message.MESSAGE_TYPE_TESTBED_RENEW:
self.jarvis_tb_renew()
elif msgtype == message.MESSAGE_TYPE_TESTBED_DELETE:
self.jarvis_tb_delete()
self.jarvis_tb_list()
def jarvis_tb_list(self):
"""jarvis_tb_list
"""
print "jarvisTestbedList"
#self.hacker.jarvis_login()
result = self.hacker.jarvis_testbed_list()
topo_list = self.jarvis_tb_parser(result)
if topo_list is None:
msg = message.ProxyMsg(message.MESSAGE_TYPE_TESTBED_LIST_ACK, None)
self.buffer = msg.str()
else:
body = ''
for topo in topo_list:
print(topo[0])
print(topo[1])
body = body + struct.pack(message.TopologyInfo.TESTBED_NAME_FMT,
topo[1],
str(topo[0]))
msg = message.ProxyMsg(message.MESSAGE_TYPE_TESTBED_LIST_ACK, body)
self.buffer = msg.str()
def jarvis_tb_renew(self):
"""jarvis_tb_renew
"""
pass
def jarvis_tb_delete(self):
"""jarvis_tb_delete
"""
pass
def jarvis_tb_parser(self, info):
""" jarvis_tb_parser
return [ [id, expiry], [id, expiry]...]
"""
json_obj = json.loads(info.decode('string-escape').strip('"'))
if len(json_obj) == 0:
return None
topo_list = []
for topology in json_obj:
topo = []
topo.append(topology["id"])
topo.append(topology["lease_expiry"])
topo_list.append(topo)
if len(topo_list) == 0:
return None
return topo_list
def jarvis_main():
"""1. username
2. passwd
3. token
"""
jarvis = JarvisAgent('localhost', 132)
asyncore.loop()
if __name__ == "__main__":
jarvis_main()
| 26.5
| 80
| 0.574882
|
4a1815ce58e31d77de13c6c4063d45744fc989b9
| 1,256
|
py
|
Python
|
view/palettes/dark.py
|
AWhiteFox/questwriter
|
129776eb99de943cb279f276d9c6bff7135fb309
|
[
"MIT"
] | 1
|
2021-11-01T12:55:21.000Z
|
2021-11-01T12:55:21.000Z
|
view/palettes/dark.py
|
AWhiteFox/questwriter
|
129776eb99de943cb279f276d9c6bff7135fb309
|
[
"MIT"
] | null | null | null |
view/palettes/dark.py
|
AWhiteFox/questwriter
|
129776eb99de943cb279f276d9c6bff7135fb309
|
[
"MIT"
] | null | null | null |
from PyQt5.QtGui import QPalette, QColor
class DarkPalette(QPalette):
def __init__(self):
super().__init__()
black = QColor('#313335')
gray = QColor('#3C3F41')
primary = QColor('#4B6EAF')
white = QColor('#FFFFFF')
self.setColor(QPalette.Window, gray)
self.setColor(QPalette.WindowText, white)
self.setColor(QPalette.Base, black)
self.setColor(QPalette.AlternateBase, gray)
self.setColor(QPalette.ToolTipBase, primary)
self.setColor(QPalette.ToolTipText, white)
self.setColor(QPalette.Text, white)
self.setColor(QPalette.Button, gray)
self.setColor(QPalette.ButtonText, white)
self.setColor(QPalette.Link, primary)
self.setColor(QPalette.Highlight, primary)
self.setColor(QPalette.HighlightedText, white)
self.setColor(QPalette.Active, QPalette.Button, black)
self.setColor(QPalette.Disabled, QPalette.Base, gray)
self.setColor(QPalette.Disabled, QPalette.ButtonText, white.darker())
self.setColor(QPalette.Disabled, QPalette.WindowText, gray)
self.setColor(QPalette.Disabled, QPalette.Text, white.darker())
self.setColor(QPalette.Disabled, QPalette.Light, black)
| 39.25
| 77
| 0.679936
|
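A hedged sketch of applying DarkPalette application-wide; the 'Fusion' style is a common companion choice because it honors QPalette overrides consistently across platforms (import path taken from the repo layout above):

import sys
from PyQt5.QtWidgets import QApplication, QPushButton
from view.palettes.dark import DarkPalette

app = QApplication(sys.argv)
app.setStyle('Fusion')         # Fusion respects custom palettes reliably
app.setPalette(DarkPalette())
button = QPushButton('Dark themed')
button.show()
sys.exit(app.exec_())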
4a18167f5d0a6d2473103e455e7ea29f96e30975
| 13,131
|
py
|
Python
|
mkt/stats/views.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/stats/views.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/stats/views.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
from django import http
import commonware
import requests
from rest_framework.exceptions import ParseError
from rest_framework.generics import ListAPIView
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework.views import APIView
import amo
from lib.metrics import get_monolith_client
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowAppOwner, AnyOf, GroupPermission
from mkt.api.base import CORSMixin, SlugOrIdMixin
from mkt.api.exceptions import ServiceUnavailable
from mkt.purchase.models import Contribution
from mkt.webapps.models import Webapp
from .forms import StatsForm
log = commonware.log.getLogger('z.stats')
class PublicStats(BasePermission):
"""
Allow for apps with `public_stats` set to True.
"""
def has_permission(self, request, view):
# Anonymous is allowed if app.public_stats is True.
return True
def has_object_permission(self, request, view, obj):
return obj.public_stats
# Map of URL metric name to monolith metric name.
#
# The 'dimensions' key holds optional query string arguments with defaults that
# are passed to the monolith client and used in the facet filters. If the default
# is `None`, the dimension is excluded unless specified via the API.
#
# The 'lines' key is optional and used for multi-line charts. The format is:
# {'<name>': {'<dimension-key>': '<dimension-value>'}}
# where <name> is what's returned in the JSON output and the dimension
# key/value is what's sent to Monolith similar to the 'dimensions' above.
#
# The 'coerce' key is optional and used to coerce data types returned from
# monolith to other types. Provide the name of the key in the data you want to
# coerce with a callback for how you want the data coerced. E.g.:
# {'count': str}
lines = lambda name, vals: dict((val, {name: val}) for val in vals)
STATS = {
'apps_added_by_package': {
'metric': 'apps_added_package_count',
'dimensions': {'region': 'us'},
'lines': lines('package_type', amo.ADDON_WEBAPP_TYPES.values()),
},
'apps_added_by_premium': {
'metric': 'apps_added_premium_count',
'dimensions': {'region': 'us'},
'lines': lines('premium_type', amo.ADDON_PREMIUM_API.values()),
},
'apps_available_by_package': {
'metric': 'apps_available_package_count',
'dimensions': {'region': 'us'},
'lines': lines('package_type', amo.ADDON_WEBAPP_TYPES.values()),
},
'apps_available_by_premium': {
'metric': 'apps_available_premium_count',
'dimensions': {'region': 'us'},
'lines': lines('premium_type', amo.ADDON_PREMIUM_API.values()),
},
'apps_installed': {
'metric': 'app_installs',
'dimensions': {'region': None},
},
'total_developers': {
'metric': 'total_dev_count',
},
'total_visits': {
'metric': 'visits',
},
'ratings': {
'metric': 'apps_ratings',
},
'abuse_reports': {
'metric': 'apps_abuse_reports',
},
'revenue': {
'metric': 'gross_revenue',
# Counts are floats. Let's convert them to strings with 2 decimals.
'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
},
}
APP_STATS = {
'installs': {
'metric': 'app_installs',
'dimensions': {'region': None},
},
'visits': {
'metric': 'app_visits',
},
'ratings': {
'metric': 'apps_ratings',
},
'average_rating': {
'metric': 'apps_average_rating',
},
'abuse_reports': {
'metric': 'apps_abuse_reports',
},
'revenue': {
'metric': 'gross_revenue',
# Counts are floats. Let's convert them to strings with 2 decimals.
'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
},
}
# The total API will iterate over each key and return statistical totals
# information on them all.
STATS_TOTAL = {
'installs': {
'metric': 'app_installs',
},
'ratings': {
'metric': 'apps_ratings',
},
'abuse_reports': {
'metric': 'apps_abuse_reports',
},
}
APP_STATS_TOTAL = {
'installs': {
'metric': 'app_installs',
},
'ratings': {
'metric': 'apps_ratings',
},
'abuse_reports': {
'metric': 'apps_abuse_reports',
},
}
def _get_monolith_data(stat, start, end, interval, dimensions):
# If stat has a 'lines' attribute, it's a multi-line graph. Do a
# request for each item in 'lines' and compose them in a single
# response.
try:
client = get_monolith_client()
except requests.ConnectionError as e:
log.info('Monolith connection error: {0}'.format(e))
raise ServiceUnavailable
def _coerce(data):
for key, coerce in stat.get('coerce', {}).items():
if data.get(key):
data[key] = coerce(data[key])
return data
try:
data = {}
if 'lines' in stat:
for line_name, line_dimension in stat['lines'].items():
dimensions.update(line_dimension)
data[line_name] = map(_coerce,
client(stat['metric'], start, end,
interval, **dimensions))
else:
data['objects'] = map(_coerce,
client(stat['metric'], start, end, interval,
**dimensions))
except ValueError as e:
# This occurs if monolith doesn't have our metric and we get an
# elasticsearch SearchPhaseExecutionException error.
log.info('Monolith ValueError for metric {0}: {1}'.format(
stat['metric'], e))
raise ParseError('Invalid metric at this time. Try again later.')
return data
class GlobalStats(CORSMixin, APIView):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('Stats', 'View')]
def get(self, request, metric):
if metric not in STATS:
raise http.Http404('No metric by that name.')
stat = STATS[metric]
# Perform form validation.
form = StatsForm(request.GET)
if not form.is_valid():
raise ParseError(dict(form.errors.items()))
qs = form.cleaned_data
dimensions = {}
if 'dimensions' in stat:
for key, default in stat['dimensions'].items():
val = request.GET.get(key, default)
if val is not None:
# Avoid passing kwargs to the monolith client when the
# dimension is None to avoid facet filters being applied.
dimensions[key] = request.GET.get(key, default)
return Response(_get_monolith_data(stat, qs.get('start'),
qs.get('end'), qs.get('interval'),
dimensions))
class AppStats(CORSMixin, SlugOrIdMixin, ListAPIView):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [AnyOf(PublicStats, AllowAppOwner,
GroupPermission('Stats', 'View'))]
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def get(self, request, pk, metric):
if metric not in APP_STATS:
raise http.Http404('No metric by that name.')
app = self.get_object()
stat = APP_STATS[metric]
# Perform form validation.
form = StatsForm(request.GET)
if not form.is_valid():
raise ParseError(dict(form.errors.items()))
qs = form.cleaned_data
dimensions = {'app-id': app.id}
if 'dimensions' in stat:
for key, default in stat['dimensions'].items():
val = request.GET.get(key, default)
if val is not None:
# Avoid passing kwargs to the monolith client when the
# dimension is None to avoid facet filters being applied.
dimensions[key] = request.GET.get(key, default)
return Response(_get_monolith_data(stat, qs.get('start'),
qs.get('end'), qs.get('interval'),
dimensions))
class StatsTotalBase(object):
"""
A place for a few helper methods for totals stats API.
"""
def get_client(self):
try:
client = get_monolith_client()
except requests.ConnectionError as e:
log.info('Monolith connection error: {0}'.format(e))
raise ServiceUnavailable
return client
def get_query(self, metric, field, app_id=None):
query = {
'query': {
'match_all': {}
},
'facets': {
metric: {
'statistical': {
'field': field
}
}
},
'size': 0
}
# If this is per-app, add the facet_filter.
if app_id:
query['facets'][metric]['facet_filter'] = {
'term': {
'app-id': app_id
}
}
return query
def process_response(self, resp, data):
for metric, facet in resp.get('facets', {}).items():
count = facet.get('count', 0)
# We filter out facets with count=0 to avoid returning things
# like `'max': u'-Infinity'`.
if count > 0:
for field in ('max', 'mean', 'min', 'std_deviation',
'sum_of_squares', 'total', 'variance'):
value = facet.get(field)
if value is not None:
data[metric][field] = value
class GlobalStatsTotal(CORSMixin, APIView, StatsTotalBase):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('Stats', 'View')]
slug_field = 'app_slug'
def get(self, request):
client = self.get_client()
# Note: We have to do this as separate requests so that if one fails
# the rest can still be returned.
data = {}
for metric, stat in STATS_TOTAL.items():
data[metric] = {}
query = self.get_query(metric, stat['metric'])
try:
resp = client.raw(query)
except ValueError as e:
log.info('Received value error from monolith client: %s' % e)
continue
self.process_response(resp, data)
return Response(data)
class AppStatsTotal(CORSMixin, SlugOrIdMixin, ListAPIView, StatsTotalBase):
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [AnyOf(PublicStats, AllowAppOwner,
GroupPermission('Stats', 'View'))]
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def get(self, request, pk):
app = self.get_object()
client = self.get_client()
# Note: We have to do this as separate requests so that if one fails
# the rest can still be returned.
data = {}
for metric, stat in APP_STATS_TOTAL.items():
data[metric] = {}
query = self.get_query(metric, stat['metric'], app.id)
try:
resp = client.raw(query)
except ValueError as e:
log.info('Received value error from monolith client: %s' % e)
continue
self.process_response(resp, data)
return Response(data)
class TransactionAPI(CORSMixin, APIView):
"""
API to query by transaction ID.
Note: This is intended for Monolith to be able to associate a Solitude
transaction with an app and price tier amount in USD.
"""
authentication_classes = (RestOAuthAuthentication,
RestSharedSecretAuthentication)
cors_allowed_methods = ['get']
permission_classes = [GroupPermission('RevenueStats', 'View')]
def get(self, request, transaction_id):
try:
contrib = (Contribution.objects.select_related('price_tier').
get(transaction_id=transaction_id))
except Contribution.DoesNotExist:
raise http.Http404('No transaction by that ID.')
data = {
'id': transaction_id,
'app_id': contrib.addon_id,
'amount_USD': contrib.price_tier.price,
'type': amo.CONTRIB_TYPES[contrib.type],
}
return Response(data)
| 32.909774
| 79
| 0.579545
|
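For reference, the one-line `lines` helper near the top of mkt/stats/views.py just fans a dimension out into one entry per value; a quick standalone illustration with hypothetical values:

lines = lambda name, vals: dict((val, {name: val}) for val in vals)

print(lines('package_type', ['hosted', 'packaged']))
# {'hosted': {'package_type': 'hosted'},
#  'packaged': {'package_type': 'packaged'}}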
4a1817087aaa772dd0bf3cf824cfb0d8db5c49e6
| 3,852
|
py
|
Python
|
Z_ALL_FILE/Py1/tbot_site_stat_old.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py1/tbot_site_stat_old.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py1/tbot_site_stat_old.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | 1
|
2021-04-29T21:46:02.000Z
|
2021-04-29T21:46:02.000Z
|
import pandas as pd
import cx_Oracle
import sys
import time
import os
import telepot
from telepot.loop import MessageLoop
import sitehistory as st
import subprocess
TOKEN = '1184517046:AAFBnQe_HRMx4ANWbebp8W8rzQMlRb07nG4'
bot = telepot.Bot(TOKEN)
auth_file = os.getcwd() + "\\" + 'users.txt'
conn = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
print(conn)
def query(code):
qry1 = """Select * from (select distinct Summary AlarmText,(Case when Summary like '%2G%' then '2G' when
Summary like '%3G%' then '3G' else '4G' end) as Technology,CUSTOMATTR15 as SITECODE,FIRSTOCCURRENCE StartTime,ROUND((Sysdate-FIRSTOCCURRENCE)*24*60,2) DurationMIn,CLEARTIMESTAMP EndTime,CUSTOMATTR26 CRNumber,TTRequestTime, TTSequence, CUSTOMATTR23 as CI from alerts_status
where FirstOccurrence between TO_DATE(TO_CHAR(SYSDATE - 7, 'YYYYMMDD') || '0000', 'YYYYMMDDHH24MI') and TO_DATE(TO_CHAR(SYSDATE, 'YYYYMMDD') || '2359', 'YYYYMMDDHH24MI')
and X733EventType = 100 and agent != 'Total Site Down'--and CUSTOMATTR15 != 'UNKNOWN'
and Severity!= 0 and CustomAttr27 in (0,1) and Manager <> 'TSD Automation')t where t.Technology IN ('2G','3G','4G') and SITECODE like '%"""
qry2 = qry1 + code + "%'"
try:
df = pd.read_sql(qry2, con=conn)
print('try success')
except Exception:
connx = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
df = pd.read_sql(qry2, con=connx)
print('Except trigger')
print(df)
rows = df.shape[0]
heap = code + ":"
if rows != 0:
for i in range(0,len(df)):
tech = df.iloc[i]['TECHNOLOGY']
tm = df.iloc[i]['STARTTIME']
if '2G' in tech:
heap = heap + '\n' + "2G: Down, " + "Downtime: " + str(tm)
if '3G' in tech:
heap = heap + '\n' + "3G: Down, " + "Downtime: " + str(tm)
if '4G' in tech:
heap = heap + '\n' + "4G: Down, " + "Downtime: " + str(tm)
#print(heap)
else:
return heap + '\nAll Tech are up'
return heap
def auth_check(usrname,firstname):
fo = open(auth_file,"r+")
txt = fo.read()
fo.close()
if (usrname in txt) or (firstname in txt):
print("auth chk send ok")
return "OK"
else:
print("auth chk send not ok")
return "NOT"
def rdpcls():
subprocess.call(["E:\OmProject\Project20\Tele_BOT\rdp_cls.bat"])
return "done"
def query_handler(code):
return code
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type == 'text':
txt = msg['text']
cid = chat_id
frm = msg['from']
#uname = msg['from']['last_name']
uname = ""
fname = msg['from']['first_name']
print(uname)
print(cid)
apprv = auth_check(uname,fname)
if apprv == "OK":
if len(txt) == 7:
cd = txt.upper()
bot.sendMessage(chat_id, 'processing request for ' + cd + ', please wait')
getval = query(cd)
gethis = st.fnx(cd)
txtx = getval + '\n' + '\n' + 'Site Details:' + '\n' + gethis
bot.sendMessage(chat_id, txtx)
bot.sendMessage('671462535', txtx)
elif 'help' in txt:
bot.sendMessage(chat_id, 'just provide sitecode to know status')
elif 'rdp' in txt:
gtval = rdpcls()
bot.sendMessage(chat_id, 'Killed')
else:
bot.sendMessage(chat_id, 'Please Provide sitecode without space')
else:
bot.sendMessage(chat_id, 'You are not authorized')
MessageLoop(bot, handle).run_as_thread()
print ('Listening ...')
while 1:
time.sleep(10)
| 38.138614
| 276
| 0.586708
|
4a181747dabfb4dcc25c47946cde12192403d54a
| 301
|
py
|
Python
|
rabbitai/migrations/versions/ef8843b41dac_.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
rabbitai/migrations/versions/ef8843b41dac_.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
rabbitai/migrations/versions/ef8843b41dac_.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | 1
|
2021-07-09T16:29:50.000Z
|
2021-07-09T16:29:50.000Z
|
"""empty message
Revision ID: ef8843b41dac
Revises: ('3b626e2a6783', 'ab3d66c4246e')
Create Date: 2016-10-02 10:35:38.825231
"""
# revision identifiers, used by Alembic.
revision = "ef8843b41dac"
down_revision = ("3b626e2a6783", "ab3d66c4246e")
def upgrade():
pass
def downgrade():
pass
| 15.842105
| 48
| 0.710963
|
4a1818b5f1724d4becb8f95ed0566ea72be731cc
| 11,000
|
py
|
Python
|
tests/api/endpoints/admin/test_share_links.py
|
gzy403999903/seahub
|
992e5852579a6d9e0cfdaf18c77ce0191cb64449
|
[
"Apache-2.0"
] | null | null | null |
tests/api/endpoints/admin/test_share_links.py
|
gzy403999903/seahub
|
992e5852579a6d9e0cfdaf18c77ce0191cb64449
|
[
"Apache-2.0"
] | 6
|
2019-12-13T09:55:45.000Z
|
2022-03-11T23:47:29.000Z
|
tests/api/endpoints/admin/test_share_links.py
|
gzy403999903/seahub
|
992e5852579a6d9e0cfdaf18c77ce0191cb64449
|
[
"Apache-2.0"
] | 1
|
2019-05-16T06:58:16.000Z
|
2019-05-16T06:58:16.000Z
|
# -*- coding: utf-8 -*-
import json
from tests.common.utils import randstring
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
from seahub.share.models import FileShare
from seaserv import seafile_api
try:
from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
LOCAL_PRO_DEV_ENV = False
class AdminShareLinkTest(BaseTestCase):
def setUp(self):
self.repo_id = self.repo.id
self.file_path = self.file
self.folder_path = self.folder
self.invalid_token = '00000000000000000000'
def tearDown(self):
self.remove_repo()
def _add_file_share_link(self, password=None):
fs = FileShare.objects.create_file_link(
self.user.username, self.repo.id, self.file, password, None)
return fs.token
def _add_dir_share_link(self, password=None):
fs = FileShare.objects.create_dir_link(
self.user.username, self.repo.id, self.folder, password, None)
return fs.token
def _remove_share_link(self, token):
link = FileShare.objects.get(token=token)
link.delete()
def test_get_file_share_link_info_by_token(self):
self.login_as(self.admin)
token = self._add_file_share_link()
url = reverse('api-v2.1-admin-share-link', args=[token])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['token'] == token
assert json_resp['is_dir'] == False
assert json_resp['size'] is not None
self._remove_share_link(token)
def test_get_dir_share_link_info_by_token(self):
self.login_as(self.admin)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link', args=[token])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['token'] == token
assert json_resp['is_dir'] == True
self._remove_share_link(token)
def test_get_share_link_info_with_invalid_permission(self):
self.login_as(self.user)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link', args=[token])
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
self._remove_share_link(token)
def test_get_share_link_info_with_invalid_share_token(self):
self.login_as(self.admin)
url = reverse('api-v2.1-admin-share-link',
args=[self.invalid_token])
resp = self.client.get(url)
self.assertEqual(404, resp.status_code)
class AdminShareLinkDirentsTest(BaseTestCase):
def setUp(self):
self.repo_id = self.repo.id
self.folder_path = self.folder
self.invalid_token = '00000000000000000000'
def tearDown(self):
self.remove_repo()
def _add_dir_share_link(self, password=None):
fs = FileShare.objects.create_dir_link(
self.user.username, self.repo.id, self.folder, password, None)
return fs.token
def _remove_share_link(self, token):
link = FileShare.objects.get(token=token)
link.delete()
def test_get_dirents(self):
username = self.user.username
dir_name = randstring(6)
file_name = randstring(6)
seafile_api.post_dir(self.repo_id,
self.folder_path, dir_name, username)
seafile_api.post_empty_file(self.repo_id,
self.folder_path, file_name, username)
self.login_as(self.admin)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-dirents', args=[token])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp[0]['is_dir'] == True
assert dir_name in json_resp[0]['obj_name']
assert json_resp[1]['is_dir'] == False
assert file_name in json_resp[1]['obj_name']
self._remove_share_link(token)
def test_get_dirents_with_invalid_permission(self):
self.login_as(self.user)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-dirents', args=[token])
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
self._remove_share_link(token)
def test_get_dirents_with_invalid_share_token(self):
self.login_as(self.admin)
url = reverse('api-v2.1-admin-share-link-dirents',
args=[self.invalid_token])
resp = self.client.get(url)
self.assertEqual(404, resp.status_code)
class AdminShareLinkDownloadTest(BaseTestCase):
def setUp(self):
self.repo_id = self.repo.id
self.file_path = self.file
self.folder_path = self.folder
self.invalid_token = '00000000000000000000'
def tearDown(self):
self.remove_repo()
def _add_dir_share_link(self, password=None):
fs = FileShare.objects.create_dir_link(
self.user.username, self.repo.id, self.folder, password, None)
return fs.token
def _add_file_share_link(self, password=None):
fs = FileShare.objects.create_file_link(
self.user.username, self.repo.id, self.file, password, None)
return fs.token
def _remove_share_link(self, token):
link = FileShare.objects.get(token=token)
link.delete()
def test_download_shared_file(self):
self.login_as(self.admin)
token = self._add_file_share_link()
url = reverse('api-v2.1-admin-share-link-download', args=[token])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert '8082' in json_resp['download_link']
assert 'files' in json_resp['download_link']
self._remove_share_link(token)
def test_download_sub_file_in_shared_dir(self):
username = self.user.username
file_name = randstring(6)
seafile_api.post_empty_file(self.repo_id,
self.folder_path, file_name, username)
self.login_as(self.admin)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-download', args=[token])
resp = self.client.get(url + '?path=/%s&type=file' % file_name)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert '8082' in json_resp['download_link']
assert 'files' in json_resp['download_link']
self._remove_share_link(token)
def test_download_sub_dir_in_shared_dir(self):
username = self.user.username
dir_name = randstring(6)
seafile_api.post_dir(self.repo_id,
self.folder_path, dir_name, username)
self.login_as(self.admin)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-download', args=[token])
resp = self.client.get(url + '?path=/%s&type=folder' % dir_name)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert '8082' in json_resp['download_link']
assert 'zip' in json_resp['download_link']
self._remove_share_link(token)
def test_download_with_invalid_permission(self):
self.login_as(self.user)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-download', args=[token])
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
self._remove_share_link(token)
def test_download_with_invalid_share_token(self):
self.login_as(self.admin)
url = reverse('api-v2.1-admin-share-link-download',
args=[self.invalid_token])
resp = self.client.get(url)
self.assertEqual(404, resp.status_code)
class ShareLinkCheckPasswordTest(BaseTestCase):
def setUp(self):
self.repo_id = self.repo.id
self.file_path = self.file
self.folder_path = self.folder
self.invalid_token = '00000000000000000000'
def tearDown(self):
self.remove_repo()
def _add_file_share_link(self, password=None):
fs = FileShare.objects.create_file_link(
self.user.username, self.repo.id, self.file, password, None)
return fs.token
def _add_dir_share_link(self, password=None):
fs = FileShare.objects.create_dir_link(
self.user.username, self.repo.id, self.folder, password, None)
return fs.token
def _remove_share_link(self, token):
link = FileShare.objects.get(token=token)
link.delete()
def test_check_password(self):
self.login_as(self.admin)
#### create file share link ####
password = randstring(10)
token = self._add_file_share_link(password)
url = reverse('api-v2.1-admin-share-link-check-password', args=[token])
# check password for file share link
resp = self.client.post(url, {'password': password})
self.assertEqual(200, resp.status_code)
# remove file share link
self._remove_share_link(token)
#### create dir share link ####
password = randstring(10)
token = self._add_dir_share_link(password)
url = reverse('api-v2.1-admin-share-link-check-password', args=[token])
# check password for dir share link
resp = self.client.post(url, {'password': password})
self.assertEqual(200, resp.status_code)
# remove dir share link
self._remove_share_link(token)
def test_invalid_password(self):
self.login_as(self.admin)
password = randstring(10)
token = self._add_file_share_link(password)
url = reverse('api-v2.1-admin-share-link-check-password', args=[token])
# assert password is valid
resp = self.client.post(url, {'password': password})
self.assertEqual(200, resp.status_code)
# assert password is invalid
resp = self.client.post(url, {'password': 'invalid_password'})
self.assertEqual(403, resp.status_code)
self._remove_share_link(token)
def test_check_password_with_invalid_permission(self):
self.login_as(self.user)
token = self._add_dir_share_link()
url = reverse('api-v2.1-admin-share-link-check-password', args=[token])
resp = self.client.post(url)
self.assertEqual(403, resp.status_code)
self._remove_share_link(token)
def test_check_password_with_invalid_share_token(self):
self.login_as(self.admin)
url = reverse('api-v2.1-admin-share-link-check-password',
args=[self.invalid_token])
resp = self.client.post(url, {'password': 'invalid_password'})
self.assertEqual(404, resp.status_code)
| 31.339031
| 79
| 0.653545
|
4a1819aef1b77df885122ef5a3f076067a3a907e
| 40,291
|
py
|
Python
|
nova/virt/libvirt/guest.py
|
karimull/nova
|
9dcff4d4ed3e5ed5c0f58638c863562f4761495c
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/libvirt/guest.py
|
karimull/nova
|
9dcff4d4ed3e5ed5c0f58638c863562f4761495c
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/libvirt/guest.py
|
karimull/nova
|
9dcff4d4ed3e5ed5c0f58638c863562f4761495c
|
[
"Apache-2.0"
] | 1
|
2021-05-12T07:52:44.000Z
|
2021-05-12T07:52:44.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates a libvirt domain and provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes.
"""
import time
from lxml import etree
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
import six
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.privsep import libvirt as libvirt_privsep
from nova.virt import hardware
from nova.virt.libvirt import compat
from nova.virt.libvirt import config as vconfig
libvirt = None
LOG = logging.getLogger(__name__)
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# The DOMAIN_BLOCKED state is only valid in Xen. It means that
# the VM is running and the vCPU is idle. So, we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
# is being shut down. So technically the domain is still
# running. SHUTOFF is the real powered off state. But we will map
# both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
class Guest(object):
def __init__(self, domain):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._domain = domain
def __repr__(self):
return "<Guest %(id)d %(name)s %(uuid)s>" % {
'id': self.id,
'name': self.name,
'uuid': self.uuid
}
@property
def id(self):
return self._domain.ID()
@property
def uuid(self):
return self._domain.UUIDString()
@property
def name(self):
return self._domain.name()
@property
def _encoded_xml(self):
return encodeutils.safe_decode(self._domain.XMLDesc(0))
@classmethod
def create(cls, xml, host):
"""Create a new Guest
:param xml: XML definition of the domain to create
:param host: host.Host connection to define the guest on
:returns guest.Guest: Guest ready to be launched
"""
try:
if six.PY3 and isinstance(xml, six.binary_type):
xml = xml.decode('utf-8')
guest = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a guest with XML: %s',
encodeutils.safe_decode(xml))
return guest
def launch(self, pause=False):
"""Starts a created guest.
:param pause: Indicates whether to start and pause the guest
"""
flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
try:
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Error launching a defined domain '
'with XML: %s',
self._encoded_xml, errors='ignore')
def poweroff(self):
"""Stops a running guest."""
self._domain.destroy()
def sync_guest_time(self):
"""Try to set VM time to the current value. This is typically useful
        when the clock wasn't running in the VM for some time (e.g. during
suspension or migration), especially if the time delay exceeds NTP
tolerance.
It is not guaranteed that the time is actually set (it depends on guest
environment, especially QEMU agent presence) or that the set time is
very precise (NTP in the guest should take care of it if needed).
"""
t = time.time()
seconds = int(t)
nseconds = int((t - seconds) * 10 ** 9)
try:
self._domain.setTime(time={'seconds': seconds,
'nseconds': nseconds})
except libvirt.libvirtError as e:
code = e.get_error_code()
if code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
LOG.debug('Failed to set time: QEMU agent unresponsive',
instance_uuid=self.uuid)
elif code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.debug('Failed to set time: not supported',
instance_uuid=self.uuid)
elif code == libvirt.VIR_ERR_ARGUMENT_UNSUPPORTED:
LOG.debug('Failed to set time: agent not configured',
instance_uuid=self.uuid)
else:
LOG.warning('Failed to set time: %(reason)s',
{'reason': e}, instance_uuid=self.uuid)
except Exception as ex:
# The highest priority is not to let this method crash and thus
# disrupt its caller in any way. So we swallow this error here,
# to be absolutely safe.
LOG.debug('Failed to set time: %(reason)s',
{'reason': ex}, instance_uuid=self.uuid)
else:
LOG.debug('Time updated to: %d.%09d', seconds, nseconds,
instance_uuid=self.uuid)
def inject_nmi(self):
"""Injects an NMI to a guest."""
self._domain.injectNMI()
def resume(self):
"""Resumes a paused guest."""
self._domain.resume()
def enable_hairpin(self):
"""Enables hairpin mode for this guest."""
interfaces = self.get_interfaces()
try:
for interface in interfaces:
libvirt_privsep.enable_hairpin(interface)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Error enabling hairpin mode with XML: %s',
self._encoded_xml, errors='ignore')
def get_interfaces(self):
"""Returns a list of all network interfaces for this domain."""
doc = None
try:
doc = etree.fromstring(self._encoded_xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def get_interface_by_cfg(self, cfg):
"""Lookup a full LibvirtConfigGuestInterface with
LibvirtConfigGuestInterface generated
by nova.virt.libvirt.vif.get_config.
:param cfg: config object that represents the guest interface.
:type cfg: LibvirtConfigGuestInterface object
:returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface instance
if found, else None
"""
if cfg:
interfaces = self.get_all_devices(
vconfig.LibvirtConfigGuestInterface)
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuestInterface get from domain and
# LibvirtConfigGuestInterface generated by
# nova.virt.libvirt.vif.get_config must be identical.
if (interface.mac_addr == cfg.mac_addr and
interface.net_type == cfg.net_type and
interface.source_dev == cfg.source_dev and
interface.target_dev == cfg.target_dev and
interface.vhostuser_path == cfg.vhostuser_path):
return interface
def get_vcpus_info(self):
"""Returns virtual cpus information of guest.
:returns: guest.VCPUInfo
"""
vcpus = self._domain.vcpus()
for vcpu in vcpus[0]:
yield VCPUInfo(
id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
def delete_configuration(self, support_uefi=False):
"""Undefines a domain from hypervisor."""
try:
flags = libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
if support_uefi:
flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
self._domain.undefineFlags(flags)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags. %d"
"Retrying with undefine", self.id)
self._domain.undefine()
except AttributeError:
# Older versions of libvirt don't support undefine flags,
# trying to remove managed image
try:
if self._domain.hasManagedSaveImage(0):
self._domain.managedSaveRemove(0)
except AttributeError:
pass
self._domain.undefine()
def has_persistent_configuration(self):
"""Whether domain config is persistently stored on the host."""
return self._domain.isPersistent()
def attach_device(self, conf, persistent=False, live=False):
"""Attaches device to the guest.
:param conf: A LibvirtConfigObject of the device to attach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
device_xml = conf.to_xml()
if six.PY3 and isinstance(device_xml, six.binary_type):
device_xml = device_xml.decode('utf-8')
LOG.debug("attach device xml: %s", device_xml)
self._domain.attachDeviceFlags(device_xml, flags=flags)
def get_config(self):
"""Returns the config instance for a guest
:returns: LibvirtConfigGuest instance
"""
config = vconfig.LibvirtConfigGuest()
config.parse_str(self._domain.XMLDesc(0))
return config
def get_disk(self, device):
"""Returns the disk mounted at device
        :returns LibvirtConfigGuestDisk: mounted at device or None
"""
try:
doc = etree.fromstring(self._domain.XMLDesc(0))
except Exception:
return None
# FIXME(lyarwood): Workaround for the device being either a target dev
# when called via swap_volume or source file when called via
# live_snapshot. This should be removed once both are refactored to use
# only the target dev of the device.
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is None:
node = doc.find("./devices/disk/source[@file='%s'].." % device)
if node is not None:
conf = vconfig.LibvirtConfigGuestDisk()
conf.parse_dom(node)
return conf
def get_all_disks(self):
"""Returns all the disks for a guest
:returns: a list of LibvirtConfigGuestDisk instances
"""
return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
def get_all_devices(self, devtype=None):
"""Returns all devices for a guest
:param devtype: a LibvirtConfigGuestDevice subclass class
:returns: a list of LibvirtConfigGuestDevice instances
"""
try:
config = vconfig.LibvirtConfigGuest()
config.parse_str(
self._domain.XMLDesc(0))
except Exception:
return []
devs = []
for dev in config.devices:
if (devtype is None or
isinstance(dev, devtype)):
devs.append(dev)
return devs
def detach_device_with_retry(self, get_device_conf_func, device, live,
max_retry_count=7, inc_sleep_time=2,
max_sleep_time=30,
alternative_device_name=None):
"""Detaches a device from the guest. After the initial detach request,
a function is returned which can be used to ensure the device is
successfully removed from the guest domain (retrying the removal as
necessary).
:param get_device_conf_func: function which takes device as a parameter
and returns the configuration for device
:param device: device to detach
:param live: bool to indicate whether it affects the guest in running
state
:param max_retry_count: number of times the returned function will
retry a detach before failing
:param inc_sleep_time: incremental time to sleep in seconds between
detach retries
:param max_sleep_time: max sleep time in seconds beyond which the sleep
time will not be incremented using param
inc_sleep_time. On reaching this threshold,
max_sleep_time will be used as the sleep time.
:param alternative_device_name: This is an alternative identifier for
the device if device is not an ID, used solely for error messages.
"""
alternative_device_name = alternative_device_name or device
def _try_detach_device(conf, persistent=False, live=False):
# Raise DeviceNotFound if the device isn't found during detach
try:
self.detach_device(conf, persistent=persistent, live=live)
LOG.debug('Successfully detached device %s from guest. '
'Persistent? %s. Live? %s',
device, persistent, live)
except libvirt.libvirtError as ex:
with excutils.save_and_reraise_exception():
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
errmsg = ex.get_error_message()
if 'not found' in errmsg:
# This will be raised if the live domain
# detach fails because the device is not found
raise exception.DeviceNotFound(
device=alternative_device_name)
elif errcode == libvirt.VIR_ERR_INVALID_ARG:
errmsg = ex.get_error_message()
if 'no target device' in errmsg:
# This will be raised if the persistent domain
# detach fails because the device is not found
raise exception.DeviceNotFound(
device=alternative_device_name)
conf = get_device_conf_func(device)
if conf is None:
raise exception.DeviceNotFound(device=alternative_device_name)
persistent = self.has_persistent_configuration()
LOG.debug('Attempting initial detach for device %s',
alternative_device_name)
try:
_try_detach_device(conf, persistent, live)
except exception.DeviceNotFound:
# NOTE(melwitt): There are effectively two configs for an instance.
# The persistent config (affects instance upon next boot) and the
# live config (affects running instance). When we detach a device,
# we need to detach it from both configs if the instance has a
# persistent config and a live config. If we tried to detach the
# device with persistent=True and live=True and it was not found,
# we should still try to detach from the live config, so continue.
if persistent and live:
pass
else:
raise
LOG.debug('Start retrying detach until device %s is gone.',
alternative_device_name)
@loopingcall.RetryDecorator(max_retry_count=max_retry_count,
inc_sleep_time=inc_sleep_time,
max_sleep_time=max_sleep_time,
exceptions=exception.DeviceDetachFailed)
def _do_wait_and_retry_detach():
config = get_device_conf_func(device)
if config is not None:
# Device is already detached from persistent domain
# and only transient domain needs update
_try_detach_device(config, persistent=False, live=live)
reason = _("Unable to detach from guest transient domain.")
raise exception.DeviceDetachFailed(
device=alternative_device_name, reason=reason)
return _do_wait_and_retry_detach
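    # Illustrative sketch (not part of the original module): a caller would
    # typically invoke the returned function to drive the retry loop, e.g.
    #
    #     retry_detach = guest.detach_device_with_retry(
    #         guest.get_disk, 'vdb', live=True)
    #     retry_detach()  # raises DeviceDetachFailed after max_retry_count
    #
    # 'vdb' is a hypothetical target device name used only for illustration.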
def detach_device(self, conf, persistent=False, live=False):
"""Detaches device to the guest.
:param conf: A LibvirtConfigObject of the device to detach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
device_xml = conf.to_xml()
if six.PY3 and isinstance(device_xml, six.binary_type):
device_xml = device_xml.decode('utf-8')
LOG.debug("detach device xml: %s", device_xml)
self._domain.detachDeviceFlags(device_xml, flags=flags)
def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
dump_migratable=False):
"""Returns xml description of guest.
:param dump_inactive: Dump inactive domain information
:param dump_sensitive: Dump security sensitive information
:param dump_migratable: Dump XML suitable for migration
:returns string: XML description of the guest
"""
flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
return self._domain.XMLDesc(flags=flags)
def save_memory_state(self):
"""Saves the domain's memory state. Requires running domain.
        :raises: libvirtError on error
"""
self._domain.managedSave(0)
def get_block_device(self, disk):
"""Returns a block device wrapper for disk."""
return BlockDevice(self, disk)
def set_user_password(self, user, new_pass):
"""Configures a new user password."""
self._domain.setUserPassword(user, new_pass, 0)
def _get_domain_info(self, host):
"""Returns information on Guest
:param host: a host.Host object with current
connection. Unfortunately we need to pass it
                     because of a workaround for libvirt versions < 1.2.11
:returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
"""
return compat.get_domain_info(libvirt, host, self._domain)
def get_info(self, host):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
:returns hardware.InstanceInfo:
"""
try:
dom_info = self._get_domain_info(host)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=self.uuid)
msg = (_('Error from libvirt while getting domain info for '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
{'instance_name': self.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
return hardware.InstanceInfo(
state=LIBVIRT_POWER_STATE[dom_info[0]],
internal_id=self.id)
def get_power_state(self, host):
return self.get_info(host).state
def is_active(self):
"Determines whether guest is currently running."
return self._domain.isActive()
def freeze_filesystems(self):
"""Freeze filesystems within guest."""
self._domain.fsFreeze()
def thaw_filesystems(self):
"""Thaw filesystems within guest."""
self._domain.fsThaw()
def snapshot(self, conf, no_metadata=False,
disk_only=False, reuse_ext=False, quiesce=False):
"""Creates a guest snapshot.
:param conf: libvirt.LibvirtConfigGuestSnapshotDisk
:param no_metadata: Make snapshot without remembering it
:param disk_only: Disk snapshot, no system checkpoint
:param reuse_ext: Reuse any existing external files
        :param quiesce: Use QGA to quiesce all mounted file systems
"""
flags = no_metadata and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
or 0)
flags |= disk_only and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
or 0)
flags |= reuse_ext and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
or 0)
flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0
device_xml = conf.to_xml()
if six.PY3 and isinstance(device_xml, six.binary_type):
device_xml = device_xml.decode('utf-8')
self._domain.snapshotCreateXML(device_xml, flags=flags)
def shutdown(self):
"""Shutdown guest"""
self._domain.shutdown()
def pause(self):
"""Suspends an active guest
Process is frozen without further access to CPU resources and
I/O but the memory used by the domain at the hypervisor level
will stay allocated.
See method "resume()" to reactive guest.
"""
self._domain.suspend()
def migrate(self, destination, migrate_uri=None, params=None, flags=0,
domain_xml=None, bandwidth=0):
"""Migrate guest object from its current host to the destination
        :param destination: URI of the destination host where the guest
                            will be migrated
:param migrate_uri: URI for invoking the migration
        :param flags: May be one or more of the following:
VIR_MIGRATE_LIVE Do not pause the VM during migration
VIR_MIGRATE_PEER2PEER Direct connection between source &
destination hosts
VIR_MIGRATE_TUNNELLED Tunnel migration data over the
libvirt RPC channel
VIR_MIGRATE_PERSIST_DEST If the migration is successful,
persist the domain on the
destination host.
VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
undefine the domain on the
source host.
VIR_MIGRATE_PAUSED Leave the domain suspended on the remote
side.
VIR_MIGRATE_NON_SHARED_DISK Migration with non-shared
storage with full disk copy
VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
storage with incremental disk
copy
VIR_MIGRATE_CHANGE_PROTECTION Protect against domain
configuration changes during
the migration process (set
automatically when
supported).
VIR_MIGRATE_UNSAFE Force migration even if it is considered
unsafe.
VIR_MIGRATE_OFFLINE Migrate offline
:param domain_xml: Changing guest configuration during migration
        :param bandwidth: The maximum bandwidth in MiB/s
"""
if domain_xml is None:
self._domain.migrateToURI(
destination, flags=flags, bandwidth=bandwidth)
else:
if params:
# Due to a quirk in the libvirt python bindings,
# VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
# interpreted as "block migrate all writable disks" rather than
# "don't block migrate any disks". This includes attached
# volumes, which will potentially corrupt data on those
# volumes. Consequently we need to explicitly unset
# VIR_MIGRATE_NON_SHARED_INC if there are no disks to be block
# migrated.
if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
not params.get('migrate_disks')):
flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
# In migrateToURI3 these parameters are extracted from the
# `params` dict
if migrate_uri:
params['migrate_uri'] = migrate_uri
params['bandwidth'] = bandwidth
# In the python2 libvirt bindings, strings passed to
# migrateToURI3 via params must not be unicode.
if six.PY2:
params = {key: str(value) if isinstance(value, unicode)
else value
for key, value in params.items()}
self._domain.migrateToURI3(
destination, params=params, flags=flags)
else:
self._domain.migrateToURI2(
destination, miguri=migrate_uri, dxml=domain_xml,
flags=flags, bandwidth=bandwidth)
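    # Illustrative sketch (not part of the original module): a live,
    # peer-to-peer, tunnelled migration might be requested as
    #
    #     guest.migrate('qemu+tcp://dest-host/system',
    #                   flags=(libvirt.VIR_MIGRATE_LIVE |
    #                          libvirt.VIR_MIGRATE_PEER2PEER |
    #                          libvirt.VIR_MIGRATE_TUNNELLED))
    #
    # 'dest-host' is a hypothetical destination hostname.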
def abort_job(self):
"""Requests to abort current background job"""
self._domain.abortJob()
def migrate_configure_max_downtime(self, mstime):
"""Sets maximum time for which domain is allowed to be paused
:param mstime: Downtime in milliseconds.
"""
self._domain.migrateSetMaxDowntime(mstime)
def migrate_configure_max_speed(self, bandwidth):
"""The maximum bandwidth that will be used to do migration
:param bw: Bandwidth in MiB/s
"""
self._domain.migrateSetMaxSpeed(bandwidth)
def migrate_start_postcopy(self):
"""Switch running live migration to post-copy mode"""
self._domain.migrateStartPostCopy()
def get_job_info(self):
"""Get job info for the domain
        Query the libvirt job info for the domain (i.e. progress
of migration, or snapshot operation)
:returns: a JobInfo of guest
"""
if JobInfo._have_job_stats:
try:
stats = self._domain.jobStats()
return JobInfo(**stats)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
JobInfo._have_job_stats = False
return JobInfo._get_job_stats_compat(self._domain)
elif ex.get_error_code() in (
libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
# Transient guest finished migration, so it has gone
                    # away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
except AttributeError as ex:
# Local python binding doesn't support new API
LOG.debug("Missing local virDomainGetJobStats: %s", ex)
JobInfo._have_job_stats = False
return JobInfo._get_job_stats_compat(self._domain)
else:
return JobInfo._get_job_stats_compat(self._domain)
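    # Illustrative sketch (not part of the original module): live migration
    # monitoring code typically polls get_job_info(), e.g.
    #
    #     info = guest.get_job_info()
    #     if info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
    #         progress = info.memory_processed / max(info.memory_total, 1)
    #
    # VIR_DOMAIN_JOB_UNBOUNDED is the libvirt constant for a still-running
    # job.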
class BlockDevice(object):
"""Wrapper around block device API"""
REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
def __init__(self, guest, disk):
self._guest = guest
self._disk = disk
    def abort_job(self, async_=False, pivot=False):
        """Request to cancel a live block device job
        :param async_: Cancel the block device job (e.g. 'copy' or
                       'commit'), and return as soon as possible, without
                       waiting for job completion
        :param pivot: Pivot to the destination image when ending a
                      'copy' or "active commit" (meaning: merging the
                      contents of current active disk into its backing
                      file) job
        """
        # NOTE: 'async' is a reserved keyword in Python 3.7+, so the
        # argument is spelled 'async_' here.
        flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
        flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
self._guest._domain.blockJobAbort(self._disk, flags=flags)
def get_job_info(self):
"""Returns information about job currently running
:returns: BlockDeviceJobInfo, or None if no job exists
:raises: libvirt.libvirtError on error fetching block job info
"""
# libvirt's blockJobInfo() raises libvirt.libvirtError if there was an
# error. It returns {} if the job no longer exists, or a fully
# populated dict if the job exists.
status = self._guest._domain.blockJobInfo(self._disk, flags=0)
# The job no longer exists
if not status:
return None
return BlockDeviceJobInfo(
job=status['type'],
bandwidth=status['bandwidth'],
cur=status['cur'],
end=status['end'])
def rebase(self, base, shallow=False, reuse_ext=False,
copy=False, relative=False, copy_dev=False):
"""Copy data from backing chain into a new disk
This copies data from backing file(s) into overlay(s), giving
control over several aspects like what part of a disk image
chain to be copied, whether to reuse an existing destination
file, etc. And updates the backing file to the new disk
:param shallow: Limit copy to top of the source backing chain
:param reuse_ext: Reuse an existing external file that was
pre-created
:param copy: Start a copy job
:param relative: Keep backing chain referenced using relative names
:param copy_dev: Treat the destination as type="block"
"""
flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
flags |= copy_dev and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV or 0
flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
return self._guest._domain.blockRebase(
self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
def commit(self, base, top, relative=False):
"""Merge data from overlays into backing file
This live merges (or "commits") contents from backing files into
overlays, thus reducing the length of a disk image chain.
:param relative: Keep backing chain referenced using relative names
"""
flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
return self._guest._domain.blockCommit(
self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
def resize(self, size_kb):
"""Resize block device to KiB size"""
self._guest._domain.blockResize(self._disk, size_kb)
def is_job_complete(self):
"""Return True if the job is complete, False otherwise
:returns: True if the job is complete, False otherwise
:raises: libvirt.libvirtError on error fetching block job info
"""
# NOTE(mdbooth): This method polls for block job completion. It returns
# true if either we get a status which indicates completion, or there
# is no longer a record of the job. Ideally this method and its
# callers would be rewritten to consume libvirt events from the job.
# This would provide a couple of advantages. Firstly, as it would no
# longer be polling it would notice completion immediately rather than
# at the next 0.5s check, and would also consume fewer resources.
# Secondly, with the current method we only know that 'no job'
# indicates completion. It does not necessarily indicate successful
# completion: the job could have failed, or been cancelled. When
# polling for block job info we have no way to detect this, so we
# assume success.
status = self.get_job_info()
# If the job no longer exists, it is because it has completed
# NOTE(mdbooth): See comment above: it may not have succeeded.
if status is None:
return True
# NOTE(slaweq): because of bug in libvirt, which is described in
# http://www.redhat.com/archives/libvir-list/2016-September/msg00017.html
# if status.end == 0 job is not started yet so it is not finished
# NOTE(mdbooth): The fix was committed upstream here:
# http://libvirt.org/git/?p=libvirt.git;a=commit;h=988218c
# The earliest tag which contains this commit is v2.3.0-rc1, so we
# should be able to remove this workaround when MIN_LIBVIRT_VERSION
# reaches 2.3.0, or we move to handling job events instead.
# NOTE(lyarwood): Use the mirror element to determine if we can pivot
# to the new disk once blockjobinfo reports progress as complete.
if status.end != 0 and status.cur == status.end:
disk = self._guest.get_disk(self._disk)
if disk and disk.mirror:
return disk.mirror.ready == 'yes'
return False
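    # Illustrative sketch (not part of the original module): callers usually
    # poll is_job_complete() while a block job runs, e.g.
    #
    #     dev = guest.get_block_device('vda')  # 'vda' is hypothetical
    #     dev.rebase('new-base.qcow2', copy=True)
    #     while not dev.is_job_complete():
    #         time.sleep(0.5)
    #     dev.abort_job(pivot=True)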
class VCPUInfo(object):
def __init__(self, id, cpu, state, time):
"""Structure for information about guest vcpus.
:param id: The virtual cpu number
:param cpu: The host cpu currently associated
:param state: The running state of the vcpu (0 offline, 1 running, 2
blocked on resource)
:param time: The cpu time used in nanoseconds
"""
self.id = id
self.cpu = cpu
self.state = state
self.time = time
class BlockDeviceJobInfo(object):
def __init__(self, job, bandwidth, cur, end):
"""Structure for information about running job.
:param job: The running job (0 placeholder, 1 pull,
2 copy, 3 commit, 4 active commit)
:param bandwidth: Used in MiB/s
:param cur: Indicates the position between 0 and 'end'
:param end: Indicates the position for this operation
"""
self.job = job
self.bandwidth = bandwidth
self.cur = cur
self.end = end
class JobInfo(object):
"""Information about libvirt background jobs
This class encapsulates information about libvirt
background jobs. It provides a mapping from either
the old virDomainGetJobInfo API which returned a
fixed list of fields, or the modern virDomainGetJobStats
which returns an extendable dict of fields.
"""
_have_job_stats = True
def __init__(self, **kwargs):
self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
self.time_elapsed = kwargs.get("time_elapsed", 0)
self.time_remaining = kwargs.get("time_remaining", 0)
self.downtime = kwargs.get("downtime", 0)
self.setup_time = kwargs.get("setup_time", 0)
self.data_total = kwargs.get("data_total", 0)
self.data_processed = kwargs.get("data_processed", 0)
self.data_remaining = kwargs.get("data_remaining", 0)
self.memory_total = kwargs.get("memory_total", 0)
self.memory_processed = kwargs.get("memory_processed", 0)
self.memory_remaining = kwargs.get("memory_remaining", 0)
self.memory_iteration = kwargs.get("memory_iteration", 0)
self.memory_constant = kwargs.get("memory_constant", 0)
self.memory_normal = kwargs.get("memory_normal", 0)
self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
self.memory_bps = kwargs.get("memory_bps", 0)
self.disk_total = kwargs.get("disk_total", 0)
self.disk_processed = kwargs.get("disk_processed", 0)
self.disk_remaining = kwargs.get("disk_remaining", 0)
self.disk_bps = kwargs.get("disk_bps", 0)
self.comp_cache = kwargs.get("compression_cache", 0)
self.comp_bytes = kwargs.get("compression_bytes", 0)
self.comp_pages = kwargs.get("compression_pages", 0)
self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
self.comp_overflow = kwargs.get("compression_overflow", 0)
@classmethod
def _get_job_stats_compat(cls, dom):
# Make the old virDomainGetJobInfo method look similar to the
# modern virDomainGetJobStats method
try:
info = dom.jobInfo()
except libvirt.libvirtError as ex:
# When migration of a transient guest completes, the guest
# goes away so we'll see NO_DOMAIN error code
#
# When migration of a persistent guest completes, the guest
# merely shuts off, but libvirt unhelpfully raises an
# OPERATION_INVALID error code
#
# Lets pretend both of these mean success
if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job info: %s", ex)
raise
return cls(
type=info[0],
time_elapsed=info[1],
time_remaining=info[2],
data_total=info[3],
data_processed=info[4],
data_remaining=info[5],
memory_total=info[6],
memory_processed=info[7],
memory_remaining=info[8],
disk_total=info[9],
disk_processed=info[10],
disk_remaining=info[11])
| 41.409044
| 81
| 0.614231
|
4a1819f33976cfe18a675386ae50d970a4e64af0
| 2,771
|
py
|
Python
|
src/azure-cli-core/setup.py
|
mahakjain314/azure-cli
|
d0ce4954cffad6f2eaaa485e2e1b78d3a4e1eb14
|
[
"MIT"
] | 1
|
2021-09-07T18:51:21.000Z
|
2021-09-07T18:51:21.000Z
|
src/azure-cli-core/setup.py
|
mahakjain314/azure-cli
|
d0ce4954cffad6f2eaaa485e2e1b78d3a4e1eb14
|
[
"MIT"
] | 1
|
2020-08-08T03:56:56.000Z
|
2020-08-08T03:56:56.000Z
|
src/azure-cli-core/setup.py
|
mahakjain314/azure-cli
|
d0ce4954cffad6f2eaaa485e2e1b78d3a4e1eb14
|
[
"MIT"
] | 1
|
2022-02-16T18:23:11.000Z
|
2022-02-16T18:23:11.000Z
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys  # used at module scope below even when the version check is skipped
from codecs import open
from setuptools import setup, find_packages
VERSION = "2.31.0"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
with open('azure/cli/core/__init__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
    import re
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
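    # e.g. the pattern above matches a source line like: __version__ = "2.31.0"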
if not m:
print('Could not find __version__ in azure/cli/core/__init__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'argcomplete~=1.8',
'azure-cli-telemetry==1.0.6.*',
'azure-mgmt-core>=1.2.0,<2',
'cryptography',
'humanfriendly~=10.0',
'jmespath',
'knack~=0.9.0',
'msal-extensions>=0.3.0,<0.4',
'msal>=1.16.0,<2.0.0',
'paramiko>=2.0.8,<3.0.0',
'pkginfo>=1.5.0.1',
'PyJWT>=2.1.0',
'pyopenssl>=17.1.0', # https://github.com/pyca/pyopenssl/pull/612
'requests[socks]'
]
# dependencies for specific OSes
if not sys.platform.startswith('cygwin'):
DEPENDENCIES.append('psutil~=5.8')
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
setup(
name='azure-cli-core',
version=VERSION,
description='Microsoft Azure Command-Line Tools Core Module',
long_description=README,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "azure", "azure.cli"]),
install_requires=DEPENDENCIES,
python_requires='>=3.6.0',
package_data={'azure.cli.core': ['auth/landing_pages/*.html']}
)
| 31.850575
| 103
| 0.595814
|
4a181a19516de01feca5f88c2fe22b987ecdf12a
| 112
|
py
|
Python
|
lazy_dataset/__init__.py
|
thequilo/lazy_dataset
|
d4c56d3212ee387b8e721f2dae6d16b1bff18543
|
[
"MIT"
] | null | null | null |
lazy_dataset/__init__.py
|
thequilo/lazy_dataset
|
d4c56d3212ee387b8e721f2dae6d16b1bff18543
|
[
"MIT"
] | null | null | null |
lazy_dataset/__init__.py
|
thequilo/lazy_dataset
|
d4c56d3212ee387b8e721f2dae6d16b1bff18543
|
[
"MIT"
] | null | null | null |
from .core import (
new,
concatenate,
Dataset,
from_dict,
from_list,
FilterException,
)
| 12.444444
| 20
| 0.607143
|
4a181a33513b7ec307055725ef0ba00320a00c2d
| 380
|
py
|
Python
|
factorialrec.py
|
annanymaus/babysteps
|
39d5a7b1027f7361899466b879fcd8746cacea0b
|
[
"MIT"
] | 2
|
2021-03-02T13:53:23.000Z
|
2021-03-16T20:37:13.000Z
|
factorialrec.py
|
annanymaus/babysteps
|
39d5a7b1027f7361899466b879fcd8746cacea0b
|
[
"MIT"
] | 1
|
2021-03-16T12:15:21.000Z
|
2021-03-16T17:39:54.000Z
|
factorialrec.py
|
annanymaus/babysteps
|
39d5a7b1027f7361899466b879fcd8746cacea0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#program to find factorial of a given number
#take input from user
n = input("Enter a number : ")
#recursive func. to calculate factorial
def fact(i):
    #base case for 0! and 1!
    if i < 2:
        return 1
    #recursive step for numbers greater than/equal to 2
    return i * fact(i - 1)
#print the result
print(fact(int(n)))
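#optional sanity check (illustrative): the standard library agrees, e.g.
#    import math
#    assert fact(5) == math.factorial(5) == 120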
| 19
| 56
| 0.634211
|
4a181a4b386b861b3204dfcca40b3b0860fa7de6
| 565
|
py
|
Python
|
protonfixes/gamefixes/409720.py
|
Sirmentio/protonfixes
|
1ca6452ed1a9910ed0afc8c544ce90dfc699a678
|
[
"BSD-2-Clause"
] | 213
|
2018-10-06T01:40:26.000Z
|
2022-03-16T16:17:37.000Z
|
protonfixes/gamefixes/409720.py
|
Sirmentio/protonfixes
|
1ca6452ed1a9910ed0afc8c544ce90dfc699a678
|
[
"BSD-2-Clause"
] | 88
|
2018-10-06T17:38:56.000Z
|
2022-02-19T13:27:26.000Z
|
protonfixes/gamefixes/409720.py
|
Sirmentio/protonfixes
|
1ca6452ed1a9910ed0afc8c544ce90dfc699a678
|
[
"BSD-2-Clause"
] | 67
|
2018-10-09T16:57:16.000Z
|
2022-03-14T13:06:25.000Z
|
""" Game fix for BioShock 2 Remastered
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Disable ESYNC, disable intro's
"""
# After loading the game, or a save file, a key needs to be pressed
# to continue. That screen does not respond to keyboard or mouse,
# so there is no way to continue. -nointro disables that screen
    # (but also the intros at the start of the game).
util.append_argument('-nointro')
# ESYNC causes texture problems and frequent hangs.
util.set_environment('PROTON_NO_ESYNC', '1')
| 29.736842
| 71
| 0.693805
|
4a181c00c023fa9174b591f2c0bba844d63df51a
| 20,168
|
py
|
Python
|
homeassistant/components/frontend/__init__.py
|
GotoCode/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/frontend/__init__.py
|
GotoCode/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
homeassistant/components/frontend/__init__.py
|
y1ngyang/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | null | null | null |
"""
Handle the frontend for Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/frontend/
"""
import asyncio
import hashlib
import json
import logging
import os
from urllib.parse import urlparse
from aiohttp import web
import voluptuous as vol
import jinja2
import homeassistant.helpers.config_validation as cv
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_AUTHENTICATED
from homeassistant.config import find_config_file, load_yaml_config_file
from homeassistant.const import CONF_NAME, EVENT_THEMES_UPDATED
from homeassistant.core import callback
from homeassistant.helpers.translation import async_get_translations
from homeassistant.loader import bind_hass
REQUIREMENTS = ['home-assistant-frontend==20180426.0']
DOMAIN = 'frontend'
DEPENDENCIES = ['api', 'websocket_api', 'http', 'system_log']
URL_PANEL_COMPONENT_FP = '/frontend/panels/{}-{}.html'
CONF_THEMES = 'themes'
CONF_EXTRA_HTML_URL = 'extra_html_url'
CONF_EXTRA_HTML_URL_ES5 = 'extra_html_url_es5'
CONF_FRONTEND_REPO = 'development_repo'
CONF_JS_VERSION = 'javascript_version'
JS_DEFAULT_OPTION = 'auto'
JS_OPTIONS = ['es5', 'latest', 'auto']
DEFAULT_THEME_COLOR = '#03A9F4'
MANIFEST_JSON = {
'background_color': '#FFFFFF',
'description': 'Open-source home automation platform running on Python 3.',
'dir': 'ltr',
'display': 'standalone',
'icons': [],
'lang': 'en-US',
'name': 'Home Assistant',
'short_name': 'Assistant',
'start_url': '/states',
'theme_color': DEFAULT_THEME_COLOR
}
for size in (192, 384, 512, 1024):
MANIFEST_JSON['icons'].append({
'src': '/static/icons/favicon-{}x{}.png'.format(size, size),
'sizes': '{}x{}'.format(size, size),
'type': 'image/png'
})
DATA_FINALIZE_PANEL = 'frontend_finalize_panel'
DATA_PANELS = 'frontend_panels'
DATA_JS_VERSION = 'frontend_js_version'
DATA_EXTRA_HTML_URL = 'frontend_extra_html_url'
DATA_EXTRA_HTML_URL_ES5 = 'frontend_extra_html_url_es5'
DATA_THEMES = 'frontend_themes'
DATA_DEFAULT_THEME = 'frontend_default_theme'
DEFAULT_THEME = 'default'
PRIMARY_COLOR = 'primary-color'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_FRONTEND_REPO): cv.isdir,
vol.Optional(CONF_THEMES): vol.Schema({
cv.string: {cv.string: cv.string}
}),
vol.Optional(CONF_EXTRA_HTML_URL):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXTRA_HTML_URL_ES5):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_JS_VERSION, default=JS_DEFAULT_OPTION):
vol.In(JS_OPTIONS)
}),
}, extra=vol.ALLOW_EXTRA)
SERVICE_SET_THEME = 'set_theme'
SERVICE_RELOAD_THEMES = 'reload_themes'
SERVICE_SET_THEME_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
})
class AbstractPanel:
"""Abstract class for panels."""
# Name of the webcomponent
component_name = None
# Icon to show in the sidebar (optional)
sidebar_icon = None
# Title to show in the sidebar (optional)
sidebar_title = None
# Url to the webcomponent (depending on JS version)
webcomponent_url_es5 = None
webcomponent_url_latest = None
# Url to show the panel in the frontend
frontend_url_path = None
# Config to pass to the webcomponent
config = None
@asyncio.coroutine
def async_register(self, hass):
"""Register panel with HASS."""
panels = hass.data.get(DATA_PANELS)
if panels is None:
panels = hass.data[DATA_PANELS] = {}
if self.frontend_url_path in panels:
_LOGGER.warning("Overwriting component %s", self.frontend_url_path)
if DATA_FINALIZE_PANEL in hass.data:
yield from hass.data[DATA_FINALIZE_PANEL](self)
panels[self.frontend_url_path] = self
@callback
def async_register_index_routes(self, router, index_view):
"""Register routes for panel to be served by index view."""
router.add_route(
'get', '/{}'.format(self.frontend_url_path), index_view.get)
router.add_route(
'get', '/{}/{{extra:.+}}'.format(self.frontend_url_path),
index_view.get)
def to_response(self, hass, request):
"""Panel as dictionary."""
result = {
'component_name': self.component_name,
'icon': self.sidebar_icon,
'title': self.sidebar_title,
'url_path': self.frontend_url_path,
'config': self.config,
}
if _is_latest(hass.data[DATA_JS_VERSION], request):
result['url'] = self.webcomponent_url_latest
else:
result['url'] = self.webcomponent_url_es5
return result
class BuiltInPanel(AbstractPanel):
"""Panel that is part of hass_frontend."""
def __init__(self, component_name, sidebar_title, sidebar_icon,
frontend_url_path, config):
"""Initialize a built-in panel."""
self.component_name = component_name
self.sidebar_title = sidebar_title
self.sidebar_icon = sidebar_icon
self.frontend_url_path = frontend_url_path or component_name
self.config = config
@asyncio.coroutine
def async_finalize(self, hass, frontend_repository_path):
"""Finalize this panel for usage.
If frontend_repository_path is set, will be prepended to path of
built-in components.
"""
if frontend_repository_path is None:
import hass_frontend
import hass_frontend_es5
self.webcomponent_url_latest = \
'/frontend_latest/panels/ha-panel-{}-{}.html'.format(
self.component_name,
hass_frontend.FINGERPRINTS[self.component_name])
self.webcomponent_url_es5 = \
'/frontend_es5/panels/ha-panel-{}-{}.html'.format(
self.component_name,
hass_frontend_es5.FINGERPRINTS[self.component_name])
else:
# Dev mode
self.webcomponent_url_es5 = self.webcomponent_url_latest = \
'/home-assistant-polymer/panels/{}/ha-panel-{}.html'.format(
self.component_name, self.component_name)
class ExternalPanel(AbstractPanel):
"""Panel that is added by a custom component."""
REGISTERED_COMPONENTS = set()
def __init__(self, component_name, path, md5, sidebar_title, sidebar_icon,
frontend_url_path, config):
"""Initialize an external panel."""
self.component_name = component_name
self.path = path
self.md5 = md5
self.sidebar_title = sidebar_title
self.sidebar_icon = sidebar_icon
self.frontend_url_path = frontend_url_path or component_name
self.config = config
@asyncio.coroutine
def async_finalize(self, hass, frontend_repository_path):
"""Finalize this panel for usage.
frontend_repository_path is set, will be prepended to path of built-in
components.
"""
try:
if self.md5 is None:
self.md5 = yield from hass.async_add_job(
_fingerprint, self.path)
except OSError:
_LOGGER.error('Cannot find or access %s at %s',
self.component_name, self.path)
hass.data[DATA_PANELS].pop(self.frontend_url_path)
return
self.webcomponent_url_es5 = self.webcomponent_url_latest = \
URL_PANEL_COMPONENT_FP.format(self.component_name, self.md5)
if self.component_name not in self.REGISTERED_COMPONENTS:
hass.http.register_static_path(
self.webcomponent_url_latest, self.path,
# if path is None, we're in prod mode, so cache static assets
frontend_repository_path is None)
self.REGISTERED_COMPONENTS.add(self.component_name)
@bind_hass
@asyncio.coroutine
def async_register_built_in_panel(hass, component_name, sidebar_title=None,
sidebar_icon=None, frontend_url_path=None,
config=None):
"""Register a built-in panel."""
panel = BuiltInPanel(component_name, sidebar_title, sidebar_icon,
frontend_url_path, config)
yield from panel.async_register(hass)
@bind_hass
@asyncio.coroutine
def async_register_panel(hass, component_name, path, md5=None,
sidebar_title=None, sidebar_icon=None,
frontend_url_path=None, config=None):
"""Register a panel for the frontend.
component_name: name of the web component
path: path to the HTML of the web component
(required unless url is provided)
md5: the md5 hash of the web component (for versioning in URL, optional)
sidebar_title: title to show in the sidebar (optional)
sidebar_icon: icon to show next to title in sidebar (optional)
    frontend_url_path: name to use in the URL (defaults to component_name)
config: config to be passed into the web component
"""
panel = ExternalPanel(component_name, path, md5, sidebar_title,
sidebar_icon, frontend_url_path, config)
yield from panel.async_register(hass)
@bind_hass
@callback
def add_extra_html_url(hass, url, es5=False):
"""Register extra html url to load."""
key = DATA_EXTRA_HTML_URL_ES5 if es5 else DATA_EXTRA_HTML_URL
url_set = hass.data.get(key)
if url_set is None:
url_set = hass.data[key] = set()
url_set.add(url)
def add_manifest_json_key(key, val):
"""Add a keyval to the manifest.json."""
MANIFEST_JSON[key] = val
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the serving of the frontend."""
hass.http.register_view(ManifestJSONView)
conf = config.get(DOMAIN, {})
repo_path = conf.get(CONF_FRONTEND_REPO)
is_dev = repo_path is not None
hass.data[DATA_JS_VERSION] = js_version = conf.get(CONF_JS_VERSION)
if is_dev:
for subpath in ["src", "build-translations", "build-temp", "build",
"hass_frontend", "bower_components", "panels",
"hassio"]:
hass.http.register_static_path(
"/home-assistant-polymer/{}".format(subpath),
os.path.join(repo_path, subpath),
False)
hass.http.register_static_path(
"/static/translations",
os.path.join(repo_path, "build-translations/output"), False)
sw_path_es5 = os.path.join(repo_path, "build-es5/service_worker.js")
sw_path_latest = os.path.join(repo_path, "build/service_worker.js")
static_path = os.path.join(repo_path, 'hass_frontend')
frontend_es5_path = os.path.join(repo_path, 'build-es5')
frontend_latest_path = os.path.join(repo_path, 'build')
else:
import hass_frontend
import hass_frontend_es5
sw_path_es5 = os.path.join(hass_frontend_es5.where(),
"service_worker.js")
sw_path_latest = os.path.join(hass_frontend.where(),
"service_worker.js")
# /static points to dir with files that are JS-type agnostic.
# ES5 files are served from /frontend_es5.
# ES6 files are served from /frontend_latest.
static_path = hass_frontend.where()
frontend_es5_path = hass_frontend_es5.where()
frontend_latest_path = static_path
hass.http.register_static_path(
"/service_worker_es5.js", sw_path_es5, False)
hass.http.register_static_path(
"/service_worker.js", sw_path_latest, False)
hass.http.register_static_path(
"/robots.txt", os.path.join(static_path, "robots.txt"), not is_dev)
hass.http.register_static_path("/static", static_path, not is_dev)
hass.http.register_static_path(
"/frontend_latest", frontend_latest_path, not is_dev)
hass.http.register_static_path(
"/frontend_es5", frontend_es5_path, not is_dev)
local = hass.config.path('www')
if os.path.isdir(local):
hass.http.register_static_path("/local", local, not is_dev)
index_view = IndexView(repo_path, js_version)
hass.http.register_view(index_view)
@asyncio.coroutine
def finalize_panel(panel):
"""Finalize setup of a panel."""
yield from panel.async_finalize(hass, repo_path)
panel.async_register_index_routes(hass.http.app.router, index_view)
yield from asyncio.wait([
async_register_built_in_panel(hass, panel)
for panel in ('dev-event', 'dev-info', 'dev-service', 'dev-state',
'dev-template', 'dev-mqtt', 'kiosk')], loop=hass.loop)
hass.data[DATA_FINALIZE_PANEL] = finalize_panel
    # Finalize registration of panels that registered before the frontend
    # was set up. This includes the built-in panels from the lines above.
yield from asyncio.wait(
[finalize_panel(panel) for panel in hass.data[DATA_PANELS].values()],
loop=hass.loop)
if DATA_EXTRA_HTML_URL not in hass.data:
hass.data[DATA_EXTRA_HTML_URL] = set()
if DATA_EXTRA_HTML_URL_ES5 not in hass.data:
hass.data[DATA_EXTRA_HTML_URL_ES5] = set()
for url in conf.get(CONF_EXTRA_HTML_URL, []):
add_extra_html_url(hass, url, False)
for url in conf.get(CONF_EXTRA_HTML_URL_ES5, []):
add_extra_html_url(hass, url, True)
async_setup_themes(hass, conf.get(CONF_THEMES))
hass.http.register_view(TranslationsView)
return True
def async_setup_themes(hass, themes):
"""Set up themes data and services."""
hass.http.register_view(ThemesView)
hass.data[DATA_DEFAULT_THEME] = DEFAULT_THEME
if themes is None:
hass.data[DATA_THEMES] = {}
return
hass.data[DATA_THEMES] = themes
@callback
def update_theme_and_fire_event():
"""Update theme_color in manifest."""
name = hass.data[DATA_DEFAULT_THEME]
themes = hass.data[DATA_THEMES]
if name != DEFAULT_THEME and PRIMARY_COLOR in themes[name]:
MANIFEST_JSON['theme_color'] = themes[name][PRIMARY_COLOR]
else:
MANIFEST_JSON['theme_color'] = DEFAULT_THEME_COLOR
hass.bus.async_fire(EVENT_THEMES_UPDATED, {
'themes': themes,
'default_theme': name,
})
@callback
def set_theme(call):
"""Set backend-preferred theme."""
data = call.data
name = data[CONF_NAME]
if name == DEFAULT_THEME or name in hass.data[DATA_THEMES]:
_LOGGER.info("Theme %s set as default", name)
hass.data[DATA_DEFAULT_THEME] = name
update_theme_and_fire_event()
else:
_LOGGER.warning("Theme %s is not defined.", name)
@callback
def reload_themes(_):
"""Reload themes."""
path = find_config_file(hass.config.config_dir)
new_themes = load_yaml_config_file(path)[DOMAIN].get(CONF_THEMES, {})
hass.data[DATA_THEMES] = new_themes
if hass.data[DATA_DEFAULT_THEME] not in new_themes:
hass.data[DATA_DEFAULT_THEME] = DEFAULT_THEME
update_theme_and_fire_event()
hass.services.async_register(
DOMAIN, SERVICE_SET_THEME, set_theme, schema=SERVICE_SET_THEME_SCHEMA)
hass.services.async_register(DOMAIN, SERVICE_RELOAD_THEMES, reload_themes)
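# Illustrative sketch (not part of the original module): a themes entry this
# service pair operates on might look like this in configuration.yaml:
#
#     frontend:
#       themes:
#         happy:
#           primary-color: "#c72366"
#
# Calling the frontend.set_theme service with name: happy then updates
# MANIFEST_JSON['theme_color'] and fires EVENT_THEMES_UPDATED.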
class IndexView(HomeAssistantView):
"""Serve the frontend."""
url = '/'
name = 'frontend:index'
requires_auth = False
extra_urls = ['/states', '/states/{extra}']
def __init__(self, repo_path, js_option):
"""Initialize the frontend view."""
self.repo_path = repo_path
self.js_option = js_option
self._template_cache = {}
def get_template(self, latest):
"""Get template."""
if self.repo_path is not None:
root = self.repo_path
elif latest:
import hass_frontend
root = hass_frontend.where()
else:
import hass_frontend_es5
root = hass_frontend_es5.where()
tpl = self._template_cache.get(root)
if tpl is None:
with open(os.path.join(root, 'index.html')) as file:
tpl = jinja2.Template(file.read())
# Cache template if not running from repository
if self.repo_path is None:
self._template_cache[root] = tpl
return tpl
@asyncio.coroutine
def get(self, request, extra=None):
"""Serve the index view."""
hass = request.app['hass']
latest = self.repo_path is not None or \
_is_latest(self.js_option, request)
if request.path == '/':
panel = 'states'
else:
panel = request.path.split('/')[1]
if panel == 'states':
panel_url = ''
elif latest:
panel_url = hass.data[DATA_PANELS][panel].webcomponent_url_latest
else:
panel_url = hass.data[DATA_PANELS][panel].webcomponent_url_es5
no_auth = '1'
if hass.config.api.api_password and not request[KEY_AUTHENTICATED]:
# do not try to auto connect on load
no_auth = '0'
template = yield from hass.async_add_job(self.get_template, latest)
extra_key = DATA_EXTRA_HTML_URL if latest else DATA_EXTRA_HTML_URL_ES5
resp = template.render(
no_auth=no_auth,
panel_url=panel_url,
panels=hass.data[DATA_PANELS],
theme_color=MANIFEST_JSON['theme_color'],
extra_urls=hass.data[extra_key],
)
return web.Response(text=resp, content_type='text/html')
class ManifestJSONView(HomeAssistantView):
"""View to return a manifest.json."""
requires_auth = False
url = '/manifest.json'
name = 'manifestjson'
@asyncio.coroutine
def get(self, request): # pylint: disable=no-self-use
"""Return the manifest.json."""
msg = json.dumps(MANIFEST_JSON, sort_keys=True)
return web.Response(text=msg, content_type="application/manifest+json")
class ThemesView(HomeAssistantView):
"""View to return defined themes."""
requires_auth = False
url = '/api/themes'
name = 'api:themes'
@callback
def get(self, request):
"""Return themes."""
hass = request.app['hass']
return self.json({
'themes': hass.data[DATA_THEMES],
'default_theme': hass.data[DATA_DEFAULT_THEME],
})
class TranslationsView(HomeAssistantView):
"""View to return backend defined translations."""
url = '/api/translations/{language}'
name = 'api:translations'
@asyncio.coroutine
def get(self, request, language):
"""Return translations."""
hass = request.app['hass']
resources = yield from async_get_translations(hass, language)
return self.json({
'resources': resources,
})
def _fingerprint(path):
"""Fingerprint a file."""
with open(path) as fil:
return hashlib.md5(fil.read().encode('utf-8')).hexdigest()
def _is_latest(js_option, request):
"""
Return whether we should serve latest untranspiled code.
Set according to user's preference and URL override.
"""
import hass_frontend
if request is None:
return js_option == 'latest'
# latest in query
if 'latest' in request.query or (
request.headers.get('Referer') and
'latest' in urlparse(request.headers['Referer']).query):
return True
# es5 in query
if 'es5' in request.query or (
request.headers.get('Referer') and
'es5' in urlparse(request.headers['Referer']).query):
return False
# non-auto option in config
if js_option != 'auto':
return js_option == 'latest'
useragent = request.headers.get('User-Agent')
return useragent and hass_frontend.version(useragent)
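# Illustrative note (not part of the original module): appending '?latest' or
# '?es5' to a frontend URL, e.g. http://localhost:8123/states?es5, overrides
# the configured javascript_version for that request via the checks above.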
| 33.613333
| 79
| 0.649098
|
4a181d72f2abaa5e546422715e27cb362c030d30
| 42,587
|
py
|
Python
|
tensorflow/python/ops/linalg/linear_operator.py
|
where-is-brett/tensorflow
|
5da8599b2cf9edfb9fac4431c705501bf7ceccd8
|
[
"Apache-2.0"
] | 50
|
2020-03-15T01:04:36.000Z
|
2021-11-21T23:25:44.000Z
|
tensorflow/python/ops/linalg/linear_operator.py
|
where-is-brett/tensorflow
|
5da8599b2cf9edfb9fac4431c705501bf7ceccd8
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/python/ops/linalg/linear_operator.py
|
where-is-brett/tensorflow
|
5da8599b2cf9edfb9fac4431c705501bf7ceccd8
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
@six.add_metaclass(abc.ABCMeta)
class LinearOperator(module.Module):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`matmul(x, adjoint=False, name="matmul")` a subclass should implement
`_matmul(x, adjoint=False)`.
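  As an illustrative sketch (not part of this module), a minimal subclass
  wrapping a dense matrix might look like:
  ```python
  class MyDenseOperator(LinearOperator):
    """Illustrative operator backed by a dense [batch] matrix."""
    def __init__(self, matrix):
      self._matrix = matrix
      super(MyDenseOperator, self).__init__(dtype=matrix.dtype)
    def _shape(self):
      return self._matrix.shape
    def _matmul(self, x, adjoint=False):
      return math_ops.matmul(self._matrix, x, adjoint_a=adjoint)
  ```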
#### Performance contract
Subclasses should only implement the assert methods
(e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
time.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some subclasses may not support batching.
Examples:
`x` is a batch matrix with compatible shape for `matmul` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_abs_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.matmul(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
# TODO(b/143910018) Remove graph_parents in V3.
@deprecation.deprecated_args(None, "Do not pass `graph_parents`. They will "
" no longer be used.", "graph_parents")
def __init__(self,
dtype,
graph_parents=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize the `LinearOperator`.
**This is a private method for subclass use.**
**Subclasses should copy-paste this `__init__` documentation.**
Args:
      dtype: The type of this `LinearOperator`. Arguments to `matmul` and
        `solve` will have to be this type.
      graph_parents: (Deprecated) Python list of graph prerequisites of this
        `LinearOperator`. Typically tensors that are passed during initialization.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `dtype` is real, this is equivalent to being symmetric.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If any member of graph_parents is `None` or not a `Tensor`.
ValueError: If hints are set incorrectly.
"""
# Check and auto-set flags.
if is_positive_definite:
if is_non_singular is False:
raise ValueError("A positive definite matrix is always non-singular.")
is_non_singular = True
if is_non_singular:
if is_square is False:
raise ValueError("A non-singular matrix is always square.")
is_square = True
if is_self_adjoint:
if is_square is False:
raise ValueError("A self-adjoint matrix is always square.")
is_square = True
self._is_square_set_or_implied_by_hints = is_square
if graph_parents is not None:
self._set_graph_parents(graph_parents)
else:
self._graph_parents = []
self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype
self._is_non_singular = is_non_singular
self._is_self_adjoint = is_self_adjoint
self._is_positive_definite = is_positive_definite
self._name = name or type(self).__name__
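  # Illustrative sketch (not part of the original source): the hint checks
  # above form an implication chain, positive-definite => non-singular =>
  # square, and contradictory hints fail fast. Assuming the concrete
  # subclass `tf.linalg.LinearOperatorFullMatrix`:
  #
  #   operator = tf.linalg.LinearOperatorFullMatrix(
  #       [[2., 0.], [0., 3.]], is_positive_definite=True)
  #   operator.is_non_singular  # ==> True, auto-set by the chain above
  #   operator.is_square        # ==> True, auto-set by the chain above
  #
  #   tf.linalg.LinearOperatorFullMatrix(
  #       [[2.]], is_positive_definite=True,
  #       is_non_singular=False)  # ==> raises ValueError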
@contextlib.contextmanager
def _name_scope(self, name=None):
"""Helper function to standardize op scope."""
full_name = self.name
if name is not None:
full_name += "/" + name
with ops.name_scope(full_name) as scope:
yield scope
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `LinearOperator`."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this `LinearOperator`."""
return self._name
@property
@deprecation.deprecated(None, "Do not call `graph_parents`.")
def graph_parents(self):
"""List of graph dependencies of this `LinearOperator`."""
return self._graph_parents
@property
def is_non_singular(self):
return self._is_non_singular
@property
def is_self_adjoint(self):
return self._is_self_adjoint
@property
def is_positive_definite(self):
return self._is_positive_definite
@property
def is_square(self):
"""Return `True/False` depending on if this operator is square."""
# Static checks done after __init__. Why? Because domain/range dimension
# sometimes requires lots of work done in the derived class after init.
auto_square_check = self.domain_dimension == self.range_dimension
if self._is_square_set_or_implied_by_hints is False and auto_square_check:
raise ValueError(
"User set is_square hint to False, but the operator was square.")
if self._is_square_set_or_implied_by_hints is None:
return auto_square_check
return self._is_square_set_or_implied_by_hints
@abc.abstractmethod
def _shape(self):
# Write this in derived class to enable all static shape methods.
raise NotImplementedError("_shape is not implemented.")
@property
def shape(self):
"""`TensorShape` of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb, M, N])`, equivalent to `A.shape`.
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._shape()
def _shape_tensor(self):
# This is not an abstractmethod, since we want derived classes to be able to
# override this with optional kwargs, which can reduce the number of
# `convert_to_tensor` calls. See derived classes for examples.
raise NotImplementedError("_shape_tensor is not implemented.")
def shape_tensor(self, name="shape_tensor"):
"""Shape of this `LinearOperator`, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.shape.is_fully_defined():
return linear_operator_util.shape_tensor(self.shape.as_list())
else:
return self._shape_tensor()
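  # Illustrative sketch (not part of the original source): when the static
  # shape is fully defined, `shape_tensor()` just converts it to a constant;
  # otherwise it falls back to the runtime `_shape_tensor()`:
  #
  #   operator = tf.linalg.LinearOperatorFullMatrix(tf.zeros([2, 4, 4]))
  #   operator.shape           # ==> TensorShape([2, 4, 4]), static
  #   operator.shape_tensor()  # ==> int32 Tensor holding [2, 4, 4]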
@property
def batch_shape(self):
"""`TensorShape` of batch dimensions of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .shape is implemented.
return self.shape[:-2]
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of batch dimensions of this operator, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb]`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self._batch_shape_tensor()
def _batch_shape_tensor(self, shape=None):
# `shape` may be passed in if this can be pre-computed in a
# more efficient manner, e.g. without excessive Tensor conversions.
if self.batch_shape.is_fully_defined():
return linear_operator_util.shape_tensor(
self.batch_shape.as_list(), name="batch_shape")
else:
shape = self.shape_tensor() if shape is None else shape
return shape[:-2]
@property
  def tensor_rank(self):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.
    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
    Returns:
      Python integer, or None if the tensor rank is undefined.
    """
    # Derived classes get this "for free" once .shape is implemented.
    # A property takes no arguments, so no name scope is opened here.
    return self.shape.ndims
def tensor_rank_tensor(self, name="tensor_rank_tensor"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`, determined at runtime.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self._tensor_rank_tensor()
def _tensor_rank_tensor(self, shape=None):
# `shape` may be passed in if this can be pre-computed in a
# more efficient manner, e.g. without excessive Tensor conversions.
if self.tensor_rank is not None:
return ops.convert_to_tensor(self.tensor_rank)
else:
shape = self.shape_tensor() if shape is None else shape
return array_ops.size(shape)
@property
def domain_dimension(self):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.rank is None:
return tensor_shape.Dimension(None)
else:
return self.shape.dims[-1]
def domain_dimension_tensor(self, name="domain_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self._domain_dimension_tensor()
def _domain_dimension_tensor(self, shape=None):
# `shape` may be passed in if this can be pre-computed in a
# more efficient manner, e.g. without excessive Tensor conversions.
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
shape = self.shape_tensor() if shape is None else shape
return shape[-1]
@property
def range_dimension(self):
"""Dimension (in the sense of vector spaces) of the range of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.dims:
return self.shape.dims[-2]
else:
return tensor_shape.Dimension(None)
def range_dimension_tensor(self, name="range_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the range of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self._range_dimension_tensor()
def _range_dimension_tensor(self, shape=None):
# `shape` may be passed in if this can be pre-computed in a
# more efficient manner, e.g. without excessive Tensor conversions.
dim_value = tensor_shape.dimension_value(self.range_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
shape = self.shape_tensor() if shape is None else shape
return shape[-2]
def _assert_non_singular(self):
"""Private default implementation of _assert_non_singular."""
logging.warn(
"Using (possibly slow) default implementation of assert_non_singular."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return self.assert_positive_definite()
else:
singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
# TODO(langmore) Add .eig and .cond as methods.
cond = (math_ops.reduce_max(singular_values, axis=-1) /
math_ops.reduce_min(singular_values, axis=-1))
return check_ops.assert_less(
cond,
self._max_condition_number_to_be_non_singular(),
message="Singular matrix up to precision epsilon.")
def _max_condition_number_to_be_non_singular(self):
"""Return the maximum condition number that we consider nonsingular."""
with ops.name_scope("max_nonsingular_condition_number"):
dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
eps = math_ops.cast(
math_ops.reduce_max([
100.,
math_ops.cast(self.range_dimension_tensor(), self.dtype),
math_ops.cast(self.domain_dimension_tensor(), self.dtype)
]), self.dtype) * dtype_eps
return 1. / eps
def assert_non_singular(self, name="assert_non_singular"):
"""Returns an `Op` that asserts this operator is non singular.
This operator is considered non-singular if
```
ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
eps := np.finfo(self.dtype.as_numpy_dtype).eps
```
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is singular.
"""
with self._name_scope(name):
return self._assert_non_singular()
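  # Worked example of the cutoff above (a sketch, not original source): for a
  # float32 operator with M = N = 4,
  #
  #   eps  = max(100, 4, 4) * np.finfo(np.float32).eps
  #        = 100 * 1.1920929e-07 ≈ 1.19e-05
  #   cond < 1 / eps ≈ 8.4e+04
  #
  # so matrices whose condition number exceeds roughly 8.4e4 are flagged as
  # singular up to float32 precision.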
def _assert_positive_definite(self):
"""Default implementation of _assert_positive_definite."""
logging.warn(
"Using (possibly slow) default implementation of "
"assert_positive_definite."
" Requires conversion to a dense matrix and O(N^3) operations.")
# If the operator is self-adjoint, then checking that
# Cholesky decomposition succeeds + results in positive diag is necessary
# and sufficient.
if self.is_self_adjoint:
return check_ops.assert_positive(
array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
message="Matrix was not positive definite.")
# We have no generic check for positive definite.
raise NotImplementedError("assert_positive_definite is not implemented.")
def assert_positive_definite(self, name="assert_positive_definite"):
"""Returns an `Op` that asserts this operator is positive definite.
Here, positive definite means that the quadratic form `x^H A x` has positive
real part for all nonzero `x`. Note that we do not require the operator to
be self-adjoint to be positive definite.
Args:
name: A name to give this `Op`.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not positive definite.
"""
with self._name_scope(name):
return self._assert_positive_definite()
def _assert_self_adjoint(self):
dense = self.to_dense()
logging.warn(
"Using (possibly slow) default implementation of assert_self_adjoint."
" Requires conversion to a dense matrix.")
return check_ops.assert_equal(
dense,
linalg.adjoint(dense),
message="Matrix was not equal to its adjoint.")
def assert_self_adjoint(self, name="assert_self_adjoint"):
"""Returns an `Op` that asserts this operator is self-adjoint.
Here we check that this operator is *exactly* equal to its hermitian
transpose.
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not self-adjoint.
"""
with self._name_scope(name):
return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype.base_dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(self.dtype, arg.dtype, arg))
@abc.abstractmethod
def _matmul(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError("_matmul is not implemented.")
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
    Y[..., :, r] = sum_j A[..., :, j] X[..., j, r]
```
Args:
x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
`self`. See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`.
"""
if isinstance(x, LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = x.adjoint() if adjoint_arg else x
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(left_operator, right_operator)
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
x.shape[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def __matmul__(self, other):
return self.matmul(other)
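  # Usage sketch (assumes the concrete subclass
  # `tf.linalg.LinearOperatorFullMatrix`; not part of the original source):
  #
  #   operator = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
  #   x = tf.constant([[1., 0.], [0., 1.]])
  #   operator.matmul(x)  # ==> [[1., 2.], [3., 4.]]
  #   operator @ x        # equivalent, via __matmul__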
def _matvec(self, x, adjoint=False):
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`.
`x` is treated as a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(x.shape[-1])
return self._matvec(x, adjoint=adjoint)
def _determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self.to_dense())
def determinant(self, name="det"):
"""Determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._determinant()
def _log_abs_determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
_, log_abs_det = linalg.slogdet(self.to_dense())
return log_abs_det
def log_abs_determinant(self, name="log_abs_det"):
"""Log absolute value of determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._log_abs_determinant()
def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Solve by conversion to a dense matrix."""
if self.is_square is False: # pylint: disable=g-bool-id-comparison
raise NotImplementedError(
"Solve is not yet implemented for non-square operators.")
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if self._can_use_cholesky():
return linalg_ops.cholesky_solve(
linalg_ops.cholesky(self.to_dense()), rhs)
return linear_operator_util.matrix_solve_with_broadcast(
self.to_dense(), rhs, adjoint=adjoint)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Default implementation of _solve."""
logging.warn(
"Using (possibly slow) default implementation of solve."
" Requires conversion to a dense matrix and O(N^3) operations.")
return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape.
`rhs` is treated like a [batch] matrix meaning for every set of leading
        dimensions, the last two dimensions define a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
if isinstance(rhs, LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = rhs.adjoint() if adjoint_arg else rhs
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `rhs` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.solve(left_operator, right_operator)
with self._name_scope(name):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.shape[arg_dim])
return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
"""Default implementation of _solvevec."""
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator.
`rhs` is treated like a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector. See class docstring
for definition of compatibility regarding batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])
return self._solvevec(rhs, adjoint=adjoint)
def adjoint(self, name="adjoint"):
"""Returns the adjoint of the current `LinearOperator`.
Given `A` representing this `LinearOperator`, return `A*`.
Note that calling `self.adjoint()` and `self.H` are equivalent.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the adjoint of this `LinearOperator`.
"""
if self.is_self_adjoint is True: # pylint: disable=g-bool-id-comparison
return self
with self._name_scope(name):
return linear_operator_algebra.adjoint(self)
# self.H is equivalent to self.adjoint().
H = property(adjoint, None)
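  # Sketch (not original source): `operator.H` and `operator.adjoint()` are
  # interchangeable; for complex dtypes the result is the conjugate transpose:
  #
  #   operator = tf.linalg.LinearOperatorFullMatrix([[1. + 2.j]])
  #   operator.H.to_dense()  # ==> [[1. - 2.j]]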
def inverse(self, name="inverse"):
"""Returns the Inverse of this `LinearOperator`.
Given `A` representing this `LinearOperator`, return a `LinearOperator`
representing `A^-1`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`LinearOperator` representing inverse of this matrix.
Raises:
ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
"""
if self.is_square is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a non square matrix.")
if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a singular matrix.")
with self._name_scope(name):
return linear_operator_algebra.inverse(self)
def cholesky(self, name="cholesky"):
"""Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
"""
if not self._can_use_cholesky():
raise ValueError("Cannot take the Cholesky decomposition: "
"Not a positive definite self adjoint matrix.")
with self._name_scope(name):
return linear_operator_algebra.cholesky(self)
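  # Sketch (assumes `tf.linalg.LinearOperatorFullMatrix`; not original
  # source): the factor is itself a LinearOperator, so it composes with the
  # rest of the API:
  #
  #   operator = tf.linalg.LinearOperatorFullMatrix(
  #       [[4., 0.], [0., 9.]],
  #       is_self_adjoint=True, is_positive_definite=True)
  #   operator.cholesky().to_dense()  # ==> [[2., 0.], [0., 3.]]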
def _to_dense(self):
"""Generic and often inefficient implementation. Override often."""
if self.batch_shape.is_fully_defined():
batch_shape = self.batch_shape
else:
batch_shape = self.batch_shape_tensor()
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
n = dim_value
else:
n = self.domain_dimension_tensor()
eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
return self.matmul(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
def _diag_part(self):
"""Generic and often inefficient implementation. Override often."""
return array_ops.matrix_diag_part(self.to_dense())
def diag_part(self, name="diag_part"):
"""Efficiently get the [batch] diagonal part of this operator.
If this operator has shape `[B1,...,Bb, M, N]`, this returns a
`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
```
my_operator = LinearOperatorDiag([1., 2.])
# Efficiently get the diagonal
my_operator.diag_part()
==> [1., 2.]
# Equivalent, but inefficient method
tf.linalg.diag_part(my_operator.to_dense())
==> [1., 2.]
```
Args:
name: A name for this `Op`.
Returns:
diag_part: A `Tensor` of same `dtype` as self.
"""
with self._name_scope(name):
return self._diag_part()
def _trace(self):
return math_ops.reduce_sum(self.diag_part(), axis=-1)
def trace(self, name="trace"):
"""Trace of the linear operator, equal to sum of `self.diag_part()`.
If the operator is square, this is also the sum of the eigenvalues.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._trace()
def _add_to_tensor(self, x):
# Override if a more efficient implementation is available.
return self.to_dense() + x
def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x)
def _eigvals(self):
return linalg_ops.self_adjoint_eigvals(self.to_dense())
def eigvals(self, name="eigvals"):
"""Returns the eigenvalues of this linear operator.
If the operator is marked as self-adjoint (via `is_self_adjoint`)
this computation can be more efficient.
Note: This currently only supports self-adjoint operators.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.
"""
if not self.is_self_adjoint:
raise NotImplementedError("Only self-adjoint matrices are supported.")
with self._name_scope(name):
return self._eigvals()
def _cond(self):
if not self.is_self_adjoint:
# In general the condition number is the ratio of the
# absolute value of the largest and smallest singular values.
vals = linalg_ops.svd(self.to_dense(), compute_uv=False)
else:
# For self-adjoint matrices, and in general normal matrices,
# we can use eigenvalues.
vals = math_ops.abs(self._eigvals())
return (math_ops.reduce_max(vals, axis=-1) /
math_ops.reduce_min(vals, axis=-1))
def cond(self, name="cond"):
"""Returns the condition number of this linear operator.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._cond()
def _can_use_cholesky(self):
return self.is_self_adjoint and self.is_positive_definite
def _set_graph_parents(self, graph_parents):
"""Set self._graph_parents. Called during derived class init.
This method allows derived classes to set graph_parents, without triggering
a deprecation warning (which is invoked if `graph_parents` is passed during
    `__init__`).
Args:
graph_parents: Iterable over Tensors.
"""
# TODO(b/143910018) Remove this function in V3.
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not (linear_operator_util.is_ref(t) or
tensor_util.is_tensor(t)):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._graph_parents = graph_parents
# Overrides for tf.linalg functions. This allows a LinearOperator to be used in
# place of a Tensor.
# For instance tf.linalg.trace(linop) and linop.trace() both work.
@dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)
def _adjoint(matrix, name=None):
return matrix.adjoint(name)
@dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)
def _cholesky(input, name=None): # pylint:disable=redefined-builtin
return input.cholesky(name)
# The signature has to match with the one in python/op/array_ops.py,
# so we have k, padding_value, and align even though we don't use them here.
# pylint:disable=unused-argument
@dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)
def _diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0,
align="RIGHT_LEFT"):
return input.diag_part(name)
# pylint:enable=unused-argument
@dispatch.dispatch_for_types(linalg.det, LinearOperator)
def _det(input, name=None): # pylint:disable=redefined-builtin
return input.determinant(name)
@dispatch.dispatch_for_types(linalg.inv, LinearOperator)
def _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin
inv = input.inverse(name)
if adjoint:
inv = inv.adjoint()
return inv
@dispatch.dispatch_for_types(linalg.logdet, LinearOperator)
def _logdet(matrix, name=None):
if matrix.is_positive_definite and matrix.is_self_adjoint:
return matrix.log_abs_determinant(name)
raise ValueError("Expected matrix to be self-adjoint positive definite.")
@dispatch.dispatch_for_types(math_ops.matmul, LinearOperator)
def _matmul( # pylint:disable=missing-docstring
a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
if transpose_a or transpose_b:
raise ValueError("Transposing not supported at this time.")
if a_is_sparse or b_is_sparse:
raise ValueError("Sparse methods not supported at this time.")
if not isinstance(a, LinearOperator):
# We use the identity (B^HA^H)^H = AB
adjoint_matmul = b.matmul(
a,
adjoint=(not adjoint_b),
adjoint_arg=(not adjoint_a),
name=name)
return linalg.adjoint(adjoint_matmul)
return a.matmul(
b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)
@dispatch.dispatch_for_types(linalg.solve, LinearOperator)
def _solve(
matrix,
rhs,
adjoint=False,
name=None):
if not isinstance(matrix, LinearOperator):
raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a "
"LinearOperator is not supported.")
return matrix.solve(rhs, adjoint=adjoint, name=name)
@dispatch.dispatch_for_types(linalg.trace, LinearOperator)
def _trace(x, name=None):
return x.trace(name)
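# Dispatch usage sketch (not part of the original source): with the overrides
# above, the public `tf.linalg` functions accept a LinearOperator directly:
#
#   operator = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
#   tf.linalg.det(operator)      # same as operator.determinant()
#   tf.linalg.trace(operator)    # same as operator.trace()
#   tf.linalg.adjoint(operator)  # same as operator.adjoint()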
tests/template_backends/test_dummy.py | ni-ning/django | CNRI-Python-GPL-Compatible, BSD-3-Clause
import re
from django.forms import CharField, Form, Media
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CsrfViewMiddleware, _compare_masked_tokens as equivalent_tokens, get_token,
)
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
engine_class = TemplateStrings
backend_name = 'dummy'
options = {}
@classmethod
def setUpClass(cls):
super().setUpClass()
params = {
'DIRS': [],
'APP_DIRS': True,
'NAME': cls.backend_name,
'OPTIONS': cls.options,
}
cls.engine = cls.engine_class(params)
def test_from_string(self):
template = self.engine.from_string("Hello!\n")
content = template.render()
self.assertEqual(content, "Hello!\n")
def test_get_template(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'world'})
self.assertEqual(content, "Hello world!\n")
def test_get_template_nonexistent(self):
with self.assertRaises(TemplateDoesNotExist) as e:
self.engine.get_template('template_backends/nonexistent.html')
self.assertEqual(e.exception.backend, self.engine)
def test_get_template_syntax_error(self):
# There's no way to trigger a syntax error with the dummy backend.
# The test still lives here to factor it between other backends.
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('template_backends/syntax_error.html')
def test_html_escaping(self):
template = self.engine.get_template('template_backends/hello.html')
context = {'name': '<script>alert("XSS!");</script>'}
content = template.render(context)
        self.assertIn('&lt;script&gt;', content)
self.assertNotIn('<script>', content)
def test_django_html_escaping(self):
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
class TestForm(Form):
test_field = CharField()
media = Media(js=['my-script.js'])
form = TestForm()
template = self.engine.get_template('template_backends/django_escaping.html')
content = template.render({'media': media, 'test_form': form})
expected = '{}\n\n{}\n\n{}'.format(media, form, form['test_field'])
self.assertHTMLEqual(content, expected)
def test_csrf_token(self):
request = HttpRequest()
CsrfViewMiddleware(lambda req: HttpResponse()).process_view(request, lambda r: None, (), {})
template = self.engine.get_template('template_backends/csrf.html')
content = template.render(request=request)
expected = '<input type="hidden" name="csrfmiddlewaretoken" value="([^"]+)">'
match = re.match(expected, content) or re.match(expected.replace('"', "'"), content)
self.assertTrue(match, "hidden csrftoken field not found in output")
self.assertTrue(equivalent_tokens(match[1], get_token(request)))
def test_no_directory_traversal(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('../forbidden/template_backends/hello.html')
def test_non_ascii_characters(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'Jérôme'})
self.assertEqual(content, "Hello Jérôme!\n")
paddlehub/commands/run.py | Austendeng/PaddleHub | Apache-2.0
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import six
from paddlehub.commands.base_command import BaseCommand, ENTRY
from paddlehub.io.parser import yaml_parser, txt_parser
from paddlehub.module.manager import default_module_manager
from paddlehub.common import utils
from paddlehub.common.arg_helper import add_argument, print_arguments
import paddlehub as hub
class RunCommand(BaseCommand):
name = "run"
def __init__(self, name):
super(RunCommand, self).__init__(name)
self.show_in_help = True
self.description = "Run the specific module."
        self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__,
prog='%s %s <module>' % (ENTRY, name),
usage='%(prog)s',
add_help=False)
def parse_args_with_module(self, module, argv):
module_type = module.type.lower()
# yapf: disable
if module_type.startswith("cv"):
self.add_arg('--config', str, None, "config file in yaml format" )
self.add_arg('--signature', str, None, "signature to run" )
self.add_arg('--input_path', str, None, "path of image to predict" )
self.add_arg('--input_file', str, None, "file contain paths of images" )
self.args = self.parser.parse_args(argv)
self.args.data = self.args.input_path
self.args.dataset = self.args.input_file
elif module_type.startswith("nlp"):
self.add_arg('--config', str, None, "config file in yaml format" )
self.add_arg('--signature', str, None, "signature to run" )
self.add_arg('--input_text', str, None, "text to predict" )
self.add_arg('--input_file', str, None, "file contain texts" )
self.args = self.parser.parse_args(argv)
self.args.data = self.args.input_text
self.args.dataset = self.args.input_file
# yapf: enable
def demo_with_module(self, module):
module_type = module.type.lower()
entry = hub.commands.base_command.ENTRY
if module_type.startswith("cv"):
demo = "%s %s %s --input_path <IMAGE_PATH>" % (entry, self.name,
module.name)
elif module_type.startswith("nlp"):
demo = "%s %s %s --input_text \"TEXT_TO_PREDICT\"" % (
entry, self.name, module.name)
else:
demo = "%s %s %s" % (entry, self.name, module.name)
return demo
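    # Example invocations built by `demo_with_module` above (a sketch; the
    # module names are hypothetical and the entry point is assumed to resolve
    # to `hub`):
    #
    #   hub run some_cv_module --input_path cat.jpg
    #   hub run some_nlp_module --input_text "TEXT_TO_PREDICT"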
def execute(self, argv):
if not argv:
print("ERROR: Please specify a module name.\n")
self.help()
return False
module_name = argv[0]
module_dir = default_module_manager.search_module(module_name)
if not module_dir:
if os.path.exists(module_name):
module_dir = module_name
else:
print("Install Module %s" % module_name)
result, tips, module_dir = default_module_manager.install_module(
module_name)
print(tips)
if not result:
return False
try:
module = hub.Module(module_dir=module_dir)
        except Exception:
            print(
                "ERROR! %s is a model. The `run` command only supports the "
                "module type, not the model type." % module_name)
            sys.exit(1)
self.parse_args_with_module(module, argv[1:])
if not module.default_signature:
print("ERROR! Module %s is not able to predict." % module_name)
return False
if not self.args.signature:
self.args.signature = module.default_signature.name
# module processor check
module.check_processor()
expect_data_format = module.processor.data_format(self.args.signature)
# get data dict
if self.args.data:
input_data_key = list(expect_data_format.keys())[0]
origin_data = {input_data_key: [self.args.data]}
elif self.args.dataset:
input_data_key = list(expect_data_format.keys())[0]
origin_data = {input_data_key: txt_parser.parse(self.args.dataset)}
else:
print("ERROR! Please specify data to predict.\n")
print("Summary:\n %s\n" % module.summary)
print("Example:\n %s" % self.demo_with_module(module))
return False
# data_format check
if not self.args.config:
if len(expect_data_format) != 1:
raise RuntimeError(
"Module requires %d inputs, please use config file to specify mappings for data and inputs."
% len(expect_data_format))
origin_data_key = list(origin_data.keys())[0]
input_data_key = list(expect_data_format.keys())[0]
input_data = {input_data_key: origin_data[origin_data_key]}
config = {}
else:
yaml_config = yaml_parser.parse(self.args.config)
if len(expect_data_format) == 1:
origin_data_key = list(origin_data.keys())[0]
input_data_key = list(expect_data_format.keys())[0]
input_data = {input_data_key: origin_data[origin_data_key]}
else:
input_data_format = yaml_config['input_data']
if len(input_data_format) != len(expect_data_format):
raise ValueError(
"Module requires %d inputs, but the input file gives %d."
% (len(expect_data_format), len(input_data_format)))
for key, value in expect_data_format.items():
if key not in input_data_format:
raise KeyError(
"Input file gives an unexpected input %s" % key)
if value['type'] != hub.DataType.type(
input_data_format[key]['type']):
raise TypeError(
"Module expect Type %s for %s, but the input file gives %s"
% (value['type'], key,
hub.DataType.type(
input_data_format[key]['type'])))
input_data = {}
for key, value in yaml_config['input_data'].items():
input_data[key] = origin_data[value['key']]
config = yaml_config.get("config", {})
# run module with data
results = module(
sign_name=self.args.signature, data=input_data, **config)
if six.PY2:
print(repr(results).decode('string_escape'))
else:
print(results)
command = RunCommand.instance()
tests/unit/test_pools.py | AndreyKlychnikov/aiovk | MIT
import os
from unittest import IsolatedAsyncioTestCase
from dotenv import load_dotenv
from aiovk.pools import AsyncResult, AsyncVkExecuteRequestPool
load_dotenv(
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), ".env")
)
token1 = os.getenv('TEST_TOKEN_1')
token2 = os.getenv('TEST_TOKEN_2')
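# Minimal `.env` sketch for running these tests (placeholder values, not real
# credentials):
#
#   TEST_TOKEN_1=<vk-access-token-1>
#   TEST_TOKEN_2=<vk-access-token-2>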
class ExecutePoolTestCase(IsolatedAsyncioTestCase):
async def test_one_call_per_request(self):
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call('users.get', token1, {'user_ids': 1})
self.assertIsInstance(result, AsyncResult)
self.assertIsNotNone(result.result)
self.assertEqual(1, result.result[0]['id'])
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call('users.get', token1, {'user_ids': 1})
self.assertIsInstance(result, AsyncResult)
result2 = pool.call('users.get', token2, {'user_ids': 2})
self.assertIsInstance(result2, AsyncResult)
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(1, result.result[0]['id'])
self.assertTrue(result2.ok)
self.assertIsNotNone(result2.result)
self.assertEqual(2, result2.result[0]['id'])
async def test_less_or_equal_than_25_calls_per_token(self):
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 2):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 25):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 26):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i in range(26, 51):
result = pool.call('users.get', token2, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
async def test_greater_than_25_calls_per_token(self):
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 26):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 50):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
users = []
async with AsyncVkExecuteRequestPool() as pool:
for i in range(1, 51):
result = pool.call('users.get', token1, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i in range(51, 99):
result = pool.call('users.get', token2, {'user_ids': i})
users.append(result)
self.assertIsInstance(result, AsyncResult)
for i, result in enumerate(users, start=1):
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(i, result.result[0]['id'])
async def test_error_requests(self):
async with AsyncVkExecuteRequestPool() as pool:
error_result = pool.call('users.get', token1, {'user_ids': -1})
self.assertIsInstance(error_result, AsyncResult)
self.assertFalse(error_result.ok)
self.assertIsNone(error_result.result)
self.assertIsNotNone(error_result.error)
self.assertDictEqual({
'method': 'users.get', 'error_code': 113, 'error_msg': 'Invalid user id'
}, error_result.error)
async with AsyncVkExecuteRequestPool() as pool:
error_result = pool.call('users.get', token1, {'user_ids': -1})
success_result = pool.call('users.get', token2, {'user_ids': 1})
self.assertFalse(error_result.ok)
self.assertIsNone(error_result.result)
self.assertIsNotNone(error_result.error)
self.assertDictEqual({
'method': 'users.get', 'error_code': 113, 'error_msg': 'Invalid user id'
}, error_result.error)
self.assertTrue(success_result.ok)
self.assertIsNotNone(success_result.result)
self.assertEqual(1, success_result.result[0]['id'])
async def test_request_without_values(self):
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call('users.get', token1)
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
async def test_false_cast_response(self):
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call(
"groups.isMember",
token1,
{"user_id": 1, "group_id": 1},
)
self.assertTrue(result.ok)
self.assertIsNotNone(result.result)
self.assertEqual(0, result.result)
async def test_equal_requests(self):
"""Тестирование того, что одинаковые запросы для одного токена будут выполняться только один раз"""
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call(
"groups.isMember",
token1,
{"user_id": 1, "group_id": 1},
)
result2 = pool.call(
"groups.isMember",
token1,
{"user_id": 1, "group_id": 1},
)
result3 = pool.call(
"groups.isMember",
token1,
{"user_id": 1, "group_id": 1},
)
self.assertEqual(1, len(pool.pool[token1]))
self.assertIs(result, result2)
self.assertIs(result, result3)
async def test_invalid_token(self):
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call(
"groups.isMember",
'invalid_token',
{"user_id": 1, "group_id": 1},
)
self.assertEqual(5, result.error["error_code"])
self.assertEqual("groups.isMember", result.error["method"])
async def test_invalid_call(self):
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call(
"groups.isMember",
token1,
{"user_id": -1, "group_id": 1},
)
self.assertEqual(100, result.error['error_code'])
async def test_invalid_token_type(self):
"""Вызов метода, который доступен только с токеном пользователя, с токеном группы"""
async with AsyncVkExecuteRequestPool() as pool:
result = pool.call(
"likes.isLiked",
token1,
{
"user_id": 1,
"owner_id": -1,
"type": "post",
"item_id": 396449,
},
)
self.assertIsNone(result.result)
self.assertIsNotNone(result.error)
self.assertEqual(27, result.error['error_code'])
self.assertEqual('likes.isLiked', result.error['method'])
piccolo/query/methods/create_index.py | 0scarB/piccolo | MIT
from __future__ import annotations
import typing as t
from piccolo.columns import Column
from piccolo.columns.indexes import IndexMethod
from piccolo.query.base import DDL
if t.TYPE_CHECKING: # pragma: no cover
from piccolo.table import Table
class CreateIndex(DDL):
def __init__(
self,
table: t.Type[Table],
columns: t.List[t.Union[Column, str]],
method: IndexMethod = IndexMethod.btree,
if_not_exists: bool = False,
**kwargs,
):
self.columns = columns
self.method = method
self.if_not_exists = if_not_exists
super().__init__(table, **kwargs)
@property
def column_names(self) -> t.List[str]:
return [
i._meta.db_column_name if isinstance(i, Column) else i
for i in self.columns
]
@property
def prefix(self) -> str:
prefix = "CREATE INDEX"
if self.if_not_exists:
prefix += " IF NOT EXISTS"
return prefix
@property
def postgres_ddl(self) -> t.Sequence[str]:
column_names = self.column_names
index_name = self.table._get_index_name(column_names)
tablename = self.table._meta.tablename
method_name = self.method.value
column_names_str = ", ".join(column_names)
return [
(
f"{self.prefix} {index_name} ON {tablename} USING "
f"{method_name} ({column_names_str})"
)
]
@property
def sqlite_ddl(self) -> t.Sequence[str]:
column_names = self.column_names
index_name = self.table._get_index_name(column_names)
tablename = self.table._meta.tablename
method_name = self.method.value
if method_name != "btree":
raise ValueError("SQLite only support btree indexes.")
column_names_str = ", ".join(column_names)
return [
(
f"{self.prefix} {index_name} ON {tablename} "
f"({column_names_str})"
)
]
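# Illustrative sketch of the generated DDL (table and column names are
# hypothetical; the index name comes from `Table._get_index_name`):
#
#   CreateIndex(Band, columns=["name"], if_not_exists=True).postgres_ddl
#   # ==> ['CREATE INDEX IF NOT EXISTS <index_name> ON band USING btree (name)']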
rllib/evaluation/tests/test_trajectory_view_api.py | carlos-aguayo/ray | Apache-2.0
import copy
from gym.spaces import Box, Discrete
import time
import unittest
import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.examples.env.debug_counter_env import MultiAgentDebugCounterEnv
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.examples.policy.episode_env_aware_policy import \
EpisodeEnvAwarePolicy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import framework_iterator
class TestTrajectoryViewAPI(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_traj_view_normal_case(self):
"""Tests, whether Model and Policy return the correct ViewRequirements.
"""
config = ppo.DEFAULT_CONFIG.copy()
for _ in framework_iterator(config, frameworks="torch"):
trainer = ppo.PPOTrainer(config, env="CartPole-v0")
policy = trainer.get_policy()
view_req_model = policy.model.inference_view_requirements
view_req_policy = policy.training_view_requirements
assert len(view_req_model) == 1
assert len(view_req_policy) == 10
for key in [
SampleBatch.OBS, SampleBatch.ACTIONS, SampleBatch.REWARDS,
SampleBatch.DONES, SampleBatch.NEXT_OBS,
SampleBatch.VF_PREDS, "advantages", "value_targets",
SampleBatch.ACTION_DIST_INPUTS, SampleBatch.ACTION_LOGP
]:
assert key in view_req_policy
# None of the view cols has a special underlying data_col,
# except next-obs.
if key != SampleBatch.NEXT_OBS:
assert view_req_policy[key].data_col is None
else:
assert view_req_policy[key].data_col == SampleBatch.OBS
assert view_req_policy[key].shift == 1
trainer.stop()
def test_traj_view_lstm_prev_actions_and_rewards(self):
"""Tests, whether Policy/Model return correct LSTM ViewRequirements.
"""
config = ppo.DEFAULT_CONFIG.copy()
config["model"] = config["model"].copy()
# Activate LSTM + prev-action + rewards.
config["model"]["use_lstm"] = True
config["model"]["lstm_use_prev_action_reward"] = True
for _ in framework_iterator(config, frameworks="torch"):
trainer = ppo.PPOTrainer(config, env="CartPole-v0")
policy = trainer.get_policy()
view_req_model = policy.model.inference_view_requirements
view_req_policy = policy.training_view_requirements
assert len(view_req_model) == 7 # obs, prev_a, prev_r, 4xstates
assert len(view_req_policy) == 16
for key in [
SampleBatch.OBS, SampleBatch.ACTIONS, SampleBatch.REWARDS,
SampleBatch.DONES, SampleBatch.NEXT_OBS,
SampleBatch.VF_PREDS, SampleBatch.PREV_ACTIONS,
SampleBatch.PREV_REWARDS, "advantages", "value_targets",
SampleBatch.ACTION_DIST_INPUTS, SampleBatch.ACTION_LOGP
]:
assert key in view_req_policy
if key == SampleBatch.PREV_ACTIONS:
assert view_req_policy[key].data_col == SampleBatch.ACTIONS
assert view_req_policy[key].shift == -1
elif key == SampleBatch.PREV_REWARDS:
assert view_req_policy[key].data_col == SampleBatch.REWARDS
assert view_req_policy[key].shift == -1
elif key not in [
SampleBatch.NEXT_OBS, SampleBatch.PREV_ACTIONS,
SampleBatch.PREV_REWARDS
]:
assert view_req_policy[key].data_col is None
else:
assert view_req_policy[key].data_col == SampleBatch.OBS
assert view_req_policy[key].shift == 1
trainer.stop()
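    # Worked sketch of the shift semantics asserted above (not original code):
    #
    #   actions      = [a0, a1, a2, ...]
    #   prev_actions = [ 0, a0, a1, ...]   # data_col=ACTIONS, shift=-1
    #   obs          = [o0, o1, o2, ...]
    #   new_obs      = [o1, o2, o3, ...]   # data_col=OBS, shift=+1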
def test_traj_view_lstm_performance(self):
"""Test whether PPOTrainer runs faster w/ `_use_trajectory_view_api`.
"""
config = copy.deepcopy(ppo.DEFAULT_CONFIG)
action_space = Discrete(2)
obs_space = Box(-1.0, 1.0, shape=(700, ))
from ray.rllib.examples.env.random_env import RandomMultiAgentEnv
from ray.tune import register_env
register_env("ma_env", lambda c: RandomMultiAgentEnv({
"num_agents": 2,
"p_done": 0.01,
"action_space": action_space,
"observation_space": obs_space
}))
config["num_workers"] = 3
config["num_envs_per_worker"] = 8
config["num_sgd_iter"] = 6
config["model"]["use_lstm"] = True
config["model"]["lstm_use_prev_action_reward"] = True
config["model"]["max_seq_len"] = 100
policies = {
"pol0": (None, obs_space, action_space, {}),
}
def policy_fn(agent_id):
return "pol0"
config["multiagent"] = {
"policies": policies,
"policy_mapping_fn": policy_fn,
}
num_iterations = 1
# Only works in torch so far.
for _ in framework_iterator(config, frameworks="torch"):
print("w/ traj. view API (and time-major)")
config["_use_trajectory_view_api"] = True
config["model"]["_time_major"] = True
trainer = ppo.PPOTrainer(config=config, env="ma_env")
learn_time_w = 0.0
sampler_perf = {}
start = time.time()
for i in range(num_iterations):
out = trainer.train()
sampler_perf_ = out["sampler_perf"]
sampler_perf = {
k: sampler_perf.get(k, 0.0) + sampler_perf_[k]
for k, v in sampler_perf_.items()
}
delta = out["timers"]["learn_time_ms"] / 1000
learn_time_w += delta
print("{}={}s".format(i, delta))
sampler_perf = {
k: sampler_perf[k] / (num_iterations if "mean_" in k else 1)
for k, v in sampler_perf.items()
}
duration_w = time.time() - start
print("Duration: {}s "
"sampler-perf.={} learn-time/iter={}s".format(
duration_w, sampler_perf, learn_time_w / num_iterations))
trainer.stop()
print("w/o traj. view API (and w/o time-major)")
config["_use_trajectory_view_api"] = False
config["model"]["_time_major"] = False
trainer = ppo.PPOTrainer(config=config, env="ma_env")
learn_time_wo = 0.0
sampler_perf = {}
start = time.time()
for i in range(num_iterations):
out = trainer.train()
sampler_perf_ = out["sampler_perf"]
sampler_perf = {
k: sampler_perf.get(k, 0.0) + sampler_perf_[k]
for k, v in sampler_perf_.items()
}
delta = out["timers"]["learn_time_ms"] / 1000
learn_time_wo += delta
print("{}={}s".format(i, delta))
sampler_perf = {
k: sampler_perf[k] / (num_iterations if "mean_" in k else 1)
for k, v in sampler_perf.items()
}
duration_wo = time.time() - start
print("Duration: {}s "
"sampler-perf.={} learn-time/iter={}s".format(
duration_wo, sampler_perf,
learn_time_wo / num_iterations))
trainer.stop()
# Assert `_use_trajectory_view_api` is much faster.
self.assertLess(duration_w, duration_wo)
self.assertLess(learn_time_w, learn_time_wo * 0.6)
def test_traj_view_lstm_functionality(self):
action_space = Box(-float("inf"), float("inf"), shape=(2, ))
obs_space = Box(float("-inf"), float("inf"), (4, ))
max_seq_len = 50
policies = {
"pol0": (EpisodeEnvAwarePolicy, obs_space, action_space, {}),
}
def policy_fn(agent_id):
return "pol0"
rollout_worker = RolloutWorker(
env_creator=lambda _: MultiAgentDebugCounterEnv({"num_agents": 4}),
policy_config={
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_fn,
},
"_use_trajectory_view_api": True,
"model": {
"use_lstm": True,
"_time_major": True,
"max_seq_len": max_seq_len,
},
},
policy=policies,
policy_mapping_fn=policy_fn,
num_envs=1,
)
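        # With `_time_major=True`, the per-policy buffers below are laid out
        # as [time, agent_slot, ...], which is why they are indexed with
        # buffers[...][:, agent_slot] further down.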
for i in range(100):
pc = rollout_worker.sampler.sample_collector. \
policy_sample_collectors["pol0"]
sample_batch_offset_before = pc.sample_batch_offset
buffers = pc.buffers
result = rollout_worker.sample()
pol_batch = result.policy_batches["pol0"]
self.assertTrue(result.count == 100)
self.assertTrue(pol_batch.count >= 100)
self.assertFalse(0 in pol_batch.seq_lens)
# Check prev_reward/action, next_obs consistency.
for t in range(max_seq_len):
obs_t = pol_batch["obs"][t]
r_t = pol_batch["rewards"][t]
if t > 0:
next_obs_t_m_1 = pol_batch["new_obs"][t - 1]
self.assertTrue((obs_t == next_obs_t_m_1).all())
if t < max_seq_len - 1:
prev_rewards_t_p_1 = pol_batch["prev_rewards"][t + 1]
self.assertTrue((r_t == prev_rewards_t_p_1).all())
            # Check the sanity of all the buffers in the underlying
            # PerPolicy collector.
for sample_batch_slot, agent_slot in enumerate(
range(sample_batch_offset_before, pc.sample_batch_offset)):
t_buf = buffers["t"][:, agent_slot]
obs_buf = buffers["obs"][:, agent_slot]
# Skip empty seqs at end (these won't be part of the batch
# and have been copied to new agent-slots (even if seq-len=0)).
if sample_batch_slot < len(pol_batch.seq_lens):
seq_len = pol_batch.seq_lens[sample_batch_slot]
# Make sure timesteps are always increasing within the seq.
assert all(t_buf[1] + j == n + 1
for j, n in enumerate(t_buf)
if j < seq_len and j != 0)
# Make sure all obs within seq are non-0.0.
assert all(
any(obs_buf[j] != 0.0) for j in range(1, seq_len + 1))
# Check seq-lens.
for agent_slot, seq_len in enumerate(pol_batch.seq_lens):
if seq_len < max_seq_len - 1:
# At least in the beginning, the next slots should always
# be empty (once all agent slots have been used once, these
# may be filled with "old" values (from longer sequences)).
if i < 10:
self.assertTrue(
(pol_batch["obs"][seq_len +
1][agent_slot] == 0.0).all())
self.assertFalse(
(pol_batch["obs"][seq_len][agent_slot] == 0.0).all())
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 42.895683
| 79
| 0.548763
|
4a182441a318dfc3a49ce4422e5ab0aaf5976da3
| 2,107
|
py
|
Python
|
command/avatar.py
|
andrewnijmeh/utilitybot
|
9d5171479949240d633e1fe566e3cf8e4f34e693
|
[
"MIT"
] | 1
|
2021-09-24T22:48:33.000Z
|
2021-09-24T22:48:33.000Z
|
command/avatar.py
|
FFlop/utilitybot
|
9d5171479949240d633e1fe566e3cf8e4f34e693
|
[
"MIT"
] | null | null | null |
command/avatar.py
|
FFlop/utilitybot
|
9d5171479949240d633e1fe566e3cf8e4f34e693
|
[
"MIT"
] | null | null | null |
"""MIT License
Copyright (c) 2020 utilitybot.co
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import discord
from discord.ext import commands
class Avatar(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
    @commands.command(aliases=["av"])
    async def avatar(self, ctx, *, user: discord.Member = None):
        if user is None:
            user = ctx.author
        member = None
        if ctx.guild:
            member = ctx.guild.get_member(user.id)
        await ctx.send(embed=discord.Embed(
            color=member.color if member else ctx.author.color
        ).set_image(
            url=str(user.avatar_url_as(static_format='png', size=2048))
        ))
def setup(bot):
bot.add_cog(Avatar(bot))
    bot.logging.info('Loaded avatar command!')
| 36.327586
| 79
| 0.672995
|
4a18258ee163042c8ec7e45c3f381798de5da124
| 21,394
|
py
|
Python
|
abps/agents/dqn/dqn_agent.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 7
|
2020-03-15T12:14:07.000Z
|
2021-12-01T07:01:09.000Z
|
abps/agents/dqn/dqn_agent.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 25
|
2020-07-25T08:53:09.000Z
|
2022-03-12T00:43:02.000Z
|
abps/agents/dqn/dqn_agent.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A DQN Agent.
Implements the DQN algorithm from
"Human level control through deep reinforcement learning"
Mnih et al., 2015
https://deepmind.com/research/dqn/
"""
import collections
from abps import tf_agent
import gin
import tensorflow.compat.v2 as tf
from tf_agents.policies import boltzmann_policy
from tf_agents.policies import epsilon_greedy_policy
from tf_agents.policies import greedy_policy
from tf_agents.policies import q_policy
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
from tf_agents.utils import value_ops
class DqnLossInfo(
collections.namedtuple('DqnLossInfo', ('td_loss', 'td_error'))):
"""DqnLossInfo is stored in the `extras` field of the LossInfo instance.
Both `td_loss` and `td_error` have a validity mask applied to ensure that
no loss or error is calculated for episode boundaries.
  td_loss: The **weighted** TD loss (depends on the choice of loss metric and
    any weights passed to the DQN loss function).
td_error: The **unweighted** TD errors, which are just calculated as:
```
td_error = td_targets - q_values
```
These can be used to update Prioritized Replay Buffer priorities.
Note that, unlike `td_loss`, `td_error` may contain a time dimension when
training with RNN mode. For `td_loss`, this axis is averaged out.
"""
pass
# TODO: These element-wise loss helpers do not really belong in
# this file. Move them to utils/common or utils/losses.
def element_wise_squared_loss(x, y):
return tf.compat.v1.losses.mean_squared_error(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def element_wise_huber_loss(x, y):
return tf.compat.v1.losses.huber_loss(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def compute_td_targets(next_q_values, rewards, discounts):
return tf.stop_gradient(rewards + discounts * next_q_values)
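# Illustrative check (not part of the original file): for next-state values
# [1.0, 2.0], rewards [0.5, 0.5] and discounts [0.99, 0.0] (the second
# transition ends its episode), the targets are 0.5 + 0.99 * 1.0 = 1.49 and
# 0.5 + 0.0 * 2.0 = 0.5.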
@gin.configurable
class DqnAgent(tf_agent.TFAgent):
"""A DQN Agent.
Implements the DQN algorithm from
"Human level control through deep reinforcement learning"
Mnih et al., 2015
https://deepmind.com/research/dqn/
This agent also implements n-step updates. See "Rainbow: Combining
Improvements in Deep Reinforcement Learning" by Hessel et al., 2017, for a
discussion on its benefits: https://arxiv.org/abs/1710.02298
"""
def __init__(
self,
time_step_spec,
action_spec,
q_network,
optimizer,
epsilon_greedy=0.1,
n_step_update=1,
boltzmann_temperature=None,
emit_log_probability=False,
update_period=None,
# Params for target network updates
target_update_tau=1.0,
target_update_period=1,
# Params for training.
td_errors_loss_fn=None,
gamma=1.0,
reward_scale_factor=1.0,
gradient_clipping=None,
# Params for debugging
debug_summaries=False,
enable_functions=True,
summarize_grads_and_vars=False,
train_step_counter=None,
name=None):
"""Creates a DQN Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
q_network: A tf_agents.network.Network to be used by the agent. The
network will be called with call(observation, step_type).
optimizer: The optimizer to use for training.
epsilon_greedy: probability of choosing a random action in the default
epsilon-greedy collect policy (used only if a wrapper is not provided to
the collect_policy method).
n_step_update: The number of steps to consider when computing TD error and
TD loss. Defaults to single-step updates. Note that this requires the
user to call train on Trajectory objects with a time dimension of
`n_step_update + 1`. However, note that we do not yet support
`n_step_update > 1` in the case of RNNs (i.e., non-empty
`q_network.state_spec`).
boltzmann_temperature: Temperature value to use for Boltzmann sampling of
the actions during data collection. The closer to 0.0, the higher the
probability of choosing the best action.
emit_log_probability: Whether policies emit log probabilities or not.
update_period: Update period.
target_update_tau: Factor for soft update of the target networks.
target_update_period: Period for soft update of the target networks.
td_errors_loss_fn: A function for computing the TD errors loss. If None, a
default value of element_wise_huber_loss is used. This function takes as
input the target and the estimated Q values and returns the loss for
each element of the batch.
gamma: A discount factor for future rewards.
reward_scale_factor: Multiplicative scale for the reward.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
enable_functions: A bool to decide whether or not to enable tf function
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall under
that name. Defaults to the class name.
Raises:
ValueError: If the action spec contains more than one action or action
spec minimum is not equal to 0.
NotImplementedError: If `q_network` has non-empty `state_spec` (i.e., an
RNN is provided) and `n_step_update > 1`.
"""
tf.Module.__init__(self, name=name)
flat_action_spec = tf.nest.flatten(action_spec)
self._num_actions = [
spec.maximum - spec.minimum + 1 for spec in flat_action_spec
]
if len(flat_action_spec) > 1 or flat_action_spec[0].shape.ndims > 1:
raise ValueError('Only one dimensional actions are supported now.')
if not all(spec.minimum == 0 for spec in flat_action_spec):
raise ValueError(
'Action specs should have minimum of 0, but saw: {0}'.format(
[spec.minimum for spec in flat_action_spec]))
if epsilon_greedy is not None and boltzmann_temperature is not None:
raise ValueError(
'Configured both epsilon_greedy value {} and temperature {}, '
'however only one of them can be used for exploration.'.format(
epsilon_greedy, boltzmann_temperature))
self._q_network = q_network
self._target_q_network = self._q_network.copy(name='TargetQNetwork')
self._epsilon_greedy = epsilon_greedy
self._n_step_update = n_step_update
self._boltzmann_temperature = boltzmann_temperature
self._optimizer = optimizer
self._td_errors_loss_fn = td_errors_loss_fn or element_wise_huber_loss
self._gamma = gamma
self._reward_scale_factor = reward_scale_factor
self._gradient_clipping = gradient_clipping
self._update_target = self._get_target_updater(target_update_tau,
target_update_period)
policy = q_policy.QPolicy(
time_step_spec,
action_spec,
q_network=self._q_network,
emit_log_probability=emit_log_probability)
if boltzmann_temperature is not None:
collect_policy = boltzmann_policy.BoltzmannPolicy(
policy, temperature=self._boltzmann_temperature)
else:
collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
policy, epsilon=self._epsilon_greedy)
policy = greedy_policy.GreedyPolicy(policy)
if q_network.state_spec and n_step_update != 1:
raise NotImplementedError(
'DqnAgent does not currently support n-step updates with stateful '
'networks (i.e., RNNs), but n_step_update = {}'.format(n_step_update))
train_sequence_length = (
n_step_update + 1 if not q_network.state_spec else None)
super(DqnAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=train_sequence_length,
update_period=update_period,
debug_summaries=debug_summaries,
enable_functions=enable_functions,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter)
tf.compat.v1.summary.scalar(
'epsilon/' + self.name,
self._epsilon_greedy,
collections=['train_' + self.name])
def _initialize_v1(self):
self._q_network.create_variables()
if self._target_q_network:
self._target_q_network.create_variables()
return common.soft_variables_update(
self._q_network.variables, self._target_q_network.variables, tau=1.0)
def _initialize(self):
common.soft_variables_update(
self._q_network.variables, self._target_q_network.variables, tau=1.0)
def _get_target_updater(self, tau=1.0, period=1):
"""Performs a soft update of the target network parameters.
For each weight w_s in the q network, and its corresponding
weight w_t in the target_q_network, a soft update is:
w_t = (1 - tau) * w_t + tau * w_s
Args:
tau: A float scalar in [0, 1]. Default `tau=1.0` means hard update.
period: Step interval at which the target network is updated.
Returns:
A callable that performs a soft update of the target network parameters.
"""
with tf.name_scope('update_targets'):
def update():
return common.soft_variables_update(self._q_network.variables,
self._target_q_network.variables,
tau)
return common.Periodically(update, period, 'periodic_update_targets')
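  # Numeric sketch of the soft update above (illustrative): with tau=0.01, a
  # target weight w_t = 0.0 tracking a source weight w_s = 1.0 becomes
  # (1 - 0.01) * 0.0 + 0.01 * 1.0 = 0.01 after one periodic update, so the
  # target network trails the online network with an exponential lag.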
def _experience_to_transitions(self, experience):
transitions = trajectory.to_transition(experience)
# Remove time dim if we are not using a recurrent network.
if not self._q_network.state_spec:
transitions = tf.nest.map_structure(lambda x: tf.squeeze(x, [1]),
transitions)
time_steps, policy_steps, next_time_steps = transitions
actions = policy_steps.action
return time_steps, actions, next_time_steps
  # Use @common.function in graph mode or to speed up eager execution.
def _train(self, experience, weights):
with tf.GradientTape() as tape:
loss_info = self._loss(
experience,
td_errors_loss_fn=self._td_errors_loss_fn,
gamma=self._gamma,
reward_scale_factor=self._reward_scale_factor,
weights=weights)
tf.debugging.check_numerics(loss_info[0], 'Loss is inf or nan')
variables_to_train = self._q_network.trainable_weights
assert list(variables_to_train), "No variables in the agent's q_network."
grads = tape.gradient(loss_info.loss, variables_to_train)
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(grads, variables_to_train))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,
self._gradient_clipping)
if self._summarize_grads_and_vars:
eager_utils.add_variables_summaries(grads_and_vars,
self.train_step_counter)
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
self._optimizer.apply_gradients(
grads_and_vars, global_step=self.train_step_counter)
self._update_target()
return loss_info
def _train_v1(self, experience, weights):
with tf.GradientTape() as tape:
loss_info = self._loss(
experience,
td_errors_loss_fn=self._td_errors_loss_fn,
gamma=self._gamma,
reward_scale_factor=self._reward_scale_factor,
weights=weights)
tf.debugging.check_numerics(loss_info[0], 'Loss is inf or nan')
variables_to_train = self._q_network.trainable_weights
assert list(variables_to_train), "No variables in the agent's q_network."
grads = tape.gradient(loss_info.loss, variables_to_train)
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(grads, variables_to_train))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,
self._gradient_clipping)
if self._summarize_grads_and_vars:
eager_utils.add_variables_summaries(grads_and_vars,
self.train_step_counter)
eager_utils.add_gradients_summaries(grads_and_vars,
self.train_step_counter)
train_op = self._optimizer.apply_gradients(
grads_and_vars, global_step=self.train_step_counter)
update_op = self._update_target()
train_op = tf.group(train_op, update_op)
return train_op, loss_info
def _loss(self,
experience,
td_errors_loss_fn=element_wise_huber_loss,
gamma=1.0,
reward_scale_factor=1.0,
weights=None):
"""Computes loss for DQN training.
Args:
experience: A batch of experience data in the form of a `Trajectory`. The
structure of `experience` must match that of `self.policy.step_spec`.
All tensors in `experience` must be shaped `[batch, time, ...]` where
`time` must be equal to `self.train_sequence_length` if that property is
not `None`.
td_errors_loss_fn: A function(td_targets, predictions) to compute the
element wise loss.
gamma: Discount for future rewards.
reward_scale_factor: Multiplicative factor to scale rewards.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights. The output td_loss will be scaled by these weights, and the
final scalar loss is the mean of these values.
Returns:
loss: An instance of `DqnLossInfo`.
Raises:
ValueError:
if the number of actions is greater than 1.
"""
# Check that `experience` includes two outer dimensions [B, T, ...]. This
# method requires `experience` to include the time dimension.
self._check_trajectory_dimensions(experience)
if self._n_step_update == 1:
time_steps, actions, next_time_steps = self._experience_to_transitions(
experience)
else:
# To compute n-step returns, we need the first time steps, the first
# actions, and the last time steps. Therefore we extract the first and
# last transitions from our Trajectory.
first_two_steps = tf.nest.map_structure(lambda x: x[:, :2], experience)
last_two_steps = tf.nest.map_structure(lambda x: x[:, -2:], experience)
time_steps, actions, _ = self._experience_to_transitions(first_two_steps)
_, _, next_time_steps = self._experience_to_transitions(last_two_steps)
with tf.name_scope('loss'):
actions = tf.nest.flatten(actions)[0]
q_values, _ = self._q_network(time_steps.observation,
time_steps.step_type)
# Handle action_spec.shape=(), and shape=(1,) by using the
# multi_dim_actions param.
multi_dim_actions = tf.nest.flatten(self._action_spec)[0].shape.ndims > 0
q_values = common.index_with_actions(
q_values,
tf.cast(actions, dtype=tf.int32),
multi_dim_actions=multi_dim_actions)
next_q_values = self._compute_next_q_values(next_time_steps)
if self._n_step_update == 1:
# Special case for n = 1 to avoid a loss of performance.
td_targets = compute_td_targets(
next_q_values,
rewards=reward_scale_factor * next_time_steps.reward,
discounts=gamma * next_time_steps.discount)
else:
# When computing discounted return, we need to throw out the last time
# index of both reward and discount, which are filled with dummy values
# to match the dimensions of the observation.
# TODO(b/131557265): Replace value_ops.discounted_return with a method
# that only computes the single value needed.
n_step_return = value_ops.discounted_return(
rewards=reward_scale_factor * experience.reward[:, :-1],
discounts=gamma * experience.discount[:, :-1],
final_value=next_q_values,
time_major=False)
# We only need the first value within the time dimension which
# corresponds to the full final return. The remaining values are only
# partial returns.
td_targets = n_step_return[:, 0]
valid_mask = tf.cast(~time_steps.is_last(), tf.float32)
td_error = valid_mask * (td_targets - q_values)
td_loss = valid_mask * td_errors_loss_fn(td_targets, q_values)
if nest_utils.is_batched_nested_tensors(
time_steps, self.time_step_spec, num_outer_dims=2):
# Do a sum over the time dimension.
td_loss = tf.reduce_sum(input_tensor=td_loss, axis=1)
if weights is not None:
td_loss *= weights
# Average across the elements of the batch.
# Note: We use an element wise loss above to ensure each element is always
# weighted by 1/N where N is the batch size, even when some of the
# weights are zero due to boundary transitions. Weighting by 1/K where K
# is the actual number of non-zero weight would artificially increase
# their contribution in the loss. Think about what would happen as
# the number of boundary samples increases.
loss = tf.reduce_mean(input_tensor=td_loss)
with tf.name_scope('Losses/'):
tf.compat.v1.summary.scalar(
'loss_' + self.name, loss, collections=['train_' + self.name])
# family=self.name)
if self._summarize_grads_and_vars:
with tf.name_scope('Variables/'):
for var in self._q_network.trainable_weights:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter)
if self._debug_summaries:
diff_q_values = q_values - next_q_values
common.generate_tensor_summaries('td_error', td_error,
self.train_step_counter)
common.generate_tensor_summaries('td_loss', td_loss,
self.train_step_counter)
common.generate_tensor_summaries('q_values', q_values,
self.train_step_counter)
common.generate_tensor_summaries('next_q_values', next_q_values,
self.train_step_counter)
common.generate_tensor_summaries('diff_q_values', diff_q_values,
self.train_step_counter)
return tf_agent.LossInfo(loss,
DqnLossInfo(td_loss=td_loss, td_error=td_error))
def _compute_next_q_values(self, next_time_steps):
"""Compute the q value of the next state for TD error computation.
Args:
next_time_steps: A batch of next timesteps
Returns:
A tensor of Q values for the given next state.
"""
next_target_q_values, _ = self._target_q_network(
next_time_steps.observation, next_time_steps.step_type)
# Reduce_max below assumes q_values are [BxF] or [BxTxF]
assert next_target_q_values.shape.ndims in [2, 3]
return tf.reduce_max(input_tensor=next_target_q_values, axis=-1)
@gin.configurable
class DdqnAgent(DqnAgent):
"""A Double DQN Agent.
Implements the Double-DQN algorithm from
"Deep Reinforcement Learning with Double Q-learning"
Hasselt et al., 2015
https://arxiv.org/abs/1509.06461
"""
def _compute_next_q_values(self, next_time_steps):
"""Compute the q value of the next state for TD error computation.
Args:
next_time_steps: A batch of next timesteps
Returns:
A tensor of Q values for the given next state.
"""
# TODO(b/117175589): Add binary tests for DDQN.
next_q_values, _ = self._q_network(next_time_steps.observation,
next_time_steps.step_type)
best_next_actions = tf.cast(
tf.argmax(input=next_q_values, axis=-1), dtype=tf.int32)
next_target_q_values, _ = self._target_q_network(
next_time_steps.observation, next_time_steps.step_type)
multi_dim_actions = best_next_actions.shape.ndims > 1
return common.index_with_actions(
next_target_q_values,
best_next_actions,
multi_dim_actions=multi_dim_actions)
| 40.366038
| 80
| 0.688698
|
4a182664cfb361faab81c76e42ac8b826e13de5b
| 5,696
|
py
|
Python
|
utils/LM.py
|
tanyinghui/Minimal-Hand-pytorch
|
3e991af9be0475ebc761fec3f13d00f81146631a
|
[
"MIT"
] | 158
|
2021-03-02T15:16:33.000Z
|
2022-03-27T12:06:02.000Z
|
utils/LM.py
|
maitetsu/Minimal-Hand-pytorch
|
12f2664e94b94c95196a4ad789077946350f5e7c
|
[
"MIT"
] | 55
|
2021-03-23T18:47:51.000Z
|
2022-03-28T14:56:56.000Z
|
utils/LM.py
|
maitetsu/Minimal-Hand-pytorch
|
12f2664e94b94c95196a4ad789077946350f5e7c
|
[
"MIT"
] | 47
|
2021-03-03T01:38:27.000Z
|
2022-03-26T05:23:43.000Z
|
# Copyright (c) Hao Meng. All Rights Reserved.
# import time
import numpy as np
import torch
from manopth.manolayer import ManoLayer
from utils import bone
class LM_Solver():
def __init__(self, num_Iter=500, th_beta=None, th_pose=None, lb_target=None,
weight=0.01):
self.count = 0
# self.time_start = time.time()
# self.time_in_mano = 0
self.minimal_loss = 9999
self.best_beta = np.zeros([10, 1])
self.num_Iter = num_Iter
self.th_beta = th_beta
self.th_pose = th_pose
self.beta = th_beta.numpy()
self.pose = th_pose.numpy()
self.mano_layer = ManoLayer(side="right",
mano_root='mano/models', use_pca=False, flat_hand_mean=True)
self.threshold_stop = 10 ** -13
self.weight = weight
self.residual_memory = []
self.lb = np.zeros(21)
_, self.joints = self.mano_layer(self.th_pose, self.th_beta)
self.joints = self.joints.numpy().reshape(21, 3)
self.lb_target = lb_target.reshape(15, 1)
# self.test_time = 0
def update(self, beta_):
beta = beta_.copy()
self.count += 1
# now = time.time()
my_th_beta = torch.from_numpy(beta).float().reshape(1, 10)
_, joints = self.mano_layer(self.th_pose, my_th_beta)
# self.time_in_mano = time.time() - now
useful_lb = bone.caculate_length(joints, label="useful")
lb_ref = useful_lb[6]
return useful_lb, lb_ref
def new_cal_ref_bone(self, _shape):
# now = time.time()
parent_index = [0,
0, 1, 2,
0, 4, 5,
0, 7, 8,
0, 10, 11,
0, 13, 14
]
# index = [0,
# 1, 2, 3, # index
# 4, 5, 6, # middle
# 7, 8, 9, # pinky
# 10, 11, 12, # ring
# 13, 14, 15] # thumb
reoder_index = [
13, 14, 15,
1, 2, 3,
4, 5, 6,
10, 11, 12,
7, 8, 9]
shape = torch.Tensor(_shape.reshape((-1, 10)))
th_v_shaped = torch.matmul(self.mano_layer.th_shapedirs,
shape.transpose(1, 0)).permute(2, 0, 1) \
+ self.mano_layer.th_v_template
th_j = torch.matmul(self.mano_layer.th_J_regressor, th_v_shaped)
temp1 = th_j.clone().detach()
temp2 = th_j.clone().detach()[:, parent_index, :]
result = temp1 - temp2
result = torch.norm(result, dim=-1, keepdim=True)
ref_len = result[:, [4]]
result = result / ref_len
# self.time_in_mano = time.time() - now
return torch.squeeze(result, dim=-1)[:, reoder_index].cpu().numpy()
def get_residual(self, beta_):
beta = beta_.copy()
lb, lb_ref = self.update(beta)
lb = lb.reshape(45, 1)
return lb / lb_ref - self.lb_target
def get_count(self):
return self.count
def get_bones(self, beta_):
beta = beta_.copy()
lb, _ = self.update(beta)
lb = lb.reshape(15, 1)
return lb
# Vectorization implementation
def batch_get_l2_loss(self, beta_):
weight = 1e-5
beta = beta_.copy()
temp = self.new_cal_ref_bone(beta)
loss = np.transpose(temp)
loss = np.linalg.norm(loss - self.lb_target, axis=0) ** 2 + \
weight * np.linalg.norm(beta, axis=-1)
return loss
def new_get_derivative(self, beta_):
# params: beta_ 10*1
# return: 1*10
beta = beta_.copy().reshape((1, 10))
temp_shape = np.zeros((20, beta.shape[1])) # 20*10
step = 0.01
        for t2 in range(10):  # parameter position
t3 = 10 + t2
temp_shape[t2] = beta.copy()
temp_shape[t3] = beta.copy()
temp_shape[t2, t2] += step
temp_shape[t3, t2] -= step
res = self.batch_get_l2_loss(temp_shape)
d = res[0:10] - res[10:20] # 10*1
d = d.reshape((1, 10)) / (2 * step)
return d
# LM algorithm
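    # Each iteration solves (J^T J + u*I) * delta = J^T * r for the update
    # delta; the damping u shrinks when the loss improves (Gauss-Newton-like
    # steps) and grows otherwise (gradient-descent-like steps).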
def LM(self):
u = 1e-2
v = 1.5
beta = self.beta.reshape(10, 1)
out_n = 1
# num_beta = np.shape(beta)[0] # the number of beta
# calculating the init Jocobian matrix
Jacobian = np.zeros([out_n, beta.shape[0]])
last_update = 0
last_loss = 0
# self.test_time = 0
for i in range(self.num_Iter):
# loss = self.new_get_loss(beta)
loss = self.batch_get_l2_loss(beta)
loss = loss[0]
if loss < self.minimal_loss:
self.minimal_loss = loss
self.best_beta = beta
if abs(loss - last_loss) < self.threshold_stop:
# self.time_total = time.time() - self.time_start
return beta
# for k in range(num_beta):
# Jacobian[:, k] = self.get_derivative(beta, k)
Jacobian = self.new_get_derivative(beta)
jtj = np.matmul(Jacobian.T, Jacobian)
jtj = jtj + u * np.eye(jtj.shape[0])
update = last_loss - loss
delta = (np.matmul(np.linalg.inv(jtj), Jacobian.T) * loss)
beta -= delta
if update > last_update and update > 0:
u /= v
else:
u *= v
last_update = update
last_loss = loss
self.residual_memory.append(loss)
return beta
def get_result(self):
return self.residual_memory
| 31.125683
| 96
| 0.515801
|
4a1826e55f928691297b32cce0fb43d9f6df4394
| 4,557
|
py
|
Python
|
bbox_helper.py
|
yhsmiley/ImageNet_Utils
|
5b4b56ca0f135b593b0d09c1874589032e6cda81
|
[
"MIT"
] | null | null | null |
bbox_helper.py
|
yhsmiley/ImageNet_Utils
|
5b4b56ca0f135b593b0d09c1874589032e6cda81
|
[
"MIT"
] | null | null | null |
bbox_helper.py
|
yhsmiley/ImageNet_Utils
|
5b4b56ca0f135b593b0d09c1874589032e6cda81
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from PIL import Image
import sys
import zipfile
import xml.etree.ElementTree as ET
import argparse
def scanAnnotationFolder(annotationFolderPath):
annotationFiles = []
for root, dirs, files in os.walk(annotationFolderPath):
for file in files:
if file.endswith('.xml'):
annotationFiles.append(os.path.join(root, file))
    if len(annotationFiles) == 0:
print("check input path")
return annotationFiles
# Bounding Box Helper
class BBoxHelper:
def __init__(self, annotation_file, image_path=None):
self.annotation_file = annotation_file
xmltree = ET.parse(annotation_file)
filename = xmltree.find('filename').text
wnid = filename.split('_')[0]
image_id = filename.split('_')[1]
# create a dict to save filename, wnid, image id, etc..
self.annotation_filename = filename
self.wnid = wnid
self.image_id = image_id
# find bounding box
objects = xmltree.findall('object')
self.rects = []
for object_iter in objects:
bndbox = object_iter.find("bndbox")
self.rects.append([int(it.text) for it in bndbox])
localPath = xmltree.find('path')
self.imgPath = None
if localPath is not None and os.path.exists(localPath.text):
self.imgPath = localPath.text
if image_path is not None:
self.imgPath = image_path
def saveBoundBoxImage(self, imgPath=None, image_dir=None):
if imgPath is not None:
self.imgPath = imgPath
if imgPath is None and self.imgPath is None:
self.imgPath = self.findImagePath()
outputFolder = os.path.join(image_dir, 'bounding_box_imgs')
# annotation_file_dir = os.path.dirname(os.path.realpath(self.annotation_file))
# outputFolder = os.path.join(annotation_file_dir, savedTargetDir)
if not os.path.exists(outputFolder):
os.mkdir(outputFolder)
try:
# Get crop images
bbs = []
im = Image.open(self.imgPath)
for box in self.rects:
bbs.append(im.crop(box))
# Save them to target dir
count = 0
for box in bbs:
count = count + 1
outPath = str(os.path.join(outputFolder, self.annotation_filename + '_box' + str(count) + '.jpg'))
box.save(outPath)
                print('save to ' + outPath)
        except Exception as e:
            if self.imgPath is None:
                print("Image file not found, skipping")
            else:
                print('Failed to crop bounding boxes: {}'.format(e))
def get_BoudingBoxs(self):
return self.rects
def getWnid(self):
return self.wnid
def findImagePath(self, search_folder='./downloaded_images'):
filename = self.annotation_filename + str('.JPEG')
for root, dirs, files in os.walk(search_folder):
for file in files:
if filename == file:
return os.path.join(root, file)
        print(filename + ' not found')
return None
def saveAsBoudingBoxImg(xmlfile, image_path=None, image_dir=None):
    bbhelper = BBoxHelper(xmlfile, image_path=image_path)
    print(bbhelper.findImagePath())
    # Search the image path according to the bounding box xml, and crop it
    if shouldSaveBoundingBoxImg:
        print(bbhelper.get_BoudingBoxs())
        bbhelper.saveBoundBoxImage(image_dir=image_dir)
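# Hypothetical CLI usage (the synset directory name is an assumption):
#   python bbox_helper.py --bxmldir ./n02084071 --save_boundingbox
# walks ./n02084071/Annotation for *.xml files and writes the cropped boxes
# to ./n02084071/bounding_box_imgs/.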
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Help the user to download, crop, and handle images from ImageNet')
p.add_argument('--bxmlpath', help='Boudingbox xml path')
p.add_argument('--bxmldir', help='Boudingbox dir path')
p.add_argument('--save_boundingbox', help='Search images and crop the bounding box by image paths', action='store_true', default=False)
args = p.parse_args()
# Give bounding_box XML and show its JPEG path and bounding rects
boundingbox_xml_file = args.bxmlpath
boundingbox_xml_dir = args.bxmldir
shouldSaveBoundingBoxImg = args.save_boundingbox
if boundingbox_xml_file is not None:
saveAsBoudingBoxImg(boundingbox_xml_file, image_dir=boundingbox_xml_dir)
if boundingbox_xml_dir is not None:
allAnnotationFiles = scanAnnotationFolder(os.path.join(boundingbox_xml_dir, 'Annotation'))
for xmlfile in allAnnotationFiles:
saveAsBoudingBoxImg(xmlfile, image_dir=boundingbox_xml_dir)
| 37.04878
| 139
| 0.640772
|
4a1827abf7d0e9a859230e48b0b4b7669ab02fae
| 8,563
|
py
|
Python
|
app/AnyPy.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 11
|
2021-01-03T07:31:56.000Z
|
2022-03-26T20:21:25.000Z
|
app/AnyPy.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 5
|
2021-01-04T07:22:32.000Z
|
2022-02-01T00:38:52.000Z
|
app/AnyPy.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 3
|
2021-03-06T17:00:26.000Z
|
2022-01-18T01:37:43.000Z
|
import datetime
import glob
import os
import re
import shutil
import subprocess
from anypytools import AnyMacro
from anypytools import AnyPyProcess
from anypytools.macro_commands import (MacroCommand, Load, Dump,
SaveData, OperationRun)
from AnyWriter import AnyWriter
from AnybodyResults import AnybodyResults
from config.Configuration import env
class AnyPy:
LOAD = 'load'
INITIAL_CONDITIONS = 'initial_conditions'
KINEMATICS = 'kinematics'
INVERSE_DYNAMICS = 'inverse_dynamics'
# SET_ORDER = 'set_order'
SAVE_H5 = 'save_h5'
LOAD_H5 = 'load_h5'
DUMP_JOINT_ANGLES = 'dump_angles'
DUMP_STEPS = 'dump_steps'
DUMP_LEAP_VECTORS = 'dump_leap_vectors'
REPLAY = 'replay'
LOG_FILE = 'AnyPy{}.log'.format(datetime.datetime.today().strftime('%Y%m%d_%H%M%S'))
INTERPOL_DIR = '/Model/InterpolVec'
def __init__(self, main_filepath, template_directory):
self.any_path, self.any_model = os.path.split(main_filepath)
self.main_filepath = main_filepath
self.template_directory = template_directory
self.operations = []
self.macrolist = []
self.output = None
if env.args('any_interpol_files'):
print('Using interpolation files from "{}"'.format(os.path.normpath(self.any_path + AnyPy.INTERPOL_DIR)))
if env.args('any_bvh_file'):
print("Convert bvh file to anybody interpolation files")
from resources.pymo.pymo.parsers import BVHParser as Pymo_BVHParser
any_writer = AnyWriter(template_directory='config/anybody_templates/',
output_directory=os.path.normpath(self.any_path + AnyPy.INTERPOL_DIR) + '/')
any_writer.write(Pymo_BVHParser().parse(env.config.any_bvh_file))
if env.args('any_files_dir'):
self.copy_files()
# remove frames from start and end (cut)
if env.config.start_frame or env.config.end_frame:
start_frame = int(env.config.start_frame) - 1 if env.config.start_frame else 0
end_frame = int(env.config.end_frame) - 1 if 'end' not in env.config.end_frame.lower() else None
any_writer = AnyWriter(output_directory=os.path.normpath(self.any_path + AnyPy.INTERPOL_DIR) + '/')
any_writer.extract_frames(start_frame, end_frame)
any_writer.extract_frame_timeseries(start_frame, end_frame)
self.output_path = ''
if env.args('output_file_path'):
self.output_path = os.path.normpath(
os.path.join(os.path.split(env.config.output_file_path)[0],
os.path.split(env.config.output_file_path)[1].replace(".anydata.h5", "") + '.anydata.h5'))
self.initialize_operations()
def initialize_operations(self):
"""build the macrolist executed by AnyPyTools"""
operation_cmd = {AnyPy.LOAD: Load(self.main_filepath),
AnyPy.INITIAL_CONDITIONS: OperationRun('Main.Study.InitialConditions'),
AnyPy.KINEMATICS: OperationRun('Main.Study.Kinematics'),
AnyPy.INVERSE_DYNAMICS: OperationRun('Main.Study.InverseDynamics'),
# AnyPy.SET_ORDER: SetValue('Main.HumanModel.Mannequin.InterpolationFunctions.intorder',
# env.config.order),
AnyPy.SAVE_H5: SaveData('Main.Study', self.output_path),
AnyPy.DUMP_JOINT_ANGLES: Dump('Main.Study.Output.JointAngleOutputs'),
AnyPy.DUMP_STEPS: Dump('Main.Study.nStep'),
AnyPy.DUMP_LEAP_VECTORS: Dump('Main.HumanModel.Mannequin.Posture.Right')}
if env.config.load:
self.add_operation(AnyPy.LOAD)
if env.config.initial_conditions:
self.add_operation(AnyPy.INITIAL_CONDITIONS)
if env.config.kinematic:
self.add_operation(AnyPy.KINEMATICS)
if env.config.inverse_dynamics:
self.add_operation(AnyPy.INVERSE_DYNAMICS)
if env.config.nstep:
self.set_step()
# if env.config.order:
# self.add_operation(AnyPy.SET_ORDER)
if env.config.plot:
# requirement for plot is run of kinematic analysis
self.add_operation(AnyPy.LOAD)
self.add_operation(AnyPy.KINEMATICS)
# dump interpolated joint angles
self.add_operation(AnyPy.DUMP_JOINT_ANGLES)
# dump nStep
self.add_operation(AnyPy.DUMP_STEPS)
# dump Mannequin vectors including the joint angles from the bvh file
self.add_operation(AnyPy.DUMP_LEAP_VECTORS)
if self.output_path:
# save study output to hdf5, to view and replay analysis later
self.add_operation(AnyPy.SAVE_H5)
for operation in self.operations:
self.macrolist.append(operation_cmd[operation])
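        # Illustrative result of the loop above: with only `load` and
        # `kinematic` selected, self.macrolist is roughly
        # [Load(<main file>), OperationRun('Main.Study.Kinematics')].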
    def post_operations(self):
        """Build the macro list for loading the saved study output and replaying it in the AnyBody GUI."""
        macro_output_path = 'classoperation Main.Study.Output "Load data" --file="{}"'.format(self.output_path)
        operation_cmd = {AnyPy.LOAD: Load(self.main_filepath),
                         AnyPy.LOAD_H5: MacroCommand(macro_output_path),
                         AnyPy.REPLAY: OperationRun("Main.Study.ReplayKinematics")}
self.macrolist = []
for operation in operation_cmd:
self.macrolist.append(str(operation_cmd[operation]))
print('Starting Anybody with the macros:\n{}'.format(self.macrolist))
print('Executing "{}" in "{}"'.format(self.any_path, self.any_model))
# save current working directory and change to Anybody project folder
cwd = os.getcwd()
os.chdir(self.any_path)
# write macro file to be opened by AnyBody GUI
macro_replay_path = os.path.join(self.any_path, 'replay.anymcr')
with open(macro_replay_path, 'wb') as macro_file:
macro_file.write("\n".join(self.macrolist).encode("UTF-8"))
macro_file.flush()
anybodycmd = [os.path.realpath('C:/Program Files/AnyBody Technology/AnyBody.7.1/AnyBody.exe'),
"-m", macro_file.name]
# execute AnyBody GUI with the command from anybodycmd
subprocess.Popen(anybodycmd)
# change back to original folder
os.chdir(cwd)
def add_operation(self, operation):
"""add operation to a list if not already in the list (unique)"""
if operation not in self.operations:
self.operations.append(operation)
    def copy_files(self):
        """Copy the interpolation template files into the AnyBody model folder."""
for file in glob.glob(self.template_directory + r'/*.any'):
print('copying "{}" to "{}"'.format(file,
os.path.normpath(self.any_path +
AnyPy.INTERPOL_DIR + "/" + os.path.split(file)[-1])))
shutil.copy(file, self.any_path + AnyPy.INTERPOL_DIR)
def run(self):
if not self.macrolist:
print("No operation for AnyBody was selected -> will terminate now")
return False
# print('Starting Anybody with the operations: {}'.format(self.operations))
print('Starting Anybody with the macros:\n{}'.format(AnyMacro(self.macrolist)))
print('Executing "{}" in "{}"'.format(self.any_path, self.any_model))
# save current working directory and change to Anybody project folder
cwd = os.getcwd()
os.chdir(self.any_path)
app = AnyPyProcess()
self.output = app.start_macro(macrolist=self.macrolist,
logfile=AnyPy.LOG_FILE)
# change back to original folder
os.chdir(cwd)
return True
def plot(self):
"""open the plot for the joint angles"""
print('Loading the plot ...')
AnybodyResults(self.output).plot()
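    # Illustrative example for set_step below: with env.config.nstep == 250,
    # a line such as 'nStep = 100;' in the main .any file is rewritten in
    # place to 'nStep = 250;'.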
def set_step(self):
"""replace the nstep value with the new selected value"""
regex_step = re.compile(r'nStep\s*=.*\d+.*;')
step_setting = 'nStep = {};'.format(env.config.nstep)
with open(self.main_filepath) as file:
old_file = file.read()
new_file = re.sub(regex_step, step_setting, old_file)
with open(self.main_filepath, 'w') as file:
file.write(new_file)
print('"{}" written to "{}"'.format(step_setting, self.main_filepath))
| 43.688776
| 119
| 0.62338
|
4a1827c6dac1371a8473a3e074f8710922692a87
| 524
|
py
|
Python
|
optimade_client/__init__.py
|
CasperWA/voila-optimade-client
|
4c4b6f51b063ceee6eef7bcbe36f08f4ffe6f6ec
|
[
"MIT"
] | null | null | null |
optimade_client/__init__.py
|
CasperWA/voila-optimade-client
|
4c4b6f51b063ceee6eef7bcbe36f08f4ffe6f6ec
|
[
"MIT"
] | 236
|
2020-09-14T09:30:50.000Z
|
2022-03-30T06:40:18.000Z
|
optimade_client/__init__.py
|
CasperWA/voila-optimade-client
|
4c4b6f51b063ceee6eef7bcbe36f08f4ffe6f6ec
|
[
"MIT"
] | 2
|
2020-11-10T16:01:17.000Z
|
2022-03-15T14:31:30.000Z
|
"""
OPTIMADE Client
Voilà/Jupyter client for searching through OPTIMADE databases.
"""
from .informational import OptimadeClientFAQ, HeaderDescription, OptimadeLog
from .query_provider import OptimadeQueryProviderWidget
from .query_filter import OptimadeQueryFilterWidget
from .summary import OptimadeSummaryWidget
__version__ = "2021.12.2"
__all__ = (
"HeaderDescription",
"OptimadeClientFAQ",
"OptimadeLog",
"OptimadeQueryProviderWidget",
"OptimadeQueryFilterWidget",
"OptimadeSummaryWidget",
)
| 24.952381
| 76
| 0.791985
|
4a1828346e9d22b00c43cfcacc282ba5349f2bc1
| 3,870
|
py
|
Python
|
ETL/scripts/utils_religion_features.py
|
qangelot/projet_Nantes
|
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
|
[
"MIT"
] | null | null | null |
ETL/scripts/utils_religion_features.py
|
qangelot/projet_Nantes
|
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
|
[
"MIT"
] | null | null | null |
ETL/scripts/utils_religion_features.py
|
qangelot/projet_Nantes
|
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import re
def get_year(date):
    x = re.findall(r'(\d{4})', date)
    if x:
        return x[0]
def get_month(date):
    x = re.findall(r'^[A-Z][a-zéû]+', date)
    if x:
        return x[0]
def get_day(date):
return date[-2:]
def merge_religious_events(data, date_col, religion_dfs):
""""
merge religion dataframes based on the length of the whole time serie
"""
# generate all dates between start and end
start = data[date_col].min()
end = data[date_col].max()
religion_df = pd.date_range(start, end, freq="D").to_frame(index=False, name="date")
for rel in religion_dfs:
religion_df = pd.merge(religion_df, rel, how='left', on='date')
religion_df = religion_df.fillna(0)
return religion_df
def events_in_ago(data, date_col, data_path):
""""
add features about how close and far we are from religious events
"""
# generate all dates within start and end
start = data[date_col].min()
end = data[date_col].max()
dfs = pd.date_range(start, end, freq="D").to_frame(index=False, name="_date")
# read external holidays csv
def _parser(date):
return pd.to_datetime(date)
events = pd.read_csv(f'{data_path}', parse_dates=['date'], date_parser=_parser)
for col in ['chretiennes', 'juives', 'ramadan', 'musulmanes']:
df = dfs.copy()
event = events.copy()
event = event[["date", col]]
event = event[event[col] != 0]
event = event.drop_duplicates()
# simulate an interval based left join using pandas
# perform a cross join on temp_key
low_bound = "date"
df['temp_key'] = 1
event['temp_key'] = 1
crossjoindf = pd.merge(df, event, on=['temp_key'])
df.drop(columns=['temp_key'], inplace=True)
crossjoindf.drop(columns=['temp_key'], inplace=True)
# filter with lower_bound
conditionnal_df = crossjoindf[(crossjoindf['_date'] == crossjoindf[low_bound])]
# merge on the main df with all cols as keys to simulate left join
df_col = df.columns.values.tolist()
conditionnal_df.set_index(df_col, inplace=True)
df = df.merge(conditionnal_df, left_on=df_col, right_index=True, how='left')
# find rows index corresponding to holidays
events_index = np.where(~df[col].isnull())[0]
# compute arrays of first day and last day of holidays
events_min_index = []
events_max_index = []
i = 0
while i < len(events_index):
j = 0
while i + j < len(events_index) and (events_index[i] + j) == events_index[i + j]:
j += 1
events_min_index.append(events_index[i])
events_max_index.append(events_index[i + j - 1])
i += j
indexes = range(0, len(df))
# compute for each index row the distance with the nearest upcoming public holiday
df[col + '_dans'] = [min([i - x for i in events_min_index if i > x], default=0) for x in indexes]
# compute for each index row the distance with the latest past holidays
df[ 'depuis_' + col] = [min([x - i for i in events_max_index if i < x], default=0) for x in indexes]
        # set '<col>_dans' and 'depuis_<col>' to 0 during the event itself
df.loc[~df[col].isnull(), col + '_dans'] = 0
df.loc[~df[col].isnull(), 'depuis_' + col] = 0
# we drop date col that was just useful to define lower_bound
# and we rename _date to have the same key name to join both dataframes
df.drop(columns=['date'], inplace=True)
df.rename(columns={"_date": "date"}, inplace=True)
df.set_index('date', inplace=True)
data = pd.merge(data, df[[col + '_dans', 'depuis_' + col]], on='date')
return data
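# Hypothetical usage sketch (the CSV path is an assumption):
#   base = pd.date_range("2020-01-01", "2020-12-31", freq="D").to_frame(
#       index=False, name="date")
#   enriched = events_in_ago(base, "date", "data/fetes_religieuses.csv")
# For each event family this appends a '<col>_dans' column (days until the
# next occurrence) and a 'depuis_<col>' column (days since the last one).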
| 33.362069
| 108
| 0.612403
|
4a1829820ac08ac25e909b55be16e1392270bbdf
| 4,334
|
py
|
Python
|
assignments/assignment2/layers.py
|
shereshevskiy/dlcourse_ai
|
3fb232b1b4f06fb30e222d019799e178f2b58125
|
[
"MIT"
] | 4
|
2019-03-27T09:17:18.000Z
|
2020-06-22T19:20:09.000Z
|
assignments/assignment2/layers.py
|
shereshevskiy/dlcourse_ai
|
3fb232b1b4f06fb30e222d019799e178f2b58125
|
[
"MIT"
] | 1
|
2019-03-23T20:18:42.000Z
|
2019-03-23T20:18:42.000Z
|
assignments/assignment2/layers.py
|
shereshevskiy/dlcourse_ai
|
3fb232b1b4f06fb30e222d019799e178f2b58125
|
[
"MIT"
] | null | null | null |
import numpy as np
from linear_classifer import softmax, cross_entropy_loss
def l2_regularization(W, reg_strength):
"""
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
"""
# TODO_: Copy from the previous assignment
# raise Exception("Not implemented!")
loss = (W * W).sum() * reg_strength
grad = 2 * W * reg_strength
return loss, grad
def softmax_with_cross_entropy(preds, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
preds: np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
d_preds, np array same shape as predictions - gradient of predictions by loss value
"""
# TODO_: Copy from the previous assignment
# raise Exception("Not implemented!")
preds = preds.copy()
probs = softmax(preds)
loss = cross_entropy_loss(probs, target_index).mean()
mask = np.zeros_like(preds)
mask[np.arange(len(mask)), target_index] = 1
# mask[target_index] = 1
    d_preds = (probs - mask) / mask.shape[0]
return loss, d_preds
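# A numeric gradient check is a cheap way to validate d_preds (illustrative
# sketch; assumes the check_gradient helper shipped with this assignment):
#   target_index = np.array([0, 1, 2])
#   check_gradient(lambda p: softmax_with_cross_entropy(p, target_index),
#                  np.random.randn(3, 4))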
class Param:
"""
Trainable parameter of the model
Captures both parameter value and the gradient
"""
def __init__(self, value):
self.value = value
self.grad = np.zeros_like(value)
class ReLULayer:
def __init__(self):
self.X = None
def forward(self, X):
# TODO_: Implement forward pass
# Hint: you'll need to save some information about X
# to use it later in the backward pass
# raise Exception("Not implemented!")
result = np.maximum(X, 0)
self.X = X
return result
def backward(self, d_out):
"""
Backward pass
Arguments:
d_out, np array (batch_size, num_features) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, num_features) - gradient
with respect to input
"""
# TODO_: Implement backward pass
# Your final implementation shouldn't have any loops
# raise Exception("Not implemented!")
d_X = (self.X > 0) * d_out
return d_X
def params(self):
# ReLU Doesn't have any parameters
return {}
class FullyConnectedLayer:
def __init__(self, n_input, n_output):
self.W = Param(0.001 * np.random.randn(n_input, n_output))
self.B = Param(0.001 * np.random.randn(1, n_output))
self.X = None
def forward(self, X):
# TODO_: Implement forward pass
# Your final implementation shouldn't have any loops
# raise Exception("Not implemented!")
W = self.W.value
B = self.B.value
self.X = Param(X)
out = np.dot(X, W) + B
return out
def backward(self, d_out):
"""
Backward pass
Computes gradient with respect to input and
accumulates gradients within self.W and self.B
Arguments:
d_out, np array (batch_size, n_output) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, n_input) - gradient
with respect to input
"""
# TODO_: Implement backward pass
# Compute both gradient with respect to input
# and gradients with respect to W and B
# Add gradients of W and B to their `grad` attribute
# It should be pretty similar to linear classifier from
# the previous assignment
# raise Exception("Not implemented!")
X = self.X.value
W = self.W.value
d_W = np.dot(X.T, d_out)
d_B = np.dot(np.ones((X.shape[0], 1)).T, d_out)
d_X = np.dot(d_out, W.T)
self.W.grad += d_W
self.B.grad += d_B
return d_X
def params(self):
return {'W': self.W, 'B': self.B}
| 27.43038
| 89
| 0.612367
|
4a18298dcca6f54b4c0ab6e0cdbdbcec9667148a
| 896
|
py
|
Python
|
exercicios_resolvidos3/exercicios3/capitulo 07/exercicio-07-05.py
|
tiagosm1/Python_Nilo_Ney
|
b5380dcc8fcf64e9c047ebc353585caba3d7b397
|
[
"MIT"
] | null | null | null |
exercicios_resolvidos3/exercicios3/capitulo 07/exercicio-07-05.py
|
tiagosm1/Python_Nilo_Ney
|
b5380dcc8fcf64e9c047ebc353585caba3d7b397
|
[
"MIT"
] | null | null | null |
exercicios_resolvidos3/exercicios3/capitulo 07/exercicio-07-05.py
|
tiagosm1/Python_Nilo_Ney
|
b5380dcc8fcf64e9c047ebc353585caba3d7b397
|
[
"MIT"
] | null | null | null |
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Novatec (c) 2010-2020
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
#
# Site: https://python.nilo.pro.br/
#
# File: exercicios3\capitulo 07\exercicio-07-05.py
##############################################################################
primeira = input("Enter the first string: ")
segunda = input("Enter the second string: ")
terceira = ""
for letra in primeira:
    if letra not in segunda:
        terceira += letra
if terceira == "":
    print("All characters were removed.")
else:
    print(f"The characters {segunda} were removed from {primeira}, producing: {terceira}")
| 33.185185
| 88
| 0.582589
|
4a182b67de26bfbe9bcb672cb4de6ad3da2d39c9
| 5,649
|
py
|
Python
|
test/functional/test_framework/socks5.py
|
CounosH/cch
|
880f3890127951cba9f6b235193d8c9a9536e075
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/socks5.py
|
CounosH/cch
|
880f3890127951cba9f6b235193d8c9a9536e075
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/socks5.py
|
CounosH/cch
|
880f3890127951cba9f6b235193d8c9a9536e075
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The CounosH Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Dummy Socks5 server for testing."""
import socket
import threading
import queue
import logging
logger = logging.getLogger("TestFramework.socks5")
# Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
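# Example of the wire format handled below (illustrative): a client greeting
# b'\x05\x01\x00' offers SOCKS version 5 and one auth method, 0x00 (no
# authentication); the server reply b'\x05\x00' selects it.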
# Utility functions
def recvall(s, n):
"""Receive n bytes from a socket, or fail."""
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
# Implementation classes
class Socks5Configuration():
"""Proxy configuration."""
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command():
"""Information about an incoming socks5 command."""
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection():
def __init__(self, serv, conn):
self.serv = serv
self.conn = conn
def handle(self):
"""Handle socks5 request according to RFC192."""
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
ver, cmd, _, atyp = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = recvall(self.conn, n)
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
            port_hi, port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
logger.info('Proxy: %s', cmdin)
# Fall through to disconnect
except Exception as e:
logger.exception("socks5 request handling failed.")
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server():
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, _) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert not self.running
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
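# Hedged usage sketch (not part of the original module): how the pieces fit
# together. The helper name and the address/port below are illustrative
# assumptions, not upstream API.
def _example_socks5_usage():  # pragma: no cover - illustrative only
    conf = Socks5Configuration()
    conf.addr = ('127.0.0.1', 19050)  # arbitrary local endpoint (assumption)
    conf.unauth = True                # accept unauthenticated clients
    server = Socks5Server(conf)
    server.start()
    # ... point a SOCKS5 client at 127.0.0.1:19050, then inspect the result:
    cmd = server.queue.get()          # blocks until a CONNECT (or error) arrives
    logger.info('observed %s', cmd)
    server.stop()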
jvm/Instructions.py | mcpython4-coding/JVMBridge | MIT
import array
import copy
import dis
import typing
from abc import ABC
from mcpython.mixin.util import PyOpcodes
import jvm.Java
import jvm.util
from jvm.api import BaseInstruction, AbstractRuntime, AbstractBytecodeContainer, AbstractStack
from jvm.api import PyBytecodeBuilder
from jvm.JavaExceptionStack import StackCollectingException
class OpcodeInstruction(BaseInstruction, ABC):
"""
    Base class for an opcode-based instruction
"""
OPCODES: typing.Set[int] = set()
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return None, 1
class CPLinkedInstruction(OpcodeInstruction, ABC):
"""
    Base class for instructions that carry a single constant pool reference.
    Commonly reused across the instruction set.
"""
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
pointer = jvm.util.U2.unpack(data[index: index + 2])[0] - 1
try:
return (
class_file.cp[pointer],
3,
)
except IndexError:
raise StackCollectingException(
f"during decoding instruction {cls.__name__} pointing to {pointer}"
).add_trace(f"current parsing index: {index}, class: {class_file.name}")
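    # Hedged aside: assuming jvm.util.U2 behaves like struct.Struct(">H"),
    # constant-pool references are big-endian u2 values and 1-based, hence
    # the "- 1" above. A stdlib-only sketch of the same decode:
    #
    #   import struct
    #   struct.unpack(">H", bytes([0x00, 0x05]))[0] - 1  # -> 4 (zero-based CP index)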
@AbstractBytecodeContainer.register_instruction
class NoOp(OpcodeInstruction):
# NoOp
OPCODES = {0x00}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack) -> bool:
pass
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.NOP)
@AbstractBytecodeContainer.register_instruction
class NoOpPop(OpcodeInstruction):
    # 0xC2 and 0xC3: monitorenter / monitorexit; since we are not threading, popping the object reference is sufficient
OPCODES = {0xC2, 0xC3}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.pop()
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.POP_TOP)
@AbstractBytecodeContainer.register_instruction
class Any2Byte(OpcodeInstruction):
# i2b
OPCODES = {0x91}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
v = stack.pop()
stack.push(int(v) if v is not None else v)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("B")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_NAME, builder.add_name("bytes"))
builder.add_instruction(PyOpcodes.ROT_TWO)
builder.add_instruction(PyOpcodes.BUILD_LIST, 1)
builder.add_instruction(PyOpcodes.CALL_FUNCTION, 1)
@AbstractBytecodeContainer.register_instruction
class Any2Float(OpcodeInstruction):
# i2f, d2f
OPCODES = {0x86, 0x90}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
v = stack.pop()
stack.push(float(v) if v is not None else v)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("F")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_NAME, builder.add_name("float"))
builder.add_instruction(PyOpcodes.ROT_TWO)
builder.add_instruction(PyOpcodes.CALL_FUNCTION, 1)
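    # The three emitted instructions above are the CPython (pre-3.11) spelling
    # of `float(tos)`: load the builtin, swap it under the value, call with one
    # argument. The *2Int/*2Long builders below follow the same pattern with `int`.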
@AbstractBytecodeContainer.register_instruction
class Any2Double(Any2Float):
# i2d, f2d, l2d
OPCODES = {0x87, 0x8D, 0x8A}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("D")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_NAME, builder.add_name("float"))
builder.add_instruction(PyOpcodes.ROT_TWO)
builder.add_instruction(PyOpcodes.CALL_FUNCTION, 1)
@AbstractBytecodeContainer.register_instruction
class Any2Int(OpcodeInstruction):
# d2i, f2i, l2i
OPCODES = {0x8E, 0x8B, 0x88}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
v = stack.pop()
stack.push(int(v) if v is not None else v)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("I")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_NAME, builder.add_name("int"))
builder.add_instruction(PyOpcodes.ROT_TWO)
builder.add_instruction(PyOpcodes.CALL_FUNCTION, 1)
@AbstractBytecodeContainer.register_instruction
class Any2Long(Any2Int):
# f2l
OPCODES = {0x8C, 0x85, 0x8F}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("J")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_NAME, builder.add_name("int"))
builder.add_instruction(PyOpcodes.ROT_TWO)
builder.add_instruction(PyOpcodes.CALL_FUNCTION, 1)
class ConstPush(OpcodeInstruction, ABC):
"""
Base class for instructions pushing pre-defined objects
"""
PUSHES = None
PUSH_TYPE = None
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(cls.PUSHES)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.push(cls.PUSH_TYPE)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(cls.PUSHES))
@AbstractBytecodeContainer.register_instruction
class AConstNull(ConstPush):
OPCODES = {0x01}
PUSH_TYPE = "null"
@AbstractBytecodeContainer.register_instruction
class IConstM1(ConstPush):
OPCODES = {0x02}
PUSHES = -1
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class IConst0(ConstPush):
OPCODES = {0x03}
PUSHES = 0
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class LConst0(ConstPush):
OPCODES = {0x09}
PUSHES = 0
PUSH_TYPE = "j"
@AbstractBytecodeContainer.register_instruction
class DConst0(ConstPush):
OPCODES = {0x0E}
PUSHES = 0
PUSH_TYPE = "d"
@AbstractBytecodeContainer.register_instruction
class IConst1(ConstPush):
OPCODES = {0x04}
PUSHES = 1
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class DConst1(ConstPush):
OPCODES = {0x0F}
PUSHES = 1
PUSH_TYPE = "d"
@AbstractBytecodeContainer.register_instruction
class IConst2(ConstPush):
OPCODES = {0x05}
PUSHES = 2
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class IConst3(ConstPush):
OPCODES = {0x06}
PUSHES = 3
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class IConst4(ConstPush):
OPCODES = {0x07}
PUSHES = 4
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class IConst5(ConstPush):
OPCODES = {0x08}
PUSHES = 5
PUSH_TYPE = "i"
@AbstractBytecodeContainer.register_instruction
class LConst1(ConstPush):
OPCODES = {0x0A}
PUSHES = 1
PUSH_TYPE = "j"
@AbstractBytecodeContainer.register_instruction
class FConst0(ConstPush):
OPCODES = {0x0B}
PUSHES = 0.0
PUSH_TYPE = "f"
@AbstractBytecodeContainer.register_instruction
class FConst1(ConstPush):
OPCODES = {0x0C}
PUSHES = 1.0
PUSH_TYPE = "f"
@AbstractBytecodeContainer.register_instruction
class FConst2(ConstPush):
OPCODES = {0x0D}
PUSHES = 2.0
PUSH_TYPE = "f"
@AbstractBytecodeContainer.register_instruction
class BiPush(OpcodeInstruction):
OPCODES = {0x10}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U1_S.unpack(data[index: index + 1])[0], 2
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(data)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.push("B")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(bytes([prepared_data])))
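    # Hedged note: bipush sign-extends its single operand byte, so jvm.util.U1_S
    # presumably matches struct.Struct(">b"). For example:
    #   struct.unpack(">b", bytes([0xFF]))[0]  # -> -1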
@AbstractBytecodeContainer.register_instruction
class SiPush(OpcodeInstruction):
OPCODES = {0x11}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U2_S.unpack(data[index: index + 2])[0], 3
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(data)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push("S")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(prepared_data))
@AbstractBytecodeContainer.register_instruction
class LDC(OpcodeInstruction):
OPCODES = {0x12}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return data[index], 2
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(
await jvm.util.decode_cp_constant(
stack.method.class_file.cp[data - 1],
version=stack.method.class_file.internal_version,
vm=stack.method.get_parent_class().vm,
)
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push(None) # todo: add type
@classmethod
async def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(await jvm.util.decode_cp_constant(
container.method.class_file.cp[prepared_data - 1],
version=container.method.class_file.internal_version,
vm=container.method.get_parent_class().vm,
)))
@AbstractBytecodeContainer.register_instruction
class LDC_W(LDC):
OPCODES = {0x13, 0x14}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U2.unpack(data[index: index + 2])[0], 3
@AbstractBytecodeContainer.register_instruction
class ArrayLoad(OpcodeInstruction):
OPCODES = {0x32, 0x2E, 0x33, 0x31}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
index = stack.pop()
array = stack.pop()
if index is None:
raise StackCollectingException("NullPointerException: index is null")
if array is None:
raise StackCollectingException("NullPointerException: array is null")
stack.push(array[index])
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop_expect_type("i", "j")
stack.pop()
stack.push(None) # todo: add type here
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_SUBSCR)
@AbstractBytecodeContainer.register_instruction
class ArrayStore(OpcodeInstruction):
OPCODES = {0x53, 0x4F, 0x50, 0x54, 0x52, 0x51, 0x55}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
value = stack.pop()
index = stack.pop()
array = stack.pop()
if index is None:
raise StackCollectingException("NullPointerException: index is null")
if array is None:
raise StackCollectingException("NullPointerException: array is null")
if index < 0:
raise StackCollectingException(f"Array index out of range: {index} < 0")
if index >= len(array):
raise StackCollectingException(f"Array index out of range: {index} >= {len(array)}")
array[index] = value
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
stack.pop_expect_type("i", "j")
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.ROT_THREE)
builder.add_instruction(PyOpcodes.STORE_SUBSCR)
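    # Why ROT_THREE + STORE_SUBSCR works: the JVM stack holds
    # [array, index, value] (value on top); ROT_THREE rotates it to
    # [value, array, index], which is exactly what CPython's STORE_SUBSCR
    # (TOS1[TOS] = TOS2) expects.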
@AbstractBytecodeContainer.register_instruction
class Load(OpcodeInstruction):
OPCODES = {0x19, 0x15, 0x18, 0x17, 0x16}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U1.unpack(data[index: index + 1])[0], 2
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.local_vars[data])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if prepared_data >= container.code.max_locals:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: {prepared_data} does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.push(None)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, prepared_data)
@AbstractBytecodeContainer.register_instruction
class Load0(OpcodeInstruction):
OPCODES = {0x2A, 0x1A, 0x22, 0x26, 0x1E}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.local_vars[0])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 0:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 0 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push(None)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, 0)
@AbstractBytecodeContainer.register_instruction
class Load1(OpcodeInstruction):
OPCODES = {0x2B, 0x1B, 0x23, 0x27, 0x1F}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.local_vars[1])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 1:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 1 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push(None)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, 1)
@AbstractBytecodeContainer.register_instruction
class Load2(OpcodeInstruction):
OPCODES = {0x2C, 0x1C, 0x24, 0x28, 0x20}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.local_vars[2])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 2:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 2 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push(None)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, 2)
@AbstractBytecodeContainer.register_instruction
class Load3(OpcodeInstruction):
OPCODES = {0x2D, 0x1D, 0x25, 0x29, 0x21}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.local_vars[3])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 3:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 3 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.push(None)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, 3)
@AbstractBytecodeContainer.register_instruction
class Store(OpcodeInstruction):
OPCODES = {0x3A, 0x36, 0x39, 0x38, 0x37}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U1.unpack(data[index: index + 1])[0], 2
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.local_vars[data] = stack.pop()
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if prepared_data >= container.code.max_locals:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: {prepared_data} does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.STORE_FAST, prepared_data)
@AbstractBytecodeContainer.register_instruction
class Store0(OpcodeInstruction):
OPCODES = {0x4B, 0x3B, 0x47, 0x43, 0x3F}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.local_vars[0] = stack.pop()
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 0:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 0 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.STORE_FAST, 0)
@AbstractBytecodeContainer.register_instruction
class Store1(OpcodeInstruction):
OPCODES = {0x4C, 0x3C, 0x48, 0x44, 0x40}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.local_vars[1] = stack.pop()
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 1:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 1 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.STORE_FAST, 1)
@AbstractBytecodeContainer.register_instruction
class Store2(OpcodeInstruction):
OPCODES = {0x4D, 0x3D, 0x49, 0x45, 0x41}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.local_vars[2] = stack.pop()
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 2:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 2 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.STORE_FAST, 2)
@AbstractBytecodeContainer.register_instruction
class Store3(OpcodeInstruction):
OPCODES = {0x4E, 0x3E, 0x4A, 0x46, 0x42}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.local_vars[3] = stack.pop()
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
if container.code.max_locals <= 3:
raise StackCollectingException(
f"LocalVariableIndexOutOfBounds: 3 does not fit into {container.code.max_locals}"
)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.STORE_FAST, 3)
@AbstractBytecodeContainer.register_instruction
class POP(OpcodeInstruction):
OPCODES = {0x57}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.pop()
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.POP_TOP)
@AbstractBytecodeContainer.register_instruction
class POP2(OpcodeInstruction):
OPCODES = {0x58}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
# todo: check computation type
stack.pop()
stack.pop()
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer,
stack: AbstractStack):
stack.pop()
stack.pop()
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.POP_TOP)
builder.add_instruction(PyOpcodes.POP_TOP)
@AbstractBytecodeContainer.register_instruction
class DUP(OpcodeInstruction):
OPCODES = {0x59}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
v = stack.pop()
stack.push(v)
stack.push(v)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
t = stack.pop()
stack.push(t)
stack.push(t)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.DUP_TOP)
@AbstractBytecodeContainer.register_instruction
class DUP2(OpcodeInstruction):
OPCODES = {0x5C}
# todo: check for double & long!
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
v1 = stack.pop()
v2 = stack.pop()
stack.push(v2)
stack.push(v1)
stack.push(v2)
stack.push(v1)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
v1 = stack.pop()
v2 = stack.pop()
stack.push(v2)
stack.push(v1)
stack.push(v2)
stack.push(v1)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
        builder.add_instruction(PyOpcodes.DUP_TOP_TWO)  # duplicate the top pair, matching this invoke()'s two-value dup2
@AbstractBytecodeContainer.register_instruction
class DUP_X1(OpcodeInstruction):
OPCODES = {0x5A}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
a, b = stack.pop(), stack.pop()
stack.push(a)
stack.push(b)
stack.push(a)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a, b = stack.pop(), stack.pop()
stack.push(a)
stack.push(b)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.DUP_TOP)
builder.add_instruction(PyOpcodes.ROT_THREE)
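    # dup_x1 turns [.., value2, value1] into [.., value1, value2, value1]:
    # DUP_TOP copies the top, then ROT_THREE sinks that copy two slots down.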
@AbstractBytecodeContainer.register_instruction
class ADD(OpcodeInstruction):
OPCODES = {0x60, 0x63, 0x62}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
        stack.push(a + b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_ADD)
@AbstractBytecodeContainer.register_instruction
class SUB(OpcodeInstruction):
OPCODES = {0x66, 0x64, 0x67, 0x65}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
        # sub computes value1 - value2; the first pop yields the subtrahend
        stack.push(a - b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_SUBTRACT)
@AbstractBytecodeContainer.register_instruction
class IDIV(OpcodeInstruction):
OPCODES = {0x6C}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
        # JVM idiv truncates toward zero, whereas Python's // floors
        q = abs(a) // abs(b)
        stack.push(-q if (a < 0) != (b < 0) else q)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_FLOOR_DIVIDE)
@AbstractBytecodeContainer.register_instruction
class FDIV(OpcodeInstruction):
OPCODES = {0x6E, 0x6F}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
stack.push(a / b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_TRUE_DIVIDE)
@AbstractBytecodeContainer.register_instruction
class Rem(OpcodeInstruction):
OPCODES = {0x70, 0x71}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
        # truncated remainder with the sign of the dividend, per JVM irem/lrem;
        # int(a - (a / b) * b) would collapse to ~0 through float arithmetic
        q = abs(a) // abs(b)
        stack.push(a - (q if (a < 0) == (b < 0) else -q) * b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@AbstractBytecodeContainer.register_instruction
class SHL(OpcodeInstruction):
OPCODES = {0x78}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
stack.push(a << b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_LSHIFT)
@AbstractBytecodeContainer.register_instruction
class SHR(OpcodeInstruction):
OPCODES = {0x7A}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
stack.push(a >> b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_RSHIFT)
@AbstractBytecodeContainer.register_instruction
class AND(OpcodeInstruction):
OPCODES = {0x7E}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
stack.push(a & b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_AND)
@AbstractBytecodeContainer.register_instruction
class OR(OpcodeInstruction):
OPCODES = {0x80}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
stack.push(a | b)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push(a)
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.BINARY_OR)
@AbstractBytecodeContainer.register_instruction
class IINC(OpcodeInstruction):
OPCODES = {0x84}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Tuple[int, int], int]:
return (
data[index],
jvm.util.U1_S.unpack(data[index + 1: index + 2])[0],
), 3
@classmethod
async def invoke(cls, data: typing.Tuple[int, int], stack: AbstractStack):
stack.local_vars[data[0]] += data[1]
@classmethod
def validate(cls, command_index, prepared_data: typing.Tuple[int, int], container: AbstractBytecodeContainer):
if prepared_data[0] >= container.code.max_locals:
raise StackCollectingException(f"local var index {prepared_data[0]} out of range")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Tuple[int, int],
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.LOAD_FAST, prepared_data[0])
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(prepared_data[1]))
builder.add_instruction(PyOpcodes.INPLACE_ADD)
builder.add_instruction(PyOpcodes.STORE_FAST, prepared_data[0])
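    # Operand layout: one unsigned local-variable index byte followed by one
    # signed increment byte (assuming jvm.util.U1_S ~ struct.Struct(">b")),
    # e.g. bytes([0x04, 0xFF]) decodes to (4, -1), i.e. `local[4] += -1`;
    # the emitted CPython sequence is the straightforward load/add/store spelling.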
@AbstractBytecodeContainer.register_instruction
class CompareTwo(OpcodeInstruction):
OPCODES = {0x94, 0x95, 0x96, 0x97, 0x98}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
b, a = stack.pop(), stack.pop()
if a == b:
stack.push(0)
elif a > b:
stack.push(1)
else:
stack.push(-1)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
a = stack.pop()
stack.pop_expect_type(a)
stack.push("i")
@classmethod
def prepare_python_bytecode_instructions(cls, command_index, prepared_data: typing.Any,
container: AbstractBytecodeContainer, builder: PyBytecodeBuilder):
builder.add_instruction(PyOpcodes.DUP_TOP_TWO)
builder.add_instruction(PyOpcodes.COMPARE_OP, builder.add_comparator("=="))
builder.add_instruction(PyOpcodes.POP_JUMP_IF_FALSE, builder.real_from_offset(6))
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(0))
builder.add_instruction(PyOpcodes.JUMP_ABSOLUTE, builder.real_from_offset(16))
builder.add_instruction(PyOpcodes.DUP_TOP_TWO)
builder.add_instruction(PyOpcodes.COMPARE_OP, builder.add_comparator(">"))
builder.add_instruction(PyOpcodes.POP_JUMP_IF_FALSE, builder.real_from_offset(6))
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(1))
builder.add_instruction(PyOpcodes.JUMP_ABSOLUTE, builder.real_from_offset(4))
builder.add_instruction(PyOpcodes.LOAD_CONST, builder.add_const(-1))
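    # The emitted sequence is a branchy CPython spelling of the Java *cmp
    # contract for the two operands: roughly `0 if a == b else (1 if a > b else -1)`.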
class CompareHelper(OpcodeInstruction, ABC):
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U2_S.unpack(data[index: index + 2])[0], 3
@classmethod
def code_reference_changer(
cls,
container: AbstractBytecodeContainer,
prepared_data: int,
instruction_index: int,
old_index: int,
checker: typing.Callable[[int], int],
):
return checker(prepared_data + old_index) - instruction_index
@classmethod
def validate(cls, command_index: int, prepared_data: int, container: AbstractBytecodeContainer):
if command_index + prepared_data < 0:
raise StackCollectingException(f"opcode index {command_index + prepared_data} is < 0 (OutOfBoundError)")
elif command_index + prepared_data >= len(container.decoded_code):
raise StackCollectingException(f"opcode index {command_index + prepared_data} is >= {len(container.decoded_code)} (OutOfBoundError)")
elif container.decoded_code[command_index + prepared_data] is None:
raise StackCollectingException(f"opcode index {command_index+prepared_data} is pointing into opcode BODY, not HEAD (bound 0 <= {command_index+prepared_data} < {len(container.decoded_code)})")
class SingleCompare(CompareHelper, ABC):
@classmethod
def validate_stack(cls, command_index, prepared_data: int, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.branch(prepared_data)
class DoubleCompare(CompareHelper, ABC):
@classmethod
def validate_stack(cls, command_index, prepared_data: int, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.pop()
stack.branch(prepared_data)
@AbstractBytecodeContainer.register_instruction
class IfLT(DoubleCompare):
OPCODES = {0x97}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() > stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfGT(DoubleCompare):
OPCODES = {0xA3}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() < stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfEq0(SingleCompare):
OPCODES = {0x99}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() == 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfNEq0(SingleCompare):
OPCODES = {0x9A}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() != 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfLT0(SingleCompare):
OPCODES = {0x9B}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() < 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfGE0(SingleCompare):
OPCODES = {0x9C}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() >= 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfGT0(SingleCompare):
OPCODES = {0x9D}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() > 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfLE0(SingleCompare):
OPCODES = {0x9E}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() <= 0:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfEq(DoubleCompare):
OPCODES = {0x9F, 0xA5}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() == stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfNE(DoubleCompare):
OPCODES = {0xA0, 0xA6}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() != stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfLt(DoubleCompare):
OPCODES = {0xA1}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() > stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfGe(DoubleCompare):
OPCODES = {0xA2}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() <= stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfLe(DoubleCompare):
OPCODES = {0xA4}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack) -> bool:
if stack.pop() >= stack.pop():
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class Goto(CompareHelper):
OPCODES = {0xA7}
@classmethod
async def invoke(cls, data: int, stack: AbstractStack):
stack.cp += data
return True
@classmethod
def validate_stack(cls, command_index, prepared_data: int, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.cp += prepared_data
@AbstractBytecodeContainer.register_instruction
class AReturn(OpcodeInstruction):
OPCODES = {0xB0, 0xAC, 0xAE}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.end(stack.pop())
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.cp = -1
@AbstractBytecodeContainer.register_instruction
class Return(OpcodeInstruction):
OPCODES = {0xB1}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.end()
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.cp = -1
@AbstractBytecodeContainer.register_instruction
class GetStatic(CPLinkedInstruction):
OPCODES = {0xB2}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
d, i = super().decode(data, index, class_file)
return (d[1][1][1], d[2][1][1], d[2][2][1]), i
@classmethod
async def invoke(cls, data: typing.Tuple[str, str, str], stack: AbstractStack):
cls_name, name, T = data
java_class = await stack.vm.get_class(
cls_name, version=stack.method.class_file.internal_version
)
stack.push(await java_class.get_static_attribute(name, expected_type=T))
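    # After decode, the prepared data is a (class, field, descriptor) triple,
    # e.g. ("java/lang/System", "out", "Ljava/io/PrintStream;") for a
    # `getstatic System.out` (illustrative example, not from this codebase).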
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Tuple[str, str, str], container: AbstractBytecodeContainer, stack: AbstractStack):
stack.push(prepared_data[2])
@AbstractBytecodeContainer.register_instruction
class PutStatic(CPLinkedInstruction):
OPCODES = {0xB3}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
d, i = super().decode(data, index, class_file)
return (d[1][1][1], d[2][1][1]), i
@classmethod
async def invoke(cls, data: typing.Tuple[str, str], stack: AbstractStack):
cls_name, name = data
java_class = await stack.vm.get_class(
cls_name, version=stack.method.class_file.internal_version
)
value = stack.pop()
java_class.set_static_attribute(name, value)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
@AbstractBytecodeContainer.register_instruction
class GetField(CPLinkedInstruction):
OPCODES = {0xB4}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
d, i = super().decode(data, index, class_file)
return d[2][1][1], i
@classmethod
async def invoke(cls, name: str, stack: AbstractStack):
obj = stack.pop()
if obj is None:
raise StackCollectingException(f"NullPointerException: object is None; Cannot get attribute '{name}'")
try:
stack.push(obj.get_field(name))
except (KeyError, AttributeError):
if hasattr(obj, "get_class") and isinstance(await obj.get_class(), jvm.Java.JavaBytecodeClass):
raise StackCollectingException(
f"AttributeError: object {obj} (type {type(obj)}) has no attribute '{name}'"
) from None
try:
stack.push(getattr(obj, name))
except (KeyError, AttributeError):
raise StackCollectingException(
f"AttributeError: object {obj} (type {type(obj)}) has no attribute '{name}'"
) from None
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push(None)
@AbstractBytecodeContainer.register_instruction
class PutField(CPLinkedInstruction):
OPCODES = {0xB5}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Tuple[str, str], int]:
d, i = super().decode(data, index, class_file)
return (d[2][1][1], d[1][1][1]), i
@classmethod
async def invoke(cls, d, stack: AbstractStack):
name, target_type = d
value = stack.pop()
obj = stack.pop()
if obj is None:
raise StackCollectingException(f"NullPointerException: obj is null; Cannot set field '{name}' to {value}").add_trace(target_type)
if not hasattr(obj, "set_field"):
setattr(obj, name, value)
else:
obj.set_field(name, value)
@classmethod
def validate_stack(cls, command_index, name: str, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.pop()
@AbstractBytecodeContainer.register_instruction
class InvokeVirtual(CPLinkedInstruction):
OPCODES = {0xB6}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
# todo: lookup signature and insert here
args = len(tuple(AbstractRuntime.get_arg_parts_of(prepared_data[2][2][1])))
[stack.pop() for _ in range(args + 1)]
stack.push(None)
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
# print(data)
method = await stack.vm.get_method_of_nat(
data, version=stack.method.class_file.internal_version
)
args = stack.runtime.parse_args_from_stack(method, stack, False)
obj = args[0]
if obj is not None:
if not hasattr(obj, "get_class"):
if hasattr(method, "access") and method.access & 0x0400:
raise StackCollectingException(
"invalid abstract not-implemented non-reference-able object"
+ str(obj)
)
else:
try:
cls = await obj.get_class()
except TypeError:
pass
else:
method_before = method
method = await cls.get_method(
method.name if hasattr(method, "name") else method.native_name,
method.signature
if hasattr(method, "signature")
else method.native_signature,
)
                    # dynamic methods need to be skipped here...
                    # an abstract method cannot be used as the outer method, since a dynamic one is still better than an abstract one
                    # todo: add some better indicator here
if hasattr(method, "__name__") and method.__name__ == "dynamic" and (not method_before.access & 0x0400 if hasattr(method_before, "access") else True):
method = method_before
stack.push(await stack.runtime.run_method(method, *args, stack=stack))
@AbstractBytecodeContainer.register_instruction
class InvokeSpecial(CPLinkedInstruction):
OPCODES = {0xB7}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
arg_types = tuple(AbstractRuntime.get_arg_parts_of(prepared_data[2][2][1]))
args = len(arg_types)
[stack.pop()] + [stack.pop_expect_type(arg_types[i]) for i in range(args)]
if prepared_data[2][1][1] not in (
"<init>",
"<clinit>",
):
stack.push(None)
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
method = await stack.vm.get_method_of_nat(
data, version=stack.method.class_file.internal_version
)
result = await stack.runtime.run_method(
method, *stack.runtime.parse_args_from_stack(method, stack, False), stack=stack,
)
method_name = (method.name if hasattr(method, "name") else method.native_name)
if method_name not in (
"<init>",
"<clinit>",
):
stack.push(result)
@AbstractBytecodeContainer.register_instruction
class InvokeStatic(CPLinkedInstruction):
OPCODES = {0xB8}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
from jvm.Runtime import Runtime
args = tuple(Runtime.get_arg_parts_of(prepared_data[2][2][1]))
[stack.pop_expect_type(arg) for arg in args]
stack.push(prepared_data[2][2][1].split(")")[-1])
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
method = await stack.vm.get_method_of_nat(
data, version=stack.method.class_file.internal_version
)
stack.push(
await stack.runtime.run_method(
method, *stack.runtime.parse_args_from_stack(method, stack, static=True), stack=stack,
)
)
@AbstractBytecodeContainer.register_instruction
class InvokeInterface(CPLinkedInstruction):
OPCODES = {0xB9}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.cp = -1 # todo: implement
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return (
class_file.cp[
jvm.util.U2.unpack(data[index: index + 2])[0] - 1
],
data[index + 2],
), 5
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
method = await stack.vm.get_method_of_nat(
data[0], version=stack.method.class_file.internal_version
)
args = stack.runtime.parse_args_from_stack(method, stack, False)
obj = args[0]
try:
method = await (await obj.get_class()).get_method(
method.name if hasattr(method, "name") else method.native_name,
method.signature
if hasattr(method, "signature")
else method.native_signature,
)
except StackCollectingException as e:
e.add_trace(f"during resolving interface method for parent {method}")
raise
except AttributeError:
pass
if hasattr(method, "access") and method.access & 0x0400:
cls_file = method.class_file
# todo: move this check into method parsing
if "AbstractRuntimeVisibleAnnotations" in cls_file.attributes.attributes and any(
any(e[0] == "java/lang/FunctionalInterface" for e in attr.annotations)
for attr in cls_file.attributes.attributes["AbstractRuntimeVisibleAnnotations"]
):
args = list(args)
method = args.pop(0)
try:
stack.push(await stack.runtime.run_method(method, *args, stack=stack))
except StackCollectingException as e:
e.add_trace(f"during invoking interface {method} with {args}")
if hasattr(method, "class_file"):
e.add_trace(f"in class {method.class_file}")
raise
@AbstractBytecodeContainer.register_instruction
class InvokeDynamic(CPLinkedInstruction):
"""
InvokeDynamic
    Resolves a method (mostly lambdas) onto the stack.
    Pops arguments from the stack in case they are needed.
todo: cache method lookup
"""
OPCODES = {0xBA}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.cp = -1 # todo: implement
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
cp = class_file.cp[
jvm.util.U2.unpack(data[index: index + 2])[0] - 1
]
boostrap = class_file.attributes["BootstrapMethods"][0].entries[cp[1]]
# The type side for the execution
side = boostrap[0][2][1][1][1]
return (
(cp, side, boostrap),
5,
)
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
if isinstance(data, typing.Awaitable):
raise StackCollectingException(str(data))
if not isinstance(data, tuple):
raise StackCollectingException(
f"invalid InvokeDynamic target: target {data} is invalid"
)
if len(data) != 2:
raise StackCollectingException(
f"invalid InvokeDynamic target: target {data[1]} not found!"
)
else:
method, data = data
# m = await stack.vm.get_method_of_nat(data[0])
call_site = method((data[1][2], data[0], data[1]), data[1][0][2][1][1], data[1][2], stack=stack)
stack.push(call_site)
@classmethod
async def optimiser_iteration(
cls,
container: AbstractBytecodeContainer,
prepared_data: typing.Tuple[typing.Any, str],
instruction_index: int,
):
# todo: add a map here
if prepared_data[1] == "java/lang/invoke/LambdaMetafactory":
container.decoded_code[instruction_index] = (
LambdaInvokeDynamic,
prepared_data[0],
5,
)
else:
vm = container.code.class_file.vm
method = await vm.get_method_of_nat(prepared_data[2][0][2])
return method, (container.code.class_file, prepared_data)
@AbstractBytecodeContainer.register_instruction
class LambdaInvokeDynamic(BaseInstruction):
"""
Class representing the factory system for a lambda
"""
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.cp = -1 # todo: implement
class LambdaInvokeDynamicWrapper(jvm.api.AbstractMethod):
def __init__(
self, method, name: str, signature: str, extra_args: typing.Iterable
):
super().__init__()
self.method = method
self.name = name
self.signature = signature
self.extra_args = extra_args
self.access = method.access # access stays the same
def __call__(self, *args):
raise RuntimeError
async def invoke(self, args, stack=None):
return await self.method.invoke(tuple(self.extra_args)+tuple(args), stack=stack)
def __repr__(self):
return f"InvokeDynamic::CallSite(wrapping={self.method},add_args={self.extra_args})"
async def get_class(self):
return await self.method.class_file.vm.get_class("java/lang/reflect/Method")
def get_parent_class(self):
return self.method.get_parent_class()
class LambdaNewInvokeDynamicWrapper(LambdaInvokeDynamicWrapper):
def __call__(self, *args):
raise RuntimeError
async def invoke(self, args, stack=None):
instance = await self.method.class_file.create_instance()
await self.method.invoke((instance,)+tuple(self.extra_args)+tuple(args))
return instance
def __repr__(self):
return f"InvokeDynamic::CallSite::new(wrapping={self.method},add_args={self.extra_args})"
class LambdaAbstractInvokeDynamicWrapper(LambdaInvokeDynamicWrapper):
async def __call__(self, *args):
method = (
await (await args[0].get_class()).get_method(self.method.name, self.method.signature)
)
return await method(*self.extra_args, *args)
def __repr__(self):
return f"InvokeDynamic::CallSite::around_abstract(wrapping={self.method},add_args={self.extra_args})"
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
if callable(data):
stack.push(data)
return
boostrap = stack.method.class_file.attributes["BootstrapMethods"][0].entries[
data[1]
]
nat = data[2]
target_nat = boostrap[1][1][2][2]
# print("invokedynamic debug", nat, "\n", boostrap)
try:
cls_file = await stack.vm.get_class(
boostrap[1][1][2][1][1][1],
version=stack.method.class_file.internal_version,
)
method = await cls_file.get_method(target_nat[1][1], target_nat[2][1])
outer_signature = boostrap[1][0][1][1]
extra_args = []
inner_args = len(
list(
stack.runtime.get_arg_parts_of(
method.signature
if hasattr(method, "signature")
else method.native_signature
)
)
)
outer_args = len(list(stack.runtime.get_arg_parts_of(outer_signature)))
            # do we have args to supply from the current runtime?
if inner_args > outer_args:
try:
extra_args += [stack.pop() for _ in range(inner_args - outer_args)]
except StackCollectingException as e:
e.add_trace(f"during invoke-dynamic arg pop towards method {method}")
raise
if not hasattr(method, "name") and not hasattr(method, "native_name"):
raise StackCollectingException(
f"InvokeDynamic target method is no real method: {method}, and as such cannot be InvokeDynamic-linked"
)
method_name = method.name if hasattr(method, "name") else method.native_name
# init methods are special, we need to wrap it into a special object for object creation
if method_name == "<init>":
# print("InvokeDynamic short-path <init>", method, outer_signature, extra_args)
method = cls.LambdaNewInvokeDynamicWrapper(
method, method_name, outer_signature, tuple(reversed(extra_args))
)
stack.push(method)
return
# print("long InvokeDynamic", method, outer_signature)
if not hasattr(method, "name") and not hasattr(method, "native_name"):
raise StackCollectingException(
f"InvokeDynamic target method is no real method: {method}, and as such cannot be InvokeDynamic-linked"
)
if outer_args > inner_args:
if method.access & 0x0400:
method = cls.LambdaInvokeDynamicWrapper(
cls.LambdaAbstractInvokeDynamicWrapper(
method,
method.name
if hasattr(method, "name")
else method.native_name,
outer_signature,
[],
),
method.name,
outer_signature,
tuple(reversed(extra_args)),
)
else:
method = cls.LambdaInvokeDynamicWrapper(
method,
method.name if hasattr(method, "name") else method.native_name,
outer_signature,
tuple(reversed(extra_args)),
)
method.access ^= 0x0008 # if we are dynamic but we expose object, we are no longer dynamic!
stack.push(method)
return
if not hasattr(method, "access"):
raise StackCollectingException(method)
            # for non-static methods, pop the bound object from the stack;
            # it is exposed to the wrapped method as the first parameter
if not method.access & 0x0008:
# print("dynamic InvokeDynamic")
extra_args.append(stack.pop())
if method.access & 0x0400: # is the method abstract
# print("lambdaAroundAbstract", len(extra_args), extra_args)
# print("abstract", method)
method = cls.LambdaAbstractInvokeDynamicWrapper(
method,
method_name,
outer_signature,
tuple(reversed(extra_args)),
)
stack.push(method)
return
            # if we have any prepared arguments, wrap the method in another
            # structure that adds them before invocation and updates the
            # outer signature to match
if len(extra_args) > 0 or outer_args > inner_args:
# print("additional", len(extra_args), extra_args)
# print("exposed signature", outer_signature)
method = cls.LambdaInvokeDynamicWrapper(
method, method_name, outer_signature, tuple(reversed(extra_args))
)
stack.push(method)
return
except StackCollectingException as e:
e.add_trace("during resolving InvokeDynamic")
e.add_trace(str(boostrap[0]))
e.add_trace(str(boostrap[1]))
e.add_trace(str(nat))
raise
except:
e = StackCollectingException("during resolving InvokeDynamic")
e.add_trace(str(boostrap[0]))
e.add_trace(str(boostrap[1]))
e.add_trace(str(nat))
raise e
stack.push(method)
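    # Illustrative summary (assumption about typical javac output): for a Java
    # lambda like `x -> x + captured`, the implementation method takes two
    # parameters while the functional interface exposes one, so `captured` is
    # popped here into extra_args and replayed by the wrapper on every call.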
@AbstractBytecodeContainer.register_instruction
class New(CPLinkedInstruction):
OPCODES = {0xBB}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.push(None)
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
c = await stack.vm.get_class(
data[1][1], version=stack.method.class_file.internal_version
)
stack.push(await c.create_instance())
@AbstractBytecodeContainer.register_instruction
class NewArray(CPLinkedInstruction):
OPCODES = {0xBC}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop_expect_type("i", "j")
stack.push(None)
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return jvm.util.U1.unpack(data[index: index + 1])[0], 2
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push([None] * stack.pop())
@AbstractBytecodeContainer.register_instruction
class ANewArray(CPLinkedInstruction):
OPCODES = {0xBD}
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop_expect_type("i", "j")
stack.push(None)
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push([None] * stack.pop())
@AbstractBytecodeContainer.register_instruction
class ArrayLength(OpcodeInstruction):
"""
Resolves the length of an array
In some contexts, this result is constant in each call
Can we detect this?
"""
OPCODES = {0xBE}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
a = stack.pop()
if a is None:
raise StackCollectingException("array is None")
stack.push(len(a))
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("i")
@AbstractBytecodeContainer.register_instruction
class AThrow(OpcodeInstruction):
"""
Throws an exception
In some cases, this raise can be moved up some instructions when no side effect is detected
"""
OPCODES = {0xBF}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
exception = stack.pop()
stack.stack.clear()
stack.push(exception)
raise StackCollectingException("User raised exception: "+str(exception), base=exception).add_trace(exception)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.cp = -1
@AbstractBytecodeContainer.register_instruction
class CheckCast(CPLinkedInstruction):
OPCODES = {0xC0}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
pass # todo: implement
@AbstractBytecodeContainer.register_instruction
class InstanceOf(CPLinkedInstruction):
OPCODES = {0xC1}
@classmethod
async def invoke(cls, data: typing.Any, stack: AbstractStack):
obj = stack.pop()
if not hasattr(obj, "get_class"):
# todo: we need a fix here!
stack.push(0)
else:
stack.push(int(obj is None or (await obj.get_class()).is_subclass_of(data[1][1])))
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
stack.push("Z")
@AbstractBytecodeContainer.register_instruction
class MultiANewArray(OpcodeInstruction):
OPCODES = {0xC5}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
return (data[index:index+2], data[index+2]), 4
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
dimensions = [stack.pop() for _ in range(data[1])]
data = [None] * dimensions.pop(0)
for e in dimensions:
data = [copy.deepcopy(data) for _ in range(e)]
stack.push(data)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
for _ in range(prepared_data[1]):
stack.pop_expect_type("i", "j")
stack.push(None)
@AbstractBytecodeContainer.register_instruction
class IfNull(SingleCompare):
OPCODES = {0xC6}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack) -> bool:
if stack.pop() is None:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class IfNonNull(SingleCompare):
OPCODES = {0xC7}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack) -> bool:
if stack.pop() is not None:
stack.cp += data
return True
@AbstractBytecodeContainer.register_instruction
class Mul(OpcodeInstruction):
OPCODES = {0x68, 0x6B, 0x6A}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(stack.pop() * stack.pop())
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
t = stack.pop()
stack.pop_expect_type(t)
stack.push(t)
@AbstractBytecodeContainer.register_instruction
class NEG(OpcodeInstruction):
OPCODES = {0x76, 0x77}
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack):
stack.push(-stack.pop())
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
t = stack.pop()
stack.push(t)
@AbstractBytecodeContainer.register_instruction
class TableSwitch(OpcodeInstruction):
"""
TableSwitch instruction
Similar to LookupSwitch, but in theory faster
"""
OPCODES = {0xAA}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
index2offset = array.ArrayType("l")
initial = index
while index % 4 != 0:
index += 1
default = jvm.util.pop_u4_s(data[index:])
index += 4
low = jvm.util.pop_u4_s(data[index:])
index += 4
high = jvm.util.pop_u4_s(data[index:])
index += 4
offsets = [
jvm.util.pop_u4_s(data[index + i * 4:])
for i in range(high - low + 1)
]
index2offset.extend(offsets)
index += (high - low + 1) * 4
return (default, low, high, index2offset), index - initial + 1
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack) -> bool:
index = stack.pop()
if index < data[1] or index > data[2]:
stack.cp += data[0]
else:
stack.cp += data[3][index - data[1]]
return True
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
for offset in prepared_data[3]:
stack.branch(offset)
stack.cp += prepared_data[0]
@classmethod
def code_reference_changer(
cls,
container: AbstractBytecodeContainer,
prepared_data: typing.Any,
instruction_index: int,
old_index: int,
checker: typing.Callable[[int], int],
):
default, low, high, offsets = prepared_data
return checker(default + old_index) - instruction_index, low, high, array.ArrayType("l", [checker(e + old_index) - instruction_index for e in offsets])
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
for offset in prepared_data[3]:
CompareHelper.validate(command_index, offset, container)
CompareHelper.validate(command_index, prepared_data[0], container)
@AbstractBytecodeContainer.register_instruction
class LookupSwitch(OpcodeInstruction):
"""
LookupSwitch Instruction
Specified by https://docs.oracle.com/javase/specs/jvms/se16/html/jvms-6.htm
Structure
    0xAB [opcode byte]
0-3 bytes padding to make next byte align to 4 byte blocks
The next 4 bytes are the default offset, the next 4 the case counts.
Followed by the respective count of 4 bytes case key and 4 bytes case offset.
Optimisation possibilities:
- convert into tableswitch when structure is close to it
- for enums: use tableswitch with special case attribute on the enum entries
- use simple if-elif-else structure for small examples
    - when the block jumped to is only reachable from this instruction, we can extract it into a subroutine
      implemented in python when possible
    - instead of doing simple if's in code, we can use this structure with a hash to decide between multiple parts
Implementation details
We use a while loop and pop bytes until byte alignment is reached
We use the pop_u4_s instruction for popping the 4 byte data
We store the pairs into a dict structure
    We raise a StackCollectingException when the dict construction fails; it includes the number of entries and the default offset
Safety checks
Load-time:
- all offsets must be valid
Optimisation in-place:
- jumps to head of instruction must be still valid
- subroutines must be correctly linked & returned back
Run-time:
- value must be int(-like)
Exceptions:
StackCollectingException(StackUnderflowException): when no key is on the stack
<some error during wrong offsets>
todo: somehow, this does not 100% work...
"""
OPCODES = {0xAB}
@classmethod
def decode(
cls, data: bytearray, index, class_file
) -> typing.Tuple[typing.Any, int]:
before = index
# offset binding
while index % 4 != 0:
index += 1
# the static HEAD
default = jvm.util.pop_u4_s(data[index:])
index += 4
npairs = jvm.util.pop_u4_s(data[index:])
index += 4
# And now, the key-value pairs
try:
pairs = {
jvm.util.pop_u4_s(
data[index + i * 8:]
): jvm.util.pop_u4_s(data[index + i * 8 + 4:])
for i in range(npairs)
}
index += npairs * 8
except:
raise StackCollectingException(
f"during decoding lookupswitch of {npairs} entries, defaulting to {default}"
)
return (default, pairs), index - before + 1
@classmethod
def invoke(cls, data: typing.Any, stack: AbstractStack) -> bool:
key = stack.pop()
# todo: do some clever checks here...
if key not in data[1]:
stack.cp += data[0]
else:
stack.cp += data[1][key]
return True
@classmethod
def code_reference_changer(
cls,
container: AbstractBytecodeContainer,
prepared_data: typing.Any,
instruction_index: int,
old_index: int,
checker: typing.Callable[[int], int],
):
default, pairs = prepared_data
return checker(default + old_index) - instruction_index, {e[0]: checker(e[1] + old_index) - instruction_index for e in pairs.items()}
@classmethod
def validate(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer):
for offset in prepared_data[1].values():
CompareHelper.validate(command_index, offset, container)
CompareHelper.validate(command_index, prepared_data[0], container)
@classmethod
def validate_stack(cls, command_index, prepared_data: typing.Any, container: AbstractBytecodeContainer, stack: AbstractStack):
stack.pop()
for offset in prepared_data[1].values():
stack.branch(offset)
# the default offset goes here...
stack.cp += prepared_data[0]
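# A minimal, self-contained sketch of the payload layout described in the
# docstring above; the real decoder is LookupSwitch.decode and this helper is
# purely illustrative (it is not used anywhere in this module).
import struct

def _decode_lookupswitch_payload(code: bytes, pc: int):
    index = pc + 1  # skip the 0xAB opcode byte
    while index % 4 != 0:  # 0-3 padding bytes for 4-byte alignment
        index += 1
    default, npairs = struct.unpack_from(">ii", code, index)  # two big-endian s32
    index += 8
    pairs = {}
    for _ in range(npairs):  # npairs times: 4 bytes case key, 4 bytes case offset
        key, offset = struct.unpack_from(">ii", code, index)
        pairs[key] = offset
        index += 8
    return default, pairs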
avg_line_length: 34.391886 | max_line_length: 203 | alphanum_fraction: 0.656703

hexsha: 4a182db636bd9e72499f9f6319883076d28780b7 | size: 7,681 | ext: py | lang: Python
repo_path: spvnas/core/models/semantic_kitti/spvcnn.py
repo_name: reinforcementdriving/e3d | repo_head_hexsha: 52c6d3dede0d1134a1fb5cabef4f70b123861501 | licenses: ["MIT"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 1 (2021-01-31T01:53:23.000Z to 2021-01-31T01:53:23.000Z) | max_issues_count: null | max_forks_count: null
content:
import time
from collections import OrderedDict
import torch
import torch.nn as nn
import torchsparse
import torchsparse.nn as spnn
import torchsparse.nn.functional as spf
from torchsparse.sparse_tensor import SparseTensor
from torchsparse.point_tensor import PointTensor
from torchsparse.utils.kernel_region import *
from torchsparse.utils.helpers import *
from core.models.utils import *
__all__ = ['SPVCNN']
class BasicConvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride), spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
out = self.net(x)
return out
class BasicDeconvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
stride=stride,
transpose=True), spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
return self.net(x)
class ResidualBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride), spnn.BatchNorm(outc),
spnn.ReLU(True),
spnn.Conv3d(outc,
outc,
kernel_size=ks,
dilation=dilation,
stride=1), spnn.BatchNorm(outc))
self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \
nn.Sequential(
spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride),
spnn.BatchNorm(outc)
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class SPVCNN(nn.Module):
def __init__(self, **kwargs):
super().__init__()
cr = kwargs.get('cr', 1.0)
cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
cs = [int(cr * x) for x in cs]
if 'pres' in kwargs and 'vres' in kwargs:
self.pres = kwargs['pres']
self.vres = kwargs['vres']
self.stem = nn.Sequential(
spnn.Conv3d(4, cs[0], kernel_size=3, stride=1),
spnn.BatchNorm(cs[0]), spnn.ReLU(True),
spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
spnn.BatchNorm(cs[0]), spnn.ReLU(True))
self.stage1 = nn.Sequential(
BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
)
self.stage2 = nn.Sequential(
BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1),
)
self.stage3 = nn.Sequential(
BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1),
ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1),
ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
)
self.stage4 = nn.Sequential(
BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1),
ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
)
self.up1 = nn.ModuleList([
BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[5] + cs[3], cs[5], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1),
)
])
self.up2 = nn.ModuleList([
BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[6] + cs[2], cs[6], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1),
)
])
self.up3 = nn.ModuleList([
BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[7] + cs[1], cs[7], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1),
)
])
self.up4 = nn.ModuleList([
BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[8] + cs[0], cs[8], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1),
)
])
self.classifier = nn.Sequential(nn.Linear(cs[8],
kwargs['num_classes']))
self.point_transforms = nn.ModuleList([
nn.Sequential(
nn.Linear(cs[0], cs[4]),
nn.BatchNorm1d(cs[4]),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[4], cs[6]),
nn.BatchNorm1d(cs[6]),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[6], cs[8]),
nn.BatchNorm1d(cs[8]),
nn.ReLU(True),
)
])
self.weight_initialization()
self.dropout = nn.Dropout(0.3, True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# x: SparseTensor z: PointTensor
z = PointTensor(x.F, x.C.float())
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
z1 = voxel_to_point(x4, z0)
z1.F = z1.F + self.point_transforms[0](z0.F)
y1 = point_to_voxel(x4, z1)
y1.F = self.dropout(y1.F)
y1 = self.up1[0](y1)
y1 = torchsparse.cat([y1, x3])
y1 = self.up1[1](y1)
y2 = self.up2[0](y1)
y2 = torchsparse.cat([y2, x2])
y2 = self.up2[1](y2)
z2 = voxel_to_point(y2, z1)
z2.F = z2.F + self.point_transforms[1](z1.F)
y3 = point_to_voxel(y2, z2)
y3.F = self.dropout(y3.F)
y3 = self.up3[0](y3)
y3 = torchsparse.cat([y3, x1])
y3 = self.up3[1](y3)
y4 = self.up4[0](y3)
y4 = torchsparse.cat([y4, x0])
y4 = self.up4[1](y4)
z3 = voxel_to_point(y4, z2)
z3.F = z3.F + self.point_transforms[2](z2.F)
out = self.classifier(z3.F)
return out
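# Hypothetical usage sketch: pushes a toy point cloud through the network. It
# assumes the torchsparse version imported above, whose SparseTensor
# constructor takes (feats, coords) with coords laid out as (x, y, z, batch).
if __name__ == '__main__':
    model = SPVCNN(num_classes=19, cr=1.0, pres=0.05, vres=0.05)
    feats = torch.rand(100, 4)  # the stem expects 4 input channels
    coords = torch.cat([torch.randint(0, 50, (100, 3)).float(),
                        torch.zeros(100, 1)], dim=1)  # all points in batch 0
    logits = model(SparseTensor(feats, coords))  # expected shape: [100, 19]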
avg_line_length: 32.54661 | max_line_length: 81 | alphanum_fraction: 0.494467

hexsha: 4a182f05080a197efe43b05c82aa5b9ba3500171 | size: 936 | ext: py | lang: Python
repo_path: Libraries/Python/requests_negotiate_sspi/v0.3.1/requests_negotiate_sspi/__init__.py
repo_name: davidbrownell/Common_Environment | repo_head_hexsha: 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | licenses: ["BSL-1.0"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 1 (2017-04-25T13:15:10.000Z to 2017-04-25T13:15:10.000Z) | max_issues_count: null | max_forks_count: null
content:
import requests
from .requests_negotiate_sspi import HttpNegotiateAuth
__all__ = ('HttpNegotiateAuth',)  # trailing comma: a tuple, not a bare string
HTTPResponse = requests.packages.urllib3.response.HTTPResponse
orig_HTTPResponse__init__ = HTTPResponse.__init__
def new_HTTPResponse__init__(self, *args, **kwargs):
orig_HTTPResponse__init__(self, *args, **kwargs)
try:
self.peercert = self._connection.sock.getpeercert(binary_form=True)
except AttributeError:
self.peercert = None
HTTPResponse.__init__ = new_HTTPResponse__init__
HTTPAdapter = requests.adapters.HTTPAdapter
orig_HTTPAdapter_build_response = HTTPAdapter.build_response
def new_HTTPAdapter_build_response(self, request, resp):
response = orig_HTTPAdapter_build_response(self, request, resp)
try:
response.peercert = resp.peercert
except AttributeError:
response.peercert = None
return response
HTTPAdapter.build_response = new_HTTPAdapter_build_response
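# Usage sketch: with the two patches above applied, every response gains a
# `peercert` attribute holding the server's DER-encoded certificate, or None
# when it is unavailable (e.g. plain HTTP). The URL below is a placeholder.
if __name__ == '__main__':
    response = requests.get('https://example.com')
    print(response.peercert is not None)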
avg_line_length: 36 | max_line_length: 75 | alphanum_fraction: 0.794872

hexsha: 4a18312e748b3cbe4fe2cdc10f33b39b41461acd | size: 2,648 | ext: py | lang: Python
repo_path: tools/nightly/vm/report_to_html.py
repo_name: gatehouse/cppcms | repo_head_hexsha: 61da055ffeb349b4eda14bc9ac393af9ce842364 | licenses: ["MIT"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 388 (2017-03-01T07:39:21.000Z to 2022-03-30T19:38:41.000Z)
max_issues_count: 81 (2017-03-08T20:28:00.000Z to 2022-01-23T08:19:31.000Z)
max_forks_count: 127 (2017-03-05T21:53:40.000Z to 2022-02-25T02:31:01.000Z)
content:
#!/usr/bin/env python
import re
import sys
import datetime
import glob
oses = { 'win7' : 'Windows 7', 'solaris' : 'Solaris 11', 'freebsd' : 'FreeBSD 11.1', 'localhost' : 'Linux Ubuntu 16.04' }
def get_failed_tests(tag):
f = open('logs/' + tag + '.log','r')
if not f:
return ''
res=[]
attach=False
for l in f.readlines():
if attach:
res.append(l)
if l.find('The following tests FAILED')!=-1:
attach=True
return '<br/>'.join(res)
def compiler_name(x):
return x.replace('mingw_','MinGW ').replace('gcc','GCC ').replace('clang','Clang ').replace('msvc','MSVC ').replace('std','/C++')
repo_url=sys.argv[1]
repo_rev=sys.argv[2]
r = r'(\w+)\s+-\s*(pass|fail)'  # note: currently unused in this script
print """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<title>Nightly CppCMS Builds and Tests</title>
</head>
<body>
<h1>Nightly CppCMS Builds and Tests</h1>
<style>
/* Tooltip container */
.tooltip {
position: relative;
/*display: inline-block;*/
}
/* Tooltip text */
.tooltip .tooltiptext {
visibility: hidden;
width: 500px;
background-color: yellow;
color: black;
text-align: left;
padding: 5px 5px;
border-radius: 6px;
/* Position the tooltip text - see examples below! */
position: absolute;
z-index: 1;
}
/* Show the tooltip text when you mouse over the tooltip container */
.tooltip:hover .tooltiptext {
visibility: visible;
}
</style>
"""
print datetime.datetime.now().strftime('<h2>Tested at: %Y-%m-%d %H:%M</h2>')
print "<p>%s<br/>%s</p>" % (repo_url,repo_rev)
print """
<table cellpadding="3" cellspacing="0" border="1" >
<tr><th width="20%" >Operating System</th><th width="20%" >Compiler</th><th width="20%">Platform</th><th width="20%">Status</th></tr>
"""
test_re = re.compile('logs/(([^-]+)-([^-]+)-([^-]+))-status.txt')
reports=glob.glob('logs/*-status.txt')
reports.sort()
for report in reports:
m=test_re.match(report)
tag=m.group(1)
OS = oses[m.group(2)]
Compiler = compiler_name(m.group(3))
Platform = m.group(4)
status=open(report,'r').readlines()[0][0:-1]
failed = get_failed_tests(tag)
if status!='ok':
print '<tr><td>%s</td><td>%s</td><td>%s</td><td class="tooltip"><a href="./nightly-build-report/%s.txt">%s</a><span class="tooltiptext">%s</span></td></tr>' % (OS,Compiler,Platform,tag,status,failed)
else:
print '<tr><td>%s</td><td>%s</td><td>%s</td><td><a href="./nightly-build-report/%s.txt">%s</a></td></tr>' % (OS,Compiler,Platform,tag,status)
print """
</table>
</body>
</html>
"""
avg_line_length: 25.708738 | max_line_length: 207 | alphanum_fraction: 0.607628

hexsha: 4a18332b0834c4e03bdd2df19b83e1aea55bd859 | size: 127 | ext: py | lang: Python
repo_path: partner/admin.py
repo_name: YangWanjun/ebusiness | repo_head_hexsha: 03d92908b4db1a305c8cb99fc27700fd4dc972bd | licenses: ["Apache-2.0"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: null | max_issues_count: 3 (2020-02-11T22:59:47.000Z to 2021-03-19T22:03:11.000Z) | max_forks_count: null
content:
from django.contrib import admin
class PartnerAdmin(admin.ModelAdmin):
list_display = None
list_display_links = None
avg_line_length: 18.142857 | max_line_length: 37 | alphanum_fraction: 0.771654

hexsha: 4a1833491257412f842e8474603968ff40c1d29f | size: 15,736 | ext: py | lang: Python
repo_path: querybuilder/filters.py
repo_name: NorthIsUp/querybuilder | repo_head_hexsha: 67b0539345e280669985b90e26b4df3809e01d74 | licenses: ["MIT"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 17 (2018-02-19T18:52:18.000Z to 2021-09-12T15:02:45.000Z)
max_issues_count: 3 (2018-04-25T09:12:27.000Z to 2021-03-25T21:48:47.000Z)
max_forks_count: 7 (2018-09-24T15:03:18.000Z to 2021-09-22T09:35:40.000Z)
content:
from __future__ import absolute_import
# Standard Library
import re
from datetime import (
date,
datetime,
time,
)
from decimal import (
Context,
Decimal,
)
# External Libraries
import six
from cached_property import cached_property
# Project Library
from querybuilder.constants import (
Input,
Operator,
Type,
)
from querybuilder.core import ToDictMixin
class Filters(object):
def run_filter_for_rule(self, rule):
'''
Run the rule using the current instance of a Filters class
Args:
rule (Rule): the rule to run. This will be a 'leaf' rule without a condition or further rules to run
Returns (bool):
the result of the operator handler when run on the values in the rule.
'''
# return a boolean if the one rule is satisfied
# get the filter for the id specified in the rule
filter = Filter._filter_registry[rule.id]
# get the value returned in the filter instance
filter_operand = filter.func(self)
# check that the value is within the filter constraints
if not filter.validate(filter_operand):
return False, filter_operand
# get the value set in the rule
rule_operand = filter.python_value(rule.value)
# get the operator we are going to test with
operator_handler = filter.handler_for_operator(rule.operator)
if isinstance(rule_operand, (list, tuple)):
# allow for syntax like def between(self, value, upper, lower)
return operator_handler(filter, filter_operand, *rule_operand), filter_operand
else:
return operator_handler(filter, filter_operand, rule_operand), filter_operand
class FilterMeta(type):
'''
Metaclass for the filter
This does simple registration of operators based on Operator.handles
'''
def __new__(metacls, name, bases, attrs):
cls = super(FilterMeta, metacls).__new__(metacls, name, bases, attrs)
for name, attr in attrs.items():
if hasattr(attr, 'operator'):
                # check for the `operator` attribute that is set in Operator.handles
cls._operator_handlers[attr.operator] = attr
return cls
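# Note: `Operator.X.handles` (used as a decorator on the handler methods
# below) is expected to stamp the decorated function with an `operator`
# attribute, which the metaclass above picks up to fill _operator_handlers.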
class Filter(six.with_metaclass(FilterMeta, ToDictMixin)):
'''
    Corresponds to the Filter object of jQuery QueryBuilder (jQQB).
Filters define the possible contents for a rule. This includes
- the human readable name
- help information
- what is the data type we are working with
- what are the validation criteria (also see the Validation class)
- if there are default values
- if the input is limited to a set of choices
- etc.
For detailed information see the project website.
http://querybuilder.js.org/#filters
'''
# top level registry of all the filters that exist by id
_filter_registry = {}
# per-filter class map of operator -> function
_operator_handlers = {}
_validation_functions = frozenset()
DICT_KEYS = ('id', 'type', 'field', 'label', 'description', 'optgroup', 'input', 'values', 'value_separator', 'default_value', 'input_event', 'size', 'rows', 'multiple', 'placeholder', 'vertical', 'validation', 'operators', 'plugin', 'plugin_config', 'data', 'valueSetter', 'valueGetter')
TO_PYTHON = None
def __init__(
self,
id=None,
field=None,
label=None,
description=None,
type=None,
optgroup=None,
input=None,
values=(),
value_separator=None,
default_value=None,
input_event=None,
size=None,
rows=None,
multiple=None,
placeholder=None,
vertical=None,
validation=None,
operators=(),
plugin=None,
plugin_config=None,
data=None,
valueSetter=None,
valueGetter=None,
):
'''
Args:
id (str):
Unique identifier of the filter.
By default this is the name of the function it is decorating.
field (str): ??? understand this better
Field used by the filter, multiple filters can use the same field.
label (str):
Label used to display the filter. It can be simple string or a map for localization.
description (str):
Detailed description for display as help text.
type (str or Type):
Type of the field. Available types are in `Type`
optgroup (str):
Group name to group this filter with
input (str or Input):
                Type of input used. Available inputs are in `Input`
values ([Values]):
Required for `radio` and `checkbox` inputs. Generally needed for select inputs.
value_separator (str):
                Used to split and join the value when a text input is used with an operator allowing multiple values (between, for example).
default_value:
The default value.
validation ([Validation]):
Object of options for rule validation. See the `Validation` class.
operators ([Operator)]):
Array of operators types to use for this filter. If empty the filter will use all applicable operators.
data (dict):
Additional data not used by QueryBuilder but that will be added to the output rules object. Use this to store any functional data you need.
Args with only front end uses:
input_event:
Space separated list of DOM events which the builder should listen to detect value changes.
plugin:
Name of a jQuery plugin to apply on the input.
plugin_config:
Object of parameters to pass to the plugin.
valueSetter:
Function used to set the input(s) value. If provided the default function is not run. It takes 2 parameters: rule, value
valueGetter:
Function used to get the input(s) value. If provided the default function is not run. It takes 1 parameter: rule
Only for text and textarea inputs:
size: horizontal size of the input.
rows: vertical size of the input.
placeholder: placeholder to display inside the input.
Only for select inputs:
multiple: accept multiple values.
Only for radio and checkbox inputs:
            vertical: display inputs vertically rather than horizontally.
'''
self.id = id
self.type = Type(type) if type else type
self.field = field
self.label = label
self.description = description
self.optgroup = optgroup
self.input = Input(input) if input else input
self.values = values
if self.input in (Input.CHECKBOX, Input.RADIO) and not self.values:
raise ValueError('values are required when using input %s' % self.input)
self.value_separator = value_separator
self.default_value = default_value
self.input_event = input_event
self.size = size
self.rows = rows
self.multiple = multiple
self.placeholder = placeholder
self.vertical = vertical
self.validation = dict(validation or {}) # ensure validation is a dict
self.operators = [Operator(op) for op in operators] # cast strings to operator, this also validates
self.plugin = plugin
self.plugin_config = plugin_config
self.data = data
self.valueSetter = valueSetter
self.valueGetter = valueGetter
self.func = None
self._validation_functions = frozenset(
getattr(self, func_name)
for func_name in dir(self)
if func_name.startswith('validate_') and callable(getattr(self, func_name))
)
def __call__(self, func):
self.func = func
# set the id, label, etc
self.id = self.id or func.__name__
Filter._filter_registry[self.id] = self
return cached_property(func)
@classmethod
def all_filters(cls):
'''returns all the available filters in the registry'''
return [
filter.to_dict()
for filter
in cls._filter_registry.values()
]
@classmethod
def handler_for_operator(cls, operator):
return cls._operator_handlers.get(operator) or Filter._operator_handlers[operator]
    # how to convert a rule's type to a python type
    # TODO validate these converters
    _python_types = {
        Type.STRING: str,
        Type.INTEGER: int,
        Type.DOUBLE: Decimal,
        Type.DATE: date,
        Type.TIME: time,
        Type.DATETIME: datetime,
        Type.BOOLEAN: lambda x: bool(int(x) if x.isdigit() else (1 if x == 'true' else 0)),
    }
def python_value(self, filter_value):
'''Convert the json representation of a value to python'''
if filter_value is None:
# when value is None it is intentional and shouldn't be mapped
return None
else:
# lookup the converter in the python_types dict
return self._python_types[self.type](filter_value)
@classmethod
def filter_value(cls, python_value):
'''Convert the python representation of a value to one which is filter and json compatible'''
return python_value
def validate(self, value):
if self._validation_functions:
return all(
f(value) is not False # value must be false, not just falsy
for f in self._validation_functions
)
return True
###########################################################################
# Default handlers for operators
@Operator.EQUAL.handles
def equal(self, lop, rop):
return lop == rop
@Operator.NOT_EQUAL.handles
def not_equal(self, lop, rop):
return not self.equal(lop, rop)
@Operator.IN.handles
def _in(self, lop, rop):
return lop in rop
@Operator.NOT_IN.handles
def not_in(self, lop, rop):
return not self._in(lop, rop)
@Operator.LESS.handles
def less(self, lop, rop):
return lop < rop
@Operator.LESS_OR_EQUAL.handles
def less_or_equal(self, lop, rop):
return self.less(lop, rop) or self.equal(lop, rop)
@Operator.GREATER.handles
def greater(self, lop, rop):
return not self.less_or_equal(lop, rop)
@Operator.GREATER_OR_EQUAL.handles
def greater_or_equal(self, lop, rop):
return self.greater(lop, rop) or self.equal(lop, rop)
@Operator.BETWEEN.handles
def between(self, op, minop, maxop):
'''
minop <= op <= maxop
'''
return self.less_or_equal(minop, op) and self.less_or_equal(op, maxop)
@Operator.NOT_BETWEEN.handles
def not_between(self, op, minop, maxop):
return not self.between(op, minop, maxop)
@Operator.CONTAINS.handles
def contains(self, lop, rop):
return self._in(lop, rop)
@Operator.IS_NULL.handles
def is_null(self, op):
return op is None
@Operator.IS_NOT_NULL.handles
def is_not_null(self, op):
return not self.is_null(op)
class TypedFilter(Filter):
TYPE = NotImplemented
OPERATORS = NotImplemented
OPTIONS = NotImplemented
def __init__(self, *args, **kwargs):
kwargs.update(type=self.TYPE)
assert self.TYPE is not NotImplemented, 'TYPE must be declared in the subclass'
if self.OPERATORS is not NotImplemented:
kwargs.setdefault('operators', tuple(self.OPERATORS))
if self.OPTIONS is not NotImplemented:
for k, v in self.OPTIONS.items():
kwargs.setdefault(k, v)
super(TypedFilter, self).__init__(*args, **kwargs)
class BooleanFilter(TypedFilter):
TYPE = Type.BOOLEAN
OPERATORS = [
Operator.EQUAL,
Operator.NOT_EQUAL,
Operator.IS_NULL,
Operator.IS_NOT_NULL,
]
OPTIONS = {
'input': Input.RADIO,
'values': ({1: 'Is True'}, {0: 'Is False'}),
}
class StringFilter(TypedFilter):
TYPE = Type.STRING
OPERATORS = (
Operator.unary_comparisons
| Operator.binary_comparisons
| Operator.ternary_comparisons
| Operator.collection_comparisons
| Operator.string_comparisons
)
@cached_property
def validation_format(self):
fmt = self.validation.get('format')
if fmt is not None:
if fmt.startswith('/') and fmt.endswith('/'):
fmt = fmt[1:-1]
return re.compile(fmt)
def validate_format(self, value):
if self.validation_format is not None:
return bool(self.validation_format.match(value))
###########################################################################
# Default handlers for operators
@Operator.NOT_CONTAINS.handles
def not_contains(self, lop, rop):
return not self.contains(lop, rop)
@Operator.BEGINS_WITH.handles
def begins_with(self, lop, rop):
return lop.startswith(rop)
@Operator.NOT_BEGINS_WITH.handles
def not_begins_with(self, lop, rop):
return not lop.startswith(rop)
@Operator.ENDS_WITH.handles
def ends_with(self, lop, rop):
return lop.endswith(rop)
@Operator.NOT_ENDS_WITH.handles
def not_ends_with(self, lop, rop):
return not lop.endswith(rop)
@Operator.IS_EMPTY.handles
def is_empty(self, op):
return len(op) == 0
@Operator.IS_NOT_EMPTY.handles
def is_not_empty(self, op):
return not self.is_empty(op)
class IntegerFilter(TypedFilter):
TYPE = Type.INTEGER
OPERATORS = (
Operator.unary_comparisons
| Operator.binary_comparisons
| Operator.ternary_comparisons
)
def validate_min(self, value):
min = self.validation.get('min')
if min is not None:
return value >= Decimal(str(min))
def validate_max(self, value):
max = self.validation.get('max')
if max is not None:
return value <= Decimal(str(max))
def validate_step(self, value, _divmod=Context().divmod):
step = self.validation.get('step')
if step is not None:
_, remainder = _divmod(Decimal(str(value)), Decimal(str(step)))
return remainder == 0
class DoubleFilter(IntegerFilter):
# this isn't a thing in python, but whatever
TYPE = Type.DOUBLE
# alias Numeric to Double, these are the same concept in python
NumericFilter = DoubleFilter
class DateFilter(TypedFilter):
TYPE = Type.DATE
OPERATORS = (
Operator.unary_comparisons
| Operator.binary_comparisons
| Operator.ternary_comparisons
)
# TODO add default validator
class TimeFilter(TypedFilter):
TYPE = Type.TIME
OPERATORS = (
Operator.unary_comparisons
| Operator.binary_comparisons
| Operator.ternary_comparisons
)
# TODO add default validator
class DateTimeFilter(TypedFilter):
TYPE = Type.DATETIME
OPERATORS = (
Operator.unary_comparisons
| Operator.binary_comparisons
| Operator.ternary_comparisons
)
# TODO add default validator
__all__ = [_.__name__ for _ in globals().values()
           if isinstance(_, type) and issubclass(_, (Filter, Filters))]
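# Minimal usage sketch (names invented for illustration): expose a value via a
# decorated method on a Filters subclass, then evaluate a rule-like object
# carrying `id`, `operator` and `value` attributes against it.
if __name__ == '__main__':
    from collections import namedtuple

    Rule = namedtuple('Rule', ('id', 'operator', 'value'))

    class DemoFilters(Filters):
        @IntegerFilter(label='Age')
        def age(self):
            return 42

    matched, operand = DemoFilters().run_filter_for_rule(
        Rule(id='age', operator=Operator.GREATER, value='21'))
    print(matched, operand)  # True 42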
avg_line_length: 30.976378 | max_line_length: 292 | alphanum_fraction: 0.62106

hexsha: 4a183386561e2c9134ac491412d302608a4746c8 | size: 7,670 | ext: py | lang: Python
repo_path: cogdl/models/nn/pyg_gtn.py
repo_name: BywinTec/cogdl | repo_head_hexsha: 3c0abcfe364a69061c84c8170d4f5e6a17a4668d | licenses: ["MIT"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 2 (2021-06-25T08:18:36.000Z to 2021-06-25T08:51:00.000Z) | max_issues_count: null | max_forks_count: null
content:
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_sparse import spspmm
from .. import BaseModel, register_model
from .gcn import GraphConvolution
from cogdl.utils import remove_self_loops, coalesce, accuracy
class GTConv(nn.Module):
def __init__(self, in_channels, out_channels, num_nodes):
super(GTConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels))
self.bias = None
self.scale = nn.Parameter(torch.Tensor([0.1]), requires_grad=False)
self.num_nodes = num_nodes
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.weight, 1)
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, A):
filter = F.softmax(self.weight, dim=1)
num_channels = filter.shape[0]
results = []
for i in range(num_channels):
for j, (edge_index, edge_value) in enumerate(A):
if j == 0:
total_edge_index = edge_index
total_edge_value = edge_value * filter[i][j]
else:
total_edge_index = torch.cat((total_edge_index, edge_index), dim=1)
total_edge_value = torch.cat((total_edge_value, edge_value * filter[i][j]))
row, col = total_edge_index.detach()
row, col, value = coalesce(row, col, total_edge_value)
index = torch.stack([row, col])
results.append((index, value))
return results
class GTLayer(nn.Module):
def __init__(self, in_channels, out_channels, num_nodes, first=True):
super(GTLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.first = first
self.num_nodes = num_nodes
if self.first:
self.conv1 = GTConv(in_channels, out_channels, num_nodes)
self.conv2 = GTConv(in_channels, out_channels, num_nodes)
else:
self.conv1 = GTConv(in_channels, out_channels, num_nodes)
def forward(self, A, H_=None):
if self.first:
result_A = self.conv1(A)
result_B = self.conv2(A)
W = [(F.softmax(self.conv1.weight, dim=1)).detach(), (F.softmax(self.conv2.weight, dim=1)).detach()]
else:
result_A = H_
result_B = self.conv1(A)
W = [(F.softmax(self.conv1.weight, dim=1)).detach()]
H = []
device = result_A[0][0].device
for i in range(len(result_A)):
# a_edge, a_value = result_A[i][0].cpu(), result_A[i][1].cpu()
# b_edge, b_value = result_B[i][0].cpu(), result_B[i][1].cpu()
a_edge, a_value = result_A[i][0], result_A[i][1]
b_edge, b_value = result_B[i][0], result_B[i][1]
edges, values = spspmm(a_edge, a_value, b_edge, b_value, self.num_nodes, self.num_nodes, self.num_nodes)
H.append((edges.to(device), values.to(device)))
return H, W
@register_model("gtn")
class GTN(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--num-nodes", type=int)
parser.add_argument("--hidden-size", type=int, default=64)
parser.add_argument("--num-layers", type=int, default=2)
parser.add_argument("--num-edge", type=int, default=2)
parser.add_argument("--num-channels", type=int, default=2)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_edge,
args.num_channels,
args.num_features,
args.hidden_size,
args.num_classes,
args.num_nodes,
args.num_layers,
)
def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
super(GTN, self).__init__()
self.num_edge = num_edge
self.num_channels = num_channels
self.num_nodes = num_nodes
self.w_in = w_in
self.w_out = w_out
self.num_class = num_class
self.num_layers = num_layers
layers = []
for i in range(num_layers):
if i == 0:
layers.append(GTLayer(num_edge, num_channels, num_nodes, first=True))
else:
layers.append(GTLayer(num_edge, num_channels, num_nodes, first=False))
self.layers = nn.ModuleList(layers)
self.cross_entropy_loss = nn.CrossEntropyLoss()
self.gcn = GraphConvolution(in_features=self.w_in, out_features=w_out)
self.linear1 = nn.Linear(self.w_out * self.num_channels, self.w_out)
self.linear2 = nn.Linear(self.w_out, self.num_class)
def normalization(self, H):
norm_H = []
for i in range(self.num_channels):
edge, value = H[i]
edge, value = remove_self_loops(edge, value)
deg_row, deg_col = self.norm(edge.detach(), self.num_nodes, value.detach())
value = deg_col * value
norm_H.append((edge, value))
return norm_H
def norm(self, edge_index, num_nodes, edge_weight, improved=False, dtype=None):
with torch.no_grad():
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
edge_weight = edge_weight.view(-1)
assert edge_weight.size(0) == edge_index.size(1)
row, col = edge_index
deg = torch.zeros((num_nodes,)).to(edge_index.device)
deg = deg.scatter_add_(dim=0, src=edge_weight, index=row).squeeze()
            deg_inv_sqrt = deg.pow(-1)  # inverse degree D^-1 (despite the "_sqrt" in the name)
deg_inv_sqrt[deg_inv_sqrt == float("inf")] = 0
return deg_inv_sqrt[row], deg_inv_sqrt[col]
def forward(self, graph, target_x, target):
A = graph.adj
X = graph.x
Ws = []
for i in range(self.num_layers):
if i == 0:
H, W = self.layers[i](A)
else:
H = self.normalization(H)
H, W = self.layers[i](A, H)
Ws.append(W)
with graph.local_graph():
for i in range(self.num_channels):
if i == 0:
edge_index, edge_weight = H[i][0], H[i][1]
graph.edge_index = edge_index.detach()
graph.edge_weight = edge_weight
X_ = self.gcn(graph, X)
X_ = F.relu(X_)
else:
edge_index, edge_weight = H[i][0], H[i][1]
graph.edge_index = edge_index.detach()
graph.edge_weight = edge_weight
X_ = torch.cat((X_, F.relu(self.gcn(graph, X))), dim=1)
X_ = self.linear1(X_)
X_ = F.relu(X_)
# X_ = F.dropout(X_, p=0.5)
y = self.linear2(X_[target_x])
loss = self.cross_entropy_loss(y, target)
return loss, y, Ws
def loss(self, data):
loss, y, _ = self.forward(data, data.train_node, data.train_target)
return loss
def evaluate(self, data, nodes, targets):
loss, y, _ = self.forward(data, nodes, targets)
f1 = accuracy(y, targets)
return loss.item(), f1
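# Hypothetical shape sketch (toy numbers): GTConv/GTN expect the adjacency
# `A` as a list holding one (edge_index, edge_value) pair per edge type.
if __name__ == '__main__':
    num_nodes, num_edge_types = 4, 2
    A = [(torch.tensor([[0, 1, 2], [1, 2, 3]]), torch.ones(3))
         for _ in range(num_edge_types)]
    conv = GTConv(num_edge_types, 3, num_nodes)
    channels = conv(A)  # one (edge_index, edge_value) pair per output channel
    print(len(channels))  # 3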
avg_line_length: 38.737374 | max_line_length: 116 | alphanum_fraction: 0.581095

hexsha: 4a1833f2e874f31cd4232fbe5bda65f63de3b0ac | size: 43 | ext: py | lang: Python
repo_path: src/audisto_exporter/__init__.py
repo_name: ZeitOnline/audisto_exporter | repo_head_hexsha: 9d1b1771c9ec38f0c512f4736b97fd7f3432e904 | licenses: ["BSD-3-Clause"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: null | max_issues_count: 1 (2021-06-24T11:32:59.000Z to 2021-06-24T11:32:59.000Z) | max_forks_count: null
content:
from audisto_exporter.exporter import main
avg_line_length: 21.5 | max_line_length: 42 | alphanum_fraction: 0.883721

hexsha: 4a183564c6e587bf7da093d97841c0924d7ed194 | size: 1,947 | ext: py | lang: Python
repo_path: backend/api/tests/test_load_ops_data.py
repo_name: kuanfan99/zeva | repo_head_hexsha: 57b506a108fe57438506569d5503c90c52216b2f | licenses: ["Apache-2.0"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 3 (2020-03-25T03:06:20.000Z to 2021-01-20T23:36:03.000Z)
max_issues_count: 740 (2019-12-16T15:53:39.000Z to 2022-03-26T08:25:10.000Z)
max_forks_count: 11 (2019-11-28T20:39:15.000Z to 2022-01-31T17:53:31.000Z)
content:
# -*- coding: utf-8 -*-
# pylint: disable=no-member,invalid-name,duplicate-code
import importlib
import logging
from collections import namedtuple
from django.test import TestCase
class TestLoadOpsData(TestCase):
"""
Execute specified operational scripts to validate that they work
"""
ScriptDefinition = namedtuple(
'ScriptDefinition', ('file', 'args', 'skip')
)
scripts = [
ScriptDefinition(
'api.fixtures.operational.0000_add_government_organization',
'', False
),
ScriptDefinition(
'api.fixtures.operational.0001_add_vehicle_classes',
'', False
),
ScriptDefinition(
'api.fixtures.operational.0002_add_vehicle_zev_types',
'', False
),
ScriptDefinition(
'api.fixtures.operational.0003_add_model_years',
'', False
),
ScriptDefinition(
'api.fixtures.operational.0004_add_organizations',
'', False
),
ScriptDefinition(
'api.fixtures.test.0001_add_plugin_hybrid_vehicles',
'', False
),
ScriptDefinition(
'api.fixtures.test.0002_add_battery_electric_vehicles',
'', False
),
]
logger = logging.getLogger('zeva.test')
def testOperationalScripts(self):
for script in self.scripts:
if not script.skip:
with self.subTest('testing operational script {file}'.format(
file=script.file
)):
logging.info('loading script: {file}'.format(
file=script.file
))
loaded = importlib.import_module(script.file)
instance = loaded.script_class(script.file, script.args)
instance.check_run_preconditions()
instance.run()
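# Hypothetical shape of an operational script module this test can load (the
# class name is a placeholder; only the `script_class` attribute and its
# interface are inferred from the loop above):
#
#     class AddSomething:
#         def __init__(self, script_file, script_args): ...
#         def check_run_preconditions(self): ...
#         def run(self): ...
#
#     script_class = AddSomething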
avg_line_length: 30.421875 | max_line_length: 77 | alphanum_fraction: 0.562917

hexsha: 4a18358e3e2bd7ad774e8b4fe49faf21b86e3f38 | size: 29,403 | ext: py | lang: Python
repo_path: Tests/subset/subset_test.py
repo_name: PeterDekkers/fonttools | repo_head_hexsha: ffc98baa0f28af7c4d4c173cca9f01b8c9baac14 | licenses: ["MIT", "BSD-3-Clause"]
(the max_stars/max_issues/max_forks repo path, name, head hexsha and licenses are identical)
max_stars_count: 1 (2021-06-30T13:23:57.000Z to 2021-06-30T13:23:57.000Z) | max_issues_count: null | max_forks_count: null
content:
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import subset
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.loggingTools import CapturingLogHandler
import difflib
import logging
import os
import shutil
import sys
import tempfile
import unittest
class SubsetTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
# and fires deprecation warnings if a program uses the old name.
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
def setUp(self):
self.tempdir = None
self.num_tempfiles = 0
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
@staticmethod
def getpath(testfile):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", testfile)
def temp_path(self, suffix):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp()
self.num_tempfiles += 1
return os.path.join(self.tempdir,
"tmp%d%s" % (self.num_tempfiles, suffix))
def read_ttx(self, path):
lines = []
with open(path, "r", encoding="utf-8") as ttx:
for line in ttx.readlines():
# Elide ttFont attributes because ttLibVersion may change,
# and use os-native line separators so we can run difflib.
if line.startswith("<ttFont "):
lines.append("<ttFont>" + os.linesep)
else:
lines.append(line.rstrip() + os.linesep)
return lines
def expect_ttx(self, font, expected_ttx, tables):
path = self.temp_path(suffix=".ttx")
font.saveXML(path, tables=tables)
actual = self.read_ttx(path)
expected = self.read_ttx(expected_ttx)
if actual != expected:
for line in difflib.unified_diff(
expected, actual, fromfile=expected_ttx, tofile=path):
sys.stdout.write(line)
self.fail("TTX output is different from expected")
def compile_font(self, path, suffix):
savepath = self.temp_path(suffix=suffix)
font = TTFont(recalcBBoxes=False, recalcTimestamp=False)
font.importXML(path)
font.save(savepath, reorderTables=None)
return font, savepath
# -----
# Tests
# -----
def test_no_notdef_outline_otf(self):
_, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "])
def test_no_notdef_outline_cid(self):
_, fontpath = self.compile_font(self.getpath("TestCID-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "])
def test_no_notdef_outline_ttf(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_ttf.ttx"), ["glyf", "hmtx"])
def test_subset_ankr(self):
_, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_ankr.ttx"), ["ankr"])
def test_subset_ankr_remove(self):
_, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=two", "--output-file=%s" % subsetpath])
self.assertNotIn("ankr", TTFont(subsetpath))
def test_subset_bsln_format_0(self):
_, fontpath = self.compile_font(self.getpath("TestBSLN-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])
def test_subset_bsln_format_0_from_format_1(self):
# TestBSLN-1 defines the ideographic baseline to be the font's default,
# and specifies that glyphs {.notdef, zero, one, two} use the roman
# baseline instead of the default ideographic baseline. As we request
# a subsetted font with {zero, one} and the implicit .notdef, all
# glyphs in the resulting font use the Roman baseline. In this case,
# we expect a format 0 'bsln' table because it is the most compact.
_, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030-0031",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"])
def test_subset_bsln_format_1(self):
# TestBSLN-1 defines the ideographic baseline to be the font's default,
# and specifies that glyphs {.notdef, zero, one, two} use the roman
# baseline instead of the default ideographic baseline. We request
# a subset where the majority of glyphs use the roman baseline,
# but one single glyph (uni2EA2) is ideographic. In the resulting
# subsetted font, we expect a format 1 'bsln' table whose default
# is Roman, but with an override that uses the ideographic baseline
# for uni2EA2.
_, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_1.ttx"), ["bsln"])
def test_subset_bsln_format_2(self):
# The 'bsln' table in TestBSLN-2 refers to control points in glyph 'P'
# for defining its baselines. Therefore, the subsetted font should
# include this glyph even though it is not requested explicitly.
_, fontpath = self.compile_font(self.getpath("TestBSLN-2.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])
def test_subset_bsln_format_2_from_format_3(self):
# TestBSLN-3 defines the ideographic baseline to be the font's default,
# and specifies that glyphs {.notdef, zero, one, two, P} use the roman
# baseline instead of the default ideographic baseline. As we request
# a subsetted font with zero and the implicit .notdef and P for
# baseline measurement, all glyphs in the resulting font use the Roman
# baseline. In this case, we expect a format 2 'bsln' table because it
# is the most compact encoding.
_, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"])
def test_subset_bsln_format_3(self):
# TestBSLN-3 defines the ideographic baseline to be the font's default,
# and specifies that glyphs {.notdef, zero, one, two} use the roman
# baseline instead of the default ideographic baseline. We request
# a subset where the majority of glyphs use the roman baseline,
# but one single glyph (uni2EA2) is ideographic. In the resulting
        # subsetted font, we expect a format 3 'bsln' table whose default
# is Roman, but with an override that uses the ideographic baseline
# for uni2EA2.
_, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_bsln_3.ttx"), ["bsln"])
def test_subset_clr(self):
_, fontpath = self.compile_font(self.getpath("TestCLR-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=smileface", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_keep_colr.ttx"), ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"])
def test_subset_gvar(self):
_, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
def test_subset_gvar_notdef_outline(self):
_, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030", "--notdef_outline", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar_notdef_outline.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"])
def test_subset_lcar_remove(self):
_, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertNotIn("lcar", subsetfont)
def test_subset_lcar_format_0(self):
_, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+FB01",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_lcar_0.ttx"), ["lcar"])
def test_subset_lcar_format_1(self):
_, fontpath = self.compile_font(self.getpath("TestLCAR-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+FB01",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_lcar_1.ttx"), ["lcar"])
def test_subset_math(self):
_, fontpath = self.compile_font(self.getpath("TestMATH-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_keep_math.ttx"), ["GlyphOrder", "CFF ", "MATH", "hmtx"])
def test_subset_opbd_remove(self):
# In the test font, only the glyphs 'A' and 'zero' have an entry in
# the Optical Bounds table. When subsetting, we do not request any
# of those glyphs. Therefore, the produced subsetted font should
# not contain an 'opbd' table.
_, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertNotIn("opbd", subsetfont)
def test_subset_opbd_format_0(self):
_, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_opbd_0.ttx"), ["opbd"])
def test_subset_opbd_format_1(self):
_, fontpath = self.compile_font(self.getpath("TestOPBD-1.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_opbd_1.ttx"), ["opbd"])
def test_subset_prop_remove_default_zero(self):
# If all glyphs have an AAT glyph property with value 0,
# the "prop" table should be removed from the subsetted font.
_, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0041",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertNotIn("prop", subsetfont)
def test_subset_prop_0(self):
# If all glyphs share the same AAT glyph properties, the "prop" table
# in the subsetted font should use format 0.
#
# Unless the shared value is zero, in which case the subsetted font
# should have no "prop" table at all. But that case has already been
# tested above in test_subset_prop_remove_default_zero().
_, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030-0032", "--no-notdef-glyph",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_prop_0.ttx"), ["prop"])
def test_subset_prop_1(self):
# If not all glyphs share the same AAT glyph properties, the subsetted
# font should contain a "prop" table in format 1. To save space, the
# DefaultProperties should be set to the most frequent value.
_, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=U+0030-0032", "--notdef-outline",
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_prop_1.ttx"), ["prop"])
def test_options(self):
# https://github.com/fonttools/fonttools/issues/413
opt1 = subset.Options()
self.assertTrue('Xyz-' not in opt1.layout_features)
opt2 = subset.Options()
opt2.layout_features.append('Xyz-')
self.assertTrue('Xyz-' in opt2.layout_features)
self.assertTrue('Xyz-' not in opt1.layout_features)
def test_google_color(self):
_, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertTrue("CBDT" in subsetfont)
self.assertTrue("CBLC" in subsetfont)
self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
self.assertFalse("y" in subsetfont['CBDT'].strikeData[0])
def test_google_color_all(self):
_, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertTrue("x" in subsetfont['CBDT'].strikeData[0])
self.assertTrue("y" in subsetfont['CBDT'].strikeData[0])
def test_timing_publishes_parts(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
options = subset.Options()
options.timing = True
subsetter = subset.Subsetter(options)
subsetter.populate(text='ABC')
font = TTFont(fontpath)
with CapturingLogHandler('fontTools.subset.timer', logging.DEBUG) as captor:
subsetter.subset(font)
logs = captor.records
self.assertTrue(len(logs) > 5)
self.assertEqual(len(logs), len([l for l in logs if 'msg' in l.args and 'time' in l.args]))
# Look for a few things we know should happen
self.assertTrue(any(l.args['msg'] == "load 'cmap'" for l in logs))
self.assertTrue(any(l.args['msg'] == "subset 'cmap'" for l in logs))
self.assertTrue(any(l.args['msg'] == "subset 'glyf'" for l in logs))
def test_passthrough_tables(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
font = TTFont(fontpath)
unknown_tag = 'ZZZZ'
unknown_table = newTable(unknown_tag)
unknown_table.data = b'\0'*10
font[unknown_tag] = unknown_table
font.save(fontpath)
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
# tables we can't subset are dropped by default
self.assertFalse(unknown_tag in subsetfont)
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--passthrough-tables", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
# unknown tables are kept if --passthrough-tables option is passed
self.assertTrue(unknown_tag in subsetfont)
def test_non_BMP_text_arg_input(self):
_, fontpath = self.compile_font(
self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
text = tostr(u"A\U0001F6D2", encoding='utf-8')
subset.main([fontpath, "--text=%s" % text, "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])
def test_non_BMP_text_file_input(self):
_, fontpath = self.compile_font(
self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
text = tobytes(u"A\U0001F6D2", encoding='utf-8')
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(text)
try:
subset.main([fontpath, "--text-file=%s" % tmp.name,
"--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
finally:
os.remove(tmp.name)
self.assertEqual(subsetfont['maxp'].numGlyphs, 3)
self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2'])
def test_no_hinting_CFF(self):
ttxpath = self.getpath("Lobster.subset.ttx")
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--no-hinting", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"expect_no_hinting_CFF.ttx"), ["CFF "])
def test_desubroutinize_CFF(self):
ttxpath = self.getpath("Lobster.subset.ttx")
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--desubroutinize", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"expect_desubroutinize_CFF.ttx"), ["CFF "])
def test_desubroutinize_hinted_subrs_CFF(self):
ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--desubroutinize", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"test_hinted_subrs_CFF.desub.ttx"), ["CFF "])
def test_desubroutinize_cntrmask_CFF(self):
ttxpath = self.getpath("test_cntrmask_CFF.ttx")
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--desubroutinize", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"test_cntrmask_CFF.desub.ttx"), ["CFF "])
def test_no_hinting_desubroutinize_CFF(self):
ttxpath = self.getpath("test_hinted_subrs_CFF.ttx")
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--no-hinting", "--desubroutinize", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"expect_no_hinting_desubroutinize_CFF.ttx"), ["CFF "])
def test_no_hinting_TTF(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--no-hinting", "--notdef-outline",
"--output-file=%s" % subsetpath, "*"])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath(
"expect_no_hinting_TTF.ttx"), ["glyf", "maxp"])
for tag in subset.Options().hinting_tables:
self.assertTrue(tag not in subsetfont)
def test_notdef_width_cid(self):
# https://github.com/fonttools/fonttools/pull/845
_, fontpath = self.compile_font(self.getpath("NotdefWidthCID-Regular.ttx"), ".otf")
subsetpath = self.temp_path(".otf")
subset.main([fontpath, "--no-notdef-outline", "--gids=0,1", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "])
def test_recalc_timestamp_ttf(self):
ttxpath = self.getpath("TestTTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
modified = font['head'].modified
_, fontpath = self.compile_font(ttxpath, ".ttf")
subsetpath = self.temp_path(".ttf")
# by default, the subsetter does not recalculate the modified timestamp
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
self.assertEqual(modified, TTFont(subsetpath)['head'].modified)
subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
self.assertLess(modified, TTFont(subsetpath)['head'].modified)
def test_recalc_timestamp_otf(self):
ttxpath = self.getpath("TestOTF-Regular.ttx")
font = TTFont()
font.importXML(ttxpath)
modified = font['head'].modified
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
# by default, the subsetter does not recalculate the modified timestamp
subset.main([fontpath, "--output-file=%s" % subsetpath, "*"])
self.assertEqual(modified, TTFont(subsetpath)['head'].modified)
subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"])
self.assertLess(modified, TTFont(subsetpath)['head'].modified)
def test_recalc_max_context(self):
ttxpath = self.getpath("Lobster.subset.ttx")
font = TTFont()
font.importXML(ttxpath)
max_context = font['OS/2'].usMaxContext
_, fontpath = self.compile_font(ttxpath, ".otf")
subsetpath = self.temp_path(".otf")
# by default, the subsetter does not recalculate the usMaxContext
subset.main([fontpath, "--drop-tables+=GSUB,GPOS",
"--output-file=%s" % subsetpath])
self.assertEqual(max_context, TTFont(subsetpath)['OS/2'].usMaxContext)
subset.main([fontpath, "--recalc-max-context",
"--drop-tables+=GSUB,GPOS",
"--output-file=%s" % subsetpath])
self.assertEqual(0, TTFont(subsetpath)['OS/2'].usMaxContext)
def test_retain_gids_ttf(self):
_, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf")
font = TTFont(fontpath)
self.assertEqual(font["hmtx"]["A"], (500, 132))
self.assertEqual(font["hmtx"]["B"], (400, 132))
self.assertGreater(font["glyf"]["A"].numberOfContours, 0)
self.assertGreater(font["glyf"]["B"].numberOfContours, 0)
subsetpath = self.temp_path(".ttf")
subset.main(
[
fontpath,
"--retain-gids",
"--output-file=%s" % subsetpath,
"--glyph-names",
"A",
]
)
subsetfont = TTFont(subsetpath)
self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder())
hmtx = subsetfont["hmtx"]
self.assertEqual(hmtx["A"], (500, 132))
self.assertEqual(hmtx["B"], (0, 0))
glyf = subsetfont["glyf"]
self.assertGreater(glyf["A"].numberOfContours, 0)
self.assertEqual(glyf["B"].numberOfContours, 0)
def test_retain_gids_cff(self):
_, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf")
font = TTFont(fontpath)
self.assertEqual(font["hmtx"]["A"], (500, 132))
self.assertEqual(font["hmtx"]["B"], (400, 132))
font["CFF "].cff[0].decompileAllCharStrings()
cs = font["CFF "].cff[0].CharStrings
self.assertGreater(len(cs["A"].program), 0)
self.assertGreater(len(cs["B"].program), 0)
subsetpath = self.temp_path(".otf")
subset.main(
[
fontpath,
"--retain-gids",
"--output-file=%s" % subsetpath,
"--glyph-names",
"A",
]
)
subsetfont = TTFont(subsetpath)
self.assertEqual(subsetfont.getGlyphOrder(), font.getGlyphOrder())
hmtx = subsetfont["hmtx"]
self.assertEqual(hmtx["A"], (500, 132))
self.assertEqual(hmtx["B"], (0, 0))
subsetfont["CFF "].cff[0].decompileAllCharStrings()
cs = subsetfont["CFF "].cff[0].CharStrings
self.assertGreater(len(cs["A"].program), 0)
self.assertEqual(cs["B"].program, ["endchar"])
def test_retain_gids_cff2(self):
fontpath = self.getpath("../../varLib/data/TestCFF2VF.otf")
font = TTFont(fontpath)
self.assertEqual(font["hmtx"]["A"], (600, 31))
self.assertEqual(font["hmtx"]["T"], (600, 41))
font["CFF2"].cff[0].decompileAllCharStrings()
cs = font["CFF2"].cff[0].CharStrings
self.assertGreater(len(cs["A"].program), 0)
self.assertGreater(len(cs["T"].program), 0)
subsetpath = self.temp_path(".otf")
subset.main(
[
fontpath,
"--retain-gids",
"--output-file=%s" % subsetpath,
"A",
]
)
subsetfont = TTFont(subsetpath)
self.assertEqual(len(subsetfont.getGlyphOrder()), len(font.getGlyphOrder()))
hmtx = subsetfont["hmtx"]
self.assertEqual(hmtx["A"], (600, 31))
self.assertEqual(hmtx["glyph00002"], (0, 0))
subsetfont["CFF2"].cff[0].decompileAllCharStrings()
cs = subsetfont["CFF2"].cff[0].CharStrings
self.assertGreater(len(cs["A"].program), 0)
self.assertEqual(cs["glyph00002"].program, [])
def test_HVAR_VVAR(self):
_, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--text=BD", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_HVVAR.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
def test_HVAR_VVAR_retain_gids(self):
_, fontpath = self.compile_font(self.getpath("TestHVVAR.ttx"), ".ttf")
subsetpath = self.temp_path(".ttf")
subset.main([fontpath, "--text=BD", "--retain-gids", "--output-file=%s" % subsetpath])
subsetfont = TTFont(subsetpath)
self.expect_ttx(subsetfont, self.getpath("expect_HVVAR_retain_gids.ttx"), ["GlyphOrder", "HVAR", "VVAR", "avar", "fvar"])
if __name__ == "__main__":
sys.exit(unittest.main())
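# A hedged, minimal sketch of the programmatic subsetting flow exercised by
# the tests above, assuming an input "MyFont.ttf" exists; the option name
# mirrors the --retain-gids CLI flag used in test_retain_gids_ttf.
#
#     from fontTools.ttLib import TTFont
#     from fontTools import subset
#
#     options = subset.Options()
#     options.retain_gids = True            # keep original glyph ids
#     subsetter = subset.Subsetter(options)
#     subsetter.populate(unicodes=[0x41], text="BD")
#     font = TTFont("MyFont.ttf")
#     subsetter.subset(font)                # prunes glyphs/tables in place
#     font.save("MyFont.subset.ttf")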
| 46.376972
| 136
| 0.624426
|
4a1835fa6762caa6a5b062b4b9da3ba10587d009
| 1,367
|
py
|
Python
|
01-SourceCode/blog/migrations/0001_initial.py
|
zoomla/ZoomlaCMS_python
|
a4e1f9282eaeb93a36d73b25889fcd9afa59e4a3
|
[
"Apache-2.0"
] | 1
|
2021-01-26T08:36:19.000Z
|
2021-01-26T08:36:19.000Z
|
01-SourceCode/blog/migrations/0001_initial.py
|
zoomla/ZoomlaCMS_python
|
a4e1f9282eaeb93a36d73b25889fcd9afa59e4a3
|
[
"Apache-2.0"
] | null | null | null |
01-SourceCode/blog/migrations/0001_initial.py
|
zoomla/ZoomlaCMS_python
|
a4e1f9282eaeb93a36d73b25889fcd9afa59e4a3
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-17 13:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
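# For reference only: a hedged sketch of the Post model this initial
# migration corresponds to (field definitions read off the operations
# above; the app's actual models.py may differ in detail).
#
#     from django.conf import settings
#     from django.db import models
#     from django.utils import timezone
#
#     class Post(models.Model):
#         STATUS_CHOICES = (('draft', 'Draft'), ('published', 'Published'))
#         title = models.CharField(max_length=250)
#         slug = models.SlugField(max_length=250, unique_for_date='publish')
#         author = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                    on_delete=models.CASCADE,
#                                    related_name='blog_posts')
#         body = models.TextField()
#         publish = models.DateTimeField(default=timezone.now)
#         created = models.DateTimeField(auto_now_add=True)
#         updated = models.DateTimeField(auto_now=True)
#         status = models.CharField(max_length=10, choices=STATUS_CHOICES,
#                                   default='draft')
#
#         class Meta:
#             ordering = ('-publish',)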
| 37.972222
| 147
| 0.607169
|
4a18376ff11660c6c5d0cd3e104eb4c59c077663
| 46,331
|
py
|
Python
|
libcloud/loadbalancer/drivers/nttcis.py
|
cheald/libcloud
|
1a3ebe5d60de6475a8a2384d864475de0abd73cf
|
[
"Apache-2.0"
] | 4
|
2017-11-14T17:24:12.000Z
|
2020-10-30T01:46:02.000Z
|
libcloud/loadbalancer/drivers/nttcis.py
|
cheald/libcloud
|
1a3ebe5d60de6475a8a2384d864475de0abd73cf
|
[
"Apache-2.0"
] | 1
|
2018-11-02T12:41:54.000Z
|
2018-11-05T07:57:45.000Z
|
libcloud/loadbalancer/drivers/nttcis.py
|
cheald/libcloud
|
1a3ebe5d60de6475a8a2384d864475de0abd73cf
|
[
"Apache-2.0"
] | 1
|
2020-02-01T10:25:54.000Z
|
2020-02-01T10:25:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import ET
from libcloud.common.nttcis import NttCisConnection
from libcloud.common.nttcis import NttCisPool
from libcloud.common.nttcis import NttCisPoolMember
from libcloud.common.nttcis import NttCisVirtualListener
from libcloud.common.nttcis import NttCisVIPNode
from libcloud.common.nttcis import NttCisDefaultHealthMonitor
from libcloud.common.nttcis import NttCisPersistenceProfile
from libcloud.common.nttcis import \
NttCisVirtualListenerCompatibility
from libcloud.common.nttcis import NttCisDefaultiRule
from libcloud.common.nttcis import API_ENDPOINTS
from libcloud.common.nttcis import DEFAULT_REGION
from libcloud.common.nttcis import TYPES_URN
from libcloud.utils.misc import reverse_dict
from libcloud.utils.xml import fixxpath, findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Algorithm, Driver,\
LoadBalancer, DEFAULT_ALGORITHM
from libcloud.loadbalancer.base import Member
from libcloud.loadbalancer.types import Provider
class NttCisLBDriver(Driver):
"""
NttCis LB driver.
"""
selected_region = None
connectionCls = NttCisConnection
name = 'NTTC-CIS Load Balancer'
website = 'https://cloud.nttcis.com/'
type = Provider.NTTCIS
api_version = 1.0
_VALUE_TO_ALGORITHM_MAP = {
'ROUND_ROBIN': Algorithm.ROUND_ROBIN,
'LEAST_CONNECTIONS_MEMBER': Algorithm.LEAST_CONNECTIONS_MEMBER,
'LEAST_CONNECTIONS_NODE': Algorithm.LEAST_CONNECTIONS_NODE,
'OBSERVED_MEMBER': Algorithm.OBSERVED_MEMBER,
'OBSERVED_NODE': Algorithm.OBSERVED_NODE,
'PREDICTIVE_MEMBER': Algorithm.PREDICTIVE_MEMBER,
'PREDICTIVE_NODE': Algorithm.PREDICTIVE_NODE
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
_VALUE_TO_STATE_MAP = {
'NORMAL': State.RUNNING,
'PENDING_ADD': State.PENDING,
'PENDING_CHANGE': State.PENDING,
'PENDING_DELETE': State.PENDING,
'FAILED_ADD': State.ERROR,
'FAILED_CHANGE': State.ERROR,
'FAILED_DELETE': State.ERROR,
'REQUIRES_SUPPORT': State.ERROR
}
def __init__(self, key, network_domain_id, secret=None, secure=True,
host=None, port=None, api_version=None,
region=DEFAULT_REGION, **kwargs):
self.network_domain_id = network_domain_id
if region not in API_ENDPOINTS and host is None:
raise ValueError(
'Invalid region: %s, no host specified' % (region))
if region is not None:
self.selected_region = API_ENDPOINTS[region]
super(NttCisLBDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port,
api_version=api_version,
region=region,
**kwargs)
def _ex_connection_class_kwargs(self):
"""
Add the region to the kwargs before the connection is instantiated
"""
kwargs = super(NttCisLBDriver,
self)._ex_connection_class_kwargs()
kwargs['region'] = self.selected_region
return kwargs
def create_balancer(self, name, listener_port=None, port=None,
protocol=None, algorithm=None, members=None,
optimization_profile="TCP",
ex_listener_ip_address=None):
"""
Create a new load balancer instance
:param name: Name of the new load balancer (required)
:type name: ``str``
:param listener_port: An integer in the range of 1-65535.
If not supplied, it will be taken to
mean 'Any Port'
:type listener_port: ``int``
:param port: An integer in the range of 1-65535. If not supplied,
it will be taken to mean 'Any Port'. It is assumed that
node ports will differ from the listener port.
:type port: ``int``
:param protocol: Loadbalancer protocol, defaults to http.
:type protocol: ``str``
:param members: list of Members to attach to balancer (optional)
:type members: ``list`` of :class:`Member`
:param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN.
:type algorithm: :class:`.Algorithm`
:param optimization_profile: For STANDARD type and protocol TCP
an optimization type of TCP, LAN_OPT,
WAN_OPT, MOBILE_OPT, or TCP_LEGACY is
required. Default is TCP
:type optimization_profile: ``str``
:param ex_listener_ip_address: Must be a valid IPv4 in dot-decimal
notation (x.x.x.x).
:type ex_listener_ip_address: ``str``
:rtype: :class:`LoadBalancer`
"""
network_domain_id = self.network_domain_id
if protocol is None:
protocol = 'http'
if algorithm is None:
algorithm = DEFAULT_ALGORITHM
# Create a pool first
pool = self.ex_create_pool(
network_domain_id=network_domain_id,
name=name,
ex_description=None,
balancer_method=self._ALGORITHM_TO_VALUE_MAP[algorithm])
# Attach the members to the pool as nodes
if members is not None:
for member in members:
if not isinstance(member, Member):
member = self.ex_create_node(
network_domain_id=network_domain_id,
name=member.name,
ip=member.private_ips[0],
ex_description=None)
self.ex_create_pool_member(
pool=pool,
node=member,
port=port)
# Create the virtual listener (balancer)
listener = self.ex_create_virtual_listener(
network_domain_id=network_domain_id,
name=name,
ex_description=name,
port=listener_port,
pool=pool,
protocol=protocol,
optimization_profile=optimization_profile,
listener_ip_address=ex_listener_ip_address)
return LoadBalancer(
id=listener.id,
name=listener.name,
state=State.RUNNING,
ip=listener.ip,
port=port,
driver=self,
extra={'pool_id': pool.id,
'network_domain_id': network_domain_id,
'listener_ip_address': ex_listener_ip_address}
)
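# Hedged usage sketch (the credentials, region and ids below are
# hypothetical placeholders):
#
#     driver = NttCisLBDriver('user', 'network-domain-uuid',
#                             secret='password', region='na')
#     balancer = driver.create_balancer(
#         name='web', listener_port=80, port=8080, protocol='http',
#         algorithm=Algorithm.ROUND_ROBIN)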
def ex_update_listener(self, virtual_listener, **kwargs):
"""
Update a current virtual listener.
:param virtual_listener: The listener to be updated
:return: ``True`` if the update request was accepted, otherwise ``False``
:rtype: ``bool``
"""
edit_listener_elm = ET.Element('editVirtualListener',
{'xmlns': TYPES_URN,
'id': virtual_listener.id,
'xmlns:xsi':
"http://www.w3.org/2001/"
"XMLSchema-instance"})
for k, v in kwargs.items():
if v is None:
ET.SubElement(edit_listener_elm, k, {'xsi:nil': 'true'})
else:
ET.SubElement(edit_listener_elm, k).text = v
result = self.connection.request_with_orgId_api_2(
'networkDomainVip/editVirtualListener',
method='POST',
data=ET.tostring(edit_listener_elm)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
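# Hedged sketch: the kwargs map straight onto editVirtualListener XML
# children, so updating (for example) the description of an existing
# listener could look like this; the id and field name are illustrative.
#
#     listener = driver.get_balancer('listener-uuid')
#     driver.ex_update_listener(listener, description='new description')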
def list_balancers(self, ex_network_domain_id=None):
"""
List all load balancers inside a geography or in a given network.
In Dimension Data terminology these are known as virtual listeners
:param ex_network_domain_id: UUID of Network Domain
if not None returns only balancers in the given network
if None then returns all balancers for the organization
:type ex_network_domain_id: ``str``
:rtype: ``list`` of :class:`LoadBalancer`
"""
params = None
if ex_network_domain_id is not None:
params = {"networkDomainId": ex_network_domain_id}
return self._to_balancers(
self.connection
.request_with_orgId_api_2('networkDomainVip/virtualListener',
params=params).object)
def get_balancer(self, balancer_id):
"""
Return a :class:`LoadBalancer` object.
:param balancer_id: id of a load balancer you want to fetch
:type balancer_id: ``str``
:rtype: :class:`LoadBalancer`
"""
bal = self.connection \
.request_with_orgId_api_2('networkDomainVip/virtualListener/%s'
% balancer_id).object
return self._to_balancer(bal)
def list_protocols(self):
"""
Return a list of supported protocols.
Since all protocols are supported by Dimension Data, this is a list
of common protocols.
:rtype: ``list`` of ``str``
"""
return ['http', 'https', 'tcp', 'udp', 'ftp', 'smtp']
def balancer_list_members(self, balancer):
"""
Return list of members attached to balancer.
In Dimension Data terminology these are the members of the pools
within a virtual listener.
:param balancer: LoadBalancer which should be used
:type balancer: :class:`LoadBalancer`
:rtype: ``list`` of :class:`Member`
"""
pool_members = self.ex_get_pool_members(balancer.extra['pool_id'])
members = []
for pool_member in pool_members:
members.append(Member(
id=pool_member.id,
ip=pool_member.ip,
port=pool_member.port,
balancer=balancer,
extra=None
))
return members
def balancer_attach_member(self, balancer, member):
"""
Attach a member to balancer
:param balancer: LoadBalancer which should be used
:type balancer: :class:`LoadBalancer`
:param member: Member to join to the balancer
:type member: :class:`Member`
:return: Member after joining the balancer.
:rtype: :class:`Member`
"""
node = self.ex_create_node(
network_domain_id=balancer.extra['network_domain_id'],
name='Member.' + member.ip,
ip=member.ip,
ex_description=''
)
if node is False:
return False
pool = self.ex_get_pool(balancer.extra['pool_id'])
pool_member = self.ex_create_pool_member(
pool=pool,
node=node,
port=member.port)
member.id = pool_member.id
return member
def balancer_detach_member(self, balancer, member):
"""
Detach member from balancer
:param balancer: LoadBalancer which should be used
:type balancer: :class:`LoadBalancer`
:param member: Member which should be used
:type member: :class:`Member`
:return: ``True`` if member detach was successful, otherwise ``False``.
:rtype: ``bool``
"""
create_pool_m = ET.Element('removePoolMember', {'xmlns': TYPES_URN,
'id': member.id})
result = self.connection.request_with_orgId_api_2(
'networkDomainVip/removePoolMember',
method='POST',
data=ET.tostring(create_pool_m)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def destroy_balancer(self, balancer):
"""
Destroy a load balancer (virtual listener)
:param balancer: LoadBalancer which should be used
:type balancer: :class:`LoadBalancer`
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
"""
delete_listener = ET.Element('deleteVirtualListener',
{'xmlns': TYPES_URN,
'id': balancer.id})
result = self.connection.request_with_orgId_api_2(
'networkDomainVip/deleteVirtualListener',
method='POST',
data=ET.tostring(delete_listener)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_set_current_network_domain(self, network_domain_id):
"""
Set the network domain (part of the network) of the driver
:param network_domain_id: ID of the pool (required)
:type network_domain_id: ``str``
"""
self.network_domain_id = network_domain_id
def ex_get_current_network_domain(self):
"""
Get the current network domain ID of the driver.
:return: ID of the network domain
:rtype: ``str``
"""
return self.network_domain_id
def ex_create_pool_member(self, pool, node, port=None):
"""
Create a new member in an existing pool from an existing node
:param pool: Instance of ``NttCisPool`` (required)
:type pool: ``NttCisPool``
:param node: Instance of ``NttCisVIPNode`` (required)
:type node: ``NttCisVIPNode``
:param port: Port that the service will listen on
:type port: ``str``
:return: The node member, instance of ``NttCisPoolMember``
:rtype: ``NttCisPoolMember``
"""
create_pool_m = ET.Element('addPoolMember', {'xmlns': TYPES_URN})
ET.SubElement(create_pool_m, "poolId").text = pool.id
ET.SubElement(create_pool_m, "nodeId").text = node.id
if port is not None:
ET.SubElement(create_pool_m, "port").text = str(port)
ET.SubElement(create_pool_m, "status").text = 'ENABLED'
response = self.connection.request_with_orgId_api_2(
'networkDomainVip/addPoolMember',
method='POST',
data=ET.tostring(create_pool_m)).object
member_id = None
node_name = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'poolMemberId':
member_id = info.get('value')
if info.get('name') == 'nodeName':
node_name = info.get('value')
return NttCisPoolMember(
id=member_id,
name=node_name,
status=State.RUNNING,
ip=node.ip,
port=port,
node_id=node.id
)
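# Hedged sketch of the node -> pool-member flow that create_balancer()
# performs internally (ids and addresses below are placeholders):
#
#     node = driver.ex_create_node(network_domain_id='domain-uuid',
#                                  name='web-1', ip='10.0.0.10',
#                                  ex_description='web backend')
#     pool = driver.ex_get_pool('pool-uuid')
#     member = driver.ex_create_pool_member(pool=pool, node=node, port=8080)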
def ex_create_node(self,
network_domain_id,
name,
ip,
ex_description=None,
connection_limit=25000,
connection_rate_limit=2000):
"""
Create a new node
:param network_domain_id: Network Domain ID (required)
:type network_domain_id: ``str``
:param name: name of the node (required)
:type name: ``str``
:param ip: IPv4 address of the node (required)
:type ip: ``str``
:param ex_description: Description of the node (optional)
:type ex_description: ``str``
:param connection_limit: Maximum number
of concurrent connections
:type connection_limit: ``int``
:param connection_rate_limit: Maximum number of new connections
per second
:type connection_rate_limit: ``int``
:return: Instance of ``NttCisVIPNode``
:rtype: ``NttCisVIPNode``
"""
create_node_elm = ET.Element('createNode', {'xmlns': TYPES_URN})
ET.SubElement(create_node_elm, "networkDomainId") \
.text = network_domain_id
ET.SubElement(create_node_elm, "name").text = name
if ex_description is not None:
ET.SubElement(create_node_elm, "description").text \
= str(ex_description)
ET.SubElement(create_node_elm, "ipv4Address").text = ip
ET.SubElement(create_node_elm, "status").text = 'ENABLED'
ET.SubElement(create_node_elm, "connectionLimit") \
.text = str(connection_limit)
ET.SubElement(create_node_elm, "connectionRateLimit") \
.text = str(connection_rate_limit)
response = self.connection.request_with_orgId_api_2(
action='networkDomainVip/createNode',
method='POST',
data=ET.tostring(create_node_elm)).object
node_id = None
node_name = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'nodeId':
node_id = info.get('value')
if info.get('name') == 'name':
node_name = info.get('value')
return NttCisVIPNode(
id=node_id,
name=node_name,
status=State.RUNNING,
ip=ip
)
def ex_update_node(self, node):
"""
Update the properties of a node
:param node: The instance of ``NttCisVIPNode`` to update
:type node: ``NttCisVIPNode``
:return: The instance of ``NttCisVIPNode``
:rtype: ``NttCisVIPNode``
"""
create_node_elm = ET.Element('editNode', {'xmlns': TYPES_URN})
create_node_elm.set('id', node.id)
ET.SubElement(create_node_elm, 'healthMonitorId') \
.text = node.health_monitor_id
ET.SubElement(create_node_elm, "connectionLimit") \
.text = str(node.connection_limit)
ET.SubElement(create_node_elm, "connectionRateLimit") \
.text = str(node.connection_rate_limit)
self.connection.request_with_orgId_api_2(
action='networkDomainVip/editNode',
method='POST',
data=ET.tostring(create_node_elm)).object
return node
def ex_set_node_state(self, node, enabled):
"""
Change the state of a node (enable/disable)
:param node: The instance of ``NttCisVIPNode`` to update
:type node: ``NttCisVIPNode``
:param enabled: The target state of the node
:type enabled: ``bool``
:return: The instance of ``NttCisVIPNode``
:rtype: ``NttCisVIPNode``
"""
create_node_elm = ET.Element('editNode', {'xmlns': TYPES_URN})
# Identify the node being edited (matching ex_update_node above);
# without the id attribute the request would not reference any node.
create_node_elm.set('id', node.id)
ET.SubElement(create_node_elm, "status") \
.text = "ENABLED" if enabled is True else "DISABLED"
self.connection.request_with_orgId_api_2(
action='networkDomainVip/editNode',
method='POST',
data=ET.tostring(create_node_elm)).object
return node
def ex_create_pool(self,
network_domain_id,
name,
balancer_method,
ex_description,
health_monitors=None,
service_down_action='NONE',
slow_ramp_time=30):
"""
Create a new pool
:param network_domain_id: Network Domain ID (required)
:type network_domain_id: ``str``
:param name: name of the pool (required)
:type name: ``str``
:param balancer_method: The load balancer algorithm (required)
:type balancer_method: ``str``
:param ex_description: Description of the pool (required)
:type ex_description: ``str``
:param health_monitors: A list of health monitors to use for the pool.
:type health_monitors: ``list`` of
:class:`NttCisDefaultHealthMonitor`
:param service_down_action: What to do when node
is unavailable NONE, DROP or RESELECT
:type service_down_action: ``str``
:param slow_ramp_time: Number of seconds to stagger ramp up of nodes
:type slow_ramp_time: ``int``
:return: Instance of ``NttCisPool``
:rtype: ``NttCisPool``
"""
# Names cannot contain spaces; str.replace returns a new string, so
# the result must be reassigned.
name = name.replace(' ', '_')
create_node_elm = ET.Element('createPool', {'xmlns': TYPES_URN})
ET.SubElement(create_node_elm, "networkDomainId") \
.text = network_domain_id
ET.SubElement(create_node_elm, "name").text = name
ET.SubElement(create_node_elm, "description").text \
= str(ex_description)
ET.SubElement(create_node_elm, "loadBalanceMethod") \
.text = str(balancer_method)
if health_monitors is not None:
for monitor in health_monitors:
ET.SubElement(create_node_elm, "healthMonitorId") \
.text = str(monitor.id)
ET.SubElement(create_node_elm, "serviceDownAction") \
.text = service_down_action
ET.SubElement(create_node_elm, "slowRampTime").text \
= str(slow_ramp_time)
response = self.connection.request_with_orgId_api_2(
action='networkDomainVip/createPool',
method='POST',
data=ET.tostring(create_node_elm)).object
pool_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'poolId':
pool_id = info.get('value')
return NttCisPool(
id=pool_id,
name=name,
description=ex_description,
status=State.RUNNING,
load_balance_method=str(balancer_method),
health_monitor_id=None,
service_down_action=service_down_action,
slow_ramp_time=str(slow_ramp_time)
)
def ex_create_virtual_listener(self,
network_domain_id,
name,
ex_description,
port=None,
pool=None,
listener_ip_address=None,
persistence_profile=None,
fallback_persistence_profile=None,
irule=None,
protocol='TCP',
optimization_profile="TCP",
connection_limit=25000,
connection_rate_limit=2000,
source_port_preservation='PRESERVE'):
"""
Create a new virtual listener (load balancer)
:param network_domain_id: Network Domain ID (required)
:type network_domain_id: ``str``
:param name: name of the listener (required)
:type name: ``str``
:param ex_description: Description of the listener (required)
:type ex_description: ``str``
:param port: An integer in the range of 1-65535. If not supplied,
it will be taken to mean 'Any Port'
:type port: ``int``
:param pool: The pool to use for the listener
:type pool: :class:`NttCisPool`
:param listener_ip_address: The IPv4 Address of the virtual listener
:type listener_ip_address: ``str``
:param persistence_profile: Persistence profile
:type persistence_profile: :class:`NttCisPersistenceProfile`
:param fallback_persistence_profile: Fallback persistence profile
:type fallback_persistence_profile:
:class:`NttCisPersistenceProfile`
:param irule: The iRule to apply
:type irule: :class:`NttCisDefaultiRule`
:param protocol: For STANDARD type, ANY, TCP or UDP;
for PERFORMANCE_LAYER_4, choice of ANY, TCP, UDP, HTTP
:type protocol: ``str``
:param optimization_profile: For STANDARD type and protocol
TCP an optimization type of TCP,
LAN_OPT, WAN_OPT, MOBILE_OPT,
or TCP_LEGACY is required.
Default is 'TCP'.
:type optimization_profile: ``str``
:param connection_limit: Maximum number
of concurrent connections
:type connection_limit: ``int``
:param connection_rate_limit: Maximum number of new connections
per second
:type connection_rate_limit: ``int``
:param source_port_preservation: Choice of PRESERVE,
PRESERVE_STRICT or CHANGE
:type source_port_preservation: ``str``
:return: Instance of the listener
:rtype: ``NttCisVirtualListener``
"""
if (port == 80) or (port == 443):
listener_type = 'PERFORMANCE_LAYER_4'
else:
listener_type = 'STANDARD'
if listener_type == 'STANDARD' and optimization_profile is None:
raise ValueError(
" CONFIGURATION_NOT_SUPPORTED: optimizationProfile is"
" required for type STANDARD and protocol TCP")
create_node_elm = ET.Element('createVirtualListener',
{'xmlns': TYPES_URN})
ET.SubElement(create_node_elm, "networkDomainId") \
.text = network_domain_id
ET.SubElement(create_node_elm, "name").text = name
ET.SubElement(create_node_elm, "description").text = \
str(ex_description)
ET.SubElement(create_node_elm, "type").text = listener_type
ET.SubElement(create_node_elm, "protocol") \
.text = protocol
if listener_ip_address is not None:
ET.SubElement(create_node_elm, "listenerIpAddress").text = \
str(listener_ip_address)
if port is not None:
ET.SubElement(create_node_elm, "port").text = str(port)
ET.SubElement(create_node_elm, "enabled").text = 'true'
ET.SubElement(create_node_elm, "connectionLimit") \
.text = str(connection_limit)
ET.SubElement(create_node_elm, "connectionRateLimit") \
.text = str(connection_rate_limit)
ET.SubElement(create_node_elm, "sourcePortPreservation") \
.text = source_port_preservation
if pool is not None:
ET.SubElement(create_node_elm, "poolId") \
.text = pool.id
if persistence_profile is not None:
ET.SubElement(create_node_elm, "persistenceProfileId") \
.text = persistence_profile.id
if optimization_profile is not None:
ET.SubElement(create_node_elm, 'optimizationProfile').text = \
optimization_profile
if fallback_persistence_profile is not None:
ET.SubElement(create_node_elm, "fallbackPersistenceProfileId") \
.text = fallback_persistence_profile.id
if irule is not None:
ET.SubElement(create_node_elm, "iruleId") \
.text = irule.id
response = self.connection.request_with_orgId_api_2(
action='networkDomainVip/createVirtualListener',
method='POST',
data=ET.tostring(create_node_elm)).object
virtual_listener_id = None
virtual_listener_ip = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'virtualListenerId':
virtual_listener_id = info.get('value')
if info.get('name') == 'listenerIpAddress':
virtual_listener_ip = info.get('value')
return NttCisVirtualListener(
id=virtual_listener_id,
name=name,
ip=virtual_listener_ip,
status=State.RUNNING
)
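# Hedged sketch (placeholders throughout): a STANDARD TCP listener on
# port 8080 backed by an existing pool.
#
#     listener = driver.ex_create_virtual_listener(
#         network_domain_id='domain-uuid', name='tcp-8080',
#         ex_description='tcp listener', port=8080, pool=pool,
#         protocol='TCP', optimization_profile='TCP')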
def ex_get_pools(self, ex_network_domain_id=None):
"""
Get all of the pools inside the current geography or
in a given network.
:param ex_network_domain_id: UUID of Network Domain
if not None returns only pools in the given network
if None then returns all pools for the organization
:type ex_network_domain_id: ``str``
:return: Returns a ``list`` of type ``NttCisPool``
:rtype: ``list`` of ``NttCisPool``
"""
params = None
if ex_network_domain_id is not None:
params = {"networkDomainId": ex_network_domain_id}
pools = self.connection \
.request_with_orgId_api_2('networkDomainVip/pool',
params=params).object
return self._to_pools(pools)
def ex_get_pool(self, pool_id):
"""
Get a specific pool inside the current geography
:param pool_id: The identifier of the pool
:type pool_id: ``str``
:return: Returns an instance of ``NttCisPool``
:rtype: ``NttCisPool``
"""
pool = self.connection \
.request_with_orgId_api_2('networkDomainVip/pool/%s'
% pool_id).object
return self._to_pool(pool)
def ex_update_pool(self, pool):
"""
Update the properties of an existing pool.
Only loadBalanceMethod, healthMonitorId, serviceDownAction and
slowRampTime are updated.
:param pool: The instance of ``NttCisPool`` to update
:type pool: ``NttCisPool``
:return: ``True`` for success, ``False`` for failure
:rtype: ``bool``
"""
create_node_elm = ET.Element('editPool', {'xmlns': TYPES_URN})
create_node_elm.set('id', pool.id)
ET.SubElement(create_node_elm, "loadBalanceMethod") \
.text = str(pool.load_balance_method)
ET.SubElement(create_node_elm, 'healthMonitorId').text \
= pool.health_monitor_id
ET.SubElement(create_node_elm, "serviceDownAction") \
.text = pool.service_down_action
ET.SubElement(create_node_elm, "slowRampTime").text \
= str(pool.slow_ramp_time)
response = self.connection.request_with_orgId_api_2(
action='networkDomainVip/editPool',
method='POST',
data=ET.tostring(create_node_elm)).object
response_code = findtext(response, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_destroy_pool(self, pool):
"""
Destroy an existing pool
:param pool: The instance of ``NttCisPool`` to destroy
:type pool: ``NttCisPool``
:return: ``True`` for success, ``False`` for failure
:rtype: ``bool``
"""
destroy_request = ET.Element('deletePool',
{'xmlns': TYPES_URN,
'id': pool.id})
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/deletePool',
method='POST',
data=ET.tostring(destroy_request)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_pool_members(self, pool_id):
"""
Get the members of a pool
:param pool_id: The identifier of the pool
:type pool_id: ``str``
:return: Returns a ``list`` of ``NttCisPoolMember``
:rtype: ``list`` of ``NttCisPoolMember``
"""
members = self.connection \
.request_with_orgId_api_2('networkDomainVip/poolMember?poolId=%s'
% pool_id).object
return self._to_members(members)
def ex_get_pool_member(self, pool_member_id):
"""
Get a specific member of a pool
:param pool_member_id: The id of a pool member
:type pool_member_id: ``str``
:return: Returns an instance of ``NttCisPoolMember``
:rtype: ``NttCisPoolMember``
"""
member = self.connection \
.request_with_orgId_api_2('networkDomainVip/poolMember/%s'
% pool_member_id).object
return self._to_member(member)
def ex_set_pool_member_state(self, member, enabled=True):
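"""
Enable or disable a member of a pool.
:param member: The instance of ``NttCisPoolMember`` to update
:type member: ``NttCisPoolMember``
:param enabled: The target state of the member
:type enabled: ``bool``
:return: ``True`` for success, ``False`` for failure
:rtype: ``bool``
"""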
request = ET.Element('editPoolMember',
{'xmlns': TYPES_URN,
'id': member.id})
state = "ENABLED" if enabled is True else "DISABLED"
ET.SubElement(request, 'status').text = state
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/editPoolMember',
method='POST',
data=ET.tostring(request)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_destroy_pool_member(self, member, destroy_node=False):
"""
Destroy a specific member of a pool
:param member: The instance of a pool member
:type member: ``NttCisPoolMember``
:param destroy_node: Also destroy the associated node
:type destroy_node: ``bool``
:return: ``True`` for success, ``False`` for failure
:rtype: ``bool``
"""
# remove the pool member
destroy_request = ET.Element('removePoolMember',
{'xmlns': TYPES_URN,
'id': member.id})
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/removePoolMember',
method='POST',
data=ET.tostring(destroy_request)).object
if member.node_id is not None and destroy_node is True:
return self.ex_destroy_node(member.node_id)
else:
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_nodes(self, ex_network_domain_id=None):
"""
Get the nodes within this geography or in a given network.
:param ex_network_domain_id: UUID of Network Domain
if not None returns only nodes in the given network
if None then returns all nodes for the organization
:type ex_network_domain_id: ``str``
:return: Returns a ``list`` of ``NttCisVIPNode``
:rtype: ``list`` of ``NttCisVIPNode``
"""
params = None
if ex_network_domain_id is not None:
params = {"networkDomainId": ex_network_domain_id}
nodes = self.connection \
.request_with_orgId_api_2('networkDomainVip/node',
params=params).object
return self._to_nodes(nodes)
def ex_get_node(self, node_id):
"""
Get the node specified by node_id
:return: Returns an instance of ``NttCisVIPNode``
:rtype: Instance of ``NttCisVIPNode``
"""
nodes = self.connection \
.request_with_orgId_api_2('networkDomainVip/node/%s'
% node_id).object
return self._to_node(nodes)
def ex_destroy_node(self, node_id):
"""
Destroy a specific node
:param node_id: The ID of a ``NttCisVIPNode``
:type node_id: ``str``
:return: ``True`` for success, ``False`` for failure
:rtype: ``bool``
"""
# Destroy the node
destroy_request = ET.Element('deleteNode',
{'xmlns': TYPES_URN,
'id': node_id})
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/deleteNode',
method='POST',
data=ET.tostring(destroy_request)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_wait_for_state(self, state, func, poll_interval=2,
timeout=60, *args, **kwargs):
"""
Wait for the function, which returns an instance
with a status field, to reach the desired state.
Keeps polling func until one of the desired states is matched.
:param state: Either the desired state (`str`) or a `list` of states
:type state: ``str`` or ``list``
:param func: The function to call, e.g. ex_get_vlan
:type func: ``function``
:param poll_interval: The number of seconds to wait between checks
:type poll_interval: `int`
:param timeout: The total number of seconds to wait to reach a state
:type timeout: `int`
:param args: The arguments for func
:type args: Positional arguments
:param kwargs: The arguments for func
:type kwargs: Keyword arguments
"""
return self.connection.wait_for_state(state, func, poll_interval,
timeout, *args, **kwargs)
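# Hedged sketch (the pool id is a placeholder): poll until a pool reports
# the raw 'NORMAL' status string the API returns for healthy objects;
# extra kwargs are forwarded to the polled function.
#
#     driver.ex_wait_for_state('NORMAL', driver.ex_get_pool,
#                              poll_interval=2, timeout=60,
#                              pool_id='pool-uuid')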
def ex_get_default_health_monitors(self, network_domain):
"""
Get the default health monitors available for a network domain
:param network_domain: The ID of a ``NttCisNetworkDomain``
:type network_domain: ``str``
:rtype: `list` of :class:`NttCisDefaultHealthMonitor`
"""
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/defaultHealthMonitor',
params={'networkDomainId': network_domain},
method='GET').object
return self._to_health_monitors(result)
def ex_get_default_persistence_profiles(self, network_domain_id):
"""
Get the default persistence profiles available for a network domain
:param network_domain_id: The ID of a ``NttCisNetworkDomain``
:type network_domain_id: ``str``
:rtype: `list` of :class:`NttCisPersistenceProfile`
"""
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/defaultPersistenceProfile',
params={'networkDomainId': network_domain_id},
method='GET').object
return self._to_persistence_profiles(result)
def ex_get_default_irules(self, network_domain_id):
"""
Get the default iRules available for a network domain
:param network_domain_id: The ID of a ``NttCisNetworkDomain``
:type network_domain_id: ``str``
:rtype: `list` of :class:`NttCisDefaultiRule`
"""
result = self.connection.request_with_orgId_api_2(
action='networkDomainVip/defaultIrule',
params={'networkDomainId': network_domain_id},
method='GET').object
return self._to_irules(result)
def _to_irules(self, object):
irules = []
matches = object.findall(
fixxpath('defaultIrule', TYPES_URN))
for element in matches:
irules.append(self._to_irule(element))
return irules
def _to_irule(self, element):
compatible = []
matches = element.findall(
fixxpath('virtualListenerCompatibility', TYPES_URN))
for match_element in matches:
compatible.append(
NttCisVirtualListenerCompatibility(
type=match_element.get('type'),
protocol=match_element.get('protocol', None)))
irule_element = element.find(fixxpath('irule', TYPES_URN))
return NttCisDefaultiRule(
id=irule_element.get('id'),
name=irule_element.get('name'),
compatible_listeners=compatible
)
def _to_persistence_profiles(self, object):
profiles = []
matches = object.findall(
fixxpath('defaultPersistenceProfile', TYPES_URN))
for element in matches:
profiles.append(self._to_persistence_profile(element))
return profiles
def _to_persistence_profile(self, element):
compatible = []
matches = element.findall(
fixxpath('virtualListenerCompatibility', TYPES_URN))
for match_element in matches:
compatible.append(
NttCisVirtualListenerCompatibility(
type=match_element.get('type'),
protocol=match_element.get('protocol', None)))
return NttCisPersistenceProfile(
id=element.get('id'),
fallback_compatible=bool(
element.get('fallbackCompatible') == "true"),
name=findtext(element, 'name', TYPES_URN),
compatible_listeners=compatible
)
def _to_health_monitors(self, object):
monitors = []
matches = object.findall(fixxpath('defaultHealthMonitor', TYPES_URN))
for element in matches:
monitors.append(self._to_health_monitor(element))
return monitors
def _to_health_monitor(self, element):
return NttCisDefaultHealthMonitor(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
node_compatible=bool(
findtext(element, 'nodeCompatible', TYPES_URN) == "true"),
pool_compatible=bool(
findtext(element, 'poolCompatible', TYPES_URN) == "true"),
)
def _to_nodes(self, object):
nodes = []
for element in object.findall(fixxpath("node", TYPES_URN)):
nodes.append(self._to_node(element))
return nodes
def _to_node(self, element):
ipaddress = findtext(element, 'ipv4Address', TYPES_URN)
if ipaddress is None:
ipaddress = findtext(element, 'ipv6Address', TYPES_URN)
name = findtext(element, 'name', TYPES_URN)
try:
hm = element.find(fixxpath('healthMonitor', TYPES_URN)).get('id')
except AttributeError:
hm = None
node = NttCisVIPNode(
id=element.get('id'),
name=name,
status=self._VALUE_TO_STATE_MAP.get(
findtext(element, 'state', TYPES_URN),
State.UNKNOWN),
health_monitor=hm,
connection_rate_limit=findtext(element,
'connectionRateLimit', TYPES_URN),
connection_limit=findtext(element, 'connectionLimit', TYPES_URN),
ip=ipaddress)
return node
def _to_balancers(self, object):
loadbalancers = []
for element in object.findall(fixxpath("virtualListener", TYPES_URN)):
loadbalancers.append(self._to_balancer(element))
return loadbalancers
def _to_balancer(self, element):
ipaddress = findtext(element, 'listenerIpAddress', TYPES_URN)
name = findtext(element, 'name', TYPES_URN)
port = findtext(element, 'port', TYPES_URN)
extra = {}
pool_element = element.find(fixxpath(
'pool',
TYPES_URN))
if pool_element is None:
extra['pool_id'] = None
else:
extra['pool_id'] = pool_element.get('id')
extra['network_domain_id'] = findtext(element, 'networkDomainId',
TYPES_URN)
balancer = LoadBalancer(
id=element.get('id'),
name=name,
state=self._VALUE_TO_STATE_MAP.get(
findtext(element, 'state', TYPES_URN),
State.UNKNOWN),
ip=ipaddress,
port=port,
driver=self.connection.driver,
extra=extra
)
return balancer
def _to_members(self, object):
members = []
for element in object.findall(fixxpath("poolMember", TYPES_URN)):
members.append(self._to_member(element))
return members
def _to_member(self, element):
port = findtext(element, 'port', TYPES_URN)
if port is not None:
port = int(port)
pool_member = NttCisPoolMember(
id=element.get('id'),
name=element.find(fixxpath(
'node',
TYPES_URN)).get('name'),
status=findtext(element, 'state', TYPES_URN),
node_id=element.find(fixxpath(
'node',
TYPES_URN)).get('id'),
ip=element.find(fixxpath(
'node',
TYPES_URN)).get('ipAddress'),
port=port
)
return pool_member
def _to_pools(self, object):
pools = []
for element in object.findall(fixxpath("pool", TYPES_URN)):
pools.append(self._to_pool(element))
return pools
def _to_pool(self, element):
pool = NttCisPool(
id=element.get('id'),
name=findtext(element, 'name', TYPES_URN),
status=findtext(element, 'state', TYPES_URN),
description=findtext(element, 'description', TYPES_URN),
load_balance_method=findtext(element, 'loadBalanceMethod',
TYPES_URN),
health_monitor_id=findtext(element, 'healthMonitorId', TYPES_URN),
service_down_action=findtext(element, 'serviceDownAction',
TYPES_URN),
slow_ramp_time=findtext(element, 'slowRampTime', TYPES_URN),
)
return pool
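# The _to_* helpers above all follow the same idiom: scope tag names to
# TYPES_URN via fixxpath()/findall(), then read leaf values with
# findtext(). A standalone sketch of that idiom; the namespace value is
# an assumption and the XML is illustrative only.
#
#     from libcloud.utils.py3 import ET
#     from libcloud.utils.xml import findtext
#
#     URN = 'urn:didata.com:api:cloud:types'   # assumed namespace value
#     doc = ET.fromstring(
#         '<pool xmlns="%s" id="p1"><name>web</name></pool>' % URN)
#     assert doc.get('id') == 'p1'
#     assert findtext(doc, 'name', URN) == 'web'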
| 37.424071
| 79
| 0.584727
|
4a1837887bfc9547ea7915652fc1d5f3f4cd55dd
| 670
|
py
|
Python
|
modules/_regular_reminder.py
|
thisnameisalreadyused2/NotifyMeBot
|
ccea868d7573b582e65421b9ea75badb3ce6de3a
|
[
"MIT"
] | null | null | null |
modules/_regular_reminder.py
|
thisnameisalreadyused2/NotifyMeBot
|
ccea868d7573b582e65421b9ea75badb3ce6de3a
|
[
"MIT"
] | null | null | null |
modules/_regular_reminder.py
|
thisnameisalreadyused2/NotifyMeBot
|
ccea868d7573b582e65421b9ea75badb3ce6de3a
|
[
"MIT"
] | null | null | null |
from DB import Database
db = Database("db")
MENU = range(1)
def reminder_handler(user_id, obj):
    date, kind, name = obj
    # Resolve the event type once instead of repeating the conditional.
    event_type = "birthday" if kind == "Birthday" else "regular"
    db.add_event(user_id, date, event_type, name)
    if kind == "Birthday":
        # Schedule advance reminders 7, 3 and 1 days before a birthday.
        db.add_reminder(user_id, date - 7 * 24 * 60 * 60, event_type, name)
        db.add_reminder(user_id, date - 3 * 24 * 60 * 60, event_type, name)
        db.add_reminder(user_id, date - 1 * 24 * 60 * 60, event_type, name)
    db.add_reminder(user_id, date, event_type, name)
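# Hedged usage sketch (the user id and epoch timestamp are placeholders):
# a "Birthday" event schedules 7/3/1-day-ahead reminders plus one on the
# day itself; any other type stores only the on-the-day reminder.
#
#     reminder_handler(12345, (1735689600, "Birthday", "Alice"))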
| 41.875
| 112
| 0.638806
|
4a1838aba1258a96c7ee24c21d167f06d8004265
| 39,437
|
py
|
Python
|
tests/components/universal/test_media_player.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 3
|
2019-10-02T04:40:26.000Z
|
2020-02-16T13:19:08.000Z
|
tests/components/universal/test_media_player.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 25
|
2021-10-02T10:01:14.000Z
|
2022-03-31T06:11:49.000Z
|
tests/components/universal/test_media_player.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 1
|
2021-12-10T10:33:28.000Z
|
2021-12-10T10:33:28.000Z
|
"""The tests for the Universal Media player platform."""
from copy import copy
from unittest.mock import Mock, patch
import pytest
from voluptuous.error import MultipleInvalid
from homeassistant import config as hass_config
import homeassistant.components.input_number as input_number
import homeassistant.components.input_select as input_select
import homeassistant.components.media_player as media_player
from homeassistant.components.media_player.const import MediaPlayerEntityFeature
import homeassistant.components.switch as switch
import homeassistant.components.universal.media_player as universal
from homeassistant.const import (
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNKNOWN,
)
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, get_fixture_path
CONFIG_CHILDREN_ONLY = {
"name": "test",
"platform": "universal",
"children": [
media_player.ENTITY_ID_FORMAT.format("mock1"),
media_player.ENTITY_ID_FORMAT.format("mock2"),
],
}
def validate_config(config):
"""Use the platform schema to validate configuration."""
validated_config = universal.PLATFORM_SCHEMA(config)
validated_config.pop("platform")
return validated_config
class MockMediaPlayer(media_player.MediaPlayerEntity):
"""Mock media player for testing."""
def __init__(self, hass, name):
"""Initialize the media player."""
self.hass = hass
self._name = name
self.entity_id = media_player.ENTITY_ID_FORMAT.format(name)
self._state = STATE_OFF
self._volume_level = 0
self._is_volume_muted = False
self._media_title = None
self._supported_features = 0
self._source = None
self._tracks = 12
self._media_image_url = None
self._shuffle = False
self._sound_mode = None
self.service_calls = {
"turn_on": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_TURN_ON
),
"turn_off": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_TURN_OFF
),
"mute_volume": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_MUTE
),
"set_volume_level": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_SET
),
"media_play": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_PLAY
),
"media_pause": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_PAUSE
),
"media_stop": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_STOP
),
"media_previous_track": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_PREVIOUS_TRACK
),
"media_next_track": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_NEXT_TRACK
),
"media_seek": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_SEEK
),
"play_media": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_PLAY_MEDIA
),
"volume_up": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_UP
),
"volume_down": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_DOWN
),
"media_play_pause": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_MEDIA_PLAY_PAUSE
),
"select_sound_mode": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_SELECT_SOUND_MODE
),
"select_source": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_SELECT_SOURCE
),
"toggle": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_TOGGLE
),
"clear_playlist": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_CLEAR_PLAYLIST
),
"repeat_set": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_REPEAT_SET
),
"shuffle_set": async_mock_service(
hass, media_player.DOMAIN, media_player.SERVICE_SHUFFLE_SET
),
}
@property
def name(self):
"""Return the name of player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._state
@property
def volume_level(self):
"""Return the volume level of player."""
return self._volume_level
@property
def is_volume_muted(self):
"""Return true if the media player is muted."""
return self._is_volume_muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return self._supported_features
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url
@property
def shuffle(self):
"""Return true if the media player is shuffling."""
return self._shuffle
def turn_on(self):
"""Mock turn_on function."""
self._state = None
def turn_off(self):
"""Mock turn_off function."""
self._state = STATE_OFF
def mute_volume(self, mute):
"""Mock mute function."""
self._is_volume_muted = mute
def set_volume_level(self, volume):
"""Mock set volume level."""
self._volume_level = volume
def media_play(self):
"""Mock play."""
self._state = STATE_PLAYING
def media_pause(self):
"""Mock pause."""
self._state = STATE_PAUSED
def select_sound_mode(self, sound_mode):
"""Set the sound mode."""
self._sound_mode = sound_mode
def select_source(self, source):
"""Set the input source."""
self._source = source
def async_toggle(self):
"""Toggle the power on the media player."""
self._state = STATE_OFF if self._state == STATE_ON else STATE_ON
def clear_playlist(self):
"""Clear players playlist."""
self._tracks = 0
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._shuffle = shuffle
def set_repeat(self, repeat):
"""Enable/disable repeat mode."""
self._repeat = repeat
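# Hedged sketch of how the mocks above are typically asserted in the tests
# that follow: each entry in service_calls is the call list returned by
# async_mock_service(), so after driving the universal player a test can
# check that the call was forwarded to the child, e.g.
#
#     await hass.services.async_call(
#         media_player.DOMAIN, media_player.SERVICE_TURN_ON,
#         {"entity_id": "media_player.test"}, blocking=True)
#     assert len(mock_states.mock_mp_1.service_calls["turn_on"]) == 1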
@pytest.fixture
async def mock_states(hass):
"""Set mock states used in tests."""
result = Mock()
result.mock_mp_1 = MockMediaPlayer(hass, "mock1")
result.mock_mp_1.async_schedule_update_ha_state()
result.mock_mp_2 = MockMediaPlayer(hass, "mock2")
result.mock_mp_2.async_schedule_update_ha_state()
await hass.async_block_till_done()
result.mock_mute_switch_id = switch.ENTITY_ID_FORMAT.format("mute")
hass.states.async_set(result.mock_mute_switch_id, STATE_OFF)
result.mock_state_switch_id = switch.ENTITY_ID_FORMAT.format("state")
hass.states.async_set(result.mock_state_switch_id, STATE_OFF)
result.mock_volume_id = f"{input_number.DOMAIN}.volume_level"
hass.states.async_set(result.mock_volume_id, 0)
result.mock_source_list_id = f"{input_select.DOMAIN}.source_list"
hass.states.async_set(result.mock_source_list_id, ["dvd", "htpc"])
result.mock_source_id = f"{input_select.DOMAIN}.source"
hass.states.async_set(result.mock_source_id, "dvd")
result.mock_sound_mode_list_id = f"{input_select.DOMAIN}.sound_mode_list"
hass.states.async_set(result.mock_sound_mode_list_id, ["music", "movie"])
result.mock_sound_mode_id = f"{input_select.DOMAIN}.sound_mode"
hass.states.async_set(result.mock_sound_mode_id, "music")
result.mock_shuffle_switch_id = switch.ENTITY_ID_FORMAT.format("shuffle")
hass.states.async_set(result.mock_shuffle_switch_id, STATE_OFF)
result.mock_repeat_switch_id = switch.ENTITY_ID_FORMAT.format("repeat")
hass.states.async_set(result.mock_repeat_switch_id, STATE_OFF)
return result
@pytest.fixture
def config_children_and_attr(mock_states):
"""Return configuration that references the mock states."""
return {
"name": "test",
"platform": "universal",
"children": [
media_player.ENTITY_ID_FORMAT.format("mock1"),
media_player.ENTITY_ID_FORMAT.format("mock2"),
],
"attributes": {
"is_volume_muted": mock_states.mock_mute_switch_id,
"volume_level": mock_states.mock_volume_id,
"source": mock_states.mock_source_id,
"source_list": mock_states.mock_source_list_id,
"state": mock_states.mock_state_switch_id,
"shuffle": mock_states.mock_shuffle_switch_id,
"repeat": mock_states.mock_repeat_switch_id,
"sound_mode_list": mock_states.mock_sound_mode_list_id,
"sound_mode": mock_states.mock_sound_mode_id,
},
}
async def test_config_children_only(hass):
"""Check config with only children."""
config_start = copy(CONFIG_CHILDREN_ONLY)
del config_start["platform"]
config_start["commands"] = {}
config_start["attributes"] = {}
config = validate_config(CONFIG_CHILDREN_ONLY)
assert config_start == config
async def test_config_children_and_attr(hass, config_children_and_attr):
"""Check config with children and attributes."""
config_start = copy(config_children_and_attr)
del config_start["platform"]
config_start["commands"] = {}
config = validate_config(config_children_and_attr)
assert config_start == config
async def test_config_no_name(hass):
"""Check config with no Name entry."""
response = True
try:
validate_config({"platform": "universal"})
except MultipleInvalid:
response = False
assert not response
async def test_config_bad_children(hass):
"""Check config with bad children entry."""
config_no_children = {"name": "test", "platform": "universal"}
config_bad_children = {"name": "test", "children": {}, "platform": "universal"}
config_no_children = validate_config(config_no_children)
assert [] == config_no_children["children"]
config_bad_children = validate_config(config_bad_children)
assert [] == config_bad_children["children"]
async def test_config_bad_commands(hass):
"""Check config with bad commands entry."""
config = {"name": "test", "platform": "universal"}
config = validate_config(config)
assert {} == config["commands"]
async def test_config_bad_attributes(hass):
"""Check config with bad attributes."""
config = {"name": "test", "platform": "universal"}
config = validate_config(config)
assert {} == config["attributes"]
async def test_config_bad_key(hass):
"""Check config with bad key."""
config = {"name": "test", "asdf": 5, "platform": "universal"}
config = validate_config(config)
assert "asdf" not in config
async def test_platform_setup(hass):
"""Test platform setup."""
config = {"name": "test", "platform": "universal"}
bad_config = {"platform": "universal"}
entities = []
def add_entities(new_entities):
"""Add devices to list."""
for dev in new_entities:
entities.append(dev)
setup_ok = True
try:
await universal.async_setup_platform(
hass, validate_config(bad_config), add_entities
)
except MultipleInvalid:
setup_ok = False
assert not setup_ok
assert len(entities) == 0
await universal.async_setup_platform(hass, validate_config(config), add_entities)
assert len(entities) == 1
assert entities[0].name == "test"
async def test_master_state(hass):
"""Test master state property."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.master_state is None
async def test_master_state_with_attrs(hass, config_children_and_attr, mock_states):
"""Test master state property."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.master_state == STATE_OFF
hass.states.async_set(mock_states.mock_state_switch_id, STATE_ON)
assert ump.master_state == STATE_ON
async def test_master_state_with_bad_attrs(hass, config_children_and_attr):
"""Test master state property."""
config = copy(config_children_and_attr)
config["attributes"]["state"] = "bad.entity_id"
config = validate_config(config)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.master_state == STATE_OFF
async def test_active_child_state(hass, mock_states):
"""Test active child state property."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump._child_state is None
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert mock_states.mock_mp_1.entity_id == ump._child_state.entity_id
mock_states.mock_mp_2._state = STATE_PLAYING
mock_states.mock_mp_2.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert mock_states.mock_mp_1.entity_id == ump._child_state.entity_id
mock_states.mock_mp_1._state = STATE_OFF
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert mock_states.mock_mp_2.entity_id == ump._child_state.entity_id
async def test_name(hass):
"""Test name property."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
assert config["name"] == ump.name
async def test_polling(hass):
"""Test should_poll property."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.should_poll is False
async def test_state_children_only(hass, mock_states):
"""Test media player state with only children."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump.state == STATE_OFF
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.state == STATE_PLAYING
async def test_state_with_children_and_attrs(
hass, config_children_and_attr, mock_states
):
"""Test media player with children and master state."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump.state == STATE_OFF
hass.states.async_set(mock_states.mock_state_switch_id, STATE_ON)
await ump.async_update()
assert ump.state == STATE_ON
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.state == STATE_PLAYING
hass.states.async_set(mock_states.mock_state_switch_id, STATE_OFF)
await ump.async_update()
assert ump.state == STATE_OFF
async def test_volume_level(hass, mock_states):
"""Test volume level property."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump.volume_level is None
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.volume_level == 0
mock_states.mock_mp_1._volume_level = 1
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.volume_level == 1
async def test_media_image_url(hass, mock_states):
"""Test media_image_url property."""
test_url = "test_url"
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump.media_image_url is None
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1._media_image_url = test_url
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
# mock_mp_1 will convert the url to the api proxy url. This test
# ensures ump passes through the same url without an additional proxy.
assert mock_states.mock_mp_1.entity_picture == ump.entity_picture
async def test_is_volume_muted_children_only(hass, mock_states):
"""Test is volume muted property w/ children only."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert not ump.is_volume_muted
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert not ump.is_volume_muted
mock_states.mock_mp_1._is_volume_muted = True
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.is_volume_muted
async def test_sound_mode_list_children_and_attr(
hass, config_children_and_attr, mock_states
):
"""Test sound mode list property w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.sound_mode_list == "['music', 'movie']"
hass.states.async_set(
mock_states.mock_sound_mode_list_id, ["music", "movie", "game"]
)
assert ump.sound_mode_list == "['music', 'movie', 'game']"
async def test_source_list_children_and_attr(
hass, config_children_and_attr, mock_states
):
"""Test source list property w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.source_list == "['dvd', 'htpc']"
hass.states.async_set(mock_states.mock_source_list_id, ["dvd", "htpc", "game"])
assert ump.source_list == "['dvd', 'htpc', 'game']"
async def test_sound_mode_children_and_attr(
hass, config_children_and_attr, mock_states
):
"""Test sound modeproperty w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.sound_mode == "music"
hass.states.async_set(mock_states.mock_sound_mode_id, "movie")
assert ump.sound_mode == "movie"
async def test_source_children_and_attr(hass, config_children_and_attr, mock_states):
"""Test source property w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.source == "dvd"
hass.states.async_set(mock_states.mock_source_id, "htpc")
assert ump.source == "htpc"
async def test_volume_level_children_and_attr(
hass, config_children_and_attr, mock_states
):
"""Test volume level property w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert ump.volume_level == 0
hass.states.async_set(mock_states.mock_volume_id, 100)
assert ump.volume_level == 100
async def test_is_volume_muted_children_and_attr(
hass, config_children_and_attr, mock_states
):
"""Test is volume muted property w/ children and attrs."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
assert not ump.is_volume_muted
hass.states.async_set(mock_states.mock_mute_switch_id, STATE_ON)
assert ump.is_volume_muted
async def test_supported_features_children_only(hass, mock_states):
"""Test supported media commands with only children."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
assert ump.supported_features == 0
mock_states.mock_mp_1._supported_features = 512
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
assert ump.supported_features == 512
async def test_supported_features_children_and_cmds(
hass, config_children_and_attr, mock_states
):
"""Test supported media commands with children and attrs."""
config = copy(config_children_and_attr)
excmd = {"service": "media_player.test", "data": {}}
config["commands"] = {
"turn_on": excmd,
"turn_off": excmd,
"volume_up": excmd,
"volume_down": excmd,
"volume_mute": excmd,
"volume_set": excmd,
"select_sound_mode": excmd,
"select_source": excmd,
"repeat_set": excmd,
"shuffle_set": excmd,
"media_play": excmd,
"media_pause": excmd,
"media_stop": excmd,
"media_next_track": excmd,
"media_previous_track": excmd,
"toggle": excmd,
"play_media": excmd,
"clear_playlist": excmd,
}
config = validate_config(config)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
check_flags = (
MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.SELECT_SOUND_MODE
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.REPEAT_SET
| MediaPlayerEntityFeature.SHUFFLE_SET
| MediaPlayerEntityFeature.VOLUME_SET
| MediaPlayerEntityFeature.PLAY
| MediaPlayerEntityFeature.PAUSE
| MediaPlayerEntityFeature.STOP
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PREVIOUS_TRACK
| MediaPlayerEntityFeature.PLAY_MEDIA
| MediaPlayerEntityFeature.CLEAR_PLAYLIST
)
assert check_flags == ump.supported_features
async def test_overrides(hass, config_children_and_attr):
"""Test overrides."""
config = copy(config_children_and_attr)
excmd = {"service": "test.override", "data": {}}
config["name"] = "overridden"
config["commands"] = {
"turn_on": excmd,
"turn_off": excmd,
"volume_up": excmd,
"volume_down": excmd,
"volume_mute": excmd,
"volume_set": excmd,
"select_sound_mode": excmd,
"select_source": excmd,
"repeat_set": excmd,
"shuffle_set": excmd,
"media_play": excmd,
"media_play_pause": excmd,
"media_pause": excmd,
"media_stop": excmd,
"media_next_track": excmd,
"media_previous_track": excmd,
"clear_playlist": excmd,
"play_media": excmd,
"toggle": excmd,
}
await async_setup_component(hass, "media_player", {"media_player": config})
await hass.async_block_till_done()
service = async_mock_service(hass, "test", "override")
await hass.services.async_call(
"media_player",
"turn_on",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 1
await hass.services.async_call(
"media_player",
"turn_off",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 2
await hass.services.async_call(
"media_player",
"volume_up",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 3
await hass.services.async_call(
"media_player",
"volume_down",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 4
await hass.services.async_call(
"media_player",
"volume_mute",
service_data={
"entity_id": "media_player.overridden",
"is_volume_muted": True,
},
blocking=True,
)
assert len(service) == 5
await hass.services.async_call(
"media_player",
"volume_set",
service_data={"entity_id": "media_player.overridden", "volume_level": 1},
blocking=True,
)
assert len(service) == 6
await hass.services.async_call(
"media_player",
"select_sound_mode",
service_data={
"entity_id": "media_player.overridden",
"sound_mode": "music",
},
blocking=True,
)
assert len(service) == 7
await hass.services.async_call(
"media_player",
"select_source",
service_data={"entity_id": "media_player.overridden", "source": "video1"},
blocking=True,
)
assert len(service) == 8
await hass.services.async_call(
"media_player",
"repeat_set",
service_data={"entity_id": "media_player.overridden", "repeat": "all"},
blocking=True,
)
assert len(service) == 9
await hass.services.async_call(
"media_player",
"shuffle_set",
service_data={"entity_id": "media_player.overridden", "shuffle": True},
blocking=True,
)
assert len(service) == 10
await hass.services.async_call(
"media_player",
"media_play",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 11
await hass.services.async_call(
"media_player",
"media_pause",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 12
await hass.services.async_call(
"media_player",
"media_stop",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 13
await hass.services.async_call(
"media_player",
"media_next_track",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 14
await hass.services.async_call(
"media_player",
"media_previous_track",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 15
await hass.services.async_call(
"media_player",
"clear_playlist",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 16
await hass.services.async_call(
"media_player",
"media_play_pause",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 17
await hass.services.async_call(
"media_player",
"play_media",
service_data={
"entity_id": "media_player.overridden",
"media_content_id": 1,
"media_content_type": "channel",
},
blocking=True,
)
assert len(service) == 18
await hass.services.async_call(
"media_player",
"toggle",
service_data={"entity_id": "media_player.overridden"},
blocking=True,
)
assert len(service) == 19
async def test_supported_features_play_pause(
hass, config_children_and_attr, mock_states
):
"""Test supported media commands with play_pause function."""
config = copy(config_children_and_attr)
excmd = {"service": "media_player.test", "data": {"entity_id": "test"}}
config["commands"] = {"media_play_pause": excmd}
config = validate_config(config)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
mock_states.mock_mp_1._state = STATE_PLAYING
mock_states.mock_mp_1.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
check_flags = MediaPlayerEntityFeature.PLAY | MediaPlayerEntityFeature.PAUSE
assert check_flags == ump.supported_features
async def test_service_call_no_active_child(
hass, config_children_and_attr, mock_states
):
"""Test a service call to children with no active child."""
config = validate_config(config_children_and_attr)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
mock_states.mock_mp_1._state = STATE_OFF
mock_states.mock_mp_1.async_schedule_update_ha_state()
mock_states.mock_mp_2._state = STATE_OFF
mock_states.mock_mp_2.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
await ump.async_turn_off()
assert len(mock_states.mock_mp_1.service_calls["turn_off"]) == 0
assert len(mock_states.mock_mp_2.service_calls["turn_off"]) == 0
async def test_service_call_to_child(hass, mock_states):
"""Test service calls that should be routed to a child."""
config = validate_config(CONFIG_CHILDREN_ONLY)
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
mock_states.mock_mp_2._state = STATE_PLAYING
mock_states.mock_mp_2.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
await ump.async_turn_off()
assert len(mock_states.mock_mp_2.service_calls["turn_off"]) == 1
await ump.async_turn_on()
assert len(mock_states.mock_mp_2.service_calls["turn_on"]) == 1
await ump.async_mute_volume(True)
assert len(mock_states.mock_mp_2.service_calls["mute_volume"]) == 1
await ump.async_set_volume_level(0.5)
assert len(mock_states.mock_mp_2.service_calls["set_volume_level"]) == 1
await ump.async_media_play()
assert len(mock_states.mock_mp_2.service_calls["media_play"]) == 1
await ump.async_media_pause()
assert len(mock_states.mock_mp_2.service_calls["media_pause"]) == 1
await ump.async_media_stop()
assert len(mock_states.mock_mp_2.service_calls["media_stop"]) == 1
await ump.async_media_previous_track()
assert len(mock_states.mock_mp_2.service_calls["media_previous_track"]) == 1
await ump.async_media_next_track()
assert len(mock_states.mock_mp_2.service_calls["media_next_track"]) == 1
await ump.async_media_seek(100)
assert len(mock_states.mock_mp_2.service_calls["media_seek"]) == 1
await ump.async_play_media("movie", "batman")
assert len(mock_states.mock_mp_2.service_calls["play_media"]) == 1
await ump.async_volume_up()
assert len(mock_states.mock_mp_2.service_calls["volume_up"]) == 1
await ump.async_volume_down()
assert len(mock_states.mock_mp_2.service_calls["volume_down"]) == 1
await ump.async_media_play_pause()
assert len(mock_states.mock_mp_2.service_calls["media_play_pause"]) == 1
await ump.async_select_sound_mode("music")
assert len(mock_states.mock_mp_2.service_calls["select_sound_mode"]) == 1
await ump.async_select_source("dvd")
assert len(mock_states.mock_mp_2.service_calls["select_source"]) == 1
await ump.async_clear_playlist()
assert len(mock_states.mock_mp_2.service_calls["clear_playlist"]) == 1
await ump.async_set_repeat(True)
assert len(mock_states.mock_mp_2.service_calls["repeat_set"]) == 1
await ump.async_set_shuffle(True)
assert len(mock_states.mock_mp_2.service_calls["shuffle_set"]) == 1
await ump.async_toggle()
# Delegate to turn_off
assert len(mock_states.mock_mp_2.service_calls["turn_off"]) == 2
async def test_service_call_to_command(hass, mock_states):
"""Test service call to command."""
config = copy(CONFIG_CHILDREN_ONLY)
config["commands"] = {"turn_off": {"service": "test.turn_off", "data": {}}}
config = validate_config(config)
service = async_mock_service(hass, "test", "turn_off")
ump = universal.UniversalMediaPlayer(hass, **config)
ump.entity_id = media_player.ENTITY_ID_FORMAT.format(config["name"])
await ump.async_update()
mock_states.mock_mp_2._state = STATE_PLAYING
mock_states.mock_mp_2.async_schedule_update_ha_state()
await hass.async_block_till_done()
await ump.async_update()
await ump.async_turn_off()
assert len(service) == 1
async def test_state_template(hass):
"""Test with a simple valid state template."""
hass.states.async_set("sensor.test_sensor", STATE_ON)
await async_setup_component(
hass,
"media_player",
{
"media_player": {
"platform": "universal",
"name": "tv",
"state_template": "{{ states.sensor.test_sensor.state }}",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_ON
hass.states.async_set("sensor.test_sensor", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_OFF
async def test_device_class(hass):
"""Test device_class property."""
hass.states.async_set("sensor.test_sensor", "on")
await async_setup_component(
hass,
"media_player",
{
"media_player": {
"platform": "universal",
"name": "tv",
"device_class": "tv",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").attributes["device_class"] == "tv"
async def test_invalid_state_template(hass):
"""Test invalid state template sets state to None."""
hass.states.async_set("sensor.test_sensor", "on")
await async_setup_component(
hass,
"media_player",
{
"media_player": {
"platform": "universal",
"name": "tv",
"state_template": "{{ states.sensor.test_sensor.state + x }}",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_UNKNOWN
hass.states.async_set("sensor.test_sensor", "off")
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_UNKNOWN
async def test_master_state_with_template(hass):
"""Test the state_template option."""
hass.states.async_set("input_boolean.test", STATE_OFF)
hass.states.async_set("media_player.mock1", STATE_OFF)
templ = (
'{% if states.input_boolean.test.state == "off" %}on'
"{% else %}{{ states.media_player.mock1.state }}{% endif %}"
)
await async_setup_component(
hass,
"media_player",
{
"media_player": {
"platform": "universal",
"name": "tv",
"state_template": templ,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_ON
events = []
hass.helpers.event.async_track_state_change_event(
"media_player.tv", callback(lambda event: events.append(event))
)
context = Context()
hass.states.async_set("input_boolean.test", STATE_ON, context=context)
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_OFF
assert events[0].context == context
async def test_reload(hass):
"""Test reloading the media player from yaml."""
hass.states.async_set("input_boolean.test", STATE_OFF)
hass.states.async_set("media_player.mock1", STATE_OFF)
templ = (
'{% if states.input_boolean.test.state == "off" %}on'
"{% else %}{{ states.media_player.mock1.state }}{% endif %}"
)
await async_setup_component(
hass,
"media_player",
{
"media_player": {
"platform": "universal",
"name": "tv",
"state_template": templ,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_ON
hass.states.async_set("input_boolean.test", STATE_ON)
await hass.async_block_till_done()
assert hass.states.get("media_player.tv").state == STATE_OFF
hass.states.async_set("media_player.master_bedroom_2", STATE_OFF)
hass.states.async_set(
"remote.alexander_master_bedroom",
STATE_ON,
{"activity_list": ["act1", "act2"], "current_activity": "act2"},
)
yaml_path = get_fixture_path("configuration.yaml", "universal")
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"universal",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 5
assert hass.states.get("media_player.tv") is None
assert hass.states.get("media_player.master_bed_tv").state == "on"
assert hass.states.get("media_player.master_bed_tv").attributes["source"] == "act2"
assert (
"device_class" not in hass.states.get("media_player.master_bed_tv").attributes
)
| 32.592562
| 87
| 0.680123
|
4a18399376e999a0f930f165d8a2832e8537f817
| 1,886
|
py
|
Python
|
xml_jobs_handler.py
|
baumartig/paperboy
|
01659cda235508eac66a50a9c16c4a6c531015bd
|
[
"Apache-2.0"
] | 3
|
2015-02-26T06:39:40.000Z
|
2017-07-04T14:56:18.000Z
|
xml_jobs_handler.py
|
baumartig/paperboy
|
01659cda235508eac66a50a9c16c4a6c531015bd
|
[
"Apache-2.0"
] | null | null | null |
xml_jobs_handler.py
|
baumartig/paperboy
|
01659cda235508eac66a50a9c16c4a6c531015bd
|
[
"Apache-2.0"
] | 1
|
2018-02-21T00:12:06.000Z
|
2018-02-21T00:12:06.000Z
|
import xml.sax, xml.sax.handler
import xml.etree.ElementTree as ET
from job import Job
import os.path
import util
jobsPath = "data/jobs.xml"
class JobsHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.jobs = []
self.attributesList = []
self.buffer = ""
def startElement(self, name, attributes):
self.attributesList.append(attributes)
return
def characters(self, data):
self.buffer += data
def endElement(self, name):
attributes = self.attributesList.pop()
if name == "job":
# build job
job = Job(self.recipeRef)
executionType = attributes[u"type"]
executionTime = util.parseTime(attributes[u"time"])
executionDay = ""
if (not executionType == "daily"):
executionDay = attributes[u"day"]
job.setExecution(executionType, executionTime, executionDay)
self.jobs.append(job)
if name == "recipeRef":
self.recipeRef = self.buffer
self.buffer = ""
def loadJobs():
if os.path.isfile(jobsPath):
parser = xml.sax.make_parser()
handler = JobsHandler()
parser.setContentHandler(handler)
parser.parse(jobsPath)
return handler.jobs
else:
return []
def saveJobs(jobs):
root = ET.Element("jobs")
tree = ET.ElementTree(root)
for job in jobs:
attributes = {}
attributes["type"] = job.executionType
attributes["time"] = util.formatTime(job.executionTime)
if (not job.executionType == "daily"):
attributes["day"] = str(job.executionDay)
jobElem = ET.SubElement(root, "job", attributes)
recipeRefElem = ET.SubElement(jobElem, "recipeRef")
recipeRefElem.text = job.recipeRef
tree.write(jobsPath)
return
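# Usage sketch, not part of the original module. The Job API (a constructor
# taking a recipe reference, plus setExecution) and util.parseTime are
# assumed from how this module uses them; the time string format below is a
# guess, and saveJobs assumes the "data/" directory already exists.
def _jobs_roundtrip_example():
    job = Job("my_recipe")
    job.setExecution("weekly", util.parseTime("07:30"), "monday")
    saveJobs([job])
    loaded = loadJobs()
    assert loaded[0].recipeRef == "my_recipe"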
| 26.56338
| 72
| 0.601273
|
4a1839b61e57fa97aabe17be90f6756d21455e7d
| 537
|
py
|
Python
|
setup.py
|
anurag-jeebly/python-elastic-log-handler
|
6f54f453a2d1e753ca4c27a95db638f4fef3ccec
|
[
"Apache-2.0"
] | 2
|
2017-04-17T08:38:45.000Z
|
2017-10-22T15:52:05.000Z
|
setup.py
|
saurabh1e/python-elastic-log-handler
|
6f54f453a2d1e753ca4c27a95db638f4fef3ccec
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
saurabh1e/python-elastic-log-handler
|
6f54f453a2d1e753ca4c27a95db638f4fef3ccec
|
[
"Apache-2.0"
] | 1
|
2022-01-11T06:25:11.000Z
|
2022-01-11T06:25:11.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="python-elastic-log-handler",
version='1.0.3',
description="Logging handler to send logs to your elasticsearch",
keywords="logging handler bulk",
author="saurabh",
author_email="saurabh.1e1@gmail.com",
url="https://github.com/saurabh1e/python-elastic-log-handler/",
license="Apache License 2",
packages=find_packages(),
install_requires=[
"requests"
],
include_package_data=True,
classifiers=[]
)
| 25.571429
| 69
| 0.685289
|
4a183a0289c273fe652927e82de3885f315f896f
| 16,249
|
py
|
Python
|
src/cryptoadvance/specter/util/merkleblock.py
|
aphex3k/specter-desktop
|
f20b8447a9dcafb81461cc721e2978bf14fbc529
|
[
"MIT"
] | 683
|
2019-08-31T02:26:21.000Z
|
2022-03-31T18:43:31.000Z
|
src/cryptoadvance/specter/util/merkleblock.py
|
aphex3k/specter-desktop
|
f20b8447a9dcafb81461cc721e2978bf14fbc529
|
[
"MIT"
] | 1,100
|
2019-09-26T13:00:18.000Z
|
2022-03-31T22:29:54.000Z
|
src/cryptoadvance/specter/util/merkleblock.py
|
aphex3k/specter-desktop
|
f20b8447a9dcafb81461cc721e2978bf14fbc529
|
[
"MIT"
] | 179
|
2019-09-03T17:10:59.000Z
|
2022-03-31T16:59:13.000Z
|
# Code adopted from https://github.com/jimmysong/pb-exercises/
import hashlib
import math
from io import BytesIO
def hash256(s):
return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def read_varint(s):
"""read_varint reads a variable integer from a stream"""
i = s.read(1)[0]
if i == 0xFD:
# 0xfd means the next two bytes are the number
return little_endian_to_int(s.read(2))
elif i == 0xFE:
# 0xfe means the next four bytes are the number
return little_endian_to_int(s.read(4))
elif i == 0xFF:
# 0xff means the next eight bytes are the number
return little_endian_to_int(s.read(8))
else:
# anything else is just the integer
return i
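def _read_varint_examples():
    # Illustrative sketch, not part of the original module: values below 0xfd
    # fit in a single byte; the 0xfd/0xfe/0xff prefixes announce 2-, 4- or
    # 8-byte little-endian integers. The byte values are chosen for the example.
    assert read_varint(BytesIO(bytes([0x64]))) == 100
    assert read_varint(BytesIO(bytes([0xFD, 0xE8, 0x03]))) == 1000
    assert read_varint(BytesIO(bytes([0xFE, 0x40, 0x42, 0x0F, 0x00]))) == 1000000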
def merkle_parent(hash1, hash2):
"""Takes the binary hashes and calculates the hash256"""
# return the hash256 of hash1 + hash2
return hash256(hash1 + hash2)
def merkle_parent_level(hashes):
"""Takes a list of binary hashes and returns a list that's half
the length"""
# if the list has exactly 1 element raise an error
if len(hashes) == 1:
raise RuntimeError("Cannot take a parent level with only 1 item")
# if the list has an odd number of elements, duplicate the last one
# and put it at the end so it has an even number of elements
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
# initialize parent level
parent_level = []
# loop over every pair (use: for i in range(0, len(hashes), 2))
for i in range(0, len(hashes), 2):
# get the merkle parent of i and i+1 hashes
parent = merkle_parent(hashes[i], hashes[i + 1])
# append parent to parent level
parent_level.append(parent)
# return parent level
return parent_level
def merkle_root(hashes):
"""Takes a list of binary hashes and returns the merkle root"""
# current level starts as hashes
current_level = hashes
# loop until there's exactly 1 element
while len(current_level) > 1:
# current level becomes the merkle parent level
current_level = merkle_parent_level(current_level)
# return the 1st item of current_level
return current_level[0]
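def _merkle_root_example():
    # Minimal sketch, not part of the original module: with exactly two
    # leaves, the merkle root is simply merkle_parent of the pair.
    leaves = [hash256(b"tx0"), hash256(b"tx1")]
    assert merkle_root(list(leaves)) == merkle_parent(leaves[0], leaves[1])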
def little_endian_to_int(b):
"""little_endian_to_int takes byte sequence as a little-endian number.
Returns an integer"""
# use the int.from_bytes(b, <endianness>) method
return int.from_bytes(b, "little")
def int_to_little_endian(n, length):
"""endian_to_little_endian takes an integer and returns the little-endian
byte sequence of length"""
# use the to_bytes method of n
return n.to_bytes(length, "little")
def bytes_to_bit_field(some_bytes):
flag_bits = []
# iterate over each byte of flags
for byte in some_bytes:
# iterate over each bit, right-to-left
for _ in range(8):
# add the current bit (byte & 1)
flag_bits.append(byte & 1)
# rightshift the byte 1
byte >>= 1
return flag_bits
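def _bit_field_example():
    # Sketch, not part of the original module: bits are emitted
    # least-significant first, so 0b00000101 becomes [1, 0, 1, 0, 0, 0, 0, 0].
    assert bytes_to_bit_field(bytes([0b00000101])) == [1, 0, 1, 0, 0, 0, 0, 0]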
class Block:
command = b"block"
def __init__(
self, version, prev_block, merkle_root, timestamp, bits, nonce, tx_hashes=None
):
self.version = version
self.prev_block = prev_block
self.merkle_root = merkle_root
self.timestamp = timestamp
self.bits = bits
self.nonce = nonce
self.tx_hashes = tx_hashes
self.merkle_tree = None
@classmethod
def parse_header(cls, s):
"""Takes a byte stream and parses a block. Returns a Block object"""
# s.read(n) will read n bytes from the stream
# version - 4 bytes, little endian, interpret as int
version = little_endian_to_int(s.read(4))
# prev_block - 32 bytes, little endian (use [::-1] to reverse)
prev_block = s.read(32)[::-1]
# merkle_root - 32 bytes, little endian (use [::-1] to reverse)
merkle_root = s.read(32)[::-1]
# timestamp - 4 bytes, little endian, interpret as int
timestamp = little_endian_to_int(s.read(4))
# bits - 4 bytes
bits = s.read(4)
# nonce - 4 bytes
nonce = s.read(4)
# initialize class
return cls(version, prev_block, merkle_root, timestamp, bits, nonce)
@classmethod
def parse(cls, s):
# NOTE: Tx is not imported in this module; parse() assumes a Tx class
# with .parse(stream) and .hash() is available in the caller's scope.
b = cls.parse_header(s)
num_txs = read_varint(s)
tx_hashes = []
for _ in range(num_txs):
t = Tx.parse(s)
tx_hashes.append(t.hash())
b.tx_hashes = tx_hashes
return b
def serialize(self):
"""Returns the 80 byte block header"""
# version - 4 bytes, little endian
result = int_to_little_endian(self.version, 4)
# prev_block - 32 bytes, little endian
result += self.prev_block[::-1]
# merkle_root - 32 bytes, little endian
result += self.merkle_root[::-1]
# timestamp - 4 bytes, little endian
result += int_to_little_endian(self.timestamp, 4)
# bits - 4 bytes
result += self.bits
# nonce - 4 bytes
result += self.nonce
return result
def hash(self):
"""Returns the hash256 interpreted little endian of the block"""
# serialize
s = self.serialize()
# hash256
h256 = hash256(s)
# reverse
return h256[::-1]
def id(self):
"""Human-readable hexadecimal of the block hash"""
return self.hash().hex()
def bip9(self):
"""Returns whether this block is signaling readiness for BIP9"""
# BIP9 is signalled if the top 3 bits are 001
# remember version is 32 bits, so right-shift 29 (>> 29) and see if
# that is 001
return self.version >> 29 == 0b001
def bip91(self):
"""Returns whether this block is signaling readiness for BIP91"""
# BIP91 is signalled if the 5th bit from the right is 1
# shift 4 bits to the right and see if the last bit is 1
return self.version >> 4 & 1 == 1
def bip141(self):
"""Returns whether this block is signaling readiness for BIP141"""
# BIP141 is signalled if the 2nd bit from the right is 1
# shift 1 bit to the right and see if the last bit is 1
return self.version >> 1 & 1 == 1
def target(self):
"""Returns the proof-of-work target based on the bits"""
# last byte is exponent
exponent = self.bits[-1]
# the first three bytes are the coefficient in little endian
coefficient = little_endian_to_int(self.bits[:-1])
# the formula is:
# coefficient * 256**(exponent-3)
return coefficient * 256 ** (exponent - 3)
def difficulty(self):
"""Returns the block difficulty based on the bits"""
# note difficulty is (target of lowest difficulty) / (self's target)
# lowest difficulty has bits that equal 0xffff001d
lowest = 0xFFFF * 256 ** (0x1D - 3)
return lowest / self.target()
def check_pow(self):
"""Returns whether this block satisfies proof of work"""
# get the hash256 of the serialization of this block
h256 = hash256(self.serialize())
# interpret this hash as a little-endian number
proof = little_endian_to_int(h256)
# return whether this integer is less than the target
return proof < self.target()
def validate_merkle_root(self):
"""Gets the merkle root of the tx_hashes and checks that it's
the same as the merkle root of this block.
"""
# reverse all the transaction hashes (self.tx_hashes)
hashes = [h[::-1] for h in self.tx_hashes]
# get the Merkle Root
root = merkle_root(hashes)
# reverse the Merkle Root
# return whether self.merkle_root is the same as
# the reverse of the calculated merkle root
return root[::-1] == self.merkle_root
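def _target_example():
    # Worked sketch, not part of the original module: the lowest-difficulty
    # bits field (0xffff001d as stored on the wire) expands to
    # 0xffff * 256**(0x1d - 3), which corresponds to difficulty 1.0.
    blk = Block(0, b"\x00" * 32, b"\x00" * 32, 0, bytes.fromhex("ffff001d"), b"\x00" * 4)
    assert blk.target() == 0xFFFF * 256 ** (0x1D - 3)
    assert blk.difficulty() == 1.0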
class MerkleTree:
def __init__(self, total):
self.total = total
# compute max depth math.ceil(math.log(self.total, 2))
self.max_depth = math.ceil(math.log(self.total, 2))
# initialize the nodes property to hold the actual tree
self.nodes = []
# loop over the number of levels (max_depth+1)
for depth in range(self.max_depth + 1):
# the number of items at this depth is
# math.ceil(self.total / 2**(self.max_depth - depth))
num_items = math.ceil(self.total / 2 ** (self.max_depth - depth))
# create this level's hashes list with the right number of items
level_hashes = [None] * num_items
# append this level's hashes to the merkle tree
self.nodes.append(level_hashes)
# set the pointer to the root (depth=0, index=0)
self.current_depth = 0
self.current_index = 0
self.proved_txs = []
def __repr__(self):
result = []
for depth, level in enumerate(self.nodes):
items = []
for index, h in enumerate(level):
if h is None:
short = "None"
else:
short = "{}...".format(h.hex()[:8])
if depth == self.current_depth and index == self.current_index:
items.append("*{}*".format(short[:-2]))
else:
items.append("{}".format(short))
result.append(", ".join(items))
return "\n".join(result)
def up(self):
# reduce depth by 1 and halve the index
self.current_depth -= 1
self.current_index //= 2
def left(self):
# increase depth by 1 and double the index
self.current_depth += 1
self.current_index *= 2
def right(self):
# increase depth by 1 and double the index + 1
self.current_depth += 1
self.current_index = self.current_index * 2 + 1
def root(self):
return self.nodes[0][0]
def set_current_node(self, value):
self.nodes[self.current_depth][self.current_index] = value
def get_current_node(self):
return self.nodes[self.current_depth][self.current_index]
def get_left_node(self):
return self.nodes[self.current_depth + 1][self.current_index * 2]
def get_right_node(self):
return self.nodes[self.current_depth + 1][self.current_index * 2 + 1]
def is_leaf(self):
return self.current_depth == self.max_depth
def right_exists(self):
return len(self.nodes[self.current_depth + 1]) > self.current_index * 2 + 1
def populate_tree(self, flag_bits, hashes):
# populate until we have the root
while self.root() is None:
# if we are a leaf, we know this position's hash
if self.is_leaf():
# get the next bit from flag_bits: flag_bits.pop(0)
flag_bit = flag_bits.pop(0)
# get the current hash from hashes: hashes.pop(0)
current_hash = hashes.pop(0)
# set the current node in the merkle tree to the current hash
self.set_current_node(current_hash)
# if our flag bit is 1, add to the self.proved_txs array
if flag_bit == 1:
self.proved_txs.append(current_hash[::-1])
# go up a level
self.up()
# else
else:
# get the left hash
left_hash = self.get_left_node()
# if we don't have the left hash
if left_hash is None:
# if the next flag bit is 0, the next hash is our current node
if flag_bits.pop(0) == 0:
# set the current node to be the next hash
self.set_current_node(hashes.pop(0))
# sub-tree doesn't need calculation, go up
self.up()
# else
else:
# go to the left node
self.left()
elif self.right_exists():
# get the right hash
right_hash = self.get_right_node()
# if we don't have the right hash
if right_hash is None:
# go to the right node
self.right()
# else
else:
# combine the left and right hashes
self.set_current_node(merkle_parent(left_hash, right_hash))
# we've completed this sub-tree, go up
self.up()
# else
else:
# combine the left hash twice
self.set_current_node(merkle_parent(left_hash, left_hash))
# we've completed this sub-tree, go up
self.up()
if len(hashes) != 0:
raise RuntimeError("hashes not all consumed {}".format(len(hashes)))
for flag_bit in flag_bits:
if flag_bit != 0:
raise RuntimeError("flag bits not all consumed")
class MerkleBlock:
command = b"merkleblock"
def __init__(self, header, total, hashes, flags):
self.header = header
self.total = total
self.hashes = hashes
self.flags = flags
self.merkle_tree = None
def __repr__(self):
result = "{}\n".format(self.total)
for h in self.hashes:
result += "\t{}\n".format(h.hex())
result += "{}".format(self.flags.hex())
def hash(self):
return self.header.hash()
def id(self):
return self.header.id()
@classmethod
def parse(cls, s):
"""Takes a byte stream and parses a merkle block. Returns a Merkle Block object"""
# s.read(n) will read n bytes from the stream
# header - use Block.parse_header with the stream
header = Block.parse_header(s)
# total number of transactions (4 bytes, little endian)
total = little_endian_to_int(s.read(4))
# number of hashes is a varint
num_txs = read_varint(s)
# initialize the hashes array
hashes = []
# loop through the number of hashes times
for _ in range(num_txs):
# each hash is 32 bytes, little endian
hashes.append(s.read(32)[::-1])
# get the length of the flags field as a varint
flags_length = read_varint(s)
# read the flags field
flags = s.read(flags_length)
# initialize class
return cls(header, total, hashes, flags)
def is_valid(self):
"""Verifies whether the merkle tree information validates to the merkle root"""
# use bytes_to_bit_field on self.flags to get the flag_bits
flag_bits = bytes_to_bit_field(self.flags)
# set hashes to be the reversed hashes of everything in self.hashes
hashes = [h[::-1] for h in self.hashes]
# initialize the merkle tree with self.total
self.merkle_tree = MerkleTree(self.total)
# populate_tree with flag_bits and hashes
self.merkle_tree.populate_tree(flag_bits, hashes)
# check if the computed root [::-1] is the same as the merkle root
return self.merkle_tree.root()[::-1] == self.header.merkle_root
def proved_txs(self):
"""Returns the list of proven transactions from the Merkle block"""
if self.merkle_tree is None:
return []
else:
return self.merkle_tree.proved_txs
def is_valid_merkle_proof(
proof_hex, target_tx_hex, target_block_hash_hex, target_merkle_root_hex=None
):
"""
Validate a `target_tx` and `target_block_hash` are part of a BIP37 merkle `proof`
"""
mb = MerkleBlock.parse(BytesIO(bytes.fromhex(proof_hex)))
if mb.is_valid() is not True:
return False
if mb.proved_txs()[0].hex() != target_tx_hex:
return False
if target_merkle_root_hex is not None:
if mb.merkle_tree.root()[::-1].hex() != target_merkle_root_hex:
return False
if mb.hash().hex() != target_block_hash_hex:
return False
return True
| 36.18931
| 90
| 0.590683
|
4a183ba5e623f94b5c12c7a4aa8e1d85eef4e3b5
| 145
|
py
|
Python
|
LF6/zip_test.py
|
JohannesMuelle/workshops
|
af9140159e3872aff75864ced99b5163d7bba1ba
|
[
"CC0-1.0"
] | 5
|
2016-07-07T09:00:31.000Z
|
2017-03-09T22:46:33.000Z
|
LF6/zip_test.py
|
JohannesMuelle/workshops
|
af9140159e3872aff75864ced99b5163d7bba1ba
|
[
"CC0-1.0"
] | null | null | null |
LF6/zip_test.py
|
JohannesMuelle/workshops
|
af9140159e3872aff75864ced99b5163d7bba1ba
|
[
"CC0-1.0"
] | 8
|
2016-05-13T14:29:06.000Z
|
2019-10-20T16:43:32.000Z
|
import zipfile
import sys
zFile = zipfile.ZipFile("evil.zip")
try:
zFile.extractall(pwd="bloedsinn")
except:
print(sys.exc_info()[0])
| 20.714286
| 37
| 0.696552
|
4a183e6cecf30eee4f3bf6852a19f19e28eb4c23
| 316
|
py
|
Python
|
slack-vrc/vrchat-api-python-master/setup.py
|
kugiha/slack-vrc
|
b5a16ac6492eacb7a65e53c71d6dfe61981afec5
|
[
"MIT"
] | 22
|
2019-02-09T19:54:56.000Z
|
2022-03-28T10:55:29.000Z
|
slack-vrc/vrchat-api-python-master/setup.py
|
kugiha/slack-vrc
|
b5a16ac6492eacb7a65e53c71d6dfe61981afec5
|
[
"MIT"
] | 3
|
2019-02-09T16:05:12.000Z
|
2019-03-14T13:42:38.000Z
|
slack-vrc/vrchat-api-python-master/setup.py
|
kugiha/slack-vrc
|
b5a16ac6492eacb7a65e53c71d6dfe61981afec5
|
[
"MIT"
] | 5
|
2019-01-26T08:45:56.000Z
|
2021-10-09T08:18:51.000Z
|
from setuptools import setup
setup(
name="vrchat-api",
version="0.1.0",
description="An unofficial Python library for the VRChat API",
url="https://github.com/y23586/vrchat-api-python",
author="y23586",
license="MIT",
packages=["vrchat_api"],
install_requires=["requests>=2.21.0"]
)
| 24.307692
| 66
| 0.664557
|
4a183e9a4273e1776e6c18b3b567de93cfa0c31e
| 20,481
|
py
|
Python
|
src/figure_report/html_report.py
|
stephenkraemer/figure_report
|
15c9d1a346906eff329ef27f9c6dcdca5612afdb
|
[
"MIT"
] | null | null | null |
src/figure_report/html_report.py
|
stephenkraemer/figure_report
|
15c9d1a346906eff329ef27f9c6dcdca5612afdb
|
[
"MIT"
] | null | null | null |
src/figure_report/html_report.py
|
stephenkraemer/figure_report
|
15c9d1a346906eff329ef27f9c6dcdca5612afdb
|
[
"MIT"
] | null | null | null |
# TODO: avoid import of rpy2 if not necessary
# TODO: remove hard-coding to currywurst links
import shutil
from pathlib import Path
from typing import Union
import textwrap
import re
import time
import pandas as pd
from IPython.display import display
pd.options.display.max_colwidth = 10000
pd.options.display.max_rows = 40
pd.options.display.max_columns = 20
pd.options.display.float_format = "{:,.2f}".format
from matplotlib.figure import Figure
import matplotlib as mpl
import seaborn as sns
import plotnine as pn
import matplotlib.pyplot as plt
from IPython.display import Markdown, HTML
import rpy2.robjects.lib.ggplot2 as gg
import rpy2.rinterface as ri
import mouse_hema_meth.paths as mhpaths
AnyFigure = Union[mpl.figure.Figure, sns.FacetGrid, pn.ggplot, gg.GGPlot]
def pdf(s):
return s.replace(".png", ".pdf")
def svg(s):
return s.replace(".png", ".svg")
class HtmlReport:
template = """
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="./tocbot.css">
<link rel="stylesheet" type="text/css" href="./viewer.css">
<style>
table, th, td {{
border: 0px solid black;
}}
table {{
border-collapse: collapse;
text-align: right
}}
td, th {{
padding: 10px
}}
tr:nth-child(even) {{background-color: #f2f2f2;}}
thead {{ border-bottom: 1px solid #000; }}
tr {{ padding: 10px; }}
</style>
</head>
<body>
<div class="sidenav"></div>
<div class="main">
{html_body}
</div>
<!--<script src="https://cdnjs.cloudflare.com/ajax/libs/tocbot/4.1.1/tocbot.min.js"></script>-->
<script src="./tocbot.min.js"></script>
<script>
tocbot.init({{
// Where to render the table of contents.
tocSelector: '.sidenav',
// Where to grab the headings to build the table of contents.
contentSelector: '.main',
// Which headings to grab inside of the contentSelector element.
headingSelector: '{toc_headings}',
// // Where to render the table of contents.
// // Headings that match the ignoreSelector will be skipped.
// ignoreSelector: '.js-toc-ignore',
// Main class to add to links.
// linkClass: 'mylinkclass',
// // Extra classes to add to links.
// extraLinkClasses: '',
// // Class to add to active links,
// // the link corresponding to the top most heading on the page.
// activeLinkClass: 'is-active-link',
// // Main class to add to lists.
// listClass: 'toc-list',
// // Extra classes to add to lists.
// extraListClasses: '',
// // Class that gets added when a list should be collapsed.
// isCollapsedClass: 'is-collapsed',
// // Class that gets added when a list should be able
// // to be collapsed but isn't necessarily collpased.
// collapsibleClass: 'is-collapsible',
// // Class to add to list items.
// listItemClass: 'toc-list-item',
// // How many heading levels should not be collpased.
// // For example, number 6 will show everything since
// // there are only 6 heading levels and number 0 will collpase them all.
// // The sections that are hidden will open
// // and close as you scroll to headings within them.
collapseDepth: {autocollapse_depth},
// Smooth scrolling enabled.
scrollSmooth: true,
// Smooth scroll duration.
scrollSmoothDuration: 200,
// // Callback for scroll end.
// scrollEndCallback: function (e) {{ }},
// // Headings offset between the headings and the top of the document (this is meant for minor adjustments).
// headingsOffset: 100,
// // Timeout between events firing to make sure it's
// // not too rapid (for performance reasons).
// throttleTimeout: 50,
// // Element to add the positionFixedClass to.
// positionFixedSelector: null,
// // Fixed position class to add to make sidebar fixed after scrolling
// // down past the fixedSidebarOffset.
// positionFixedClass: 'is-position-fixed',
// // fixedSidebarOffset can be any number but by default is set
// // to auto which sets the fixedSidebarOffset to the sidebar
// // element's offsetTop from the top of the document on init.
// fixedSidebarOffset: 'auto',
// // includeHtml can be set to true to include the HTML markup from the
// // heading node instead of just including the textContent.
// includeHtml: false,
// // onclick function to apply to all links in toc. will be called with
// // the event as the first parameter, and this can be used to stop,
// // propagation, prevent default or perform action
// onClick: false
}});
</script>
</body>
</html>
"""
def __init__(
self,
report_path,
files_dir=None,
toc_headings="h1, h2, h3, h4",
autocollapse_depth=2,
):
"""Iteratively build a html document and save or display
This generates complete html documents, ie from <html> to </html>. However, the
documents are not stand-alone, eg linked images are not embedded.
This report assumes that three files have already been copied to the target directory:
- tocbot.css
- viewer.css
- tocbot.min.js: if this is missing, no error will be raised, but the ToC will not be filled
The report automatically collects headings into a sidebar ToC, with some nice features, based on
tocbot
Also, html tables are styled into basic striped tables.
Parameters
----------
toc_headings: eg 'h1, h2'; these headings will be collected into the toc
autocollapse_depth: the toc will be collapsed to hide headings with a higher
level than this; it can be expanded by clicking, or by scrolling into the
corresponding document area
"""
self.lines = []
self.toc_headings = toc_headings
self.autocollapse_depth = autocollapse_depth
self.counter = incremental_counter()
self.heading_counter = incremental_counter()
self.report_path = report_path
if files_dir is None:
files_dir = report_path.replace('.html', '_img')
self.files_dir = files_dir
# will be combined with counter to create unique paths
self.png_base_path = files_dir + "/img.png"
Path(report_path).parent.mkdir(exist_ok=True, parents=True)
Path(files_dir).mkdir(exist_ok=True, parents=True)
def h1(self, s: str):
self.lines.append(f"<h1 id={self.heading_counter()}>{s}</h1>\n")
def h2(self, s: str):
self.lines.append(f"<h2 id={self.heading_counter()}>{s}</h2>\n")
def h3(self, s: str):
self.lines.append(f"<h3 id={self.heading_counter()}>{s}</h3>\n")
def h4(self, s: str):
self.lines.append(f"<h4>{s}</h4>\n")
def h5(self, s: str):
self.lines.append(f"<h5>{s}</h5>\n")
def h6(self, s: str):
self.lines.append(f"<h6>{s}</h6>\n")
def table(self, df: pd.DataFrame):
"""Add HTML representation of dataframe"""
# Notes on table styling
# - currently, styling via simple table style in header, no hover etc.
# - alternative: use styles defined eg. in jupyter notebook or from similar source
# - this may be a version of the jupyter html export stylesheet:
# - <link rel="stylesheet" type="text/css" href="https://cdn.jupyter.org/notebook/5.1.0/style/style.min.css">
# - see: https://github.com/spatialaudio/nbsphinx/issues/182
# - see also: https://github.com/jupyter/help/issues/283
# add whitespace before and after table
self.lines.append("<br><br>")
self.lines.append(df.to_html())
self.lines.append("<br><br>")
def figure(self, fig: AnyFigure, do_display=False, **kwargs):
"""Add <img> with download links for png, pdf and svg
This could be improved by exposing save_and_display args
Parameters:
fig: figure to save
do_display: passed to save_and_display
kwargs: passed to save_and_display, except
- output is hardcoded to 'html'
- trunk_path is taken from self.trunk_path
- counter is taken from self.counter
"""
if "output" in kwargs or "counter" in kwargs or "trunk_path" in kwargs:
raise ValueError()
if 'png_path' in kwargs:
png_path = kwargs.pop('png_path')
counter = None
else:
png_path = self.png_base_path
counter = self.counter
self.lines.append(
save_and_display(
fig,
png_path=png_path,
counter=counter,
do_display=do_display,
output="html",
**kwargs,
)
)
def image(self, png_path: str, **kwargs):
self.lines.append(
display_file_html(png_path=png_path, do_display=False, **kwargs)
)
def text(self, s):
self.lines.append("<br>" + s + "<br>")
@property
def html_code(self):
# note that the \n-join is just to get a visually pleasing html source document
# when you add new elements, remember to add <div> or <br> where necessary
html_body = "\n".join(self.lines)
return self.template.format(
html_body=html_body,
toc_headings=self.toc_headings,
autocollapse_depth=self.autocollapse_depth,
)
def save(self):
"""Save to file, overwrite existing file"""
for curr_file in ["tocbot.css", "viewer.css", "tocbot.min.js"]:
curr_file_fp = Path(__file__).parent.joinpath(curr_file)
output_dir = Path(self.report_path).parent
target_file_path = output_dir / curr_file
if not target_file_path.exists():
shutil.copy(curr_file_fp, target_file_path)
Path(self.report_path).write_text(self.html_code)
def display(self):
"""Display with IPython.display"""
display(HTML(self.html_code))
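# Minimal usage sketch, not part of the original module; the output path is a
# placeholder. save() expects the tocbot/viewer helper files to ship next to
# this module, as described in the class docstring.
def _html_report_example():
    report = HtmlReport("/tmp/report_example/report.html", toc_headings="h1, h2")
    report.h1("Results")
    report.table(pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]}))
    report.text("Some commentary below the table.")
    report.save()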
def incremental_counter(start=0):
def wrapped():
nonlocal start
start += 1
return start - 1
return wrapped
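def _counter_example():
    # Sketch, not part of the original module: the closure counts upward from
    # `start`, returning 0, 1, 2, ... for the default.
    counter = incremental_counter()
    assert (counter(), counter(), counter()) == (0, 1, 2)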
def save_and_display(
fig,
png_path=None,
trunk_path=None,
additional_formats=("pdf", "svg"),
output="md",
height=None,
width=None,
display_height=None,
display_width=None,
name=None,
heading_level=None,
counter=None,
do_display=True,
layout="vertical",
show_name=True,
show_image=True,
show_download_links=True,
):
"""
Parameters
----------
fig
png_path
if png_path is relative, it will be interpreted as relative to notebook_data_dir
trunk_path
instead of png_path, trunk_path may be specified (unique path without suffix).
if trunk_path is relative, it will be interpreted as relative to notebook_data_dir.
additional_formats
in addition to png, all of these image filetypes will be saved, currently supported: pdf, svg
For ggplot, SVG is currently not supported (silently ignored if passed), due to apparent bugs
in the creation of SVGs
output
'md' or 'html'
height
width
name
heading_level
counter
do_display: display markup instead of returning it
layout
show_name
show_image
show_download_links
Returns
-------
"""
plt.close()
assert png_path is not None or trunk_path is not None
if trunk_path is not None:
png_path = trunk_path + ".png"
Path(png_path).parent.mkdir(exist_ok=True, parents=True)
if counter is not None:
png_path = re.sub("\.png$", f"_{counter()}.png", png_path)
if isinstance(
fig, (mpl.figure.Figure, sns.FacetGrid, sns.matrix.ClusterGrid, sns.PairGrid)
):
fig.savefig(png_path)
if "pdf" in additional_formats:
fig.savefig(pdf(png_path))
if "svg" in additional_formats:
fig.savefig(svg(png_path))
plt.close()
elif isinstance(fig, pn.ggplot):
size_kwargs = dict(height=height, width=width, units="in")
fig.save(png_path, **size_kwargs)
if "pdf" in additional_formats:
fig.save(pdf(png_path), **size_kwargs)
if "svg" in additional_formats:
fig.save(svg(png_path), **size_kwargs)
elif isinstance(fig, gg.GGPlot):
# noinspection PyUnresolvedReferences
size_kwargs = dict(
height=height if height else ri.NA_Logical,
width=width if width else ri.NA_Logical,
units="in",
)
fig.save(png_path, **size_kwargs)
if "pdf" in additional_formats:
fig.save(pdf(png_path), **size_kwargs)
# saving ggplot as svg seems buggy (Feb 2020)
# if 'svg' in additional_formats:
# fig.save(svg(png_path), **size_kwargs)
if output == "md":
image_link = server_markdown_link_get_str(
png_path,
image=True,
display_height=display_height,
display_width=display_width,
)
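        # note: the pdf and svg links below assume those formats were saved
        # above, i.e. that both were listed in additional_formats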
download_links = [
server_markdown_link_get_str(png_path),
server_markdown_link_get_str(pdf(png_path)),
server_markdown_link_get_str(svg(png_path)),
]
markdown_elements = [] # lines or table columns
if name is not None:
if layout == "vertical":
if heading_level is not None:
if isinstance(heading_level, int):
markdown_elements.append(f'{"#" * heading_level} {name}')
else:
markdown_elements.append(f"**{name}**")
else:
markdown_elements.append(name)
else:
markdown_elements.append(name)
if show_image:
markdown_elements.append(image_link)
if show_download_links:
if layout == "vertical":
# add another new line before download links, otherwise they are sometimes
# shown with right justification
markdown_elements.append("")
markdown_elements.append(" | ".join(download_links))
if layout == "vertical":
md_text = "\n".join(markdown_elements)
elif layout == "table_row":
md_text = "| " + " | ".join(markdown_elements) + " |"
else:
raise NotImplementedError
md_text += "\n"
if do_display:
display(Markdown(md_text))
else:
return md_text
elif output == "html":
return display_file_html(
png_path,
name,
layout,
heading_level,
show_image,
show_download_links,
do_display,
display_height=display_height,
display_width=display_width,
)
else:
raise ValueError(f"Unknown output format {output}")
def display_file_html(
png_path,
name=None,
layout="vertical",
heading_level=None,
show_image=True,
show_download_links=True,
do_display=True,
display_height=None,
display_width=None,
units="px",
):
"""
Parameters
----------
png_path
name
layout
heading_level
show_image
show_download_links
do_display: display html instead of returning it
display_height
display_width
units
Returns
-------
"""
image_link = server_html_link_get_str(
png_path,
image=True,
display_width=display_width,
display_height=display_height,
units=units,
)
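    # note: the pdf and svg download links assume those files were saved
    # alongside the png (e.g. by save_and_display)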
download_links = [
server_html_link_get_str(png_path),
server_html_link_get_str(pdf(png_path)),
server_html_link_get_str(svg(png_path)),
]
elements = [] # lines or table columns
if name is not None:
if layout == "vertical":
if heading_level is not None:
if isinstance(heading_level, int):
elements.append(f"<h{heading_level}>{name}</h{heading_level}>")
else:
elements.append(f"<b>{name}</b>")
else:
elements.append(name)
else:
elements.append(name)
if show_image:
elements.append(image_link)
if show_download_links:
if layout == "vertical":
# add another new line before download links, otherwise they are sometimes
# shown with right justification
elements.append("<br>")
elements.append(" | ".join(download_links))
if layout == "vertical":
text = "\n".join(elements)
elif layout == "table_row":
raise NotImplementedError
else:
raise NotImplementedError
text += "\n"
if do_display:
display(HTML(text))
else:
return text
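
# Example usage (sketch; assumes "plots/scores.png" was saved previously):
#
#     display_file_html("plots/scores.png", name="Score distribution",
#                       display_width=400)
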
def server_markdown_link_get_str(
s, image=False, name=None, display_height=None, display_width=None, units="px"
):
"""Given a filepath, return a markdown image or file link
see server_html_link_get_str for details and documented code
"""
if not image and (display_height is not None or display_width is not None):
        raise ValueError(
            "display_height and display_width are only supported for image links"
        )
if display_height is None and display_width is None:
s = str(s)
if name is None:
if not image:
name = Path(s).suffix[1:] # discard dot at the beginning of the suffix
else:
name = "image not found"
link = mhpaths.get_currywurst_link(s)
# add a query string to prevent browser caching
img_link = f"{'!' if image else ''}[{name}]({link}?{time.time()})"
else:
img_link = server_html_link_get_str(
s=s,
image=image,
name=name,
display_height=display_height,
display_width=display_width,
units=units,
)
return img_link
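
# Example (sketch): with image=True this yields markdown like
# ![image not found](<server-link>?<timestamp>); with image=False it yields a
# plain [png](<server-link>?<timestamp>) download link labeled by file suffix.
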
def server_html_link_get_str(
s: Union[Path, str],
image=False,
name=None,
display_height=None,
display_width=None,
units="px",
):
"""Given a filepath, return an html <img> or a download link
For images, return an image link, for other data, return a download link
Note that images are not detected based on suffix, but based on image arg.
This is because image files may either need to be displayed or linked for download.
"""
s = str(s)
if name is None:
if not image:
# for download links, if no name is given, display the filetype
name = Path(s).suffix[1:] # discard dot at the beginning of the suffix
else:
# for images, we use standard alt text
name = "image not found"
# convert filepath to link on http server
link = mhpaths.get_currywurst_link(s)
    if image:
        # add a query string to prevent browser caching
# control figure size:
# - while <img width=100> works, <img height=100> is ignored by jupyterlab,
# so we use a div instead
if display_width is not None:
if units == "px":
display_width_px = display_width
else:
raise NotImplementedError
width_style_str = f"width: {display_width_px}px; "
else:
width_style_str = ""
if display_height is not None:
if units == "px":
display_height_px = display_height
else:
raise NotImplementedError
height_style_str = f"height: {display_height_px}px; "
else:
height_style_str = ""
img_link = textwrap.dedent(
f"""\
<div style="{height_style_str} {width_style_str}">
<img src="{link}?{time.time()}"
alt="{name}"
style="max-width: 100%; max-height: 100%">'
</div>
"""
)
return img_link
else:
return f'<a href="{link}?{time.time()}" download>{name}</a>'
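
# Example (sketch): server_html_link_get_str("plots/scores.png", image=True,
# display_width=400) returns a <div> wrapping an <img> whose size is constrained
# via inline CSS; with image=False it returns an <a ... download> link.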