content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import torch
import shutil
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def rotation(inputs):
    """Rotate each image in a batch by a random multiple of 90 degrees.

    Args:
        inputs: image batch of shape (B, C, H, W).

    Returns:
        (image, target): rotated copy of `inputs`, and a LongTensor of
        rotation labels in {0, 1, 2, 3} (k means k * 90 degrees).
    """
    batch = inputs.shape[0]
    # Build a near-balanced pool of labels (each label ~batch/4 times),
    # shuffle it, and truncate to the batch size.
    labels = np.random.permutation([0, 1, 2, 3] * (int(batch / 4) + 1))[:batch]
    # Bug fix: torch.Tensor() does not accept a `device` keyword; use the
    # torch.tensor() factory, which also lets us set dtype=long directly
    # instead of round-tripping through float.
    target = torch.tensor(labels, dtype=torch.long, device=inputs.device)
    image = torch.zeros_like(inputs)
    image.copy_(inputs)
    for i in range(batch):
        # Rotate in the spatial (H, W) plane; int() because rot90 expects a
        # Python int for k, not a 0-dim tensor.
        image[i, :, :, :] = torch.rot90(inputs[i, :, :, :], int(target[i]), [1, 2])
    return image, target
def cosine_annealing(step, total_steps, lr_max, lr_min):
    """Cosine-annealed learning rate: lr_max at step 0, lr_min at total_steps."""
    progress = step / total_steps
    scale = 0.5 * (1 + np.cos(progress * np.pi))
    return lr_min + (lr_max - lr_min) * scale
def adjust_learning_rate(optimizer, epoch, args):
    """Set the optimizer's learning rate for this (0-based) epoch.

    Schedule (in 1-based epochs): linear warmup over epochs 1-5 up to
    args.lr, then args.lr until 160, args.lr * 0.01 after 160, and
    args.lr * 0.0001 after 180.
    """
    epoch = epoch + 1  # switch to 1-based epochs for the schedule
    if epoch <= 5:
        lr = args.lr * epoch / 5
    elif epoch > 180:
        # Bug fix: this branch must be tested before `epoch > 180` was
        # unreachable in the original because `epoch > 160` matched first.
        lr = args.lr * 0.0001
    elif epoch > 160:
        lr = args.lr * 0.01
    else:
        lr = args.lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """Sampler that re-balances an imbalanced dataset.

    Per-class weights follow the "effective number of samples" scheme,
    w_c = (1 - beta) / (1 - beta ** n_c); samples are drawn with
    replacement in proportion to the weight of their class.
    """

    def __init__(self, dataset, indices=None, num_samples=None):
        # Default to considering every element of the dataset.
        self.indices = indices if indices is not None else list(range(len(dataset)))
        # Default to drawing `len(indices)` samples per iteration.
        self.num_samples = num_samples if num_samples is not None else len(self.indices)
        # Count how many of the selected indices fall in each class.
        num_classes = len(np.unique(dataset.targets))
        label_to_count = [0] * num_classes
        for idx in self.indices:
            label_to_count[self._get_label(dataset, idx)] += 1
        beta = 0.9999
        effective_num = 1.0 - np.power(beta, label_to_count)
        per_cls_weights = (1.0 - beta) / np.array(effective_num)
        # One weight per sample, taken from its class's weight.
        weights = [per_cls_weights[self._get_label(dataset, idx)]
                   for idx in self.indices]
        self.weights = torch.DoubleTensor(weights)

    def _get_label(self, dataset, idx):
        """Class label of the sample at `idx`."""
        return dataset.targets[idx]

    def __iter__(self):
        drawn = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return iter(drawn.tolist())

    def __len__(self):
        return self.num_samples
def calc_confusion_mat(val_loader, model, args, save_path):
    """Evaluate `model` over `val_loader`, print per-class accuracy, and save
    a confusion-matrix plot to `save_path`/confusion_matrix.pdf.

    Assumes `args` exposes `gpu` (device id or None), `cls_num_list` and
    `confusion_title` -- confirm against the training script that builds args.
    """
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(input)
            # Predicted class = argmax over the logits.
            _, pred = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
    cf = confusion_matrix(all_targets, all_preds).astype(float)
    # Rows are true classes: per-class accuracy = diagonal / row sums.
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)
    cls_acc = cls_hit / cls_cnt
    print('Class Accuracy : ')
    print(cls_acc)
    # NOTE(review): the tick labels are the per-class sample counts, not
    # class names -- presumably intentional for imbalance plots; verify.
    classes = [str(x) for x in args.cls_num_list]
    plot_confusion_matrix(all_targets, all_preds, classes, normalize=True, title=args.confusion_title)
    plt.savefig(os.path.join(save_path, 'confusion_matrix.pdf'))
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Plot a confusion matrix of `y_true` vs `y_pred` and return the Axes.

    Args:
        y_true, y_pred: label sequences accepted by sklearn's confusion_matrix.
        classes: axis tick labels; falls back to "0".."9" when None.
        normalize: if True, cell text shows counts / 1000 with two decimals.
            NOTE(review): the matrix itself is NOT normalized; the /1000
            scaling looks dataset-specific -- confirm intent.
        title: only used to compute a default (title drawing is commented out).
        cmap: matplotlib colormap.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Bug fix: the caller-supplied `classes` were unconditionally overwritten
    # with "0".."9"; keep that only as a fallback when no labels are given.
    if classes is None:
        classes = [str(i) for i in range(10)]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # Show one tick per class on both axes, labelled with `classes`.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes)
    # Rotate the tick labels and set their alignment.
    # plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # plt.title(title, fontsize=18)
    plt.xlabel('Predicted label', fontsize=17)
    plt.ylabel('True label', fontsize=17)
    # Annotate every cell with its (optionally scaled) value.
    fmt = '.2f' if normalize else 'd'
    norm = 1000 if normalize else 1
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # Bug fix: `cm[i, j] / norm` always produces a float, and
            # format(float, 'd') raises ValueError on the un-normalized path;
            # only divide when normalizing.
            cell = cm[i, j] / norm if normalize else cm[i, j]
            ax.text(j, i, format(cell, fmt),
                    ha="center", va="center",
                    color="black")  # color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    return ax
def prepare_folders(args):
    """Create the log/model directories for this run if they do not exist.

    Expects `args.root_log`, `args.root_model` and `args.store_name`.
    """
    folders_util = [args.root_log, args.root_model,
                    os.path.join(args.root_log, args.store_name),
                    os.path.join(args.root_model, args.store_name)]
    for folder in folders_util:
        if not os.path.exists(folder):
            print(f'Creating folder: {folder}')
            # makedirs (not mkdir) so a missing parent cannot make the call
            # fail; exist_ok guards against creation races between workers.
            os.makedirs(folder, exist_ok=True)
def save_checkpoint(args, state, is_best):
    """Write `state` to <root_model>/<store_name>/ckpt.pth.tar; when `is_best`
    is true, also duplicate it as ckpt.best.pth.tar."""
    filename = f'{args.root_model}/{args.store_name}/ckpt.pth.tar'
    torch.save(state, filename)
    if not is_best:
        return
    best_name = filename.replace('pth.tar', 'best.pth.tar')
    shutil.copyfile(filename, best_name)
class AverageMeter(object):
    """Tracks the most recent value and running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "loss 0.42 (0.37)" for fmt=':.2f'
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of `output` scores against `target`.

    Args:
        output: (batch, num_classes) class scores.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of 1-element tensors, one accuracy per requested k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, batch)
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # Bug fix: reshape(-1) instead of view(-1) -- correct[:k] slices a
            # transposed tensor and may be non-contiguous, where .view() raises.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""These test the utils.py functions."""
from __future__ import unicode_literals
import pytest
from hypothesis import given
from hypothesis.strategies import binary, floats, integers, lists, text
from natsort.compat.py23 import PY_VERSION, py23_str
from natsort.utils import natsort_key
# Python 3 has no `long`; alias it so code below can reference `long`
# uniformly on both interpreter major versions.
if PY_VERSION >= 3:
    long = int


def str_func(x):
    """Identity for str inputs; raise TypeError for anything else.

    Used as the "string path" callback when exercising natsort_key below.
    """
    if isinstance(x, py23_str):
        return x
    else:
        raise TypeError("Not a str!")


def fail(_):
    """Callback that must never be invoked; asserts a code path is not taken."""
    raise AssertionError("This should never be reached!")
@given(floats(allow_nan=False) | integers())
def test_natsort_key_with_numeric_input_takes_number_path(x):
    # Numeric input must be routed through the number callback unchanged.
    assert natsort_key(x, None, str_func, fail, lambda y: y) is x


@pytest.mark.skipif(PY_VERSION < 3, reason="only valid on python3")
@given(binary().filter(bool))
def test_natsort_key_with_bytes_input_takes_bytes_path(x):
    # Bytes input (Python 3 only) must take the bytes callback path.
    assert natsort_key(x, None, str_func, lambda y: y, fail) is x


@given(text())
def test_natsort_key_with_text_input_takes_string_path(x):
    # Text input must take the string path (str_func) and be returned as-is.
    assert natsort_key(x, None, str_func, fail, fail) is x


@given(lists(elements=text(), min_size=1, max_size=10))
def test_natsort_key_with_nested_input_takes_nested_path(x):
    # A list input is processed element-wise and returned as a tuple.
    assert natsort_key(x, None, str_func, fail, fail) == tuple(x)


@given(text())
def test_natsort_key_with_key_argument_applies_key_before_processing(x):
    # A `key` callable is applied first; len() then yields the numeric path.
    assert natsort_key(x, len, str_func, fail, lambda y: y) == len(x)
|
nilq/baby-python
|
python
|
from ..abstract import ErdReadOnlyConverter
from ..primitives import *
from gehomesdk.erd.values.fridge import FridgeIceBucketStatus, ErdFullNotFull
class FridgeIceBucketStatusConverter(ErdReadOnlyConverter[FridgeIceBucketStatus]):
    """Decodes the ERD ice-bucket status bitfield.

    Bit layout (LSB first): bit0 = fridge bucket present, bit1 = freezer
    bucket present, bit2 = fridge bucket full, bit3 = freezer bucket full.
    """

    def erd_decode(self, value: str) -> FridgeIceBucketStatus:
        """Decode Ice bucket status"""
        # An empty payload means nothing present and nothing full.
        if not value:
            n = 0
        else:
            n = erd_decode_int(value)
        is_present_ff = bool(n & 1)
        is_present_fz = bool(n & 2)
        state_full_ff = ErdFullNotFull.FULL if n & 4 else ErdFullNotFull.NOT_FULL
        state_full_fz = ErdFullNotFull.FULL if n & 8 else ErdFullNotFull.NOT_FULL
        # A missing bucket cannot report a fill level.
        if not is_present_ff:
            state_full_ff = ErdFullNotFull.NA
        if not is_present_fz:
            state_full_fz = ErdFullNotFull.NA
        # Bug fix: the original tested `is_present_ff or is_present_ff`
        # (the fridge flag twice), so a freezer-only bucket was reported
        # as "no ice buckets at all".
        if not (is_present_ff or is_present_fz):
            # No ice buckets at all
            total_status = ErdFullNotFull.NA
        elif (state_full_ff == ErdFullNotFull.NOT_FULL) or (state_full_fz == ErdFullNotFull.NOT_FULL):
            # At least one bucket is not full
            total_status = ErdFullNotFull.NOT_FULL
        else:
            total_status = ErdFullNotFull.FULL
        ice_status = FridgeIceBucketStatus(
            state_full_fridge=state_full_ff,
            state_full_freezer=state_full_fz,
            is_present_fridge=is_present_ff,
            is_present_freezer=is_present_fz,
            total_status=total_status,
        )
        return ice_status
|
nilq/baby-python
|
python
|
import datetime
import unittest
import unittest.mock
from conflowgen.api.container_flow_generation_manager import ContainerFlowGenerationManager
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowGenerationManager(unittest.TestCase):
    """Unit tests for ContainerFlowGenerationManager with the underlying
    generation service and properties repository mocked out."""

    def setUp(self) -> None:
        """Create container database in memory"""
        sqlite_db = setup_sqlite_in_memory_db()
        sqlite_db.create_tables([
            ContainerFlowGenerationProperties,
            ModeOfTransportDistribution,
            Schedule
        ])
        mode_of_transport_distribution_seeder.seed()
        self.container_flow_generation_manager = ContainerFlowGenerationManager()

    def test_generate_with_overwrite(self):
        # overwrite=True must trigger generation unconditionally.
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'generate',
                return_value=None) as mock_method:
            self.container_flow_generation_manager.generate(overwrite=True)
            mock_method.assert_called_once()

    def test_generate_without_overwrite_and_no_previous_data(self):
        # Without overwrite, generation runs only after the existence check
        # reports that no previous data is present.
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'generate',
                return_value=None) as mock_generate, \
            unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'container_flow_data_exists',
                return_value=False) as mock_check:
            self.container_flow_generation_manager.generate(overwrite=False)
            mock_check.assert_called_once()
            mock_generate.assert_called_once()

    def test_generate_without_overwrite_and_some_previous_data(self):
        # Without overwrite, existing data must suppress generation.
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'generate',
                return_value=None) as mock_generate, \
            unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'container_flow_data_exists',
                return_value=True) as mock_check:
            self.container_flow_generation_manager.generate(overwrite=False)
            mock_check.assert_called_once()
            mock_generate.assert_not_called()

    def test_get_properties(self):
        # get_properties() must flatten the repository's object into a dict.
        class MockedProperties:
            name = "my test data"
            start_date = datetime.date(2030, 1, 1)
            end_date = datetime.date(2030, 12, 31)
            transportation_buffer = 0.2
            minimum_dwell_time_of_import_containers_in_hours = 3
            minimum_dwell_time_of_export_containers_in_hours = 4
            minimum_dwell_time_of_transshipment_containers_in_hours = 5
            maximum_dwell_time_of_import_containers_in_hours = 40
            maximum_dwell_time_of_export_containers_in_hours = 50
            maximum_dwell_time_of_transshipment_containers_in_hours = 60

        # Expected dict mirror of MockedProperties above.
        dict_properties = {
            'name': "my test data",
            'start_date': datetime.date(2030, 1, 1),
            'end_date': datetime.date(2030, 12, 31),
            'transportation_buffer': 0.2,
            'minimum_dwell_time_of_import_containers_in_hours': 3,
            'minimum_dwell_time_of_export_containers_in_hours': 4,
            'minimum_dwell_time_of_transshipment_containers_in_hours': 5,
            'maximum_dwell_time_of_import_containers_in_hours': 40,
            'maximum_dwell_time_of_export_containers_in_hours': 50,
            'maximum_dwell_time_of_transshipment_containers_in_hours': 60
        }
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_properties_repository,
                'get_container_flow_generation_properties',
                return_value=MockedProperties) as mock_method:
            retrieved_properties = self.container_flow_generation_manager.get_properties()
            mock_method.assert_called_once()
            self.assertDictEqual(dict_properties, retrieved_properties)

    def test_set_properties(self):
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_properties_repository,
                'set_container_flow_generation_properties',
                return_value=None) as mock_method:
            self.container_flow_generation_manager.set_properties(
                datetime.datetime.now().date(), datetime.datetime.now().date()
            )
            # The repository must receive the properties row created above.
            properties = ContainerFlowGenerationProperties.get()
            mock_method.assert_called_once_with(properties)

    def test_container_flow_data_exists(self):
        # The manager must pass the service's answer straight through.
        with unittest.mock.patch.object(
                self.container_flow_generation_manager.container_flow_generation_service,
                'container_flow_data_exists',
                return_value=True) as mock_method:
            response = self.container_flow_generation_manager.container_flow_data_exists()
            mock_method.assert_called_once()
            self.assertTrue(response)
|
nilq/baby-python
|
python
|
from polecat.rest.schema_builder import RestSchemaBuilder
def test_schema_builder():
    """The REST schema builder should produce at least one route."""
    schema = RestSchemaBuilder().build()
    assert len(schema.routes) > 0
|
nilq/baby-python
|
python
|
from PIL import Image
import matplotlib.pyplot as plt
# Log images
def log_input_image(x, opts):
    """Convert input tensor `x` to a PIL image for logging (`opts` is unused)."""
    return tensor2im(x)
def tensor2im(var):
    """Convert a (3, H, W) tensor with values in [-1, 1] to a PIL RGB image."""
    # (C, H, W) -> (H, W, C), detached and on the CPU as a numpy array.
    arr = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
    # Map [-1, 1] onto [0, 1] and clamp anything outside the range.
    arr = (arr + 1) / 2
    arr[arr < 0] = 0
    arr[arr > 1] = 1
    # Scale to 8-bit and hand off to PIL.
    return Image.fromarray((arr * 255).astype('uint8'))
def vis_faces(log_hooks):
    """Build one figure with a row of (input, target, output) faces per hook.

    log_hooks: list of dicts; entries with a 'diff_input' key get similarity
    scores in their titles via vis_faces_with_id, others vis_faces_no_id.
    """
    display_count = len(log_hooks)
    fig = plt.figure(figsize=(8, 4 * display_count))
    gs = fig.add_gridspec(display_count, 3)
    for i in range(display_count):
        hooks_dict = log_hooks[i]
        fig.add_subplot(gs[i, 0])
        # Rows carrying identity-similarity scores are rendered differently.
        if 'diff_input' in hooks_dict:
            vis_faces_with_id(hooks_dict, fig, gs, i)
        else:
            vis_faces_no_id(hooks_dict, fig, gs, i)
    plt.tight_layout()
    return fig
def vis_faces_with_id(hooks_dict, fig, gs, i):
    """Render row `i` (input/target/output) with identity-similarity titles.

    Assumes the subplot for column 0 was already added by the caller.
    """
    plt.imshow(hooks_dict['input_face'])
    plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input'])))
    fig.add_subplot(gs[i, 1])
    plt.imshow(hooks_dict['target_face'])
    plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']),
                                                     float(hooks_dict['diff_target'])))
    fig.add_subplot(gs[i, 2])
    plt.imshow(hooks_dict['output_face'])
    plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target'])))
def vis_faces_no_id(hooks_dict, fig, gs, i):
    """Render row `i` (input/target/output) without similarity scores.

    The input face is drawn in grayscale; the caller has already added the
    subplot for column 0.
    """
    plt.imshow(hooks_dict['input_face'], cmap="gray")
    plt.title('Input')
    fig.add_subplot(gs[i, 1])
    plt.imshow(hooks_dict['target_face'])
    plt.title('Target')
    fig.add_subplot(gs[i, 2])
    plt.imshow(hooks_dict['output_face'])
    plt.title('Output')
|
nilq/baby-python
|
python
|
import csv
from argparse import ArgumentParser
import re
# Convert an evaluation log into a CSV of per-language metrics.
parser = ArgumentParser()
parser.add_argument('--input_file', type=str)
parser.add_argument('--output_csv_file', type=str)
parser.add_argument('--option', default='eval', choices=['eval', 'debug'])
args = parser.parse_args()

# Bug fix: raw strings so \w, \d, \s are regex escapes rather than (invalid)
# string escapes; patterns are compiled once instead of per input line.
lang_regex = re.compile(r'lang=(\w+)')
em_regex = re.compile(r'\"em\":\s(\d+\.\d+)')
bleu_regex = re.compile(r'"bleu":\s(\d+\.\d+)')

# Bug fix: the column layout depends only on the mode, so decide it up front;
# the original reassigned `fieldnames` on every line and left it undefined
# (NameError at DictWriter) for an empty input file.
if args.option == 'eval':
    fieldnames = ['language', 'em_accuracy', 'bleu_score']
else:
    fieldnames = ['language', 'size', 'em_accuracy', 'em_wo_params', 'syntax']

row_dicts = []
with open(args.input_file, 'r') as f_in:
    for line in f_in:
        if args.option == 'eval':
            if 'lang' in line:
                # Remember the current language block.
                language = lang_regex.findall(line)[0]
            elif ('em' in line) or ('bleu' in line):
                em = em_regex.findall(line)[0]
                bleu = bleu_regex.findall(line)[0]
                row_dicts.append({'language': language, 'em_accuracy': em, 'bleu_score': bleu})
        elif args.option == 'debug':
            if 'lang' in line:
                language = lang_regex.findall(line)[0]
            elif 'eval' in line or 'test' in line:
                # Comma-separated detail line; fm/dm/nfm fields are unused.
                _, _, size, em, em_wo_params, fm, dm, nfm, syntax = map(lambda part: part.strip(), line.split(','))
                row_dicts.append({'language': language, 'size': size,
                                  'em_accuracy': float(em) * 100,
                                  'em_wo_params': float(em_wo_params) * 100,
                                  'syntax': float(syntax) * 100})

with open(args.output_csv_file, 'w') as f_out:
    csv_writer = csv.DictWriter(f_out, fieldnames)
    csv_writer.writeheader()
    csv_writer.writerows(row_dicts)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """Frozen ResNet-50 backbone followed by a trainable linear projection
    that maps pooled image features to `embed_size` dimensions."""

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        # Freeze the backbone: only the projection layer below trains.
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Drop the final classification layer, keep everything up to avgpool.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        """Return (batch, embed_size) embeddings for an image batch."""
        features = self.resnet(images)
        # Flatten the (batch, 2048, 1, 1) pooled output before projecting.
        features = features.view(features.size(0), -1)
        features = self.embed(features)
        return features
class DecoderRNN(nn.Module):
    """LSTM caption decoder: image features seed the sequence, followed by
    embedded caption tokens; a linear head maps hidden states to vocab scores."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        """Teacher-forced decode.

        features: (batch, embed_size) image encodings.
        captions: (batch, T) token ids; the last token is dropped so outputs
            align to shape (batch, T, vocab_size).
        The LSTM hidden state is left at its default zero initialization.
        """
        # Embed all caption tokens except the last one.
        token_vecs = self.embedding(captions[:, :-1])
        # Prepend the image features as the first "word" of each sequence.
        sequence = torch.cat((features.unsqueeze(1), token_vecs), dim=1)
        lstm_out, _ = self.lstm(sequence)
        return self.fc(lstm_out)

    def sample(self, inputs, states=None, max_len=20):
        """Greedy decoding: accepts a pre-processed image tensor `inputs` of
        shape (1, 1, embed_size) and returns a list of `max_len` token ids."""
        tokens = []
        for _ in range(max_len):
            lstm_out, states = self.lstm(inputs, states)
            scores = self.fc(lstm_out.squeeze(1))
            _, best = scores.max(dim=1)
            tokens.append(best.item())
            # Feed the chosen token back in as the next time step's input.
            inputs = self.embedding(best).unsqueeze(1)
        return tokens
|
nilq/baby-python
|
python
|
import unittest
from unittest.mock import patch
import pytest
import Parser.languageInterface as languageInterface
# class Test_LanguageInterface(unittest.TestCase):
# @patch('Parser.languageInterface.LanguageInterface.getSymbols')
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_parseXMLFileWithoutVerbose(self,
# mock_uploadToApi,
# mock_printParsedData,
# mock_getSymbols):
# '''
# it should call getSymbols and uploadToApi but not printParsedData
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.parseXMLFile('filename')
# mock_getSymbols.assert_called_once()
# mock_printParsedData.assert_not_called()
# mock_uploadToApi.assert_called_once()
# @patch('Parser.languageInterface.useful.verbose', True)
# @patch('Parser.languageInterface.LanguageInterface.getSymbols')
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_parseXMLFileWithVerbose(self,
# mock_uploadToApi,
# mock_printParsedData,
# mock_getSymbols):
# '''
# it should call getSymbols, uploadToApi and printParsedData
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.parseXMLFile('filename')
# mock_getSymbols.assert_called_once()
# mock_printParsedData.assert_called_once()
# mock_uploadToApi.assert_called_once()
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_getSymbolsNotImplemented(self,
# mock_uploadToApi,
# mock_printParsedData):
# '''
# it should raise an exception as getSymbols isn't implemented
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# with pytest.raises(Exception) as e:
# assert interface.parseXMLFile('filename')
# assert str(e.value) == 'Not implemented'
# def test_appendToSymbols(self):
# '''
# should append the symbol to the list
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.appendToSymbols('variable', 'symbol')
# self.assertEqual(interface.symbols[0]['symbol_type'], 'variable')
# self.assertEqual(interface.symbols[0]['symbol_list'][0], 'symbol')
# interface.appendToSymbols('variable', 'symbol2')
# self.assertEqual(interface.symbols[0]['symbol_list'][1], 'symbol2')
# @patch('Parser.languageInterface.printingFunctions.printUnions')
# def test_printParsedData(self,
# mock_printUnions):
# '''
# should call the union printing function
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.printParsedData()
# mock_printUnions.assert_called_once()
# @patch('Parser.languageInterface.useful.upload', False)
# @patch('Parser.languageInterface.AIClient')
# @patch('Parser.languageInterface.JSONRequestCrafter')
# def test_uploadToApiNoUpload(self,
# mock_JSONRequestCrafter,
# mock_AIClient):
# '''
# it shouldn't call the JsonRequestCrafter function as upload isn't on
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.uploadToApi()
# mock_JSONRequestCrafter.assert_not_called()
# @patch('Parser.languageInterface.useful.upload', True)
# @patch('Parser.languageInterface.AIClient')
# @patch('Parser.languageInterface.JSONRequestCrafter')
# def test_uploadToApiUpload(self,
# mock_JSONRequestCrafter,
# mock_AIClient):
# '''
# it should call the JsonRequestCrafter function
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.uploadToApi()
# mock_JSONRequestCrafter.assert_called_once()
|
nilq/baby-python
|
python
|
import os
import os.path as op
from sklearn.externals import joblib as jl
from glob import glob
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import f_classif, SelectPercentile
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score
from skbold.postproc import MvpResults
from skbold.utils import ArrayPermuter
from sklearn.externals.joblib import Parallel, delayed
import numpy as np
def run_subject(sub, N_PERMS):
    """Run a permutation-based within-subject MVPA for one subject.

    sub: path to the subject's pickled MVP object; N_PERMS: number of
    permutations, each written to RESULTS/TRAIN/WITHIN_SUBS/<sub>/perm_<i>.
    """
    sub_name = op.basename(op.dirname(sub))
    out_dir = op.join('RESULTS', 'TRAIN', 'WITHIN_SUBS', sub_name)
    print("Processing sub-%s" % sub_name)
    mvp = jl.load(sub)
    # ArrayPermuter shuffles the data, turning each run into one null-model
    # permutation; percentile=100 makes the feature selection a pass-through.
    pipe = Pipeline([
        ('ufs', SelectPercentile(score_func=f_classif, percentile=100)),
        ('scaler', StandardScaler()),
        ('permuter', ArrayPermuter()),
        ('clf', SVC(kernel='linear'))
    ])
    for i in np.arange(N_PERMS):
        # Fresh results accumulator per permutation.
        mvp_results = MvpResults(mvp=mvp, type_model='classification',
                                 n_iter=10, feature_scoring='fwm',
                                 verbose=False, accuracy=accuracy_score,
                                 f1_score=f1_score)
        skf = StratifiedKFold(n_splits=10)
        for train_idx, test_idx in skf.split(X=mvp.X, y=mvp.y):
            X_train, y_train = mvp.X[train_idx], mvp.y[train_idx]
            X_test, y_test = mvp.X[test_idx], mvp.y[test_idx]
            pipe.fit(X_train, y_train)
            pred = pipe.predict(X_test)
            mvp_results.update(pipeline=pipe, test_idx=test_idx, y_pred=pred)
        mvp_results.compute_scores(maps_to_tstat=False)
        # Permutations are numbered from 1 in the output directory names.
        tmp_out_dir = op.join(out_dir, 'perm_%i' % (i + 1))
        if not op.isdir(tmp_out_dir):
            os.makedirs(tmp_out_dir)
        mvp_results.write(out_path=tmp_out_dir)
if __name__ == '__main__':
    # Run all subjects in parallel (6 workers), 1000 permutations each.
    N_PERMS = 1000
    subjects = sorted(glob(op.join('MVP', '???', 'mvp_train_nonzero.jl')))
    _ = Parallel(n_jobs=6)(delayed(run_subject)(sub, N_PERMS)
                           for sub in subjects)
|
nilq/baby-python
|
python
|
import os
import copy
from util.queryParser import SimpleQueryParser
def gene_imagenet_synset(output_file):
    """Write "<sid>\t<synset words>" lines for every ImageNet synset id.

    Joins visualness_data/words.txt (sid -> synset text) with
    visualness_data/imagenet.synsetid.txt (the sids to keep); synset text is
    lower-cased and hyphens become spaces.  Python 2 source.
    """
    sid2synset = {}
    for line in open('visualness_data/words.txt'):
        sid, synset = line.strip().split('\t')
        sid2synset[sid] = synset
    fout = open(output_file, 'w')
    for line in open('visualness_data/imagenet.synsetid.txt'):
        sid = line.strip()
        fout.write(sid + "\t" + sid2synset[sid].lower().replace('-', ' ') + '\n')
    fout.close()
def readImageNetSynset():
    """Load ImageNet synsets grouped by phrase length (in words).

    Returns {length: set of synset phrases}; builds the cached data file on
    first use.  Python 2 source: `map` here returns a list.
    """
    len2visualsynset = {}
    data_file = 'visualness_data/imagenet.sid.synset.txt'
    # Lazily generate the joined sid/synset file on first run.
    if not os.path.exists(data_file):
        gene_imagenet_synset(data_file)
    for line in open(data_file):
        sid, synsets_data = line.strip().split("\t")
        # A synset line holds comma-separated alternative phrases.
        synsets = map(str.strip, synsets_data.strip().split(','))
        for synset in synsets:
            words = synset.strip().split()
            length = len(words)
            len2visualsynset.setdefault(length, []).append(" ".join(words))
    # print 'length:', len2visualsynset.keys()
    # Convert each bucket to a set for O(1) phrase membership tests.
    new_len2visualsynset = {}
    for key in len2visualsynset:
        new_len2visualsynset[key] = set(len2visualsynset[key])
    return new_len2visualsynset
class VisualDetector:
    """Scores how "visual" a text query is by greedily matching its word
    n-grams against ImageNet synset phrases (longest phrases first)."""

    def __init__(self):
        # {phrase length: set of synset phrases} and a simple tokenizer.
        self.len2visualsynset = readImageNetSynset()
        self.qp = SimpleQueryParser()

    def predict(self, query):
        """Return (visualness_score, labeled_query).

        Score is matched_words / total_words (0 when nothing matches); in the
        labeled query each matched phrase is wrapped in [brackets].
        """
        origin_word_list = self.qp.process_list(query)
        original_len = len(origin_word_list)
        word_list = copy.deepcopy(origin_word_list)
        all_len = len(word_list)
        valid_len = len(word_list)  # words not yet consumed by a match
        # Try the longest known phrase length first, shrinking on failure.
        current_group = max(self.len2visualsynset.keys())
        match_counter = 0
        while current_group > 0:
            if valid_len == 0:
                break
            # Never try phrases longer than the remaining unmatched words.
            while current_group > valid_len:
                current_group -= 1
            match_flag = 0
            for i in range(0, all_len + 1 - current_group):
                pattern = " ".join(word_list[i:i+current_group])
                # '#' marks an already-matched placeholder; skip windows
                # overlapping previous matches.
                if "#" in pattern:
                    continue
                else:
                    if pattern in self.len2visualsynset[current_group]:
                        # Collapse the matched n-gram into one '#k' token.
                        word_list = word_list[:i] + ['#%d' % current_group] + word_list[i+current_group:]
                        all_len = all_len - current_group + 1
                        valid_len = valid_len - current_group
                        match_counter += current_group
                        match_flag = 1
                        break
            if match_flag == 0:
                # No match at this length anywhere; try shorter phrases.
                current_group -= 1
        # Rebuild the query, expanding '#k' placeholders back into the
        # original words wrapped in brackets.
        index = 0
        labeled_query = []
        for word in word_list:
            if word.startswith("#"):
                n_words = int(word[1:])
                new_word = "[" + " ".join(origin_word_list[index:index+n_words]) + "]"
                labeled_query.append(new_word)
                index += n_words
            else:
                labeled_query.append(word)
                index += 1
        return 0 if match_counter == 0 else 1.0*match_counter/original_len, " ".join(labeled_query)
if __name__ == "__main__":
vd = VisualDetector()
query_list = ["flowers", "soccer ball", "dogs and cat", "tattoo design", "barack obama family", "hot weather girls", "funny", "saying and quote"]
for query in query_list:
# print query
visualness_score, labeled_query = vd.predict(query)
print query, "->", labeled_query, visualness_score, '\n'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Count the number of called variants per sample in a VCF file.
"""
import argparse
import collections
import vcf
def parse_args():
    """Parse CLI args; the positional `vcf` arg is opened as a vcf.Reader."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "vcf", help="the vcf file to analyze", type=lambda f: vcf.Reader(filename=f)
    )
    return parser.parse_args()
def main():
    """Count calls per sample in a VCF and print a TSV summary.

    Filtered records and filtered sample calls are skipped; variant calls
    are split into het vs hom-alt counts.
    """
    args = parse_args()
    call_counts = collections.Counter()
    hom_alt_counts = collections.Counter()
    het_counts = collections.Counter()
    for record in filter(lambda r: not r.is_filtered, args.vcf):
        for call in filter(lambda s: not s.is_filtered, record.samples):
            call_counts[call.sample] += 1
            if call.is_variant:
                # Non-het variant calls are counted as homozygous-alt.
                if call.is_het:
                    het_counts[call.sample] += 1
                else:
                    hom_alt_counts[call.sample] += 1
    print("\t".join(["sample", "call_count", "hom_alt_count", "het_count"]))
    for sample in call_counts.keys():
        print(
            "\t".join(
                map(
                    str,
                    [
                        sample,
                        call_counts[sample],
                        hom_alt_counts[sample],
                        het_counts[sample],
                    ],
                )
            )
        )
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import Aviso
from .models import AvisoViewer
from .forms import AvisoFormAdmin
@admin.register(Aviso)
class AvisoAdmin(admin.ModelAdmin):
    """Admin for Aviso: rich-text editing via TinyMCE, with authorship
    fields managed automatically on save."""
    fields = ['titulo', 'subtitulo', 'data', 'texto', 'autor', 'editado_por']
    list_display = ('titulo', 'subtitulo', 'autor', 'data')
    search_fields = ('titulo', 'subtitulo', 'autor', 'texto')
    # Authorship/date fields are set programmatically, never hand-edited.
    readonly_fields = ['autor', 'editado_por', 'data']
    formfield_overrides = {
        models.TextField: {'widget': TinyMCE()},
    }
    form = AvisoFormAdmin
    date_hierarchy = 'data'

    def save_model(self, request, obj, form, change):
        """Stamp the current user as editor (on change) or author (on create)."""
        if change:
            obj.editado_por = request.user
        else:
            obj.autor = request.user
        obj.save()
@admin.register(AvisoViewer)
class AvisoViewerAdmin(admin.ModelAdmin):
    """Admin for AvisoViewer: tracks which resident viewed which aviso, when."""
    fields = ['aviso', 'residente', 'data_visualizado']
    list_display = ('aviso', 'residente', 'data_visualizado')
    search_fields = ('aviso', 'residente')
    autocomplete_fields = ['residente', 'aviso']
    date_hierarchy = 'data_visualizado'
|
nilq/baby-python
|
python
|
import json
USERS = "../static/user.json"
def read_JSON(filename):
    """Load a JSON object from `filename`; return {} if the file is missing,
    unreadable, or does not contain valid JSON."""
    try:
        with open(filename, "r") as file_obj:
            return json.load(file_obj)
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError.  The original bare `except`
        # also swallowed KeyboardInterrupt/SystemExit, hiding real failures.
        return dict()
def write_JSON(data, filename):
    """Serialize `data` as JSON, overwriting any existing `filename`."""
    with open(filename, "w+") as fh:
        json.dump(data, fh)
def append_JSON(filename, new_data):
    """Merge `new_data` into the JSON object stored in `filename`.

    Bug fix: the original opened the file with mode "w+", which truncates it
    BEFORE json.load runs, so the existing data was always discarded.  Read
    first (tolerating a missing/empty/corrupt file), then rewrite.
    """
    try:
        with open(filename, "r") as file_obj:
            old_data = json.load(file_obj)
    except (OSError, ValueError):
        # Missing file, or a present-but-empty/invalid .json file.
        old_data = {}
    old_data.update(new_data)
    write_JSON(old_data, filename)
|
nilq/baby-python
|
python
|
from utils.code_runner import execute_code
import math
def sum_divisors(n):
    """Return the sum of the proper divisors of n (every divisor except n).

    By this module's convention sum_divisors(1) == 1.
    """
    if n == 1:
        return 1
    # math.isqrt is exact for arbitrarily large ints; the original
    # math.ceil(math.sqrt(n)) relies on float sqrt, which can misclassify
    # large perfect squares (floats lose integer precision above 2**52).
    root = math.isqrt(n)
    total_sum = 1  # 1 divides everything; n itself is excluded.
    for divisor in range(2, root + 1):
        if n % divisor == 0:
            total_sum += divisor
            partner = n // divisor
            # For a perfect square, count the square root only once.
            if partner != divisor:
                total_sum += partner
    return total_sum
def is_abundant(n):
    """
    Abundant: if the sum of its proper divisors is higher than n.
    Deficient: if the sum of its proper divisors is less than n.
    Perfect: If the sum of its proper divisors exactly equals to n.
    """
    # 12 is the smallest abundant number, so anything below it is not abundant.
    return n >= 12 and sum_divisors(n) > n
def is_sum_of_two_abundant_numbers(n, abundant_numbers):
    """Return True if n == a + b for some a, b in `abundant_numbers`.

    a and b may be the same element (e.g. 24 = 12 + 12).
    """
    # Performance fix: membership tests against a list are O(len) each,
    # which made the caller's overall scan quadratic; hash the candidates
    # once for O(1) lookups.
    abundant_set = set(abundant_numbers)
    for abundant in abundant_set:
        if n - abundant in abundant_set:
            return True
    return False
def problem():
    """Project Euler 23: sum of all positive integers that are NOT the sum
    of two abundant numbers."""
    # All abundant number greater than the upper limit
    # can be written as the sum of two abundant numbers.
    upper_limit = 28123
    total_sum = 0
    abundant_numbers = []
    # Get the sum of all the positive integers
    # that cannot be written as the sum of two abundant numbers.
    for i in range(0, upper_limit + 1):
        # Add i to abundant list if it is abundant
        if is_abundant(i):
            abundant_numbers.append(i)
        # Check if i can be summed up with two abundant numbers.
        if not is_sum_of_two_abundant_numbers(i, abundant_numbers):
            total_sum += i
    return total_sum
if __name__ == '__main__':
execute_code(problem)
|
nilq/baby-python
|
python
|
from datadog import initialize, statsd
import random
import time
# Point the DogStatsD client at a local agent and emit one timing metric and
# one distribution metric under the "testing7" namespace.
options = {
    'statsd_host': '127.0.0.1',
    'statsd_port': 8125
}
initialize(**options)
namespace = "testing7"
# statsd.distribution('example_metric.distribution', random.randint(0, 20), tags=["environment:dev"])
statsd.timing("%s.timing" % namespace, random.randint(1, 20), tags=["environment:dev"])
statsd.distribution("%s.distribution" % namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
# time.sleep(5)
# statsd.timing("%s.timing"%namespace, random.randint(1, 20), tags=["environment:dev"])
# statsd.distribution("%s.distribution"%namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
|
nilq/baby-python
|
python
|
import numpy as np
import ad_path
import antenna_diversity as ad
import matplotlib.pyplot as plt
import h5py
import typing as t
import time
import os
# Module-level simulation configuration for the antenna-diversity experiment.
ad_path.nop()

bits_per_slot = 440
slots_per_frame = 1
# Lowest BER the simulation is expected to resolve.
give_up_value = 1e-6
# How many bits to aim for at give_up_value
certainty = 20
# Stop early at x number of errors. Make sure to scale together with
# slots_per_frame, as this number number must include several different
# h values.
stop_at_errors = 100000
snr_stop = 50
snr_step = 2.5
branches = 5
crc_fail_penalty = 320  # Payload len
savefile = "diversity_mega.h5"
# Enough bits to see ~`certainty` errors at the give-up BER, and the frame
# count needed to produce them.
bit_goal = np.ceil(1/give_up_value) * certainty
max_tries = int(np.ceil(bit_goal / (bits_per_slot * slots_per_frame)))
print(bit_goal, max_tries)
# One Rayleigh+AWGN channel instance per SNR point.
snr_values = np.arange(-10, snr_stop+snr_step, snr_step)
snr_todo = list(range(len(snr_values)))
snr_channels = []
for snr in snr_values:
    snr_channels.append(ad.channel.RayleighAWGNChannel(branches, snr))
gfsk = ad.modulation.GFSK()
encoder = ad.encoding.SymbolEncoder(2)
# Keep track of class instances used at the innermost loop
selector_dictionary = {}
def rest(hat_recv: np.ndarray, symbols: np.ndarray, slot) -> t.Tuple[int, int, bool, int]:
    """Demodulate a combined signal and score it against the sent slot.

    Returns (symbol_errors, symbols_total, crc_failed, payload_bit_error_score).
    A CRC failure is charged the full crc_fail_penalty instead of the
    measured payload bit errors.
    """
    demod_symbols = gfsk.demodulate(hat_recv)
    decoded = encoder.decode_msb(demod_symbols)
    frame = ad.protocols.dect.Full.from_bytes(decoded)
    sym_errors, sym_total = ad.common.count_symbol_errors(symbols, demod_symbols)
    dropped = frame.crc_drops_packet()
    if dropped:
        score = crc_fail_penalty
    else:
        score, _ = ad.common.count_bit_errors(slot.b_field, frame.b_field)
    return sym_errors, sym_total, dropped, score
# Must return (errors, total, crc, pbes)
def selection_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
        -> t.Tuple[int, int, bool, int]:
    """Selection diversity driven by the known channel gains h."""
    chosen_signal, _branch = ad.diversity_technique.selection_from_h(recv, h)
    return rest(chosen_signal, symbols, slot)
def mrc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
        -> t.Tuple[int, int, bool, int]:
    """Maximum-ratio combining of all branches using channel gains h."""
    combined = ad.diversity_technique.combining.mrc(recv, h)
    return rest(combined, symbols, slot)
def egc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
        -> t.Tuple[int, int, bool, int]:
    """Equal-gain combining; the channel estimate h is not needed."""
    combined = ad.diversity_technique.combining.egc(recv)
    return rest(combined, symbols, slot)
def crc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
        -> t.Tuple[int, int, bool, int]:
    """CRC-feedback selection; keeps one stateful selector per state_id."""
    selector = selector_dictionary.get(state_id)
    if selector is None:
        # First time this (snr, branch, algorithm) combination is seen.
        selector = ad.diversity_technique.CRCSelection(len(recv))
        selector_dictionary[state_id] = selector
    chosen_signal, _branch = selector.select(recv)
    err, n, crc_fail, pbes = rest(chosen_signal, symbols, slot)
    # Feed the CRC outcome back so the selector can adapt its choice.
    selector.report_crc_status(not crc_fail)
    return err, n, crc_fail, pbes
def power_and_crc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
        -> t.Tuple[int, int, bool, int]:
    """Branch selection by received power, tie-broken by per-branch CRC.

    Scores every branch once with rest(); rest() is deterministic for a
    given input, so the cached result of the selected branch is identical
    to what the original's second rest() call recomputed — this halves the
    demodulation work per invocation.
    """
    # loop over branches, scoring each one individually
    branch_results = [rest(r, symbols, slot) for r in recv]
    crc_fails = [result[2] for result in branch_results]
    _answer, index = ad.diversity_technique.selection.selection_from_power_and_crc(recv, crc_fails)
    return branch_results[index]
def renedif_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
        -> t.Tuple[int, int, bool, int]:
    """ReneDif selection; keeps one stateful selector per state_id."""
    selector = selector_dictionary.get(state_id)
    if selector is None:
        # Lazily create the per-(snr, branch, algorithm) selector instance.
        selector = ad.diversity_technique.ReneDif()
        selector_dictionary[state_id] = selector
    chosen_signal, _branch = selector.select(recv)
    return rest(chosen_signal, symbols, slot)
# Here the list of algorithms to run is configured.
algorithms = [selection_recv_h, mrc_recv_h, crc_recv_h, egc_recv_h, renedif_recv_h, power_and_crc_recv_h]
algo_names = ["Selection", "MRC", "CRC", "EGC", "rene", "power_and_crc"]
# algorithms = [renedif_recv_h, crc_recv_h]
# algo_names = ["rene", "crc"]
# Thing with structure [snr_index][branch][algorithm] = [errors, total, payload_errors, slots, pbes]
data = np.zeros((len(snr_values), branches, len(algorithms), 5))
# Resume from a previous run if a results file already exists.
if os.path.isfile(savefile):
    with h5py.File(savefile, "r") as f:
        data = f["data"][:]
    print("Loaded existing data from file")
def make_frame_array():
    """Build one frame: slots_per_frame DECT slots with random payloads.

    Note: unlike the original, no local named `data` is used, so the
    module-level results array is not shadowed inside this function.
    """
    return [ad.protocols.dect.Full.with_random_payload()
            for _ in range(slots_per_frame)]
# Main measurement loop: keep sending random frames until every SNR point
# has accumulated enough bits (or errors) for all branch counts/algorithms.
run = 0
start = time.time()
while len(snr_todo) > 0:
    frame = make_frame_array()
    for slot in frame:
        symbols = encoder.encode_msb(slot.to_bytes())
        signal = gfsk.modulate(symbols)
        # SNR points that finished during this slot. They are removed AFTER
        # the loop: the original `del snr_todo[i]` inside
        # `enumerate(snr_todo)` mutated the list while iterating it, which
        # skipped the following SNR point and deleted the wrong entries when
        # several points finished in the same pass.
        finished = []
        for snr_index in snr_todo:
            ch = snr_channels[snr_index]
            recv, h = ch.run(signal)
            done = True
            for branch in range(branches):
                for ai, algorithm in enumerate(algorithms):
                    state_id = f"{snr_index}.{branch}.{ai}"
                    errors, total, _, _, _ = data[snr_index][branch][ai]
                    # (The unused `prob = errors / total` was dropped: it
                    # produced a 0/0 NaN warning on fresh runs.)
                    if total > bit_goal or errors > stop_at_errors:
                        continue
                    done = False
                    err, n, crc_fault, pbes = algorithm(recv[:branch+1], h[:branch+1], symbols, slot, state_id)
                    data[snr_index][branch][ai][0] += err
                    data[snr_index][branch][ai][1] += n
                    data[snr_index][branch][ai][2] += int(crc_fault)
                    data[snr_index][branch][ai][3] += 1
                    data[snr_index][branch][ai][4] += pbes
            ch.frame_sent()
            if done:
                finished.append(snr_index)
        for snr_index in finished:
            snr_todo.remove(snr_index)
    run += 1
    if run % 10 == 0:
        # Progress report every 10 frames; `total` holds the last-updated
        # bit count from the loop above.
        end = time.time()
        duration = (end - start) / 10
        print(f"Run: {run}, time: {duration}s, last_snr_goal: {total}/{bit_goal}, snr_todo: ({len(snr_todo)}) {snr_values[snr_todo]}")
        start = end
print(data)
# Persist the accumulated results. Uses the configured `savefile` variable
# (same path the resume logic reads) instead of a second hard-coded copy of
# the filename, so changing the path in one place is enough.
with h5py.File(savefile, "w") as f:
    f.create_dataset("data", data=data)
# Produce three PDF plots per algorithm: BER vs SNR, CRC failure ratio, and
# payload bit error score, each with one curve per branch count N.
for i, algo_name in enumerate(algo_names):
    # Draw BER over SNR plots
    plt.figure()
    for branch in range(branches):
        probs = np.empty(len(snr_values))
        for snr_i, _ in enumerate(snr_values):
            errors, total, _, _, _ = data[snr_i][branch][i]
            probs[snr_i] = errors / total
        # NOTE(review): the title is (harmlessly) re-set on every branch pass.
        plt.title(algo_name)
        plt.plot(snr_values, probs, label=f"N = {branch+1}")
    plt.xlabel('SNR [dB]')
    plt.ylabel('Bit Error Rate')
    plt.yscale("log")
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{algo_name}_snrber.pdf")
    # Draw payload_error graph
    plt.figure()
    for branch in range(branches):
        probs = np.empty(len(snr_values))
        for snr_i, _ in enumerate(snr_values):
            _, _, payload_fail, slots, _ = data[snr_i][branch][i]
            probs[snr_i] = payload_fail / slots
        plt.plot(snr_values, probs, label=f"N = {branch+1}")
    plt.xlabel("SNR [dB]")
    plt.ylabel("Ratio of packets CRC errors")
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{algo_name}_payload_error.pdf")
    # Draw pbes graph
    plt.figure()
    for branch in range(branches):
        probs = np.empty(len(snr_values))
        for snr_i, _ in enumerate(snr_values):
            _, _, _, slots, pbes = data[snr_i][branch][i]
            probs[snr_i] = pbes / slots
        plt.plot(snr_values, probs, label=f"N = {branch+1}")
    plt.xlabel("SNR [dB]")
    plt.ylabel("Payload Bit Error Score")
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{algo_name}_payload_bit_error_score.pdf")
|
nilq/baby-python
|
python
|
import uuid
import factory.fuzzy
from dataworkspace.apps.request_access import models
from dataworkspace.tests.factories import UserFactory
class AccessRequestFactory(factory.django.DjangoModelFactory):
    """Factory producing AccessRequest rows for tests."""
    # Each access request gets its own freshly-created user.
    requester = factory.SubFactory(UserFactory)
    # Unique address per instance so tests never collide on email.
    contact_email = factory.LazyAttribute(lambda _: f"test.user+{uuid.uuid4()}@example.com")
    reason_for_access = factory.fuzzy.FuzzyText()

    class Meta:
        model = models.AccessRequest
|
nilq/baby-python
|
python
|
# Joey Alexander
# Built by Gautam Mittal (2017)
# Real-time chord detection and improvisation software that uses Fast Fourier Transforms, DSP, and machine learning
import sys
sys.path.append('util')
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from music21 import *
import os, threading, subprocess, numpy as np, atexit, pyaudio, matplotlib.pyplot as plt, chords, peakutils, player
# Set up chord detection variables
global CURRENT_CHORD, CURRENT_SCALE, detection_started
chordFinder = chords.ChordDetector()
chordQualities = chords.qualities
chordRoots = chords.noteNames
# Set up synthesized instrument
instrument = player.Player()
instrument.setBPM(240)
def run():
    """Background improvisation loop: endlessly play the current scale.

    Runs on a worker thread (started from handleNewData); reads the global
    CURRENT_SCALE that the chord-detection UI thread keeps updating.
    """
    global CURRENT_SCALE
    while True:
        instrument.play(CURRENT_SCALE["scale"])
# Given chord symbol return list of 1, 3, 5, 7 scale degrees ("chord tones")
def chordTones(chordSymbol):
    """Return the 1/3/5/7 scale degrees ("chord tones") for a chord symbol.

    Shells out to the bundled ./util/chordScale helper, which prints a
    Python list literal. The original built a shell command by string
    concatenation and eval()'d the output; an argument list (shell=False)
    avoids quoting/injection problems with symbols like "C#7", and
    ast.literal_eval parses the list without eval's arbitrary-code risk.
    """
    import ast
    import subprocess
    output = subprocess.check_output(['./util/chordScale', chordSymbol])
    return ast.literal_eval(output)
# Given a chord, find an appropriate scale to use for improvisation
# Given a chord, find an appropriate scale to use for improvisation
def improvisationScale(chord, symbol):
    """Choose an improvisation scale from common chord-scale conventions.

    Major chords (quality 1) map to the major scale, dominant chords
    (quality 3) to mixolydian, everything else defaults to dorian.
    Returns {'name': <scale name>, 'scale': <list of note names>}.
    """
    if chord.quality == 1:
        scaleType = scale.MajorScale()
    elif chord.quality == 3:
        scaleType = scale.MixolydianScale()
    else:
        scaleType = scale.DorianScale()
    # music21 uses '-' for flats, so translate the 'b' accidentals first.
    tones = [tone.replace('b', '-') for tone in chordTones(symbol)]
    derived = scaleType.derive(tones)      # Scale matching the chord tones
    noteNames = [p.name for p in derived.getPitches()]
    return {'name': derived.name, 'scale': noteNames}
# Record audio in real-time for chord detection
# Record audio in real-time for chord detection
class MicrophoneRecorder(object):
    """Continuously capture microphone audio via a PyAudio callback.

    Chunks arrive on PyAudio's callback thread and are buffered in
    self.frames under a lock; the UI thread drains them with get_frames().
    """
    def __init__(self, rate=2000, chunksize=2**12):
        # NOTE(review): 2 kHz is an unusually low sample rate — presumably
        # chosen to give fine FFT resolution in the musical range; confirm
        # the audio device actually supports it.
        self.rate = rate
        self.chunksize = chunksize
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=self.rate,
                                  input=True,
                                  frames_per_buffer=self.chunksize,
                                  stream_callback=self.new_frame)
        self.lock = threading.Lock()
        self.stop = False
        self.frames = []
        atexit.register(self.close)

    def new_frame(self, data, frame_count, time_info, status):
        """PyAudio callback: buffer one chunk; signal completion if stopping."""
        data = np.fromstring(data, 'int16')
        with self.lock:
            self.frames.append(data)
            if self.stop:
                return None, pyaudio.paComplete
        return None, pyaudio.paContinue

    def get_frames(self):
        """Return all buffered chunks and clear the buffer (thread-safe)."""
        with self.lock:
            frames = self.frames
            self.frames = []
            return frames

    def start(self):
        """Begin streaming audio into the buffer."""
        self.stream.start_stream()

    def close(self):
        """Stop the stream and release PyAudio resources (atexit hook)."""
        with self.lock:
            self.stop = True
        self.stream.close()
        self.p.terminate()
class MplFigure(object):
    """Thin wrapper pairing a matplotlib Figure with its Qt canvas widget."""
    def __init__(self, parent):
        self.figure = plt.figure(facecolor='white')
        self.canvas = FigureCanvas(self.figure)
class LiveFFTWidget(QtGui.QWidget):
    """Main window: live waveform + spectrum plots and chord detection.

    A QTimer polls the microphone buffer every 50 ms; each new chunk is
    chroma-analysed to detect the current chord, which updates the globals
    the improvisation thread reads.
    """
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.initUI()
        self.initData()
        self.initMplWidget()

    def initUI(self):
        """Build the window layout and start the polling timer."""
        vbox = QtGui.QVBoxLayout()
        self.main_figure = MplFigure(self)
        vbox.addWidget(self.main_figure.canvas)
        self.setLayout(vbox)
        self.setGeometry(300, 300, 350, 300)
        self.setWindowTitle('Joey Alexander')
        self.show()
        # Poll the mic buffer every 50 ms; keep a reference so the timer
        # is not garbage-collected.
        timer = QtCore.QTimer()
        timer.timeout.connect(self.handleNewData)
        timer.start(50)
        self.timer = timer

    def initData(self):
        """Start the microphone and precompute time/frequency axes."""
        mic = MicrophoneRecorder()
        mic.start()
        self.mic = mic
        # Frequency bins of the real FFT, and the chunk's time axis in ms.
        self.freq_vect = np.fft.rfftfreq(mic.chunksize,
                                         1./mic.rate)
        self.time_vect = np.arange(mic.chunksize, dtype=np.float32) / mic.rate * 1000

    def initMplWidget(self):
        """Create the waveform (top) and spectrum (bottom) axes."""
        self.ax_top = self.main_figure.figure.add_subplot(211)
        self.ax_top.set_ylim(-32768, 32768)  # full int16 sample range
        self.ax_top.set_xlim(0, self.time_vect.max())
        self.ax_top.set_xlabel(u'time (ms)', fontsize=6)
        self.ax_bottom = self.main_figure.figure.add_subplot(212)
        self.ax_bottom.set_ylim(0, 1)
        self.ax_bottom.set_xlim(0, self.freq_vect.max())
        self.ax_bottom.set_xlabel(u'frequency (Hz)', fontsize=6)
        self.line_top, = self.ax_top.plot(self.time_vect,
                                          np.ones_like(self.time_vect))
        self.line_bottom, = self.ax_bottom.plot(self.freq_vect,
                                                np.ones_like(self.freq_vect))

    # handles the asynchronously collected sound chunks
    def handleNewData(self):
        global detection_started, CURRENT_SCALE, CURRENT_CHORD
        frames = self.mic.get_frames()
        if len(frames) > 0:
            # Only the newest chunk is analysed; older ones are discarded.
            current_frame = frames[-1]
            # get 12x1 chroma vector with respective energies for each note
            chroma = chords.calculateChromagram(self.freq_vect, np.abs(np.fft.rfft(current_frame)))
            chordFinder.detectChord(chroma)
            chordString = ""
            if chordFinder.intervals > 0:
                chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality]) + str(chordFinder.intervals)
            else:
                chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality])
            CURRENT_SCALE = improvisationScale(chordFinder, chordString)
            CURRENT_CHORD = {
                'chord': chordString,
                'root': chordRoots[chordFinder.rootNote],
                'quality': chordQualities[chordFinder.quality],
                'interval': chordFinder.intervals
            }
            print CURRENT_CHORD
            # Start the improvisation thread once the first chord is known.
            if detection_started == False:
                detection_started = True
                t = threading.Thread(target=run).start()
            # plots the time signal
            self.line_top.set_data(self.time_vect, current_frame)
            # Normalized magnitude spectrum for the bottom plot.
            fft_frame = np.fft.rfft(current_frame)
            fft_frame /= np.abs(fft_frame).max()
            self.line_bottom.set_data(self.freq_vect, np.abs(fft_frame))
            self.main_figure.canvas.draw()
# Application entry point: create the Qt app and run its event loop.
if __name__ == "__main__":
    detection_started = False
    app = QtGui.QApplication(sys.argv)
    window = LiveFFTWidget()
    sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
# coding=utf-8
#Author: Chion82<sdspeedonion@gmail.com>
import requests
import urllib
import re
import sys, os
import HTMLParser
import json
from urlparse import urlparse, parse_qs
reload(sys)
sys.setdefaultencoding('utf8')
class PixivHackLib(object):
    """Pixiv crawler: searches by keyword or author and downloads artwork.

    Authenticates with a pre-obtained PHPSESSID session cookie. Images are
    saved under ./pixivimages, and per-author rating totals are accumulated
    and written to author_info.json.

    NOTE(review): this targets the pre-2016 pixiv.net HTML layout; the
    regex-based scraping will silently find nothing on the current site.
    """
    def __init__(self):
        self.__session_id = ''
        self.__session = requests.Session()
        # Desktop browser UA so pixiv serves the regular HTML pages.
        self.__session.headers.update({'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.125 Safari/537.36'})
        # Crawl configuration defaults (overridable via config()).
        self.__keyword = 'kancolle'
        self.__min_ratings = 0
        self.__max_pics = 10
        self.__pic_downloaded_count = 0
        self.__download_manga = True
        self.__download_big_images = True
        # List of {'author_id', 'total_ratings', 'illust_id'} dicts.
        self.__author_ratings = []
        if not os.path.exists('pixivimages'):
            os.makedirs('pixivimages')

    @property
    def session_id(self):
        # The PHPSESSID cookie value used to authenticate every request.
        return self.__session_id

    @session_id.setter
    def session_id(self, id_str):
        self.__session_id = id_str

    def config(self, keyword, min_ratings, max_pics, download_manga, download_big_images):
        """Set search keyword, rating threshold, limits and content types."""
        self.__keyword = keyword
        self.__min_ratings = min_ratings
        self.__max_pics = max_pics
        self.__download_manga = download_manga
        self.__download_big_images = download_big_images

    def crawl(self):
        """Crawl keyword search results until max_pics or pages run out."""
        self.__pic_downloaded_count = 0
        self.__author_ratings = []
        page = 1
        while self.__pic_downloaded_count < self.__max_pics :
            try:
                search_result = self.__get_search_result(page, None)
                # page > 1000 guards against crawling forever.
                if (len(search_result)==0 or page>1000):
                    print('No more result found. ')
                    break
                for link in search_result:
                    if (self.__pic_downloaded_count >= self.__max_pics):
                        break
                    self.__enter_illustration_page(link, 'pixivimages')
                page = page + 1
                print('************************Moving to next page************************')
            except Exception:
                # Best-effort crawl: skip the page and keep going.
                print('Crawl error. Skipping page...')
                page = page + 1
                continue
        print('All Done! Saving author info...')
        self.__save_author_ratings()

    def crawl_by_author(self, author_list, max_pics_per_author):
        """Crawl each author's works, up to max_pics_per_author per author."""
        for author_id in author_list:
            print('***********************Crawling by author*************************')
            print('author Pixiv ID : ' + author_id)
            self.__pic_downloaded_count = 0
            page = 1
            # Each author's downloads go into their own subdirectory.
            if not os.path.exists('pixivimages/' + author_id):
                os.makedirs('pixivimages/' + author_id)
            while self.__pic_downloaded_count < max_pics_per_author:
                try:
                    search_result = self.__get_search_result(page, author_id)
                    if (len(search_result) == 0):
                        print('No more result found.')
                        break
                    for link in search_result:
                        if (self.__pic_downloaded_count >= max_pics_per_author):
                            break
                        self.__enter_illustration_page(link, 'pixivimages/' + author_id)
                    page = page + 1
                    print('************************Moving to next page***************************')
                except Exception:
                    print('Crawl error. Skipping page...')
                    page = page + 1
                    continue
            print('***********************Moving to next author**************************')
        print('All Done!')

    def __get_search_result(self, page, author_id):
        """Fetch one result page (keyword search, or an author's gallery if
        author_id is given) and return the illustration-page URLs on it.

        NOTE(review): retries forever on connection failure (unbounded
        recursion) — deliberate best-effort behaviour, but has no backoff.
        """
        try:
            if (author_id == None):
                search_result = self.__session.get('http://www.pixiv.net/search.php?word=' + urllib.quote(self.__keyword) + '&p=' + str(page), cookies={'PHPSESSID': self.__session_id})
            else:
                search_result = self.__session.get('http://www.pixiv.net/member_illust.php?id=' + author_id + '&type=all&p=' + str(page), cookies={'PHPSESSID': self.__session_id})
        except Exception:
            print('Connection failure. Retrying...')
            return self.__get_search_result(page, author_id)
        result_list = re.findall(r'<a href="(/member_illust\.php\?mode=.*?&illust_id=.*?)">', search_result.text)
        return ['http://www.pixiv.net'+self.__html_decode(link) for link in result_list if (not '"' in link)]

    def __enter_illustration_page(self, url, directory):
        """Open one illustration page, apply the rating filter, and dispatch
        to the manga / single-image / big-image download path."""
        print('********************Entering illustration page*********************')
        print('Entering ' + url)
        try:
            page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id})
        except Exception:
            print('Connection failure. Retrying...')
            self.__enter_illustration_page(url, directory)
            return
        re_result_ratings = re.findall(r'<dd class="rated-count">(.*?)</dd>', page_result.text)
        ratings = re_result_ratings[0]
        pixiv_id = parse_qs(urlparse(url).query)['illust_id'][0]
        re_result_author_id = re.findall(r'<a href="/member\.php\?id=(.*?)" class="user-link">', page_result.text)
        pixiv_author_id = re_result_author_id[0]
        print('pixiv_id=' + pixiv_id)
        print('ratings='+ratings)
        print('author_id='+pixiv_author_id)
        # Skip works below the configured popularity threshold.
        if (int(ratings) < self.__min_ratings):
            print('Ratings < ' + str(self.__min_ratings) + ' , Skipping...')
            return
        self.__increment_author_ratings(pixiv_author_id, int(ratings), pixiv_id)
        # Probe which presentation this work uses.
        re_manga_result = re.findall(r'<a href="(member_illust\.php\?mode=manga&illust_id=.*?)"', page_result.text)
        re_image_result = re.findall(r'data-src="(.*?)" class="original-image"', page_result.text)
        re_big_image_result = re.findall(r'<a href="(member_illust\.php\?mode=big&illust_id=.*?)"', page_result.text)
        if (len(re_manga_result) > 0):
            if (self.__download_manga == False):
                print('Illustration is manga. Skipping...')
                return
            print('Illustration is manga. Entering manga page.')
            self.__enter_manga_page('http://www.pixiv.net/' + self.__html_decode(re_manga_result[0]), pixiv_id, url, directory)
            self.__pic_downloaded_count = self.__pic_downloaded_count + 1
        elif (len(re_image_result) > 0):
            print('Illustration is image. Downloading image...')
            self.__pic_downloaded_count = self.__pic_downloaded_count + 1
            self.__download_image(self.__html_decode(re_image_result[0]), url, directory)
            print('Download completed.')
        elif (len(re_big_image_result) > 0):
            if (self.__download_big_images == False):
                print('Illustration is big-image. Skipping...')
                return
            print('Illustration mode is big-image. Entering big-image page.')
            self.__enter_big_image_page('http://www.pixiv.net/' + self.__html_decode(re_big_image_result[0]), url, directory)
            self.__pic_downloaded_count = self.__pic_downloaded_count + 1
        else:
            print('Illustration mode not supported. Skipping...')

    def __enter_big_image_page(self, url, referer, directory):
        """Open a mode=big page and download the full-size image on it."""
        print('********************Entering big-image page************************')
        print('Entering ' + url)
        try:
            # pixiv requires a Referer header matching the originating page.
            page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
        except Exception:
            print('Connection failure. Retrying...')
            self.__enter_big_image_page(url, referer, directory)
            return
        re_big_image_url = re.findall(r'<img src="(.*?)"', page_result.text)
        print('Downloading big-image.')
        self.__download_image(self.__html_decode(re_big_image_url[0]), url, directory)
        print('Download completed.')

    def __enter_manga_page(self, url, pixiv_id, referer,directory):
        """Open a manga index page and fetch every page of the sequence
        into a per-work subdirectory."""
        print('********************Entering manga page**************************')
        print('Entering ' + url)
        if not os.path.exists(directory + '/' + pixiv_id):
            os.makedirs(directory + '/' + pixiv_id)
        try:
            page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
        except Exception:
            print('Connection failure. Retrying...')
            self.__enter_manga_page(url, pixiv_id, referer,directory)
            return
        re_manga_page_result = re.findall(r'<a href="(/member_illust\.php\?mode=manga_big.*?)"', page_result.text)
        for link in re_manga_page_result:
            self.__enter_manga_big_page('http://www.pixiv.net' + self.__html_decode(link), url, directory + '/' + pixiv_id)

    def __enter_manga_big_page(self, url, referer, directory):
        """Open one manga_big page and download the image it contains."""
        print('********************Entering manga-big page***************************')
        print('Entering ' + url)
        try:
            page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
        except Exception:
            print('Connection failure. Retrying...')
            self.__enter_manga_big_page(url, referer, directory)
            return
        re_image_result = re.findall(r'<img src="(.*?)"', page_result.text)
        print('Downloading manga-big image...')
        self.__download_image(self.__html_decode(re_image_result[0]), url, directory)
        print('Download completed.')

    def __increment_author_ratings(self, author_id, increment, pixiv_id):
        """Add a work's rating count to its author's running total,
        counting each illustration id at most once."""
        for author in self.__author_ratings:
            if (author['author_id'] == author_id):
                if (pixiv_id in author['illust_id']):
                    return
                author['total_ratings'] = author['total_ratings'] + increment
                author['illust_id'].append(pixiv_id)
                return
        # First work seen for this author.
        self.__author_ratings.append({'author_id':author_id, 'total_ratings':increment, 'illust_id':[pixiv_id]})

    def __save_author_ratings(self):
        """Write authors, sorted by total ratings desc, to author_info.json."""
        self.__author_ratings = sorted(self.__author_ratings, key=lambda author:author['total_ratings'], reverse=True)
        f = open('author_info.json','w+')
        f.write(json.dumps(self.__author_ratings))
        f.close()

    def __html_decode(self, string):
        # Decode HTML entities (&amp; etc.) in URLs scraped from markup.
        h = HTMLParser.HTMLParser()
        return h.unescape(string)

    def __download_image(self, url, referer, directory):
        """Stream an image URL to disk, named after the URL's last segment."""
        try:
            download_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
        except Exception:
            print('Connection failure. Retrying...')
            self.__download_image(url, referer, directory)
            return
        if (download_result.status_code != 200):
            print('Download Error')
            print(download_result.text)
            return
        url_parsed_array = url.split('/')
        file_name = url_parsed_array[len(url_parsed_array)-1]
        with open(directory + '/' + file_name, 'wb+') as f:
            for chunk in download_result.iter_content():
                f.write(chunk)
            # Redundant: the with-statement already closes the file.
            f.close()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.2 on 2020-01-20 10:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: point ImageFile.image at api.Image with
    CASCADE deletion and a reverse accessor named `image_files`."""

    dependencies = [
        ('api', '0003_auto_20200117_1430'),
    ]

    operations = [
        migrations.AlterField(
            model_name='imagefile',
            name='image',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_files', to='api.Image'),
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# file_modified.py
# takes input file or string and returns file modified date
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os.path, sys
parent_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(parent_dir)
from util.parse_inputs import parse_inputs
import os.path
import time
# -----------------------------------------------------------------------------
# Variables
# -----------------------------------------------------------------------------
time_format = "%a, %d %b %Y %H:%M:%S"
# -----------------------------------------------------------------------------
# Input should be a list of files or directories
# -----------------------------------------------------------------------------
def file_modified(input_value, time_format="%a, %d %b %Y %H:%M:%S"):
    """Print the modification time of each existing path in input_value.

    Args:
        input_value: iterable of file or directory paths.
        time_format: strftime format for the printed timestamp. The default
            equals the module-level `time_format` constant the original read
            implicitly; exposing it as a keyword parameter is backward
            compatible and lets callers customize the output.

    Prints one "<path>\t<formatted mtime>" line per existing path, or an
    "Unable to find <path>" line for paths that do not exist.
    """
    for path in input_value:
        if os.path.exists(path):
            unix_time = os.path.getmtime(path)
            formatted_time = time.strftime(time_format, time.localtime(unix_time))
            print(str(path) + '\t' + formatted_time)
        else:
            print('Unable to find ' + str(path))
# CLI entry point: read paths from argv/stdin via parse_inputs and report
# their modification times.
if __name__ == "__main__":
    input_value = parse_inputs(strip_newline_stdin=True)
    if input_value:
        file_modified(input_value)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Normalizes ini files."""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R1702
# pylint: disable=R0912
import re
import sys
from collections import defaultdict
class Processor:
    """Accumulate ini-file content and emit it in canonical sorted form."""

    def __init__(self):
        # section name -> {key -> value}; defaultdict spares explicit init.
        self.r: dict[str, dict[str, str]] = defaultdict(dict)
        self.heading = re.compile(r"\[(\w+)\]")
        self.entry = re.compile(r"(\w+)=(.*)")
        self.cur = None

    def line(self, line: str):
        """Consume one raw line, tracking the current [section]."""
        heading_match = self.heading.match(line)
        if heading_match:
            self.cur = heading_match[1]
        entry_match = self.entry.match(line)
        if entry_match:
            if not self.cur:
                # key=value before any [section] line is malformed input.
                raise ValueError("Missing section header")
            key, value = entry_match[1], entry_match[2]
            self.r[self.cur][key] = value

    def out(self) -> str:
        """Return the normalized ini text: sections and keys sorted."""
        sections = []
        for header in sorted(self.r):
            body = [f"[{header}]\n"]
            entries = self.r[header]
            body.extend(f"{key}={entries[key]}\n" for key in sorted(entries))
            sections.append("".join(body))
        return "\n".join(sections)
def main():
    """Normalize every ini file named on the command line to stdout."""
    processor = Processor()
    for fname in sys.argv[1:]:
        with open(fname, encoding="utf8") as handle:
            for raw_line in handle:
                processor.line(raw_line)
    # out() already ends each section with a newline; avoid adding another.
    print(processor.out(), end="")
|
nilq/baby-python
|
python
|
# Demo script: fetch and print the newest message from a 163.com mailbox
# over POP3.
import poplib
from email.parser import Parser

# NOTE(review): credentials are hard-coded in source — fine for a throwaway
# demo, but they should come from env vars/config in anything real. The
# variable `email` also shadows the stdlib `email` package name.
email = 'liang_renhong@163.com'
password = 'lrh0000'
pop3_server = 'pop.163.com'

server = poplib.POP3(pop3_server)
print(server.getwelcome().decode('utf8'))
server.user(email)
server.pass_(password)
print('Message: %s. Size: %s' % (server.stat()))
resp, mails, octets = server.list()
# print(mails)
# Message indices are 1-based; the highest index is the newest message.
index = len(mails)
resp, lines, octets = server.retr(index)
# retr() returns a list of raw byte lines; rebuild and parse the message.
msg_content = b'\r\n'.join(lines).decode('utf-8')
msg = Parser().parsestr(msg_content)
print(msg)
server.quit()
|
nilq/baby-python
|
python
|
def test_dictionary():
    """Exercise core dict behaviour: lookup, mutation and construction."""
    colors = {
        'cherry': 'red',
        'apple': 'green',
        'banana': 'yellow',
    }
    assert isinstance(colors, dict)
    # Lookup by key, plus membership tests.
    assert colors['apple'] == 'green'
    assert colors['banana'] == 'yellow'
    assert colors['cherry'] == 'red'
    assert 'apple' in colors
    assert 'pineapple' not in colors
    # Overwrite an existing key, then insert a brand-new one.
    colors['apple'] = 'red'
    colors['pineapple'] = 'yellow'
    assert colors['pineapple'] == "yellow"
    # Iteration follows insertion order; sorted() gives alphabetical order.
    assert list(colors) == ['cherry', 'apple', 'banana', 'pineapple']
    assert sorted(colors) == [
        'apple', 'banana', 'cherry', 'pineapple'
    ]
    del colors['pineapple']
    assert list(colors) == ['cherry', 'apple', 'banana']
    # dict() accepts a sequence of key/value pairs...
    from_pairs = dict([('sape', 4139), ('guido', 4127),
                       ('jack', 4098)])
    assert from_pairs['sape'] == 4139
    assert from_pairs['guido'] == 4127
    assert from_pairs['jack'] == 4098
    # ...a dict comprehension...
    squares = {n: n**2 for n in (2, 4, 6)}
    assert squares[2] == 4
    assert squares[4] == 16
    assert squares[6] == 36
    # ...or keyword arguments, when the keys are valid identifiers.
    from_kwargs = dict(sape=4139, guido=4127, jack=4098)
    assert from_kwargs['sape'] == 4139
    assert from_kwargs['guido'] == 4127
    assert from_kwargs['jack'] == 4098
|
nilq/baby-python
|
python
|
import os, time, logging, configparser, psutil

# Setting
# Module bootstrap: configure file logging and load the [Filesystem]
# section from settings.ini.
# NOTE(review): the log/ directory must already exist or basicConfig fails;
# config.read() silently yields an empty config if settings.ini is missing.
logging.basicConfig(filename='log/app.log', filemode='w',format='[%(levelname)s][%(name)s][%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Module Loaded')
config = configparser.ConfigParser()
config.read("settings.ini")
filesystem = config['Filesystem']
def _get_name(dirpath):
if str.find(dirpath,'\\') != -1:
if str.find(dirpath,'\\') < len(dirpath)-1:
return dirpath[str.rindex(dirpath,'\\')+1:]
else:
return dirpath
else:
return ''
def _get_parent_path(dirpath):
if str.find(dirpath,'\\') != -1:
if str.find(dirpath,'\\') < len(dirpath)-1:
return dirpath[:str.rindex(dirpath,'\\')]
else:
return dirpath
else:
return ''
def _get_format(filename):
if str.find(filename,'.') != -1:
return filename[str.rindex(filename,'.'):]
else:
return ''
def _get_level(dirpath):
path_list = dirpath.split('\\')
if path_list[1] == '':
level = 1
else:
level = len(path_list)
return level - 1
def _get_measure_index(measure = filesystem['measure']):
    """Map a size-unit name to its divisor in bytes.

    Accepts 'b'/'bytes', 'kb' and 'mb' in any letter case; anything
    unrecognized falls back to bytes (1). The original compared 'b' and
    'kb' case-sensitively but 'mb' case-insensitively (and its tuple
    ('mb', 'MB', 'mb') was redundant); lower-casing once makes every unit
    behave the same way without changing any previously-recognized input.
    """
    unit = measure.lower()
    if unit in ('b', 'bytes'):
        return 1
    if unit == 'kb':
        return 1000
    if unit == 'mb':
        return 1000000
    return 1
def _get_file_size(path, file, measure = filesystem['measure']):
    """Return the size of path/file scaled by the configured measure.

    Returns None (implicitly) if the size cannot be read; the failure is
    logged rather than raised, so callers get a None size for broken files.
    """
    try:
        measure_index = _get_measure_index(measure)
        filepath = os.path.join(path, file)
        return os.path.getsize(filepath) / measure_index
    except Exception as err:
        # NOTE(review): uses the root logging module; the module-level
        # `logger` would tag records with this module's name instead.
        logging.error(f'[Path]: {path} [File]: {file} issue ' + str(err))
def _get_time(path, time_type = 'c'):
    """Return path's creation ('c') or modification ('m') time as a string.

    Timestamps are formatted in GMT as "YYYY-mm-dd HH:MM:SS". Returns None
    (implicitly) for an unknown time_type or when the stat call fails.
    """
    try:
        if time_type == 'c':
            return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime(os.path.getctime(path)))
        if time_type == 'm':
            return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime(os.path.getmtime(path)))
    except Exception as err:
        logging.error(f'[File]: {path} issue ' + str(err))
def get_folder_info(root_disk, dirpath, dirnames, filenames):
    """Build a metadata dict for one directory from an os.walk() step.

    Args mirror os.walk output: dirpath plus its child dir/file name lists.
    Folders carry size 0; counts reflect direct children only.
    """
    file = {}
    file['root'] = root_disk
    file['name'] = _get_name(dirpath)
    file['path'] = dirpath
    file['parent'] = _get_parent_path(dirpath)
    file['file_type'] = 'folder'
    file['format'] = 'folder'
    # NOTE(review): _get_level already returns level-1, so this subtracts
    # one twice — presumably intentional to make the drive root -1/level-0
    # children 0, but worth confirming against consumers of 'level'.
    file['level'] = _get_level(dirpath) - 1
    file['dirs_count'] = len(dirnames)
    file['files_count'] = len(filenames)
    file['size'] = 0
    file['measure'] = filesystem['measure']
    file['created_at'] = _get_time(dirpath, 'c')
    file['updated_at'] = _get_time(dirpath, 'm')
    return file
def get_file_info(root_disk, dirpath, filename):
    """Build a metadata dict for one file inside dirpath.

    Mirrors get_folder_info's schema; size may be None if the file could
    not be stat'ed (see _get_file_size).
    """
    file = {}
    file['root'] = root_disk
    file['name'] = filename
    file['path'] = os.path.join(dirpath, filename)
    file['parent'] = dirpath
    file['file_type'] = 'file'
    file['format'] = _get_format(filename)
    # NOTE(review): same double "-1" as get_folder_info — level is relative
    # to the containing directory, not the file's own path.
    file['level'] = _get_level(dirpath) - 1
    file['dirs_count'] = 0
    file['files_count'] = 0
    file['size'] = _get_file_size(dirpath, filename)
    file['measure'] = filesystem['measure']
    file['created_at'] = _get_time(file['path'], 'c')
    file['updated_at'] = _get_time(file['path'], 'm')
    return file
def get_total_space(root_disk = 'C:\\', measure = filesystem['measure']):
    """Return total/used/free space of root_disk scaled by measure.

    Calls psutil.disk_usage once instead of three times: cheaper, and the
    three figures are guaranteed to come from one consistent snapshot
    instead of three slightly different moments.
    """
    measure_index = _get_measure_index(measure)
    usage = psutil.disk_usage(root_disk)
    total_info = {}
    total_info['root'] = root_disk
    total_info['total'] = usage.total / measure_index
    total_info['used'] = usage.used / measure_index
    total_info['free'] = usage.free / measure_index
    return total_info
|
nilq/baby-python
|
python
|
# Copyright 2016 AC Technologies LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import re
import numpy as np
import six
import sys
from tensor2tensor.data_generators.problem import problem_hparams_to_features
import tensorflow as tf
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.framework import graph_util
from tensorflow.python.util import compat
# Dependency imports
from tensor2tensor import models # pylint: disable=unused-import
from g2p_seq2seq import g2p_problem
from g2p_seq2seq import g2p_trainer_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
from tensor2tensor.data_generators import text_encoder
from six.moves import input
from six import text_type
EOS = text_encoder.EOS
class G2PModel(object):
"""Grapheme-to-Phoneme translation model class.
"""
  def __init__(self, params, train_path="", dev_path="", test_path="",
               cleanup=False, p2g_mode=False):
    """Set up the model directory, register the g2p problem, and build the
    estimator.

    Three mutually exclusive modes, chosen from the arguments/filesystem:
    training (train_path given), frozen-graph inference (frozen_model.pb
    exists in model_dir), or plain inference.
    """
    # Point out the current directory with t2t problem specified for g2p task.
    usr_dir.import_usr_dir(os.path.dirname(os.path.abspath(__file__)))
    self.params = params
    self.test_path = test_path
    if not os.path.exists(self.params.model_dir):
      os.makedirs(self.params.model_dir)
    # Register g2p problem.
    self.problem = registry._PROBLEMS[self.params.problem_name](
        self.params.model_dir, train_path=train_path, dev_path=dev_path,
        test_path=test_path, cleanup=cleanup, p2g_mode=p2g_mode)
    self.frozen_graph_filename = os.path.join(self.params.model_dir,
                                              "frozen_model.pb")
    # Lazily-populated state used by the decoding/interactive paths.
    self.inputs, self.features, self.input_fn = None, None, None
    self.mon_sess, self.estimator_spec, self.g2p_gt_map = None, None, None
    self.first_ex = False
    if train_path:
      # Training mode: also generate the preprocessed train/dev data.
      self.train_preprocess_file_path, self.dev_preprocess_file_path =\
          None, None
      self.estimator, self.decode_hp, self.hparams =\
          self.__prepare_model(train_mode=True)
      self.train_preprocess_file_path, self.dev_preprocess_file_path =\
          self.problem.generate_preprocess_data()
    elif os.path.exists(self.frozen_graph_filename):
      # Inference from a frozen graph exported by a previous run.
      self.estimator, self.decode_hp, self.hparams =\
          self.__prepare_model()
      self.__load_graph()
      self.checkpoint_path = tf.train.latest_checkpoint(self.params.model_dir)
    else:
      # Plain inference from checkpoints in model_dir.
      self.estimator, self.decode_hp, self.hparams =\
          self.__prepare_model()
def __prepare_model(self, train_mode=False):
"""Prepare utilities for decoding."""
hparams = registry.hparams(self.params.hparams_set)
hparams.problem = self.problem
hparams.problem_hparams = self.problem.get_hparams(hparams)
if self.params.hparams:
tf.logging.info("Overriding hparams in %s with %s",
self.params.hparams_set,
self.params.hparams)
hparams = hparams.parse(self.params.hparams)
trainer_run_config = g2p_trainer_utils.create_run_config(hparams,
self.params)
if train_mode:
exp_fn = g2p_trainer_utils.create_experiment_fn(self.params, self.problem)
self.exp = exp_fn(trainer_run_config, hparams)
decode_hp = decoding.decode_hparams(self.params.decode_hparams)
estimator = trainer_lib.create_estimator(
self.params.model_name,
hparams,
trainer_run_config,
decode_hparams=decode_hp,
use_tpu=False)
return estimator, decode_hp, hparams
def __prepare_interactive_model(self):
"""Create monitored session and generator that reads from the terminal and
yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Raises:
ValueError: Could not find a trained model in model_dir.
ValueError: if batch length of predictions are not same.
"""
def input_fn():
"""Input function returning features which is a dictionary of
string feature name to `Tensor` or `SparseTensor`. If it returns a
tuple, first item is extracted as features. Prediction continues until
`input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`)."""
gen_fn = decoding.make_input_fn_from_generator(
self.__interactive_input_fn())
example = gen_fn()
example = decoding._interactive_input_tensor_to_features_dict(
example, self.hparams)
return example
self.res_iter = self.estimator.predict(input_fn)
if os.path.exists(self.frozen_graph_filename):
return
# List of `SessionRunHook` subclass instances. Used for callbacks inside
# the prediction call.
hooks = estimator_lib._check_hooks_type(None)
# Check that model has been trained.
# Path of a specific checkpoint to predict. The latest checkpoint
# in `model_dir` is used
checkpoint_path = estimator_lib.saver.latest_checkpoint(
self.params.model_dir)
if not checkpoint_path:
raise ValueError('Could not find trained model in model_dir: {}.'
.format(self.params.model_dir))
with estimator_lib.ops.Graph().as_default() as graph:
estimator_lib.random_seed.set_random_seed(
self.estimator._config.tf_random_seed)
self.estimator._create_and_assert_global_step(graph)
self.features, input_hooks = self.estimator._get_features_from_input_fn(
input_fn, estimator_lib.model_fn_lib.ModeKeys.PREDICT)
self.estimator_spec = self.estimator._call_model_fn(
self.features, None, estimator_lib.model_fn_lib.ModeKeys.PREDICT,
self.estimator.config)
try:
self.mon_sess = estimator_lib.training.MonitoredSession(
session_creator=estimator_lib.training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=self.estimator_spec.scaffold,
config=self.estimator._session_config),
hooks=hooks)
except:
# raise StandardError("Invalid model in {}".format(self.params.model_dir))
raise ValueError("Invalid model in {}".format(self.params.model_dir))
def decode_word(self, word):
"""Decode word.
Args:
word: word for decoding.
Returns:
pronunciation: a decoded phonemes sequence for input word.
"""
num_samples = 1
decode_length = 100
vocabulary = self.problem.source_vocab
# This should be longer than the longest input.
const_array_size = 10000
input_ids = vocabulary.encode(word)
input_ids.append(text_encoder.EOS_ID)
self.inputs = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(self.inputs) < const_array_size
self.inputs += [0] * (const_array_size - len(self.inputs))
result = next(self.res_iter)
pronunciations = []
if self.decode_hp.return_beams:
beams = np.split(result["outputs"], self.decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = self.problem.target_vocab.decode(
decoding._save_until_eos(beam, is_image=False))
pronunciations.append(beam_string)
tf.logging.info(beam_string)
else:
if self.decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
res = result["outputs"].flatten()
if text_encoder.EOS_ID in res:
index = list(res).index(text_encoder.EOS_ID)
res = res[0:index]
pronunciations.append(self.problem.target_vocab.decode(res))
return pronunciations
def __interactive_input_fn(self):
num_samples = self.decode_hp.num_samples if self.decode_hp.num_samples > 0\
else 1
decode_length = self.decode_hp.extra_length
input_type = "text"
p_hparams = self.hparams.problem_hparams
has_input = "inputs" in p_hparams.input_modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
features = {
"inputs": np.array(self.inputs).astype(np.int32),
}
for k, v in six.iteritems(problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
def __run_op(self, sess, decode_op, feed_input):
"""Run tensorflow operation for decoding."""
results = sess.run(decode_op,
feed_dict={"inp_decode:0" : [feed_input]})
return results
def train(self):
"""Run training."""
print('Training started.')
execute_schedule(self.exp, self.params)
def interactive(self):
"""Interactive decoding."""
self.inputs = []
self.__prepare_interactive_model()
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
saver = tf.train.import_meta_graph(self.checkpoint_path + ".meta",
import_scope=None,
clear_devices=True)
saver.restore(sess, self.checkpoint_path)
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.decode_word, [inp], tf.string)
while True:
word = get_word()
pronunciations = self.__run_op(sess, decode_op, word)
print (" ".join(pronunciations))
else:
while not self.mon_sess.should_stop():
word = get_word()
pronunciations = self.decode_word(word)
print(" ".join(pronunciations))
# To make sure the output buffer always flush at this level
sys.stdout.flush()
def decode(self, output_file_path):
"""Run decoding mode."""
outfile = None
# Output results to a file if given.
if output_file_path:
tf.logging.info("Writing decodes into %s" % output_file_path)
outfile = tf.gfile.Open(output_file_path, "w")
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.__decode_from_file, [inp],
[tf.string, tf.string])
[inputs, decodes] = self.__run_op(sess, decode_op, self.test_path)
else:
inputs, decodes = self.__decode_from_file(self.test_path)
# Output decoding results
for _input, _decode in zip(inputs, decodes):
_input = compat.as_text(_input)
_decode = compat.as_text(_decode)
if output_file_path:
outfile.write("{} {}\n".format(_input, _decode))
else:
print("Raw prediction: {} {}".format(_input, _decode))
def evaluate(self):
"""Run evaluation mode."""
words, pronunciations = [], []
for case in self.problem.generator(self.test_path,
self.problem.source_vocab,
self.problem.target_vocab):
word = self.problem.source_vocab.decode(case["inputs"]).replace(
EOS, "").strip()
pronunciation = self.problem.target_vocab.decode(case["targets"]).replace(
EOS, "").strip()
words.append(word)
pronunciations.append(pronunciation)
self.g2p_gt_map = create_g2p_gt_map(words, pronunciations)
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.calc_errors, [inp],
[tf.int64, tf.int64, tf.int64, tf.int64])
results = self.__run_op(sess, decode_op, self.test_path)
else:
results = self.calc_errors(self.test_path)
word_correct, word_errors, phone_errors, total_ref_phones = results
wer = 100.0 * word_errors / (word_correct + word_errors)
per = 100.0 * phone_errors / total_ref_phones
print("="*80)
print("Total: {} words, {} phones".\
format(word_correct + word_errors, total_ref_phones))
print("Word errors: {} ({:.2f}%)".format(word_errors, wer))
print("Phone errors: {} ({:.2f}%)".format(phone_errors, per))
print("Total word errors: {}".format(word_errors))
print("Total phone errors: {}".format(phone_errors))
print("="*80)
def freeze(self):
"""Freeze pre-trained model."""
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(self.params.model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_folder + "/frozen_model.pb"
# Before exporting our graph, we need to precise what is our output node
# This is how TF decides what part of the Graph he has to keep and what
# part it can dump
# NOTE: this variable is plural, because you can have multiple output nodes
output_node_names = []
hparams = self.params.hparams.split(",")
num_layers = [int(hp.split("=")[1]) for hp in hparams
if hp.startswith("num_hidden_layers")][0]
root_dir = "transformer/parallel_0_4/transformer/transformer/body"
for i in range(num_layers):
output_node_names.append("{}/encoder/layer_{}/self_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
for i in range(num_layers):
output_node_names.append("{}/decoder/layer_{}/self_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
output_node_names.append("{}/decoder/layer_{}/encdec_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
# We clear devices to allow TensorFlow to control on which device it will
# load operations
clear_devices = True
# We import the meta graph and retrieve a Saver
saver = tf.train.import_meta_graph(input_checkpoint + '.meta',
clear_devices=clear_devices)
# We retrieve the protobuf graph definition
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
# We start a session and restore the graph weights
with tf.Session() as sess:
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
input_graph_def, # The graph_def is used to retrieve the nodes
output_node_names, # The output node names are used to select the
#usefull nodes
variable_names_blacklist=['global_step'])
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as output_graph_file:
output_graph_file.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
def __load_graph(self):
"""Load freezed graph."""
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.gfile.GFile(self.frozen_graph_filename, "rb") as frozen_graph_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(frozen_graph_file.read())
# Then, we import the graph_def into a new Graph and returns it
with tf.Graph().as_default() as self.graph:
# The name var will prefix every op/nodes in your graph
# Since we load everything in a new graph, this is not needed
tf.import_graph_def(graph_def, name="import")
def __decode_from_file(self, filename):
"""Compute predictions on entries in filename and write them out."""
if not self.decode_hp.batch_size:
self.decode_hp.batch_size = 32
tf.logging.info("decode_hp.batch_size not specified; default=%d" %
self.decode_hp.batch_size)
p_hparams = self.hparams.problem_hparams
inputs_vocab = p_hparams.vocabulary["inputs"]
targets_vocab = p_hparams.vocabulary["targets"]
problem_name = "grapheme_to_phoneme_problem"
tf.logging.info("Performing decoding from a file.")
inputs = _get_inputs(filename)
num_decode_batches = (len(inputs) - 1) // self.decode_hp.batch_size + 1
def input_fn():
"""Function for inputs generator."""
input_gen = _decode_batch_input_fn(
num_decode_batches, inputs, inputs_vocab,
self.decode_hp.batch_size, self.decode_hp.max_input_size)
gen_fn = decoding.make_input_fn_from_generator(input_gen)
example = gen_fn()
return decoding._decode_input_tensor_to_features_dict(example,
self.hparams)
decodes = []
result_iter = self.estimator.predict(input_fn)
try:
for result in result_iter:
if self.decode_hp.return_beams:
decoded_inputs = inputs_vocab.decode(
decoding._save_until_eos(result["inputs"], False))
beam_decodes = []
output_beams = np.split(result["outputs"], self.decode_hp.beam_size,
axis=0)
for k, beam in enumerate(output_beams):
decoded_outputs = targets_vocab.decode(
decoding._save_until_eos(beam, False))
beam_decodes.append(decoded_outputs)
decodes.append(beam_decodes)
else:
decoded_inputs = inputs_vocab.decode(
decoding._save_until_eos(result["inputs"], False))
decoded_outputs = targets_vocab.decode(
decoding._save_until_eos(result["outputs"], False))
decodes.append(decoded_outputs)
except:
# raise StandardError("Invalid model in {}".format(self.params.model_dir))
raise ValueError("Invalid model in {}".format(self.params.model_dir))
return [inputs, decodes]
def calc_errors(self, decode_file_path):
"""Calculate a number of word and phone prediction errors."""
inputs, decodes = self.__decode_from_file(decode_file_path)
word_correct, word_errors, phone_errors = 0, 0, 0
total_ref_phones = 0
word_set = set()
for index, word in enumerate(inputs):
if word in word_set:
continue
word_set.add(word)
# Estimate #phones of the word
ref_phone_count = np.mean([len(ref_str.split(" "))
for ref_str in self.g2p_gt_map[word]])
total_ref_phones += int(ref_phone_count)
if self.decode_hp.return_beams:
beam_correct_found = False
for beam_decode in decodes[index]:
if beam_decode in self.g2p_gt_map[word]:
beam_correct_found = True
break
if beam_correct_found:
word_correct += 1
else:
word_errors += 1
# Estimate phone-level errors
phone_error = phone_errors_for_single_word(decodes[index],
self.g2p_gt_map[word])
phone_errors += phone_error
else:
if decodes[index] in self.g2p_gt_map[word]:
word_correct += 1
else:
word_errors += 1
# Estimate phone-level errors
phone_error = phone_errors_for_single_word([decodes[index]],
self.g2p_gt_map[word])
phone_errors += phone_error
return word_correct, word_errors, phone_errors, total_ref_phones
def phone_errors_for_single_word(predicted_strs, ref_strs):
  """Minimum phone-level edit distance over every (prediction, reference) pair.

  Given decoded results (one per beam) and the list of reference
  pronunciations for the word, return the smallest edit distance found.
  """
  return min(phone_edit_distance(predicted, ref)
             for ref in ref_strs
             for predicted in predicted_strs)
def phone_edit_distance(predicted_str, ref_str):
  """Token-level Levenshtein distance between two space-separated
  phone sequences (unit cost for insert/delete/substitute)."""
  hyp = predicted_str.split(" ")
  ref = ref_str.split(" ")
  # Rolling single-row DP: prev[i] is the distance between the first j-1
  # reference tokens and the first i hypothesis tokens.
  prev = list(range(len(hyp) + 1))
  for j, ref_tok in enumerate(ref, start=1):
    curr = [j]
    for i, hyp_tok in enumerate(hyp, start=1):
      if hyp_tok == ref_tok:
        curr.append(prev[i - 1])
      else:
        curr.append(1 + min(prev[i], curr[i - 1], prev[i - 1]))
    prev = curr
  return prev[len(hyp)]
def get_word():
  """Prompt for and return the next word in the interactive mode.

  Returns an empty string on EOF (e.g. Ctrl-D) or empty input.
  """
  try:
    return input("> ")
  except EOFError:
    return ""
def create_g2p_gt_map(words, pronunciations):
  """Create grapheme-to-phoneme ground truth mapping.

  Maps each word to the list of all its reference pronunciations,
  preserving input order.
  """
  gt_map = {}
  for word, pron in zip(words, pronunciations):
    gt_map.setdefault(word, []).append(pron)
  return gt_map
def _get_inputs(filename, delimiters="\t "):
  """Returning inputs.

  Args:
    filename: path to file with inputs, 1 per line.
    delimiters: str, delimits records in the file.

  Returns:
    a list of inputs (the grapheme column of each line).
  """
  tf.logging.info("Getting inputs")
  delimiters_regex = re.compile("[" + delimiters + "]+")
  inputs = []
  with tf.gfile.Open(filename) as input_file:
    for line in input_file.readlines():
      # Fix: the previous code tested character-set intersection against the
      # literal pattern text (so '[', ']', '+', '$' counted as delimiters)
      # instead of matching the regex. Splitting unconditionally is both
      # correct and equivalent: when no delimiter is present, re.split
      # returns the whole stripped line as the single item.
      inputs.append(delimiters_regex.split(line.strip(), maxsplit=1)[0])
  return inputs
def _decode_batch_input_fn(num_decode_batches, inputs,
                           vocabulary, batch_size, max_input_size):
  """Yield padded, encoded input batches for file-based decoding."""
  for batch_idx in range(num_decode_batches):
    tf.logging.info("Decoding batch %d out of %d" % (batch_idx, num_decode_batches))
    batch = inputs[batch_idx * batch_size:(batch_idx + 1) * batch_size]
    encoded = []
    for item in batch:
      ids = vocabulary.encode(item)
      if max_input_size > 0:
        # Subtract 1 for the EOS_ID.
        ids = ids[:max_input_size - 1]
      ids.append(text_encoder.EOS_ID)
      encoded.append(ids)
    # Right-pad every sequence with zeros to the longest in the batch.
    longest = max((len(ids) for ids in encoded), default=0)
    padded = [ids + [0] * (longest - len(ids)) for ids in encoded]
    yield {
        "inputs": np.array(padded).astype(np.int32),
        "problem_choice": np.array(0).astype(np.int32),
    }
def execute_schedule(exp, params):
  """Run the experiment method named by params.schedule, with profiling."""
  schedule_name = params.schedule
  if not hasattr(exp, schedule_name):
    raise ValueError(
        "Experiment has no method %s, from --schedule" % schedule_name)
  with profile_context(params):
    getattr(exp, schedule_name)()
@contextlib.contextmanager
def profile_context(params):
  """Context manager: wrap execution in tfprof profiling when params.profile
  is set, otherwise a no-op."""
  if not params.profile:
    yield
    return
  with tf.contrib.tfprof.ProfileContext("t2tprof",
                                        trace_steps=range(100),
                                        dump_steps=range(100)) as pctx:
    opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
    pctx.add_auto_profiling("op", opts, range(100))
    yield
|
nilq/baby-python
|
python
|
import os
from shutil import copy
def prepare_iso_linux(iso_base_dir, rootfs_dir):
    """Copy isolinux boot files and the kernel image into the ISO staging dir.

    Boot files come from /etc/omni-imager/isolinux; the kernel is taken from
    <rootfs_dir>/boot/vmlinuz-* and staged as <iso_base_dir>/vmlinuz.
    """
    import glob  # local import keeps this fix self-contained

    # copy isolinux files to the corresponding folder
    for name in ('isolinux.bin', 'isolinux.cfg', 'ldlinux.c32'):
        copy('/etc/omni-imager/isolinux/' + name, iso_base_dir)

    # Copy the kernel image. Fix: expand the vmlinuz-* glob in Python and
    # use shutil.copy instead of shelling out via os.system (no shell, no
    # word-splitting on paths). Also avoids shadowing the builtin `file`.
    for kernel in glob.glob(rootfs_dir + '/boot/vmlinuz-*'):
        copy(kernel, iso_base_dir + '/vmlinuz')
def make_iso(iso_base, rootfs_dir):
    """Stage boot files then build ../openEuler-test.iso from iso_base."""
    prepare_iso_linux(iso_base, rootfs_dir)
    orig_dir = os.getcwd()
    os.chdir(iso_base)
    try:
        cmd = ('mkisofs -R -l -D -o ../openEuler-test.iso -b isolinux.bin '
               '-c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table ./')
        os.system(cmd)
    finally:
        # Fix: always restore the caller's working directory, even if
        # staging or the mkisofs invocation raises.
        os.chdir(orig_dir)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1 on 2021-03-02 21:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add CalibratedParameter.price_yield_correction_factor (decimal,
    default 1) and an optional CropModification.region foreign key."""

    dependencies = [
        ('waterspout_api', '0007_auto_20201215_1526'),
    ]

    operations = [
        migrations.AddField(
            model_name='calibratedparameter',
            name='price_yield_correction_factor',
            field=models.DecimalField(decimal_places=3, default=1, max_digits=6),
        ),
        migrations.AddField(
            model_name='cropmodification',
            name='region',
            # Nullable so existing rows are untouched; DO_NOTHING leaves
            # referential cleanup to the application/database.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='crop_modifications', to='waterspout_api.crop'),
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Interactive game: keep guessing the program's randomly chosen color."""
import sys, utils, random # import the modules we will need
utils.check_version((3,7)) # make sure we are running at least Python 3.7
utils.clear() # clear the screen
print('Greetings!')
# Candidate colors the computer may pick from each round.
colors = ['red','orange','yellow','green','blue','violet','purple'] # make a list of color
play_again = '' # empty answer means keep playing
best_count = sys.maxsize # fewest guesses in any round so far (starts "infinite")
while (play_again != 'n' and play_again != 'no'):
    match_color = random.choice(colors) # the color to guess this round
    count = 0 # guesses taken this round
    color = '' # last guess entered
    while (color != match_color):
        color = input("\nWhat is my favorite color? ")
        color = color.lower().strip() # normalize case/whitespace for comparison
        count += 1
        if (color == match_color):
            print('Correct!')
        else:
            print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count))
    print('\nYou guessed it in {0} tries!'.format(count))
    if (count < best_count): # new personal best?
        print('This was your best guess so far!')
        best_count = count
    play_again = input("\nWould you like to play again? ").lower().strip()
print('Thanks for playing!')
|
nilq/baby-python
|
python
|
import sys
# True when running under Python 3 or later.
PY3 = (sys.version_info[0] >= 3)

if PY3:
    # Python 3 has a single text type: map the legacy names onto ``str``.
    basestring = unicode = str
else:
    # Python 2: re-export the builtins so callers can import them from here.
    unicode = unicode
    basestring = basestring

if PY3:
    # ``execfile`` was removed in Python 3; use the compatibility shim.
    from ._py3compat import execfile
else:
    execfile = execfile
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import subprocess
#----------------------------------------------------------------------
## generic pipe-like cleaning functions
def rpl(x, y=''):
    """Return a function mapping text to text with every `x` replaced by `y`
    (deletion by default)."""
    return lambda s: s.replace(x, y)
def pipe(*args):
    """Return a function that feeds text to the given command line on stdin
    and yields its captured stdout."""
    command = list(args)

    def _run(txt):
        completed = subprocess.run(command, input=txt,
                                   text=True, capture_output=True)
        return completed.stdout

    return _run
def read_file(path):
    """Return the full text contents of the file at `path`."""
    with open(path) as handle:
        return handle.read()
def remove_blanklines(txt):
    """Drop empty lines from `txt`, rejoining the rest with newlines."""
    return '\n'.join(filter(None, txt.splitlines()))
#----------------------------------------------------------------------
## main process pipeline
def main(path):
    """Read the file at `path` and run it through the cleaning pipeline.

    Returns the cleaned text (comments stripped, blank lines removed,
    semicolons dropped, C74 macros normalized).
    """
    # text processing pipeline
    pipeline = [
        pipe('/usr/local/bin/stripcmt'), # strip comments
        remove_blanklines,
        rpl(';'),
        rpl('C74_CONST', 'const'),
        rpl('(void)', '()'),
    ]
    # Fix: reuse the module's read_file helper instead of duplicating the
    # open/read logic inline.
    txt = read_file(path)
    # process it
    for func in pipeline:
        txt = func(txt)
    return txt
if __name__ == '__main__':
    # CLI entry point: clean the file named by the first argument.
    output = main(sys.argv[1])
    print(output) # for convenient redirection
|
nilq/baby-python
|
python
|
import flask
from flask import request, jsonify
from secrets import secrets
from babel_categories import BabelCategories
from babel_hypernyms import BabelHypernyms
from babel_lemmas_of_senses import BabelLemmasOfSenses
from babel_parser import BabelParser
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/', methods=['GET'])
def home():
    """Landing page describing the API (Spanish copy preserved verbatim)."""
    return ("<h1>API para reconocimiento de metáforas</h1>"
            "<p>Esta es un prototipo de API para reconocimiento de metáforas en castellano.</p>")
def parse_mode(args, key):
    """Build the syntactic parser requested in `args`.

    Defaults to BabelParser when no 'parser' argument is given; raises if an
    unknown parser is requested.
    """
    if 'parser' not in args or args['parser'] == 'babel_parser':
        # Fix: honour the API key the caller resolved (choose_parser_key)
        # instead of always reading the configured default — mirrors how
        # source_mode uses its key argument.
        return BabelParser(key)
    raise Exception('El parser elegido no existe')
def source_mode(args, key):
    """Instantiate the metaphor-checking backend selected by args['mode']."""
    if 'mode' not in args:
        raise Exception('No se ha elegido un método de comprobación')
    backends = {
        'babel_categories': BabelCategories,
        'babel_hypernyms': BabelHypernyms,
        'babel_senses': BabelLemmasOfSenses,
    }
    backend = backends.get(args['mode'])
    if backend is None:
        raise Exception('El método de comprobación elegido no existe')
    return backend(key)
def choose_parser_key(args):
    """API key for the parser: caller-supplied 'parser_key' or the configured default."""
    return args['parser_key'] if 'parser_key' in args else secrets['babel_key']
def choose_source_key(args):
    """API key for the checking backend: 'mode_key' if given, else the configured default."""
    return args['mode_key'] if 'mode_key' in args else secrets['babel_key']
def get_text(args):
    """Extract the text to analyse from the request args, failing loudly when absent."""
    if 'text' not in args:
        raise Exception('Es necesario proporcionar el texto a analizar')
    return args['text']
@app.route('/api/v1/check', methods=['GET'])
def api_v1_check():
    """Analyse the given text for metaphors with the selected parser/backend.

    Query params: text (required), parser, parser_key, mode (required),
    mode_key. Returns a JSON body plus a permissive CORS header.
    """
    # TODO: check the case where the Babel API returns nothing.
    parser_key = choose_parser_key(request.args)
    source_key = choose_source_key(request.args)
    parser = parse_mode(request.args, parser_key)
    source = source_mode(request.args, source_key)
    text = get_text(request.args)
    # Fix: catch Exception (not bare except) and chain the original error so
    # the root cause is preserved instead of silently discarded.
    try:
        word_and_id = parser.parse(text)
    except Exception as e:
        raise Exception('Hubo un problema analizando sintácticamente el texto') from e
    try:
        metaphors_found = source.find_metaphors(word_and_id)
    except Exception as e:
        raise Exception('Hubo un problema buscando la metáfora') from e
    return {
        'text': text,
        'parser': parser.toString(),
        'mode': source.toString(),
        'relation': metaphors_found['relation'],
        'isMetaphor': metaphors_found['isMetaphor'],
        'reason': metaphors_found['reason'],
    }, 200, {'Access-Control-Allow-Origin': '*'}
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
nilq/baby-python
|
python
|
__author__ = 'Spasley'
|
nilq/baby-python
|
python
|
from rest_framework import exceptions, status
from api.services import translation
class PreconditionFailedException(exceptions.APIException):
    """DRF API exception rendered as HTTP 412 Precondition Failed, with the
    project's translated default message."""
    status_code = status.HTTP_412_PRECONDITION_FAILED
    default_detail = translation.Messages.MSG_PRECONDITION_FAILED
    default_code = 'precondition_failed'
|
nilq/baby-python
|
python
|
import warnings
import pulumi
class Provider(pulumi.ProviderResource):
    """
    The provider type for the kubernetes package.
    """
    def __init__(self,
                 resource_name,
                 opts=None,
                 cluster=None,
                 context=None,
                 enable_dry_run=None,
                 kubeconfig=None,
                 namespace=None,
                 suppress_deprecation_warnings=None,
                 render_yaml_to_directory=None,
                 __name__=None,
                 __opts__=None):
        """
        Create a Provider resource with the given unique name, arguments, and options.

        :param str resource_name: The unique name of the resource.
        :param pulumi.ResourceOptions opts: An optional bag of options that controls this resource's behavior.
        :param pulumi.Input[str] cluster: If present, the name of the kubeconfig cluster to use.
        :param pulumi.Input[str] context: If present, the name of the kubeconfig context to use.
        :param pulumi.Input[bool] enable_dry_run: BETA FEATURE - If present and set to True, enable server-side diff
               calculations. This feature is in developer preview, and is disabled by default.
               This config can be specified in the following ways, using this precedence:
               1. This `enableDryRun` parameter.
               2. The `PULUMI_K8S_ENABLE_DRY_RUN` environment variable.
        :param pulumi.Input[str] kubeconfig: The contents of a kubeconfig file.
               If this is set, this config will be used instead of $KUBECONFIG.
        :param pulumi.Input[str] namespace: If present, the default namespace to use.
               This flag is ignored for cluster-scoped resources.
               A namespace can be specified in multiple places, and the precedence is as follows:
               1. `.metadata.namespace` set on the resource.
               2. This `namespace` parameter.
               3. `namespace` set for the active context in the kubeconfig.
        :param pulumi.Input[bool] suppress_deprecation_warnings: If present and set to True, suppress apiVersion
               deprecation warnings from the CLI.
               This config can be specified in the following ways, using this precedence:
               1. This `suppressDeprecationWarnings` parameter.
               2. The `PULUMI_K8S_SUPPRESS_DEPRECATION_WARNINGS` environment variable.
        :param pulumi.Input[str] render_yaml_to_directory: BETA FEATURE - If present, render resource manifests to this
               directory. In this mode, resources will not be created on a Kubernetes cluster, but
               the rendered manifests will be kept in sync with changes to the Pulumi program.
               This feature is in developer preview, and is disabled by default. Note that some
               computed Outputs such as status fields will not be populated since the resources are
               not created on a Kubernetes cluster. These Output values will remain undefined,
               and may result in an error if they are referenced by other resources. Also note that
               any secret values used in these resources will be rendered in plaintext to the
               resulting YAML.
        """
        # Legacy dunder arguments take precedence for backward compatibility,
        # but emit deprecation warnings.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        # Validate arguments before handing them to the Pulumi engine.
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Map the snake_case keyword arguments onto the provider's camelCase inputs.
        __props__ = {
            "cluster": cluster,
            "context": context,
            "enableDryRun": enable_dry_run,
            "kubeconfig": kubeconfig,
            "namespace": namespace,
            "suppressDeprecationWarnings": suppress_deprecation_warnings,
            "renderYamlToDirectory": render_yaml_to_directory,
        }
        super(Provider, self).__init__("kubernetes", resource_name, __props__, opts)
|
nilq/baby-python
|
python
|
import json
import pulumi
import pulumi_aws as aws
# CONFIG
# NOTE(review): credentials are hard-coded in source; move DB_PASSWORD into
# Pulumi config/secrets before any real deployment.
DB_NAME='dbdemo'
DB_USER='user1'
DB_PASSWORD='p2mk5JK!'
DB_PORT=6610
IAM_ROLE_NAME = 'redshiftrole'

# IAM role that the Redshift service is allowed to assume.
redshift_role = aws.iam.Role(IAM_ROLE_NAME,
    assume_role_policy=json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": "sts:AssumeRole",
            "Effect": "Allow",
            "Sid": "",
            "Principal": {
                "Service": "redshift.amazonaws.com",
            },
        }],
    }))

# allow s3 read
aws.iam.RolePolicyAttachment(IAM_ROLE_NAME+'attachment',
    role=redshift_role.name,
    policy_arn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")

# Single-node Redshift cluster wired to the role above.
redshift_cluster = aws.redshift.Cluster("default",
    cluster_identifier="moshe-cluster",
    cluster_type="single-node",
    database_name=DB_NAME,
    master_password=DB_PASSWORD,
    master_username=DB_USER,
    node_type="dc1.large",
    iam_roles=[redshift_role.arn],
    port=DB_PORT,
    skip_final_snapshot=True,
)

# Stack outputs: role ARN and the cluster endpoint DNS name.
pulumi.export('arn', redshift_role.arn)
pulumi.export('host', redshift_cluster.dns_name)
|
nilq/baby-python
|
python
|
# Exercises the pywin32 mmapfile module: named mappings, read/write/move,
# resize, and >32-bit offsets.
# NOTE(review): Python 2 syntax (print statements, "except E, e") — this
# script will not run under Python 3.
import win32api, mmapfile
import winerror
import tempfile, os
from pywin32_testutil import str2bytes

# Page size and allocation granularity drive the rounding logic below.
system_info=win32api.GetSystemInfo()
page_size=system_info[1]
alloc_size=system_info[7]

fname=tempfile.mktemp()
mapping_name=os.path.split(fname)[1]
fsize=8*page_size
print fname, fsize, mapping_name

# Basic byte-level write/seek/read round-trip.
m1=mmapfile.mmapfile(File=fname, Name=mapping_name, MaximumSize=fsize)
m1.seek(100)
m1.write_byte(str2bytes('?'))
m1.seek(-1,1)
assert m1.read_byte()==str2bytes('?')

## A reopened named mapping should have exact same size as original mapping
m2=mmapfile.mmapfile(Name=mapping_name, File=None, MaximumSize=fsize*2)
assert m2.size()==m1.size()

# Writes through one handle must be visible through the other.
m1.seek(0,0)
m1.write(fsize*str2bytes('s'))
assert m2.read(fsize)==fsize*str2bytes('s')

# Exercise move(): copy a region within the mapping.
move_src=100
move_dest=500
move_size=150

m2.seek(move_src,0)
assert m2.tell()==move_src
m2.write(str2bytes('m')*move_size)
m2.move(move_dest, move_src, move_size)
m2.seek(move_dest, 0)
assert m2.read(move_size) == str2bytes('m') * move_size
## m2.write('x'* (fsize+1))

# Grow the mapping and fill the new region.
m2.close()
m1.resize(fsize*2)
assert m1.size()==fsize * 2
m1.seek(fsize)
m1.write(str2bytes('w') * fsize)
m1.flush()
m1.close()
os.remove(fname)

## Test a file with size larger than 32 bits
## need 10 GB free on drive where your temp folder lives
fname_large=tempfile.mktemp()
mapping_name='Pywin32_large_mmap'
offsetdata=str2bytes('This is start of offset')

## Deliberately use odd numbers to test rounding logic
fsize = (1024*1024*1024*10) + 333
offset = (1024*1024*32) + 42
view_size = (1024*1024*16) + 111

## round mapping size and view size up to multiple of system page size
if fsize%page_size:
    fsize += page_size - (fsize%page_size)
if view_size%page_size:
    view_size += page_size - (view_size%page_size)

## round offset down to multiple of allocation granularity
offset -= offset%alloc_size

m1=None
m2=None
try:
    try:
        m1=mmapfile.mmapfile(fname_large, mapping_name, fsize, 0, offset*2)
    except mmapfile.error, exc:
        # if we don't have enough disk-space, that's OK.
        if exc.winerror!=winerror.ERROR_DISK_FULL:
            raise
        print "skipping large file test - need", fsize, "available bytes."
    else:
        m1.seek(offset)
        m1.write(offsetdata)

        ## When reopening an existing mapping without passing a file handle, you have
        ## to specify a positive size even though it's ignored
        m2=mmapfile.mmapfile(File=None, Name=mapping_name, MaximumSize=1,
                             FileOffset=offset, NumberOfBytesToMap=view_size)
        assert m2.read(len(offsetdata))==offsetdata
finally:
    # Always release both views and delete the (potentially huge) temp file.
    if m1 is not None:
        m1.close()
    if m2 is not None:
        m2.close()
    if os.path.exists(fname_large):
        os.remove(fname_large)
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation.
# Copyright (c) 2018 Jensen Group
# Licensed under the MIT License.
"""
Module for generating rdkit molobj/smiles/molecular graph from free atoms
Implementation by Jan H. Jensen, based on the paper
Yeonjoon Kim and Woo Youn Kim
"Universal Structure Conversion Method for Organic Molecules: From Atomic Connectivity
to Three-Dimensional Geometry"
Bull. Korean Chem. Soc. 2015, Vol. 36, 1769-1777
DOI: 10.1002/bkcs.10334
"""
from qdk.chemistry._xyz2mol.ac import xyz2AC, AC2mol
from qdk.chemistry._xyz2mol.util import chiral_stereo_check
def xyz2mol(
        atoms,
        coordinates,
        charge=0,
        allow_charged_fragments=True,
        use_graph=True,
        use_huckel=False,
        embed_chiral=True
):
    """
    Generate rdkit mol objects from atoms, coordinates and a total charge.

    args:
        atoms - list of atom types (int)
        coordinates - 3xN Cartesian coordinates
        charge - total charge of the system (default: 0)
    optional:
        allow_charged_fragments - alternatively radicals are made
        use_graph - use graph (networkx)
        use_huckel - Use Huckel method for atom connectivity prediction
        embed_chiral - embed chiral information to the molecule
    returns:
        mols - list of rdkit molobjects
    """
    # Derive the atom-connectivity (AC) matrix together with a bare mol
    # object that carries no bond information yet.
    AC, mol = xyz2AC(atoms, coordinates, charge, use_huckel=use_huckel)

    # Promote connectivity to bond orders and formal charges on the mol.
    mols = AC2mol(
        mol, AC, atoms, charge,
        allow_charged_fragments=allow_charged_fragments,
        use_graph=use_graph,
    )

    # Optionally assign stereo/chirality information in place.
    if embed_chiral:
        for candidate in mols:
            chiral_stereo_check(candidate)

    return mols
|
nilq/baby-python
|
python
|
import logging
import os
import socket
from logging import Logger
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
import docker
import dockerpty
from docker import DockerClient
from docker.models.images import Image
from docker.errors import APIError, DockerException
from requests import RequestException
from .utils import BLDRSetupFailed
def _create_docker_client() -> DockerClient:
    """Build a Docker client from the environment, wrapping failures in BLDRSetupFailed."""
    try:
        return docker.from_env(version='auto')
    except DockerException as error:
        message = 'Cannot create Docker client. Is Docker daemon running?\nAdditional info: {}'.format(error)
        raise BLDRSetupFailed(message)
def _check_docker_client(client: DockerClient) -> None:
    """Ping the Docker daemon; raise BLDRSetupFailed when it is unreachable."""
    try:
        client.ping()
    except (DockerException, RequestException) as error:
        message = 'Cannot connect to Docker daemon. Is Docker daemon running?\nAdditional info: {}'.format(error)
        raise BLDRSetupFailed(message)
class DockerImageBuilder:
    """Builds Docker images through the low-level API, streaming build output to a logger."""

    def __init__(self, client: Optional[DockerClient] = None, logger: Logger = logging.getLogger('DockerImageBuilder')) -> None:
        self._logger: logging.Logger = logger
        # Fall back to an environment-derived client when none is supplied.
        self._client: DockerClient = client if client is not None else _create_docker_client()
        _check_docker_client(self._client)

    def build(self, path: Path, dockerfile: str, tag: str, buildargs: Dict, nocache: bool = False) -> 'DockerImage':
        """Build the image at *path*, raising DockerException on build errors."""
        events = self._client.api.build(
            path=str(path),
            dockerfile=dockerfile,
            tag=tag,
            forcerm=True,
            nocache=nocache,
            buildargs=buildargs,
            decode=True,
        )
        for event in events:
            message = event.get('stream')
            if message is not None:
                self._logger.debug(message.strip())
            elif event.get('errorDetail') is not None:
                raise DockerException(event.get('error'))
        return DockerImage(client=self._client, image=tag)
class DockerImage:
    """Wraps a Docker image reference and spawns containers from it."""

    def __init__(self, image: Union[str, Image], client: Optional[DockerClient] = None, logger: Optional[Logger] = None) -> None:
        self._client = client if client is not None else _create_docker_client()
        _check_docker_client(self._client)
        self._logger = logger if logger is not None else logging.getLogger('DockerImage')
        self._tag = image

    def create_container(self, **kwargs: Any) -> 'DockerContainer':
        """Create (but do not start) a container from this image."""
        return DockerContainer(client=self._client, image=self._tag, **kwargs)
class DockerContainer:
    """Wrapper around a single created Docker container.

    Intended to be used as a context manager: the container is started in
    __enter__ and killed/removed in __exit__.
    """
    def __init__(
        self,
        image: Union[str, Image],
        command: Union[str, List],
        environment: Optional[Dict] = None,
        user: Optional[str] = None,
        volumes: Optional[Dict] = None,
        client: Optional[DockerClient] = None,
        logger: Logger = logging.getLogger('DockerContainer'),
        tmp_on_tmpfs: bool = True,
    ) -> None:
        if client is None:
            client = _create_docker_client()
        self._client = client
        _check_docker_client(self._client)
        self._logger = logger
        try:
            self._client.images.get(image)
        except docker.errors.ImageNotFound:
            # Image not available locally; pull it from the registry.
            self._client.images.pull(image)
        tmpfs = {'/tmp': 'rw,exec'} if tmp_on_tmpfs else {}
        self._container = self._client.containers.create(
            init=True,
            image=image,
            command=command,
            stdin_open=True,
            tty=os.isatty(0),  # allocate a TTY only when our stdin is a terminal
            environment=environment,
            network='host',
            security_opt=['seccomp=unconfined'],
            tmpfs=tmpfs,
            user=user,
            volumes=volumes,
            # Resolve the host's own hostname to loopback inside the container.
            extra_hosts={socket.gethostname(): "127.0.0.1"},
        )
    def __enter__(self) -> 'DockerContainer':
        self._container.start()
        return self
    def run_with_pty(self, interactive: bool = False) -> int:
        """Attach a PTY, stream until the container exits, remove it, return exit code."""
        dockerpty.start(self._client.api, self._container.id, interactive=interactive, logs=True)
        exit_code = self.get_exit_code()
        self._container.remove()
        return exit_code
    def exec(self, command: Union[str, List]) -> int:
        """Run *command* inside the running container, log its output, return its exit code."""
        exec_id = self._client.api.exec_create(container=self._container.id, cmd=command)
        stream = self._client.api.exec_start(exec_id=exec_id, stream=True)
        for chunk in stream:
            self._logger.debug(chunk.decode('utf-8', errors='ignore').strip())
        return self._client.api.exec_inspect(exec_id=exec_id).get('ExitCode', 0)
    def exec_run(self, command: Union[str, List]) -> str:
        """Run *command* and return its decoded output; raise ValueError on non-zero exit."""
        exitcode, output = self._container.exec_run(command)
        if exitcode != 0:
            raise ValueError('The following command "{}" exited with code: {}'.format(command, exitcode))
        output = output.decode('utf-8', errors='ignore')
        return output
    def exec_with_pty(self, command: Union[str, List]) -> None:
        """Run *command* interactively through a pseudo-terminal."""
        dockerpty.exec_command(self._client.api, self._container.id, command=command)
    def get_exit_code(self) -> int:
        """Return the container's recorded exit code (0 when not yet set)."""
        return self._client.api.inspect_container(self._container.id)['State'].get('ExitCode', 0)
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # NOTE(review): if run_with_pty() already removed the container, the
        # remove() in `finally` may raise docker.errors.NotFound -- confirm
        # intended usage (context manager vs run_with_pty are likely exclusive).
        try:
            self._container.kill()
        except APIError:
            # Container already stopped/gone; nothing to kill.
            pass
        finally:
            self._container.remove()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding=utf-8
import json
import sys
from PIL import Image
from pprint import pprint
import mutual_infor as mi
'''
note: Imager
'''
default_img_path = "img.jpg"
data_dir = "data/map_img/"
class Imager:
def __init__(self, path):
self.path = path
self.entropy = 0.0
self.width = 0
self.height = 0
self.is_process = False
def load(self):
try:
im = Image.open(self.path)
except IOError, e:
print "error msg:", e
return
self.data = im.getdata()
self.width = im.width
self.height = im.height
im.close()
def display(self):
data = {}
data["path"] = self.path
data["entropy"] = self.entropy
data["width"] = self.width
data["height"] = self.height
res = json.dumps(data)
return res
def get_image_info(self):
image_info = {}
if not self.is_process:
self.process()
image_info["width"] = self.width
image_info["height"] = self.height
image_info["entropy"] = self.entropy
return image_info
def process(self):
try:
im = Image.open(self.path).convert("L")
except IOError as e:
print e
else:
self.width = im.width
self.height = im.height
# get entropy
self.data = im.getdata()
mi_base = mi.MIBase()
self.entropy = mi_base.compute_entropy(self.data)
im.close()
def get_graydata(self):
try:
im = Image.open(self.path).convert("L")
except IOError as e:
print e
return
else:
data = im.getdata()
im.close()
return data
if __name__ == '__main__':
    # CLI usage: python imager.py <image-path>
    # Computes entropy/size for the given image and prints them as JSON.
    if len(sys.argv) == 2:
        image = Imager(sys.argv[1])
        image.process()
        data = image.display()
        print data
    else:
        print "param error"
|
nilq/baby-python
|
python
|
from z3 import Int
class Storage(object):
    """Sparse contract-storage mapping: unknown slots default to 0 on first read."""

    def __init__(self):
        # slot -> value; slots are materialized lazily on first access.
        self._storage = {}

    def __getitem__(self, item):
        # Reading an absent slot creates it with the concrete default 0.
        # (Symbolic variant kept for reference:
        #  self._storage[item] = Int("s_" + str(item)))
        return self._storage.setdefault(item, 0)

    def __setitem__(self, key, value):
        self._storage[key] = value

    def __len__(self):
        return len(self._storage)

    def get_storage(self) -> dict:
        """Return the underlying slot dict (shared reference, not a copy)."""
        return self._storage
|
nilq/baby-python
|
python
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
import kenlm
from predictor import WordPredictor
from vocabtrie import VocabTrie
import numbers
class TestWordPredictor(unittest.TestCase):
    """Unit tests for WordPredictor backed by a kenlm language model.

    NOTE(review): several assertions below compare *type objects* via
    ``assertIsInstance(type(x), type(y))``; every type is an instance of
    ``type``, so these always pass and do not actually validate x's type.
    They are flagged inline but left unchanged.
    """
    def setUp(self):
        # Fresh model and vocabulary fixtures for every test.
        self.wordPredictor = WordPredictor('../resources/lm_word_medium.kenlm', '../resources/vocab_100k')
        self.language_model = kenlm.LanguageModel('../resources/lm_word_medium.kenlm')
        self.vocab_filename = '../resources/vocab_100k'
        self.vocab_id = ''
    def test_create_new_trie(self):
        wp = self.wordPredictor
        self.assertIsInstance(wp.create_new_trie(self.vocab_filename), VocabTrie, "OK")
    def test_update_char_list_from_string(self):
        # NOTE(review): local names shadow the builtins `list` and `str`.
        list = ['a']
        str = "bc"
        res = ['a', 'b', 'c']
        self.assertEqual(self.wordPredictor.update_char_list_from_string(list, str), res, "OK")
    def test_create_char_list_from_vocab(self):
        char_set = self.wordPredictor.create_char_list_from_vocab(self.vocab_id, self.vocab_filename)
        #id, char_set = test_res.popitem()
        #self.assertIsInstance(type(id), type(str), "Return type is not same")
        # NOTE(review): vacuous assertion -- always passes (see class docstring).
        self.assertIsInstance(type(char_set), type(set), "Return type is not same")
    def test_add_vocab(self, vocab_id = 'vocab_id'):
        new_trie = self.wordPredictor.create_new_trie(self.vocab_filename)
        self.assertTrue((new_trie!= None))
        self.assertFalse((new_trie == None))
    def test_get_vocab_trie(self):
        flag, vocabTr = self.wordPredictor.get_vocab_trie(self.vocab_id)
        self.assertIsInstance(vocabTr, VocabTrie, 'Not OK')
        # NOTE(review): vacuous assertion -- always passes (see class docstring).
        self.assertIsInstance(type(flag), type(bool), "Not OK")
    """
    def test_get_punc_token(self):
        self.assertEqual(self.wordPredictor.get_punc_token(','), ',comma', 'Punctuation and token are not equal')
    """
    def test_get_context_state(self):
        sIn, sOut = self.wordPredictor.get_context_state('<s>', self.language_model, self.vocab_id)
        self.assertIsInstance(sIn, kenlm.State, 'stateIn is not an instance of kenlm.State')
        self.assertIsInstance(sOut, kenlm.State, 'stateOut is not an instance of kenlm.State')
    def test_find_most_probable_word(self):
        pass
    def test_get_words(self):
        pass
    def test__get_words(self):
        suggestion_list = self.wordPredictor._get_words('a', 'the united states of', self.vocab_id, 3,-float('inf'))
        # NOTE(review): vacuous assertion -- always passes (see class docstring).
        self.assertTrue(isinstance(type(suggestion_list), type(str)), "Not a list") #basestring is gone in python 3
    def test_print_suggestions(self):
        pass
    def test_get_most_likely_word(self):
        word, log_prob = self.wordPredictor.get_most_probable_word('a', 'the united states of', self.vocab_id)
        self.assertEqual(word, 'america', "Not equal")
        self.assertTrue(isinstance(log_prob, numbers.Number), "False")
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import logging
from pdb import Pdb
import sys
import time
from pathlib import Path
from typing import List
from pprint import pformat
import docker
import yaml
logger = logging.getLogger(__name__)
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
WAIT_TIME_SECS = 20
RETRY_COUNT = 7
MAX_WAIT_TIME = 240
# https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/
pre_states = ["NEW", "PENDING", "ASSIGNED", "PREPARING", "STARTING"]
failed_states = [
"COMPLETE",
"FAILED",
"SHUTDOWN",
"REJECTED",
"ORPHANED",
"REMOVE",
"CREATED",
]
# UTILS --------------------------------
def get_tasks_summary(tasks):
    """Render one line per task showing its ID, state and error ('' when absent)."""
    lines = []
    for task in tasks:
        # Guarantee the template below always finds an 'Err' key.
        task["Status"].setdefault("Err", "")
        lines.append(
            "- task ID:{ID}, STATE: {Status[State]}, ERROR: '{Status[Err]}' \n".format(**task)
        )
    return "".join(lines)
def get_failed_tasks_logs(service, docker_client):
    """Collect container logs for every task of *service* in a failed state.

    Returns a single string with BEGIN/END banners around each task's log.
    """
    failed_logs = ""
    for t in service.tasks():
        # `failed_states` is the module-level list of terminal task states.
        if t["Status"]["State"].upper() in failed_states:
            cid = t["Status"]["ContainerStatus"]["ContainerID"]
            failed_logs += "{2} {0} - {1} BEGIN {2}\n".format(
                service.name, t["ID"], "=" * 10
            )
            if cid:
                container = docker_client.containers.get(cid)
                failed_logs += container.logs().decode("utf-8")
            else:
                # Task failed before a container was ever created.
                failed_logs += " log unavailable. container does not exists\n"
            failed_logs += "{2} {0} - {1} END {2}\n".format(
                service.name, t["ID"], "=" * 10
            )
    return failed_logs
# --------------------------------------------------------------------------------
def osparc_simcore_root_dir() -> Path:
    """Walk upwards from this file's directory to locate the osparc-simcore repo root."""
    marker = "services/web/server"

    root_dir = Path(current_dir)
    # Ascend until the marker subtree matches or we hit the filesystem root.
    while not any(root_dir.glob(marker)) and root_dir != Path("/"):
        root_dir = root_dir.parent

    msg = f"'{root_dir}' does not look like the git root directory of osparc-simcore"
    assert root_dir.exists(), msg
    assert any(root_dir.glob(marker)), msg
    assert any(root_dir.glob(".git")), msg
    return root_dir
def core_docker_compose_file() -> Path:
    """Path of the generated core-stack compose file at the repo root."""
    root = osparc_simcore_root_dir()
    return root / ".stack-simcore-version.yml"
def core_services() -> List[str]:
    """Return the service names declared in the core-stack compose file."""
    with core_docker_compose_file().open() as fp:
        dc_specs = yaml.safe_load(fp)
    # Iterating a dict already yields its keys; no .keys() round-trip needed.
    return list(dc_specs["services"])
def ops_docker_compose_file() -> Path:
    """Path of the generated ops-stack compose file at the repo root."""
    root = osparc_simcore_root_dir()
    return root / ".stack-ops.yml"
def ops_services() -> List[str]:
    """Return the service names declared in the ops-stack compose file."""
    with ops_docker_compose_file().open() as fp:
        dc_specs = yaml.safe_load(fp)
    # Iterating a dict already yields its keys; no .keys() round-trip needed.
    return list(dc_specs["services"])
def wait_for_services() -> None:
    """Block until every expected core+ops swarm service reaches RUNNING.

    Polls each service up to RETRY_COUNT times, sleeping WAIT_TIME_SECS
    between polls; raises AssertionError when services are missing or a
    task never leaves a pre-run/failed state.
    """
    # get all services
    services = core_services() + ops_services()
    client = docker.from_env()
    # Swarm service names are prefixed with the stack name; match on the suffix.
    running_services = [
        x for x in client.services.list() if x.name.split("_")[-1] in services
    ]
    # check all services are in
    assert len(running_services), "no services started!"
    assert len(services) == len(
        running_services
    ), f"Some services are missing or unexpected:\nexpected: {len(services)} {services}\ngot: {len(running_services)} {[service.name for service in running_services]}"
    # now check they are in running mode
    for service in running_services:
        task = None
        for n in range(RETRY_COUNT):
            # get last updated task
            sorted_tasks = sorted(service.tasks(), key=lambda task: task["UpdatedAt"])
            task = sorted_tasks[-1]
            if task["Status"]["State"].upper() in pre_states:
                # Still starting up: wait and re-poll.
                print(
                    "Waiting [{}/{}] for {}...\n{}".format(
                        n, RETRY_COUNT, service.name, get_tasks_summary(service.tasks())
                    )
                )
                time.sleep(WAIT_TIME_SECS)
            elif task["Status"]["State"].upper() in failed_states:
                # Swarm may restart a failed task; give it another chance.
                print(
                    f"Waiting [{n}/{RETRY_COUNT}] Service {service.name} failed once...\n{get_tasks_summary(service.tasks())}"
                )
                time.sleep(WAIT_TIME_SECS)
            else:
                break
        assert task
        assert (
            task["Status"]["State"].upper() == "RUNNING"
        ), "Expected running, got \n{}\n{}".format(
            pformat(task), get_tasks_summary(service.tasks())
        )
        # get_failed_tasks_logs(service, client))
if __name__ == "__main__":
# get retry parameters
# wait for the services
sys.exit(wait_for_services())
|
nilq/baby-python
|
python
|
"""Settings for admin panel related to the authors app."""
|
nilq/baby-python
|
python
|
import unittest
from yauber_algo.errors import *
class PercentRankTestCase(unittest.TestCase):
    """Sanity-check suite for the rolling ``percent_rank`` algorithm.

    Fix: the deprecated NumPy aliases ``np.float``/``np.bool``/``np.object``
    (removed in NumPy 1.24) are replaced by the equivalent builtins, which
    produce identical dtypes.
    """
    def test_category(self):
        import yauber_algo.sanitychecks as sc
        from numpy import array, nan, inf
        import os
        import sys
        import pandas as pd
        import numpy as np

        from yauber_algo.algo import percent_rank

        #
        # Function settings
        #
        algo = 'percent_rank'
        func = percent_rank

        with sc.SanityChecker(algo) as s:
            #
            # Check regular algorithm logic
            #
            s.check_regular(
                array([nan, nan, nan, nan, nan, .30, .10]),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1]),
                    5
                ),
                suffix='reg'
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, 1.00, .90]),
                func,
                (
                    array([1, 2, 3, 4, 5, 6, 6]),
                    5
                ),
                suffix='equal_numbers'
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, .50, .50]),
                func,
                (
                    array([1, 1, 1, 1, 1, 1, 1]),
                    5
                ),
                suffix='all_equal_numbers'
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, nan, .10]),
                func,
                (
                    array([nan, 2, 1, 4, 3, 2, 1]),
                    5
                ),
                suffix='skip_nan'
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, nan, nan]),
                func,
                (
                    array([nan, 2, nan, 2, 3, 2, 1]),
                    5
                ),
                suffix='skip_nan_min_count_5'
            )
            # Window shorter than the 5-sample minimum must raise internally.
            s.check_regular(
                array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1]),
                    4
                ),
                suffix='min_period_eq_5',
                exception=YaUberAlgoInternalError
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1]),
                    0
                ),
                suffix='zero_period_err',
                exception=YaUberAlgoArgumentError
            )
            s.check_regular(
                array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1]),
                    -1
                ),
                suffix='neg_period_err',
                exception=YaUberAlgoArgumentError
            )
            # +inf / -inf samples must propagate as NaN in the output.
            s.check_naninf(
                array([nan, nan, nan, nan, nan, nan, .10, nan, .20]),
                func,
                (
                    array([nan, 2, 1, 4, 3, 5, 1, inf, 1]),
                    6
                ),
                suffix='inf'
            )
            s.check_naninf(
                array([nan, nan, nan, nan, nan, nan, .10, nan, nan]),
                func,
                (
                    array([nan, 2, 1, 4, 3, 5, 1, inf, nan]),
                    6
                ),
                suffix='inf_nan'
            )
            s.check_naninf(
                array([nan, nan, nan, nan, nan, nan, .10, nan, .20]),
                func,
                (
                    array([nan, 2, 1, 4, 3, 5, 1, -inf, 1]),
                    6
                ),
                suffix='neg_inf'
            )
            s.check_series(
                pd.Series(array([nan, nan, nan, nan, nan, .30, .10])),
                func,
                (
                    pd.Series(array([3, 2, 1, 4, 3, 2, 1])),
                    5
                ),
                suffix=''
            )
            # dtype coverage: float / bool / int32 / object inputs.
            s.check_dtype_float(
                array([nan, nan, nan, nan, nan, .30, .10], dtype=float),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1], dtype=float),
                    5
                ),
                suffix=''
            )
            s.check_dtype_bool(
                array([nan, nan, nan, nan, nan, .20, .70], dtype=float),
                func,
                (
                    array([0, 1, 1, 0, 1, 0, 1], dtype=bool),
                    5
                ),
                suffix=''
            )
            s.check_dtype_int(
                array([nan, nan, nan, nan, nan, .30, .10], dtype=float),
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1], dtype=np.int32),
                    5
                ),
                suffix=''
            )
            s.check_dtype_object(
                func,
                (
                    array([3, 2, 1, 4, 3, 2, 1], dtype=object),
                    5
                ),
                suffix=''
            )
            s.check_futref(5, 1,
                           func,
                           (
                               np.random.random(100),
                               5
                           ),
                           )
            s.check_window_consistency(5, 1,
                                       func,
                                       (
                                           np.random.random(100),
                                           5
                                       ),
                                       )
|
nilq/baby-python
|
python
|
import sys
import pandas as pd
from sqlalchemy import create_engine
import pickle
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
# custom transformer
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    '''
    Transformer that flags texts whose sentences start with a verb (or the
    retweet marker 'RT'), used as an extra feature channel in the pipeline.
    '''
    def starting_verb(self, text):
        """Return True if any sentence of *text* starts with a verb or 'RT'."""
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            if not pos_tags:
                # Bug fix: sentences that tokenize to nothing (stray
                # punctuation, URLs replaced by placeholders, etc.) used to
                # raise IndexError on pos_tags[0]; skip them instead.
                continue
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                return True
        return False

    def fit(self, x, y=None):
        """Stateless transformer: fitting is a no-op."""
        return self

    def transform(self, X):
        """Map each document in X to a single boolean feature column."""
        X_tagged = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(X_tagged)
def load_data(database_filepath):
    '''
    Load the message dataset from a SQLite database and split it into
    features and labels.

    Arguments:
        database_filepath: path to database where dataset is saved to (String)
    Returns:
        X: feature dataset (Pandas Series)
        y: label dataset (Pandas DataFrame)
        category_names: list of column names (Pandas Index)
    '''
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('df', engine)

    # First four columns are metadata; the rest are the label categories.
    X = df['message']
    y = df.iloc[:, 4:]
    return X, y, y.columns
def tokenize(text):
    '''
    Normalize and tokenize English text for the ML pipeline: URLs are
    replaced by a placeholder, then tokens are lemmatized, lower-cased
    and stripped.

    Arguments:
        text: English text to be tokenized (String)
    Returns:
        clean_tokens: processed tokens (List)
    '''
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")

    lemmatizer = WordNetLemmatizer()
    return [
        lemmatizer.lemmatize(token).lower().strip()
        for token in word_tokenize(text)
    ]
def build_model():
    '''
    Assemble the ML pipeline -- text features (CountVectorizer + TF-IDF)
    combined with the StartingVerbExtractor, feeding a multi-output
    AdaBoost classifier -- wrapped in a grid search.

    Returns:
        model: GridSearchCV over the NLP pipeline (Scikit estimator)
    '''
    text_features = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer())
    ])
    combined_features = FeatureUnion([
        ('text_pipeline', text_features),
        ('starting_verb', StartingVerbExtractor())
    ])
    pipeline = Pipeline([
        ('features', combined_features),
        ('clf', MultiOutputClassifier(AdaBoostClassifier()))
    ])

    # Grid-search space tuning the vectorizer / tf-idf stage.
    parameters = {
        'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),
        'features__text_pipeline__vect__max_df': (0.5, 0.75),
        'features__text_pipeline__vect__max_features': (None, 5000),
        'features__text_pipeline__tfidf__use_idf': (True, False)
    }
    return GridSearchCV(pipeline, param_grid=parameters)
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    Evaluate the trained pipeline: print overall accuracy and a per-category
    classification report.

    Arguments:
        model: ML pipeline to be evaluated (Scikit Pipeline)
        X_test: test feature dataset (Pandas Series)
        Y_test: test label dataset (Pandas DataFrame)
        category_names: list of column names (List)
    '''
    y_pred = model.predict(X_test)

    # Element-wise comparison -> per-column accuracy -> overall mean.
    accuracy = (y_pred == Y_test).mean()
    print("Overall Accuracy:", accuracy.mean())

    predictions = pd.DataFrame(y_pred, columns=category_names)
    for column in category_names:
        print('Attribute: {}\n'.format(column))
        print(classification_report(Y_test[column], predictions[column]))
def save_model(model, model_filepath):
    '''
    Serialize the trained model to disk as a pickle file.

    (Docstring fixed: the original carried a copy-pasted description of
    build_model.)

    Arguments:
        model: ML pipeline to be saved (Scikit Pipeline)
        model_filepath: name of pickle file the model is saved to (String)
    '''
    # Context manager guarantees the file handle is closed even on error;
    # the original left the handle from open() dangling.
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    """CLI entry point: load data, train, evaluate and pickle the classifier.

    Expects exactly two arguments: the SQLite database path and the output
    pickle path; prints usage help otherwise.
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        # 80/20 train/test split.
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        print('Building model...')
        model = build_model()
        print('Training model...')
        model.fit(X_train, Y_train)
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        print('Saving model...\n MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
|
nilq/baby-python
|
python
|
# Dynamic-voxel variant of the PointPillars KITTI 3D-car baseline:
# inherits the base config and switches voxelization to dynamic mode
# (no per-voxel point cap).
_base_ = '../pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py'
# Pillar footprint (x, y) in meters; z spans the whole range as one slab.
voxel_size = [0.16, 0.16, 4]
# [x_min, y_min, z_min, x_max, y_max, z_max] in meters.
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
model = dict(
    type='DynamicVoxelNet',
    voxel_layer=dict(
        max_num_points=-1,  # -1 = dynamic voxelization: keep every point
        point_cloud_range=point_cloud_range,
        voxel_size=voxel_size,
        max_voxels=(-1, -1)),  # no voxel-count limit (train, test)
    voxel_encoder=dict(
        type='DynamicPillarFeatureNet',
        in_channels=4,  # per-point input features -- presumably x, y, z, intensity; confirm against dataset
        feat_channels=[64],
        with_distance=False,
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range))
|
nilq/baby-python
|
python
|
import collections
import sys
def main(letters, words):
    """Debug entry point: print an empty grouping dict and the raw CLI inputs."""
    groups = collections.defaultdict(list)
    print(groups)
    print(letters)
    print(words)


if __name__ == "__main__":
    # First CLI argument is the letters string; the rest are words.
    main(sys.argv[1], sys.argv[2:])
|
nilq/baby-python
|
python
|
# ### Problem 1
# Ask the user to enter a number.
# Using the provided list of numbers, use a for loop to iterate the array and print out all the values that are smaller than the user input and print out all the values that are larger than the number entered by the user.
# ```
# # Start with this List
# list_of_many_numbers = [12, 24, 1, 34, 10, 2, 7]
# Example Input/Output if the user enters the number 9:
# ```
# The User entered 9
# 1 2 7 are smaller than 9
# 12 24 34 10 are larger than 9
# ```
# Read the comparison threshold from the user.
userinput= int(input(" Enter a number here: "))# Input from the User
list_of_many_numbers = [12, 24, 1, 34, 10, 2, 7]
# KEY: prints one line per element instead of the grouped order shown in the
# example above; the comparison logic itself is correct.
# NOTE(review): message says "is great than" -- likely meant "is greater than".
for each in list_of_many_numbers:
    if each > userinput:
        print(f'{each} is great than {userinput} ')
    elif each== userinput:
        print("This number is present in my array") # Equal case addressed
    else:
        print(f'{each} is smaller than {userinput}')
|
nilq/baby-python
|
python
|
import tensorflow as tf
import numpy as np
import json
import argparse
import cv2
import os
import glob
import math
import time
import glob
def infer(frozen_pb_path, output_node_name, img_path, output_path=None):
    """Run a frozen TF pose-estimation graph over image(s) and save heatmap overlays.

    Args:
        frozen_pb_path: path to the frozen GraphDef (.pb) file.
        output_node_name: name of the heatmap output node (without ':0').
        img_path: a single image file or a directory of images.
        output_path: directory where '<name>_out.<ext>' overlays are written.
    """
    # Load the frozen graph into the default graph.
    with tf.gfile.GFile(frozen_pb_path, "rb") as f:
        restored_graph_def = tf.GraphDef()
        restored_graph_def.ParseFromString(f.read())
        tf.import_graph_def(
            restored_graph_def,
            input_map=None,
            return_elements=None,
            name=""
        )
    graph = tf.get_default_graph()
    input_image = graph.get_tensor_by_name("image:0")
    output_heat = graph.get_tensor_by_name("%s:0" % output_node_name)
    res = {}
    use_times = []
    with tf.Session() as sess:
        # if directory, then glob all files
        if os.path.isdir(img_path):
            img_files = glob.glob(os.path.join(img_path,"*"))
        else:
            img_files = [img_path]
            print(img_path)
        print(img_files)
        # if file, then do once
        for img_path in img_files:
            fname = os.path.basename(img_path)
            print(img_path)
            ori_img = cv2.imread(img_path)
            ori_shape = ori_img.shape
            # Resize the input to the network's expected spatial size.
            # NOTE(review): assumes input tensor shape is (batch, H, W, C) --
            # shape[1]/shape[2] are passed to cv2.resize as (width, height);
            # confirm H == W or that the ordering is intended.
            shape = input_image.get_shape().as_list()
            inp_img = cv2.resize(ori_img, (shape[1], shape[2]))
            st = time.time()
            heat = sess.run(output_heat, feed_dict={input_image: [inp_img]})
            infer_time = 1000 * (time.time() - st)
            #print("img_id = %d, cost_time = %.2f ms" % (img_id, infer_time))
            use_times.append(infer_time)
            # Collapse the per-joint heatmaps into one grayscale map.
            grey_heat = 255*np.squeeze(np.amax(heat, axis=3))
            grey_heat = cv2.resize(grey_heat, (ori_shape[1], ori_shape[0]), interpolation=cv2.INTER_AREA)
            # Put the heatmap into the red (BGR index 2) channel.
            color_heat = np.zeros((ori_shape[0], ori_shape[1], 3), dtype=np.float32)
            color_heat[:,:,2] = grey_heat
            #cv2.imwrite(output_path, grey_heat)
            merged_img = cv2.addWeighted(ori_img.astype(np.float32), 1.0, color_heat, 1.0, 0)
            new_fname = "_out.".join(fname.split("."))
            out_fpath = os.path.join(output_path, new_fname)
            cv2.imwrite(out_fpath, merged_img)
            #res[img_id] = np.squeeze(heat)
    print("Average inference time = %.2f ms" % np.mean(use_times))
    #return res
if __name__ == '__main__':
    # CLI entry point: parse arguments, ensure the output directory exists,
    # then run inference.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_pb_path", type=str, default="")
    parser.add_argument("--img_path", type=str, default="")
    parser.add_argument("--output_path", type=str, default="output_images")
    parser.add_argument("--output_node_name", type=str, default='Convolutional_Pose_Machine/stage_5_out')
    parser.add_argument("--gpus", type=str, default="1")  # NOTE(review): parsed but never used below
    args = parser.parse_args()
    # Create the output directory unless the path already names a file.
    if not os.path.isfile(args.output_path):
        if not os.path.exists(args.output_path):
            os.makedirs(args.output_path)
    infer(args.frozen_pb_path, args.output_node_name, args.img_path, args.output_path)
|
nilq/baby-python
|
python
|
#!flask/bin/python
# -*- coding: utf-8 -*-
from api import app
from flask import jsonify, make_response
@app.errorhandler(401)
def unauthorized(error=None):
    """Return a JSON 401 response for unauthorized access."""
    payload = {'status': 401, 'mensagem': 'Voce nao tem permissao para acessar essa pagina!'}
    response = jsonify(payload)
    response.status_code = 401
    # TODO: redirect to the login page instead of returning a bare 401 payload
    return response
@app.errorhandler(404)
def not_found(error=None):
    """Return a JSON 404 response for unknown resources."""
    payload = {"status": 404, "mensagem": 'Nao encontramos o que voce estava procurando. Tente novamente.'}
    response = jsonify(payload)
    response.status_code = 404
    return response
@app.errorhandler(405)
def method_not_allowed(error=None):
    """Return a JSON 405 response for disallowed HTTP methods."""
    payload = {'status': 405, 'mensagem': 'Metodo nao permitido!'}
    response = jsonify(payload)
    response.status_code = 405
    return response
@app.errorhandler(500)
def internal_server_error(error=None):
    """Return a JSON 500 response for unexpected server failures."""
    payload = {'status': 500, 'mensagem': 'Ops. Algo deu errado. Tente novamente.'}
    response = jsonify(payload)
    response.status_code = 500
    return response
|
nilq/baby-python
|
python
|
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
class USB_VER:
    """Supported USB specification version strings (used as keys in USB_MAX_FRAME_LEN)."""
    USB1_0 = "1.0"
    USB1_1 = "1.1"
    USB2_0 = "2.0"
class PID:
    """
    USB protocol-layer packet identifier (PID) values.

    :attention: written visually msb-first, transmitted lsb-first

    :note: PREAMBLE and ERR deliberately share the 0b1100 encoding (the USB
        spec reuses PRE as ERR for split transactions).
    """
    # Address for host-to-device transfer
    TOKEN_OUT = 0b0001
    # Address for device-to-host transfer
    TOKEN_IN = 0b1001
    # Start of frame marker (sent each ms)
    TOKEN_SOF = 0b0101
    # Address for host-to-device control transfer
    TOKEN_SETUP = 0b1101
    # Even-numbered data packet
    DATA_0 = 0b0011
    # Odd-numbered data packet
    DATA_1 = 0b1011
    # Data packet for high-bandwidth isochronous transfer (USB 2.0)
    DATA_2 = 0b0111
    # Data packet for high-bandwidth isochronous transfer (USB 2.0)
    DATA_M = 0b1111
    # Data packet accepted
    HS_ACK = 0b0010
    # Data packet not accepted; please retransmit
    # NOTE(review): the USB spec names this handshake "NAK".
    HS_NACK = 0b1010
    # Transfer impossible; do error recovery
    HS_STALL = 0b1110
    # Data not ready yet (USB 2.0)
    HS_NYET = 0b0110
    # Low-bandwidth USB preamble
    PREAMBLE = 0b1100
    # Split transaction error (USB 2.0); same encoding as PREAMBLE by design
    ERR = 0b1100
    # High-bandwidth (USB 2.0) split transaction
    SPLIT = 0b1000
    # Check if endpoint can accept data (USB 2.0)
    PING = 0b0100
addr_t = Bits(7)
endp_t = Bits(4)
crc5_t = Bits(5)
crc16_t = Bits(16)
pid_t = Bits(4)
"""
:attention: every packet starts with sync and ends in EOP,
which is not in data structures below
"""
"""
There are three types of token packets,
* In - Informs the USB device that the host wishes to read information.
* Out - Informs the USB device that the host wishes to send information.
* Setup - Used to begin control transfers.
"""
packet_token_t = HStruct(
(pid_t, "pid"),
(addr_t, "addr"),
(endp_t, "endp"),
(crc5_t, "crc5"),
)
USB_MAX_FRAME_LEN = {
USB_VER.USB1_0: 8,
USB_VER.USB1_1: 1023,
USB_VER.USB2_0: 1024,
}
def get_packet_data_t(usb_ver: USB_VER):
    """Build the HStruct type of a USB DATA packet for the given USB version.

    The payload length is bounded by that version's maximum frame size.
    """
    frame_limit = USB_MAX_FRAME_LEN[usb_ver]
    payload_t = HStream(Bits(8), frame_len=(1, frame_limit))
    # pid has to be one of DATA_0, DATA_1, DATA_2, DATA_M
    return HStruct(
        (pid_t, "pid"),
        (payload_t, "data"),
        (crc16_t, "crc"),
    )
"""
There are three type of handshake packets which consist simply of the PID
* ACK - Acknowledgment that the packet has been successfully received.
* NAK - Reports that the device temporary cannot send or received data.
Also used during interrupt transactions to inform the host there is no data to send.
* STALL - The device finds its in a state that it requires intervention from the host.
"""
packet_hs_t = HStruct(
(pid_t, "pid"),
)
"""
The SOF packet consisting of an 11-bit frame number is sent by the host
every 1ms ± 500ns on a full speed bus or every 125 µs ± 0.0625 µs on a high speed bus.
"""
frame_number_t = Bits(11)
packet_sof_t = HStruct(
(pid_t, "pid"),
(frame_number_t, "frame_number"),
(crc5_t, "crc5"),
)
|
nilq/baby-python
|
python
|
###############################################################################
# Author: Wasi Ahmad
# Project: Match Tensor: a Deep Relevance Model for Search
# Date Created: 7/28/2017
#
# File Description: This script contains code related to the sequence-to-sequence
# network.
###############################################################################
import torch
import torch.nn as nn
from nn_layer import EmbeddingLayer, Encoder, ExactMatchChannel
class MatchTensor(nn.Module):
    """Match Tensor deep relevance model.

    Encodes a query and its candidate documents, builds a per-position
    query/document interaction tensor (plus an exact-match channel), and scores
    each (query, document) pair with a small CNN followed by max-pooling and a
    linear output layer.
    """

    def __init__(self, dictionary, embedding_index, args):
        """Constructor of the class.

        :param dictionary: vocabulary; its length sizes the embedding table.
        :param embedding_index: pretrained embedding lookup used to initialize
            the embedding weights.
        :param args: configuration object (emsize, featsize, nhid_query,
            nhid_doc, nchannels, nfilters, match_filter_size, bidirection, ...).
        """
        super(MatchTensor, self).__init__()
        self.dictionary = dictionary
        self.embedding_index = embedding_index
        self.config = args
        # Bidirectional encoders double the hidden size seen by the projections.
        self.num_directions = 2 if self.config.bidirection else 1

        self.embedding = EmbeddingLayer(len(self.dictionary), self.config)
        # Shared projection from embedding space to the encoder input size.
        self.linear_projection = nn.Linear(self.config.emsize, self.config.featsize)
        self.query_encoder = Encoder(self.config.featsize, self.config.nhid_query, True, self.config)
        self.document_encoder = Encoder(self.config.featsize, self.config.nhid_doc, True, self.config)
        # Project both encoders' hidden states into a common channel space.
        self.query_projection = nn.Linear(self.config.nhid_query * self.num_directions, self.config.nchannels)
        self.document_projection = nn.Linear(self.config.nhid_doc * self.num_directions, self.config.nchannels)
        self.exact_match_channel = ExactMatchChannel()
        # Three parallel convolutions with different widths over the
        # interaction tensor (+1 channel for the exact-match channel).
        self.conv1 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 3), padding=1)
        self.conv2 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 5), padding=(1, 2))
        self.conv3 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 7), padding=(1, 3))
        self.relu = nn.ReLU()
        # 1x1 convolution mixing the three filter banks.
        self.conv = nn.Conv2d(self.config.nfilters * 3, self.config.match_filter_size, (1, 1))
        self.output = nn.Linear(self.config.match_filter_size, 1)

        # Initializing the weight parameters for the embedding layer.
        self.embedding.init_embedding_weights(self.dictionary, self.embedding_index, self.config.emsize)

    def forward(self, batch_queries, query_len, batch_docs, doc_len):
        """
        Forward function of the match tensor model. Return average loss for a batch of sessions.
        :param batch_queries: 2d tensor [batch_size x max_query_length]
        :param query_len: 1d numpy array [batch_size]
        :param batch_docs: 3d tensor [batch_size x num_rel_docs_per_query x max_document_length]
        :param doc_len: 2d numpy array [batch_size x num_clicks_per_query]
        :return: score representing click probability [batch_size x num_clicks_per_query]
        """
        # step1: apply embedding lookup
        embedded_queries = self.embedding(batch_queries)
        # Documents are flattened to [batch*num_docs, max_doc_len] so queries
        # and documents can share the same downstream machinery.
        embedded_docs = self.embedding(batch_docs.view(-1, batch_docs.size(-1)))
        # step2: apply linear projection on embedded queries and documents
        embedded_queries = self.linear_projection(embedded_queries.view(-1, embedded_queries.size(-1)))
        embedded_docs = self.linear_projection(embedded_docs.view(-1, embedded_docs.size(-1)))
        # step3: transform the tensors so that they can be given as input to RNN
        embedded_queries = embedded_queries.view(*batch_queries.size(), self.config.featsize)
        embedded_docs = embedded_docs.view(-1, batch_docs.size()[-1], self.config.featsize)
        # step4: pass the encoded query and doc through a bi-LSTM
        encoded_queries = self.query_encoder(embedded_queries, query_len)
        encoded_docs = self.document_encoder(embedded_docs, doc_len.reshape(-1))
        # step5: apply linear projection on query hidden states
        projected_queries = self.query_projection(encoded_queries.view(-1, encoded_queries.size()[-1])).view(
            *batch_queries.size(), -1)
        # Broadcast each query across its documents so that every
        # (query, document) pair lines up in the flattened batch dimension.
        projected_queries = projected_queries.unsqueeze(1).expand(projected_queries.size(0), batch_docs.size(1),
                                                                  *projected_queries.size()[1:])
        projected_queries = projected_queries.contiguous().view(-1, *projected_queries.size()[2:])
        projected_docs = self.document_projection(encoded_docs.view(-1, encoded_docs.size()[-1]))
        projected_docs = projected_docs.view(-1, batch_docs.size(2), projected_docs.size()[-1])
        # Expand query positions against document positions to form the
        # interaction tensor; NOTE(review): assumes shapes
        # [N, q_len, d_len, nchannels] after these expands — confirm.
        projected_queries = projected_queries.unsqueeze(2).expand(*projected_queries.size()[:2], batch_docs.size()[-1],
                                                                  projected_queries.size(2))
        projected_docs = projected_docs.unsqueeze(1).expand(projected_docs.size(0), batch_queries.size()[-1],
                                                            *projected_docs.size()[1:])
        # step6: 2d product between projected query and doc vectors
        query_document_product = projected_queries * projected_docs
        # step7: append exact match channel
        exact_match = self.exact_match_channel(batch_queries, batch_docs).unsqueeze(3)
        query_document_product = torch.cat((query_document_product, exact_match), 3)
        # Move channels to dim 1 (NCHW) for the convolutions.
        query_document_product = query_document_product.transpose(2, 3).transpose(1, 2)
        # step8: run the convolutional operation, max-pooling and linear projection
        convoluted_feat1 = self.conv1(query_document_product)
        convoluted_feat2 = self.conv2(query_document_product)
        convoluted_feat3 = self.conv3(query_document_product)
        convoluted_feat = self.relu(torch.cat((convoluted_feat1, convoluted_feat2, convoluted_feat3), 1))
        convoluted_feat = self.conv(convoluted_feat).transpose(1, 2).transpose(2, 3)
        # Max-pool over document positions, then over query positions.
        max_pooled_feat = torch.max(convoluted_feat, 2)[0].squeeze()
        max_pooled_feat = torch.max(max_pooled_feat, 1)[0].squeeze()
        # One logit per (query, document) pair, reshaped back to [batch, num_docs].
        return self.output(max_pooled_feat).squeeze().view(*batch_docs.size()[:2])
|
nilq/baby-python
|
python
|
"""
Aravind Veerappan
BNFO 601 - Exam 2
Question 2. Protein BLAST
"""
import math
from PAM import PAM
class BLAST(object):
FORWARD = 1 # These are class variables shared by all instances of the BLAST class
BACKWARD = -1
ROW = (0, 1)
COLUMN = (1, 0)
def __init__(self, query=None, target=None, word_size=3, gap_open=-10, gap_extend=-4, threshold=10, PAM=None):
self.query = query # This is the string corresponding to the query sequence
self.target = target # This is the string corresponding to the target sequence
self.word_size = word_size # Size of the seed word for initiating extensions
self.word_score = None # something different required for PBLAST!
self.gap_open = gap_open
self.gap_extend = gap_extend
self.querylen = len(query)
self.targetlen = len(target)
self.blast_table = {} # Our main dynamic programming table containing scores
self.traceback_table = {} # A corresponding table for recording the tracebacks
self.target_index = {}
self.threshold = threshold # Neighborhood threshold value for scoring
self.PAM = PAM # PAM table
return
def score(self): # This method performs BLAST scoring and returns a string describing the resulting alignment
result_summary = [] # A list, for now, that will store results of the alignments
if not self.target_index: # if this is the first time scoring we should index the target
for i in xrange(len(self.target) - self.word_size + 1):
word = self.target[i: i + self.word_size]
if word in self.target_index:
self.target_index[word].append(i) # A dict of lists is an efficient structure for this index.
# The list items are word coordinates in the target.
else:
self.target_index[word] = [i]
# print self.target_index
## First we must iterate through words in the query:
query_position = 0
while query_position < self.querylen - self.word_size + 1:
# print "Query position is", query_position
query_word = self.query[query_position:query_position + self.word_size]
# lookup scores for each AA pair from PAM table
for target_word in self.target_index.keys():
score = 0
for i in range(len(target_word)):
score += self.PAM[target_word[i], query_word[i]]
# If the calculated score is higher than the neighborhood threshold value then extend the alignment
# and set the starting word score equal to the calculated score
if score > self.threshold:
self.word_score = score
for target_position in self.target_index[target_word]:
print "Searching for seed", query_word, "at target position", target_position
# print "Extending forward"
forward_score, forward_extension_q, forward_extension_t = \
self._extend_alignment(query_position, target_position, self.FORWARD)
# print "Extending backwards"
backward_score, backward_extension_q, backward_extension_t = \
self._extend_alignment(query_position, target_position, self.BACKWARD)
q_result = backward_extension_q[:-1] + query_word + forward_extension_q[1:]
t_result = backward_extension_t[:-1] + query_word + forward_extension_t[1:]
# Note that the last character of a backward extension, and the zeroth character of a forward
# extension overlap with the query word and should therefore be discarded - thus the slice notation.
score = forward_score + backward_score - self.word_score
# We need to make sure that we don't double count the seed score!
# calculate e-value
# e_value = self.querylen * self.targetlen * math.e ** (math.log(1 / 4) * score)
# calculate bit score
# bit_score = (-math.log(1 / 4) * score - math.log(1)) / math.log(2)
query_begin = query_position - len(backward_extension_q) + 2
target_begin = target_position - len(backward_extension_t) + 2
# result_summary.append((e_value, bit_score, score, q_result, t_result, query_begin, target_begin))
result_summary.append((score, q_result, t_result, query_begin, target_begin))
alignment_string = '\nAlignment had a score of ' + str(score) + ' and is:\n\nTarget:\t' + \
str(target_begin) + '\t' + str(t_result) + '\n\t\t\t'
for k in xrange(len(t_result)): # t and q alignments should be the same length!
if t_result[k] == q_result[k]:
alignment_string += '|'
# Only put a bar if the two characters are identical at this position
else:
alignment_string += ' ' # otherwise just insert a space
alignment_string += '\nQuery:\t' + str(query_begin) + '\t' + str(q_result) + '\n'
print alignment_string
# The above statements just concatenate together a multi-line string that will correctly display
# the best alignment when it is subsequently printed.
query_position += 1
return result_summary
def _extend_alignment(self, query_start, target_start, direction):
""" This private method attempts to extend an alignment in the forward and backward direction
depending on the value of the direction flag, which here takes the value 1 (for forward extension) or
-1 for backward.For clarity these constants are defined by the class variables self.FORWARD and self.BACKWARD
"""
self.high_score = self.word_score
# highest scores encountered so far will always initially be the word_score * match_reward
self.high_q_pos = self.high_t_pos = 0
if direction == self.FORWARD: # We start with the 0,0 position representing the last character
query_start += self.word_size - 1 # of the seed word for forward extensions.
target_start += self.word_size - 1 # For backward extensions, leave it as it is (i.e. zeroth character)
self.blast_table = dict()
# The BLAST table is a dict of tuples. Each tuple represents a (query, target) position
# this sparse representation will be much more efficient than using a 2D list
self.blast_table[0, 0] = self.high_score # initialize the top left corner with the word score
self.high_q_pos = 0
self.high_t_pos = 0
self.traceback_table[0, 0] = (1, 1)
# There is no traceback path for the origin, but the program logic elsewhere dictates that we provide one
cur_t_pos = 1 # we are going to score the edges first (top and left), which can *only* ever be gaps back
# to the origin. i.e. the question of matching or not matching is completely irrelevant here.
# We start by scoring the top edge, beginning with position 1..
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(0, cur_t_pos)] = cur_score # only record non-zero values
self.traceback_table[(0, cur_t_pos)] = (0, 1) # record a target gap in the traceback table
cur_score = max(0, self.blast_table[(0, cur_t_pos)] + self.gap_extend) # any subsequent are extends
cur_t_pos += 1
cur_t_pos = 0 # Now we do the same thing for the left edge as we just did for the top edge
cur_q_pos = 1
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(cur_q_pos, 0)] = cur_score # only record non-zero values
self.traceback_table[(cur_q_pos, 0)] = (1, 0) # record a query gap in the traceback table
cur_score = max(0, self.blast_table[(cur_q_pos, 0)] + self.gap_extend)
cur_t_pos += 1
# print "blast table 0,0 is", self.blast_table[0, 0], "and high score is", self.high_score
# alright, finished with edges. Note that high scores can NEVER occur in an edge so these were not considered.
# Henceforth, however, we will need to think about this.
cur_t_pos = 0 # Start at the first position
cur_q_pos = 0
# Now we will score the table, proceeding according to the algorithm description: first incrementing along
# the diagonal, then scoring the adjacent row, then the column below
# Unlike Smith Waterman, the matrix is no longer of defined size, so we need to use while loops instead of for
while True: # I think it's cleaner to affirmatively break out of this main loop. Too bad Python has no do-while
cur_t_pos += 1 # Advance along the diagonal by incrementing
cur_q_pos += 1 # Remember, these refer to coordinates in our table, not in the actual target or query
# Probably we need to do some bounds checking here too with respect to absolute position in the query and
# target similar to what is done in the _fill_in_row_or_column method
# print "Beginning row starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_row = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start,
direction, self.ROW)
# print "Max in row was ", max_in_row
# print "Beginning column starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_column = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start,
target_start, direction, self.COLUMN)
# print "Max in column was ", max_in_column
if not max(max_in_row, max_in_column):
break # If the maximum value we encounter in both the rows and columns is zero, we are done building
# print "Finished building a matrix"
best_q_alignment = [] # best partial alignment for the query sequence
best_t_alignment = [] # best partial alignment for the target sequence
## Now we can go ahead and produce an output string corresponding to the best alignment
cur_q_pos = self.high_q_pos # our approach is start at the high scoring box, and to trace our way back
cur_t_pos = self.high_t_pos
while cur_q_pos >= 0 and cur_t_pos >= 0 and self.blast_table.setdefault((cur_q_pos, cur_t_pos), 0):
q_offset, t_offset = self.traceback_table[cur_q_pos, cur_t_pos]
# unpack the offset tuples stored in the traceback table
if q_offset:
try:
best_q_alignment.append(self.query[query_start + cur_q_pos * direction])
except IndexError:
print "YO!", query_start, cur_q_pos, direction, query_start + cur_q_pos * direction
print "Best_q_alignment", best_q_alignment
quit()
else:
best_q_alignment.append('-') # if the value is a zero, we are gapping!
if t_offset:
best_t_alignment.append(self.target[target_start + cur_t_pos * direction])
else:
best_t_alignment.append('-') # if the value is a zero, we are gapping, now the other way
cur_q_pos -= q_offset # Note that we are subtracting positively valued offsets.
cur_t_pos -= t_offset # This design choice makes later printing a traceback table a lot prettier.
# Alternatively, we could have built our alignments by adding things at the beginning using statements like
# best_t_alignment.insert(0,'-') etc. But in Python inserting items at the beginning of a list is much slower
# than appending at the end. We are better off appending at the end, then reversing the whole mess when done.
# print "Returning information about a partial alignment", self.high_score, best_q_alignment, best_t_alignment
# flip 'em both once we are done, since we built them "end-to-beginning". Note that we don't need to flip
# sequences corresponding to backwards extensions!
if direction == self.FORWARD:
best_q_alignment.reverse()
best_t_alignment.reverse()
return self.high_score, ''.join(best_q_alignment), ''.join(best_t_alignment)
def _fill_in_row_or_column(self, cur_q_pos, cur_t_pos, query_start, target_start, direction, row_or_column):
"""This private method will fill in a row or column, depending on the tuple passed in the row_or_column argument
Each row or column is filled in until a zero-valued result is obtained.
"""
# print "filling in a row or column"
max_in_current_row_or_column = 0
q_add, t_add = row_or_column
# These variables will control whether we fill in a row or a column. If the argument row_or_column = (0,1)
# we will end filling in a row. If the argument is assigned (1,0) we will fill a column
while True:
query_position = query_start + cur_q_pos * direction # remember, direction here is either -1 or 1
target_position = target_start + cur_t_pos * direction # so is a positive or negative offset multiplier
# query and target position variables here refer to the actual (absolute) position within the query
# and target sequences respectively
if (query_position < 0) or (target_position < 0):
# print "Ran out of query or target sequence while attempting backwards extension"
break # we can go no further
if (query_position >= self.querylen) or (target_position >= self.targetlen):
# print "Ran out of q or t while attempting forwards extension", query_position, target_position
break # again, we can go no further
q_char = self.query[query_position]
t_char = self.target[target_position]
# print "comparing", q_char, query_position, "to", t_char, target_position
# use PAM table to find the increment
increment = self.PAM[(q_char, t_char)]
match_score = self.blast_table[(cur_q_pos - 1, cur_t_pos - 1)] + increment
# improvement for later - decide whether to apply gap opening or gap extension penalties
# for the moment just set gap increment to the gap_open value
increment = self.gap_open
# scores associated with gapping in either the target or query
target_gap_score = self.blast_table.setdefault((cur_q_pos, cur_t_pos - 1), 0) + increment
query_gap_score = self.blast_table.setdefault((cur_q_pos - 1, cur_t_pos), 0) + increment
best_score = max(
(0, (0, 0)), # a 0 score will never have a traceback
(match_score, (1, 1)), # A match corresponds to a -1,-1 traceback
(target_gap_score, (0, 1)), # A target gap corresponds to a 0, -1 traceback
(query_gap_score, (1, 0)) # A query gap corresponds to a -1, 0 traceback
)
if not best_score[0]:
break
self.blast_table[cur_q_pos, cur_t_pos] = best_score[0]
# The first element in the tuple is the actual score to be recorded
# print "Recording", best_score[0], "at position", cur_q_pos, cur_t_pos
self.traceback_table[cur_q_pos, cur_t_pos] = best_score[1]
# The traceback offsets associated with the score are in a tuple as described earlier
if best_score[0] >= self.high_score:
# This represents the "high road" approach. "low road" would simply be >
self.high_score = best_score[0] # record the new high score
self.high_q_pos = cur_q_pos # also record the i and j positions associated with that score
self.high_t_pos = cur_t_pos
if best_score[0] > max_in_current_row_or_column:
max_in_current_row_or_column = best_score[0]
# The maximum in a particular row or column is different from the overall high score! We actually
# only care if this value is non-zero, as this will tell us that another iteration along the diagonal is
# required.
cur_t_pos += t_add # We end up adding either a zero or a one to these depending on
cur_q_pos += q_add # whether we are filling in a row or a column, setting us up for the next iteration
return max_in_current_row_or_column
def __str__(self):
""" This is a "special method attribute" overwriting the __str__ method defined in object.
__str__ controls what the string representation of objects of the BLAST class will look like.
It is invoked by print statements, which will print the return value. The bad news is that the routine here
was more-or-less just lifted from the old Smith Waterman program. However, BLAST uses a fundamentally
different sort of data structure for representing the blast and traceback tables.
Can you fix this method so that it does something useful?
"""
lineout = 'Scoring table:\n\t' + '\t'.join(self.target) + '\n'
# The above is just a fancy looking way to break the target string into tab-delimited individual characters
for i in xrange(self.querylen):
lineout += self.query[i] + "\t"
for j in xrange(self.targetlen):
lineout += str(self.blast_table[i, j]) + "\t"
lineout += '\n'
lineout += '\n\nTraceback table:\n\t' + '\t'.join(self.target) + '\n'
for i in xrange(self.querylen):
lineout += self.query[i] + "\t"
for j in xrange(self.targetlen):
lineout += ''.join([str(k) for k in self.traceback_table[i, j]]) + "\t"
# just prettying up the traceback tuples
lineout += '\n'
return lineout
# MAIN PROGRAM
# Query and target protein sequences to align against each other.
numbat = 'LVSMLESYVAAPDLILLDIMMPGMDGLELGGMDGGKPILT'
quoll = 'DDMEVIGTAYNPDVLVLDIIMPHLDGLAVAAMEAGRPLIS'
# calculate PAM120 matrix
A = PAM(N=120)
PAM1 = A.Build_PAMN()  # NOTE(review): despite the name, this holds the PAM120 matrix built above
# Run a protein BLAST of numbat (query) against quoll (target) and print the summaries.
B = BLAST(numbat, quoll, PAM=PAM1)
print B.score()
|
nilq/baby-python
|
python
|
import sublime, sublimeplugin
import os
class NewPluginCommand(sublimeplugin.WindowCommand):
    """Open a new buffer pre-filled with a skeleton Sublime Text plugin."""

    def run(self, window, args):
        plugin_view = window.newFile()

        # Point the working directory at the user packages folder so a
        # subsequent save defaults there; ignore failures (folder may not exist).
        user_dir = sublime.packagesPath() + u"/user"
        try:
            os.chdir(user_dir)
        except Exception:
            pass

        plugin_view.options().set("syntax", "Packages/Python/Python.tmLanguage")

        plugin_stub = """import sublime, sublimeplugin

# This simple plugin will add 'Hello, World!' to the end of the buffer when run.
# To run it, save it within the User/ directory, then open the console (Ctrl+~),
# and type: view.runCommand('sample')
#
# See http://www.sublimetext.com/docs/plugin-basics for more information
class SampleCommand(sublimeplugin.TextCommand):
    def run(self, view, args):
        view.insert(view.size(), "Hello, World!\\n")
"""
        plugin_view.insert(0, plugin_stub)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import math
def main():
    """Project Euler #1: print the sum of all multiples of 3 or 5 up to 999."""
    cap = 999
    # Inclusion-exclusion: multiples of 15 are counted in both the 3s and the 5s.
    total = sumOfMultiples(3, cap) + sumOfMultiples(5, cap) - sumOfMultiples(15, cap)
    print(total)
def sumOfMultiples(n, max):
    """Return the sum of all positive multiples of n that are <= max.

    Uses the closed form n * k * (k + 1) / 2 with k = floor(max / n).
    BUG FIX: the original used true division and math.floor, producing a float
    (e.g. 233168.0); pure integer arithmetic keeps the result an exact int.
    NOTE: the parameter name `max` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    k = max // n  # number of multiples of n in [1, max]
    return n * k * (k + 1) // 2


if __name__ == "__main__": main()
|
nilq/baby-python
|
python
|
from .orders import Order
from .customers import Customer
from .products import Product
from .line_items import LineItem
from .lot_code import LotCode
from .warehouse import Warehouse
from .location import Location
from .inventories import Inventory
from .inventory_adjustments import InventoryAdjustment
from .inventory_adjustment_logs import InventoryAdjustmentLog
from .receipt import Receipt
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from flask import Flask
from flask import abort
from flask import make_response
from flask import render_template
from flask import request
import sleekxmpp
app = Flask(__name__)
app.config.from_envvar("XMPP_CHAT_BADGE_CONFIG")
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
    # Python 2 only: reload() re-exposes sys.setdefaultencoding, which site.py
    # removes at startup, so SleekXMPP sees UTF-8 everywhere.
    reload(sys)
    sys.setdefaultencoding('utf8')
else:
    # Python 3: alias input() as raw_input so any py2-style code can call it.
    raw_input = input
class MUCBot(sleekxmpp.ClientXMPP):
    """XMPP client that reports the occupant count of a multi-user chat room
    via XEP-0030 service discovery."""

    def __init__(self, jid, password, nick):
        # nick: the resource/nickname appended to the room JID when querying.
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        self.nick = nick

        # The session_start event will be triggered when
        # the bot establishes its connection with the server
        # and the XML streams are ready for use. We want to
        # listen for this event so that we we can initialize
        # our roster.
        self.add_event_handler("session_start", self.start)

    def start(self, event):
        """
        Process the session_start event.
        Typical actions for the session_start event are
        requesting the roster and broadcasting an initial
        presence stanza.
        Arguments:
            event -- An empty dictionary. The session_start
                     event does not provide any additional
                     data.
        """
        self.get_roster()
        self.send_presence()

    def get_number_of_occupants(self, room):
        """Return the occupant count of *room*, or None.

        Sends a (cached) XEP-0030 disco#info query to the room and extracts the
        muc#roominfo_occupants field from the extended-info form. Returns None
        when the query fails or the room does not publish that field.
        The value returned is the raw element text (a string) -- presumably a
        decimal count; callers should convert/verify as needed.
        """
        querying_jid = '{}/{}'.format(room, self.nick)
        try:
            result = self.plugin['xep_0030'].get_info(
                jid=room,
                node=None,
                cached=True,
                ifrom=querying_jid,
                block=True,
                timeout=10
            )
        except sleekxmpp.exceptions.IqError:
            return None
        # Drill into the disco#info result for the jabber:x:data room-info form.
        fields = result.xml.find(
            '{http://jabber.org/protocol/disco#info}query').find(
            '{jabber:x:data}x').findall(
            '{jabber:x:data}field')
        for field in fields:
            if field.get('var') == 'muc#roominfo_occupants':
                return field.find('{jabber:x:data}value').text
        return None
def initBOT(jid, password, nick):
    """Create a MUCBot, register the service-discovery plugin and return it.

    Plugins may have interdependencies, but registration order does not matter.
    """
    client = MUCBot(jid, password, nick)
    client.register_plugin('xep_0030')  # Service Discovery
    return client
# Build the XMPP client once at import time and run its processing loop in the
# background (block=False) so Flask request handlers can query it synchronously.
bot = initBOT(app.config['JID'], app.config['PASSWORD'], app.config['NICK'])
bot.connect()
bot.process(block=False)
@app.route("/badge.svg")
def hello():
    """Render an SVG badge showing the occupant count of the requested room.

    Requires a ?room=<jid> query parameter; responds 400 when it is missing.
    """
    room_jid = request.args.get('room')
    if room_jid is None:
        return abort(400)

    occupant_count = bot.get_number_of_occupants(room_jid)
    rendered = render_template('badge.svg', number=occupant_count)

    response = make_response(rendered)
    response.content_type = 'image/svg+xml'
    # Allow clients/proxies to cache the badge for a minute.
    response.cache_control.max_age = 60
    return response
|
nilq/baby-python
|
python
|
import chess
from datetime import datetime
from tqdm import tqdm
from os import getcwd
from utils.trans_table_utils import *
from utils.history_utils import *
from utils.heuristics import combined
from agents.alpha_beta_agent import AlphaBetaAgent
from agents.alpha_beta_agent_trans import AlphaBetaAgentTrans
from agents.combined_agent import CombinedAgent
from agents.history_agent import OrderedAgent
from agents.minimax_agent import MiniMaxAgent
from agents.pv_agent import PVAgent
from agents.random_agent import RandAgent
from agents.random_agent_trans import RandAgentTrans
from agents.history_agent_trans import OrderedAgentTrans
class ChessGame:
    """Plays one game of chess between two agents and records per-agent stats."""

    def __init__(self, white_agent_name, white_agent, black_agent_name, black_agent):
        """Store the agents and their names; zero the per-game statistics."""
        self.white_agent_name = white_agent_name
        self.black_agent_name = black_agent_name
        self.white_agent = white_agent
        self.black_agent = black_agent
        # Search depth is only meaningful for search-based agents; others report 0.
        self.white_agent_depth = white_agent.maximum_depth if hasattr(white_agent, 'maximum_depth') else 0
        self.black_agent_depth = black_agent.maximum_depth if hasattr(black_agent, 'maximum_depth') else 0
        self.white_agent_num_moves = 0
        self.black_agent_num_moves = 0
        self.white_agent_decision_time = 0
        self.black_agent_decision_time = 0
        # Result encoding: 1 = win, -1 = loss, 0 = draw (or unfinished).
        self.white_agent_result = 0
        self.black_agent_result = 0
        self.board = chess.Board()

    def play_game(self, display=False):
        """Play rounds until the game ends; return a dict of stringified stats.

        BUG FIX: the original condition
        `not is_game_over() or is_seventyfive_moves() or is_fivefold_repetition()`
        kept looping whenever a draw rule had triggered (operator precedence).
        python-chess's is_game_over() already accounts for the seventyfive-move
        and fivefold-repetition rules, so it is sufficient on its own.
        """
        while not self.board.is_game_over():
            self.play_round(display=display)
        result = self.board.result()
        if result == '0-1':
            self.white_agent_result = -1
            self.black_agent_result = 1
        elif result == '1-0':
            self.white_agent_result = 1
            self.black_agent_result = -1
        return {
            'white_agent_name': self.white_agent_name,
            'black_agent_name': self.black_agent_name,
            'white_agent_depth': str(self.white_agent_depth),
            'black_agent_depth': str(self.black_agent_depth),
            'white_agent_num_moves': str(self.white_agent_num_moves),
            'black_agent_num_moves': str(self.black_agent_num_moves),
            'white_agent_decision_time': str(self.white_agent_decision_time),
            'black_agent_decision_time': str(self.black_agent_decision_time),
            'white_agent_result': str(self.white_agent_result),
            'black_agent_result': str(self.black_agent_result)
        }

    def play_round(self, display=False):
        """Play one white move and one black move, timing each decision."""
        start = datetime.utcnow()
        self.play_move(self.white_agent)
        self.white_agent_decision_time += (datetime.utcnow() - start).total_seconds()
        self.white_agent_num_moves += 1
        if display:
            print(self.board.unicode(borders=True))
        start = datetime.utcnow()
        self.play_move(self.black_agent)
        self.black_agent_decision_time += (datetime.utcnow() - start).total_seconds()
        self.black_agent_num_moves += 1
        if display:
            print(self.board.unicode(borders=True))

    def play_move(self, agent):
        """Ask the agent for a move on a board copy and push it if one is returned."""
        chosen_move = agent.get_move(self.board.copy())
        if chosen_move is not None:
            self.board.push_uci(chosen_move.uci())
def generate_data(white_agent_name, white_agent, black_agent_name, black_agent, path, num_runs=100, display=False):
    """Play num_runs games between the two agents and write a TSV of results.

    BUG FIX: the parameter order used to be
    (white_agent_name, black_agent_name, white_agent, black_agent), which did
    not match how every caller in this file invokes it — positionally as
    (name, agent, name, agent) — and the internal ChessGame(...) call swapped
    arguments again to compensate. The signature now matches the callers and
    ChessGame's constructor, so the two cancelling bugs are gone. Positional
    callers are unaffected.
    """
    with open(path, 'w') as f:
        f.write('game_number\tagent_type\tagent_color\tagent_depth\tagent_num_moves\tagent_decision_time\tgame_result\n')
        for g_n in tqdm(range(num_runs)):
            g = ChessGame(white_agent_name, white_agent, black_agent_name, black_agent).play_game(display=display)
            # One row per agent per game.
            f.write(str(g_n) + '\t' + g['white_agent_name'] + '\t' + 'white' + '\t' + g['white_agent_depth'] + '\t' + g['white_agent_num_moves'] + '\t' + g['white_agent_decision_time'] + '\t' + g['white_agent_result'] + '\n')
            f.write(str(g_n) + '\t' + g['black_agent_name'] + '\t' + 'black' + '\t' + g['black_agent_depth'] + '\t' + g['black_agent_num_moves'] + '\t' + g['black_agent_decision_time'] + '\t' + g['black_agent_result'] + '\n')
        # TODO: This is stupid hard-coded. Remove this you dummies. Love you
        write_trans_table(black_agent.trans_table, getcwd() + '/data/history_agent/trans_table.pickle')
        write_history_table(black_agent)
def main():
    """Run the configured agent match-up and persist the learned tables.

    Earlier experiment configurations (random / minimax / alpha-beta /
    transposition-table match-ups at various depths) were removed as dead
    commented-out code; see version-control history for the exact
    generate_data invocations.
    """
    agent1, agent2 = [OrderedAgent(chess.WHITE, combined, 2), OrderedAgentTrans(chess.BLACK, combined, 3)]
    generate_data('ordered_history2', agent1, 'ordered_history2_trans', agent2, getcwd()[:-5] + 'data/H2vHT2.csv', 1, display=True)
    # Persist the black agent's learned tables for reuse in later runs.
    write_trans_table(agent2.trans_table, getcwd()[:-5] + 'data/history_agent/trans_table.pickle')
    write_history_table(agent2)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
The galvo voltage control UI
Aditya Venkatramani 04/21 --> Adapted from zStage.py
"""
import os
from PyQt5 import QtCore, QtGui, QtWidgets
import storm_control.sc_library.parameters as params
import storm_control.hal4000.halLib.halDialog as halDialog
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.qtdesigner.galvo1D_ui as galvoUi
class GalvoView(halDialog.HalDialog):
    """
    Manages the galvo1D GUI.
    Buttons step the galvo voltage up/down by configurable small/large
    steps; a spin box jumps to an absolute voltage and a zero button
    re-zeroes the output. All hardware access goes through the
    functionality object installed later by setFunctionality().
    """
    def __init__(self, configuration = None, **kwds):
        super().__init__(**kwds)
        self.parameters = params.StormXMLObject()
        # Hardware functionality; stays None until setFunctionality() is
        # called, so the button handlers must not fire before then.
        self.galvo_fn = None
        # Load UI
        self.ui = galvoUi.Ui_Dialog()
        self.ui.setupUi(self)
        icon_path = os.path.join(os.path.dirname(__file__),"../icons/")
        self.ui.upLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2uparrow-128.png")))
        self.ui.upLButton.clicked.connect(self.handleUpLButton)
        self.ui.upSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1uparrow-128.png")))
        self.ui.upSButton.clicked.connect(self.handleUpSButton)
        self.ui.downSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1downarrow-128.png")))
        self.ui.downSButton.clicked.connect(self.handleDownSButton)
        self.ui.downLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2downarrow-128.png")))
        self.ui.downLButton.clicked.connect(self.handleDownLButton)
        self.ui.zeroButton.clicked.connect(self.handleZeroButton)
        self.ui.goButton.clicked.connect(self.handleGoButton)
        # Set to minimum size & fix.
        self.adjustSize()
        self.setFixedSize(self.width(), self.height())
        # Add parameters.
        # Step sizes are user-configurable; initial values come from the
        # module configuration. Units are presumably volts -- TODO confirm.
        self.parameters.add(params.ParameterRangeFloat(description ="Galvo large step size",
                                                       name = "volt_large_step",
                                                       value = configuration.get("large_step"),
                                                       min_value = 0.0,
                                                       max_value = 1000.0))
        self.parameters.add(params.ParameterRangeFloat(description ="Galvo small step size",
                                                       name = "volt_small_step",
                                                       value = configuration.get("small_step"),
                                                       min_value = 0.0,
                                                       max_value = 1000.0))
        #self.setEnabled(False)
    def getParameters(self):
        # Current parameter set (step sizes) for HAL's parameter system.
        return self.parameters
    def handleDownLButton(self, boolean):
        # Step the voltage down by the large step size.
        self.galvo_fn.goRelative(-1.0*self.parameters.get("volt_large_step"))
    def handleDownSButton(self, boolean):
        # Step the voltage down by the small step size.
        self.galvo_fn.goRelative(-1.0*self.parameters.get("volt_small_step"))
    def handleGoButton(self, boolean):
        # Jump to the absolute voltage entered in the spin box.
        self.galvo_fn.goAbsolute(self.ui.goSpinBox.value())
    def handleUpLButton(self, boolean):
        # Step the voltage up by the large step size.
        self.galvo_fn.goRelative(self.parameters.get("volt_large_step"))
    def handleUpSButton(self, boolean):
        # Step the voltage up by the small step size.
        self.galvo_fn.goRelative(self.parameters.get("volt_small_step"))
    def handleZeroButton(self, boolean):
        self.galvo_fn.zero()
    def handleGalvoVoltage(self, volt):
        # Slot for the functionality's galvoVoltage signal: display the
        # current voltage with two decimals.
        self.ui.galvoVoltLabel.setText("{0:.2f}".format(volt))
    def newParameters(self, parameters):
        self.parameters.setv("volt_large_step", parameters.get("volt_large_step"))
        self.parameters.setv("volt_small_step", parameters.get("volt_small_step"))
    def setFunctionality(self, galvo_fn):
        # Install the hardware functionality, subscribe to its voltage
        # updates and start from a zeroed output.
        self.galvo_fn = galvo_fn
        self.galvo_fn.galvoVoltage.connect(self.handleGalvoVoltage)
        self.galvo_fn.zero()
class Galvo(halModule.HalModule):
    """
    HAL module that owns the GalvoView dialog: adds it to the menu,
    requests the galvo hardware functionality and relays parameter
    messages between HAL and the view.
    """
    def __init__(self, module_params = None, qt_settings = None, **kwds):
        super().__init__(**kwds)
        self.configuration = module_params.get("configuration")
        self.view = GalvoView(module_name = self.module_name,
                              configuration = module_params.get("configuration"))
        self.view.halDialogInit(qt_settings,
                                module_params.get("setup_name") + " galvo")
    def cleanUp(self, qt_settings):
        self.view.cleanUp(qt_settings)
    def handleResponse(self, message, response):
        # Response to our "get functionality" request: hand the hardware
        # object over to the view.
        if message.isType("get functionality"):
            self.view.setFunctionality(response.getData()["functionality"])
    def processMessage(self, message):
        if message.isType("configure1"):
            # Register the dialog in HAL's menu, request the galvo
            # functionality named in the configuration, and publish the
            # initial parameters.
            self.sendMessage(halMessage.HalMessage(m_type = "add to menu",
                                                   data = {"item name" : "Galvo",
                                                           "item data" : "galvoview"}))
            self.sendMessage(halMessage.HalMessage(m_type = "get functionality",
                                                   data = {"name" : self.configuration.get("galvo_fn")}))
            self.sendMessage(halMessage.HalMessage(m_type = "initial parameters",
                                                   data = {"parameters" : self.view.getParameters()}))
        elif message.isType("new parameters"):
            # Report the old parameter set, apply the new one, then report
            # the resulting set -- HAL expects both responses.
            p = message.getData()["parameters"]
            message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
                                                              data = {"old parameters" : self.view.getParameters().copy()}))
            self.view.newParameters(p.get(self.module_name))
            message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
                                                              data = {"new parameters" : self.view.getParameters()}))
        elif message.isType("show"):
            if (message.getData()["show"] == "galvoview"):
                self.view.show()
        elif message.isType("start"):
            if message.getData()["show_gui"]:
                self.view.showIfVisible()
|
nilq/baby-python
|
python
|
from os import system, name
from random import choice

# Clear the terminal ("cls" on Windows, "clear" elsewhere).
system('cls' if name == 'nt' else 'clear')

# Fixes: the `random` import was previously buried mid-file (PEP 8 wants
# imports at the top) and the description contained a doubled word
# ("um um programa").
dsc = ('''DESAFIO 019:
Um professor quer sortear um dos seus quatro alunos para apagar
o quadro. Faça um programa que ajude ele, lendo o nome deles e
escrevendo o nome escolhido.
''')

# Read the four student names, then pick one uniformly at random.
alunos = []
alunos.append(input('Digite o nome do primeiro aluno: '))
alunos.append(input('Digite o nome do segundo aluno: '))
alunos.append(input('Digite o nome do terceiro aluno: '))
alunos.append(input('Digite o nome do quarto aluno: '))
print('O aluno escolhido é {}'.format(choice(alunos)))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import pytest
import sklearn.datasets as datasets
import sklearn.neural_network as nn
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestNeuralNtwork(tm.TestCase):
    """Checks that ModelFrame exposes sklearn.neural_network correctly."""

    def test_objectmapper(self):
        """The accessor attributes must be the sklearn classes themselves."""
        frame = pdml.ModelFrame([])
        self.assertIs(frame.neural_network.BernoulliRBM, nn.BernoulliRBM)
        self.assertIs(frame.neural_network.MLPClassifier, nn.MLPClassifier)
        self.assertIs(frame.neural_network.MLPRegressor, nn.MLPRegressor)

    @pytest.mark.parametrize("algo", ['BernoulliRBM'])
    def test_RBM(self, algo):
        """Fitting/transforming through ModelFrame matches plain sklearn."""
        digits = datasets.load_digits()
        frame = pdml.ModelFrame(digits)

        wrapped = getattr(frame.neural_network, algo)(random_state=self.random_state)
        reference = getattr(nn, algo)(random_state=self.random_state)

        frame.fit(wrapped)
        reference.fit(digits.data, digits.target)

        transformed = frame.transform(wrapped)
        expected = reference.transform(digits.data)
        self.assertIsInstance(transformed, pdml.ModelFrame)
        self.assert_numpy_array_almost_equal(transformed.data.values, expected)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Find the water-land threshold in an image (for example the band 7 of
a WorldView multispectral image by computing a kernel-density
estimate using Gaussian kernels. A good threshold is usually the
first minimum of this estimate.
This tool needs python 3, numpy, scipy, matplotlib, and osgeo.
'''
import sys, time, math, argparse
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from osgeo import gdal
from scipy.signal import argrelextrema
# Try to use sklearn as well, gives very similar results in very similar time.
# Install this with:
# conda install -c conda-forge scikit-learn
use_sklearn = False # off by default
if use_sklearn:
from sklearn.neighbors import KernelDensity
# Command-line interface.
usage = "python bathy_threshold_calc.py --image <image> --num-samples <num>."
parser = argparse.ArgumentParser(usage=usage,
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--image', dest = 'image', default = "",
                    help='The single-channel image to use to find the water-land threshold.')
parser.add_argument("--num-samples", dest="num_samples", type=int, default = 1000000,
                    help="The number of samples to pick from the image (more samples " +
                    "will result in more accuracy but will be slower).")
parser.add_argument("--no-plot", action="store_true", default=False,
                    dest="no_plot", help="Do not show the plot.")
(options, args) = parser.parse_known_args(sys.argv)
if options.image == "":
    parser.print_help()
    sys.exit(1)
print("Image file is " + options.image)
print("Number of samples is " + str(options.num_samples))
# Try to read the file using GDAL
try:
    ds = gdal.Open(options.image, gdal.GA_ReadOnly)
    if ds is None:
        print("Could not read the file: " + options.image)
        sys.exit(1)
    if ds.RasterCount != 1:
        print("Expecting one band in " + options.image + ", but got instead: " +
              str(ds.RasterCount) + ".")
        sys.exit(1)
    rb = ds.GetRasterBand(1)
    image = rb.ReadAsArray()
except Exception as err:
    # NOTE(review): `err` is swallowed here; printing it would make
    # diagnosis easier.
    print("Could not read the file: " + options.image)
    print("It must exist and be a single-band TIF file.")
    sys.exit(1)
num_rows = image.shape[0]
num_cols = image.shape[1]
if num_rows <= 0 or num_cols <= 0:
    print("Expecting an image with positive dimensions")
    sys.exit(1)
# Choose a subsampled grid whose total count approximates --num-samples.
num_vals = num_rows * num_cols
samp_ratio = math.sqrt( float(num_vals) / float(options.num_samples) )
num_sub_rows = round(num_rows / samp_ratio)
if num_sub_rows < 1:
    num_sub_rows = 1
if num_sub_rows > num_rows:
    num_sub_rows = num_rows
num_sub_cols = round(num_cols / samp_ratio)
if num_sub_cols < 1:
    num_sub_cols = 1
if num_sub_cols > num_cols:
    num_sub_cols = num_cols
print("Number of image rows and columns: " + str(num_rows) + ", " + str(num_cols))
print("Picking a uniform sample of dimensions " + str(num_sub_rows) + ", " + str(num_sub_cols))
print("Please be patient. It make take several minutes to find the answer.")
# Subsample uniformly the image
# NOTE(review): if num_sub_rows or num_sub_cols equals 1 the divisions
# below divide by zero -- confirm inputs can never be that small.
sub_rows = np.round(np.array(range(num_sub_rows)) * float(num_rows - 1)/float(num_sub_rows - 1))
sub_cols = np.round(np.array(range(num_sub_cols)) * float(num_cols - 1)/float(num_sub_cols - 1))
sub_rows = sub_rows.astype(int)
sub_cols = sub_cols.astype(int)
sub_image = image[sub_rows, :][:, sub_cols]
# Make it into an array
data = sub_image.reshape(-1)
xvals = np.linspace(data.min(), data.max(), 1000)
# Kernel-density estimate of the sampled intensity distribution; local
# minima of the KDE are candidate water/land thresholds.
beg = time.time()
kde = st.gaussian_kde(data)
yvals = kde(xvals)
min_pos = argrelextrema(yvals, np.less); min_vals = xvals[min_pos]
end = time.time()
# Note that it is not universal for it to be first minimum. Sometimes
# the second minimum is better!
# NOTE(review): min_vals[0] raises IndexError if the KDE has no interior
# minimum -- confirm that case cannot occur for real inputs.
print("Positions of the minima: ", min_vals)
print("Suggested threshold is the position of the first minimum: ", min_vals[0])
print("Please verify with the graph. There is a chance subsequent minima may work better.")
print("Elapsed time in seconds:", round(10.0*(end - beg))/10.0)
# sklearn, with similar results
if use_sklearn:
    beg2 = time.time()
    kernel = 'gaussian'
    kde2 = KernelDensity(kernel = kernel, bandwidth = 10).fit(data[:, np.newaxis])
    log_dens = kde2.score_samples(xvals[:, np.newaxis])
    yvals2 = np.exp(log_dens).reshape(-1)
    min_pos2 = argrelextrema(yvals2, np.less); min_vals2 = xvals[min_pos2]
    end2 = time.time()
    print("Elapsed time for sklearn kernel estimation in seconds:", round(10.0*(end2 - beg2))/10.0)
    print("Suggested threshold is the position of the first minimum2: ", min_vals2[0])
    print("Positions of the minima2: ", min_vals2)
# Plot the kernel-density estimate and highlight the minima
if not options.no_plot:
    plt.figure(1)
    plt.hist(data, bins=100, density=True, label="Data histogram")
    plt.plot(xvals, yvals, label="KDE", c="red")
    plt.vlines(min_vals, ymin=0, ymax=yvals.max(),colors='g', ls="--", label="Minima", alpha=0.7)
    if use_sklearn:
        plt.plot(xvals, yvals2, color = 'green', lw = 2,
                 linestyle='-', label="kernel = '{0}'".format(kernel))
    plt.legend()
    plt.show()
|
nilq/baby-python
|
python
|
""" Tests for the main server file. """
from unittest import TestCase
from unittest.mock import patch
from app import views
class ViewsTestCase(TestCase):
    """ Our main server testcase. """

    def test_ping(self):
        """ ping() must answer with 'pong'. """
        self.assertEqual(views.ping(None, None), 'pong')

    @patch('app.views.notify_recipient')
    @patch('app.views.is_valid_pull_request')
    def test_valid_pull_request(self, validator, notifier):
        """ Should notify upon a valid pull request. """
        validator.return_value = True
        notifier.return_value = True
        self.assertEqual(views.pull_request({}, None), 'Recipient Notified')

    @patch('app.views.is_valid_pull_request')
    def test_invalid_pull_request(self, validator):
        """ Should ignore an invalid pull request. """
        validator.return_value = False
        self.assertRegex(views.pull_request({}, None), 'ignored')
|
nilq/baby-python
|
python
|
#
__doc__ = """
Schema for test/simulator configuration file.
TODO:
- Somehow, validation of test config doesn't work correctly. Only type conversion works.
"""
import os
from StringIO import StringIO

from configobj import ConfigObj, flatten_errors
from validate import Validator, ValidateError, VdtTypeError, VdtValueError

import mproboenv
from environ import EnvFileLoc, EnvFileLoc, EnvTestcfgSection, EnvTestcfgOption, EnvTestcfgPort, EnvSimcfg, EnvPortName
from dave.common.misc import get_abspath, from_engr, force_list, str2num
from dave.common.davelogger import DaVELogger
import dave.mprobo.mchkmsg as mcode
class SchemaConfig(object):
    """Validate a ConfigObj against a configspec schema file.
    NOTE: this module is Python 2 (print statement in _output_vdterror,
    StringIO import).
    """
    def __init__(self, configobj, configspecfile, config_type, logger_id='logger_id'):
        self._logger = DaVELogger.get_logger('%s.%s.%s' % (logger_id, __name__, self.__class__.__name__)) # logger
        self.cfgtype = config_type
        configspec = ConfigObj(infile=configspecfile, interpolation=False, list_values=False)
        # Round-trip the config through an in-memory buffer so it can be
        # re-parsed with the configspec attached.
        vfile = StringIO()
        configobj.write(vfile)
        self.config = ConfigObj(vfile.getvalue().splitlines(), configspec=configspec)
        vfile.close()
    def _validate(self, custom_check = {}):
        # Run configobj validation with optional custom check functions and
        # return the flattened (section_path, key, error) tuples.
        # NOTE(review): mutable default argument -- harmless here since it
        # is never mutated, but worth cleaning up.
        self.vtor = Validator(custom_check)
        results = self.config.validate(self.vtor) # this will always not be True
        return flatten_errors(self.config, results)
    def _output_vdterror(self, error_key):
        # Raise for validation errors on keys listed in `error_key`; all
        # other keys only produce a printed warning.
        for (section_list, key, _) in self.vdt_errors:
            if key is None:
                pass
                #print 'The following sections "%s" is(are) missing in the %s configuration' % ('.'.join(section_list), self.cfgtype)
            else:
                msg = mcode.ERR_011 % (key, ','.join(section_list))
                if key in error_key:
                    raise ValidateError(msg)
                else:
                    print '[Warning]' + msg
    def get_cfg(self):
        ''' get validated ConfigObj '''
        return self.config
class SchemaSimulatorConfig(SchemaConfig):
    """Validate the simulator configuration (golden/revised models)."""
    def __init__(self, configobj, is_goldenonly=False, logger_id='logger_id'):
        self._tenvf = EnvFileLoc()
        self._tenvsc = EnvSimcfg()
        self._schema_filename = mproboenv.get_simcfg()
        SchemaConfig.__init__(self, configobj, self._schema_filename, 'simulator', logger_id)
        self.vdt_errors = self._validate()
        self._run_custom_check(is_goldenonly)
    def raise_vdterror(self):
        # Only the `model` and `simulator` keys are fatal; other
        # validation errors are reported as warnings.
        self._output_vdterror([self._tenvsc.model, self._tenvsc.simulator])
    def _run_custom_check(self, is_goldenonly):
        # The golden model is always checked; the revised model only when
        # this is not a golden-only run. BUGFIX: the original expression
        # `[golden] + [] if is_goldenonly else [revised]` parsed as
        # `([golden] + []) if is_goldenonly else [revised]` (the ternary
        # binds looser than `+`), so the golden model was silently skipped
        # whenever a revised model was present.
        models = [self._tenvsc.golden] + ([] if is_goldenonly else [self._tenvsc.revised])
        for x in models:
            self.config[x] = self._chk_circuit_subsection(self.config[x])
            self.config[x] = self._chk_ams_control(self.config[x])
            self.config[x] = self._chk_hdl_files(self.config[x])
    def _chk_ams_control(self, section):
        # An AMS control file only makes sense for model="ams"; an empty
        # value means "unused" and the key is dropped.
        if section[self._tenvsc.ams_control_file] == '':
            del section[self._tenvsc.ams_control_file]
            return section
        assert section[self._tenvsc.model] == self._tenvsc.model_ams, '"%s" is valid only for model="%s"' % (self._tenvsc.ams_control_file, self._tenvsc.model_ams)
        v = section[self._tenvsc.ams_control_file]
        assert type(v)==str, mcode.ERR_012 % (v, self._tenvsc.ams_control_file)
        fname = get_abspath(v, do_assert=False, logger=self._logger)
        #assert os.path.isfile(fname), mcode.ERR_013 % v
        section[self._tenvsc.ams_control_file]=fname
        return section
    def _chk_circuit_subsection(self, section):
        ''' circuit subsection is not validated with schema.
        Rather, it is separately validated because it depends on 'model' '''
        if section[self._tenvsc.circuit] == {}:
            del section[self._tenvsc.circuit]
            return section
        assert section[self._tenvsc.model] == self._tenvsc.model_ams, mcode.ERR_014 % self._tenvsc.model_ams
        for k,v in section[self._tenvsc.circuit].items():
            assert type(v)==str, mcode.ERR_015 % (v,k)
            # Normalize each circuit file to an absolute path.
            fname = get_abspath(v, do_assert=False, logger=self._logger)
            #assert os.path.isfile(fname), mcode.ERR_016 % v
            section[self._tenvsc.circuit][k]=fname
        return section
    def _chk_hdl_files(self, section):
        ''' check hdl files exist and update path'''
        if section[self._tenvsc.hdl_files] == ['']:
            section[self._tenvsc.hdl_files] = []
        for idx, f in enumerate(section[self._tenvsc.hdl_files]):
            assert type(f)==str, mcode.ERR_017 % self._tenvsc.hdl_files
            fname = get_abspath(f, do_assert=False, logger=self._logger)
            #assert os.path.isfile(fname), mcode.ERR_018 % f
            section[self._tenvsc.hdl_files][idx] = fname
        return section
#--------------------------------------------------------------
def _chk_engrtime(value):
    ''' Check if value is time in engr notation like 11ns, 5fs, etc. '''
    time_suffix = 's'
    # Must be a string ending in 's' whose prefix parses as an
    # engineering-notation number. Use `is None` (identity), not
    # `== None`, to test the parse-failure sentinel from from_engr().
    if not isinstance(value, str) or value[-1] != time_suffix or from_engr(value[:-1]) is None:
        raise VdtTypeError(value)
    return value
def _chk_verilogtime(value):
    ''' Check if value is Verilog timescale format like 1fs, 10fs, 100fs, ... '''
    # BUGFIX: the original called the undefined name `check_engrtime`,
    # which raised NameError instead of validating; the sibling checker is
    # `_chk_engrtime`. VdtValueError is now imported from `validate` (it
    # previously raised NameError too).
    _chk_engrtime(value)
    # Mantissa must be 1 followed only by zeros. Compare with `==`, not
    # `is`: identity of one-character strings is an interning accident.
    # NOTE(review): this loop also inspects the unit characters (e.g. the
    # "fs" in "10fs"), so any value with a unit suffix is rejected --
    # confirm whether only the leading digits were meant to be checked.
    if value[0] == '1' and all(x == '0' for x in value[1:]):
        return value
    else:
        raise VdtValueError(value)
class SchemaTestConfig(SchemaConfig):
    """Validate the test configuration (per-test option/port sections)."""
    #(_pkg_module_root_dir,dummy_filename) = os.path.split(os.path.abspath(__file__))
    def __init__(self, configobj, logger_id='logger_id'):
        self._tenvf = EnvFileLoc()
        self._tenvs = EnvTestcfgSection()
        self._tenvr = EnvTestcfgOption()
        self._tenvtp = EnvTestcfgPort()
        self._tenvp = EnvPortName()
        self._schema_filename = mproboenv.get_testcfg()
        SchemaConfig.__init__(self, configobj, self._schema_filename, 'test', logger_id)
        # Register the custom checks referenced by the schema file.
        # NOTE(review): the key is spelled 'time_verilg' -- confirm it
        # matches the spelling used in the schema, otherwise the Verilog
        # timescale check never runs.
        self.vdt_errors = self._validate({
            'time_engr' : _chk_engrtime,
            'time_verilg' : _chk_verilogtime
        })
        self._run_custom_check()
    def raise_vdterror(self):
        # No keys are fatal for test configs; everything is a warning.
        self._output_vdterror([])
    def _run_custom_check(self):
        # Post-process the option and port sections of every test.
        for t in self.config.keys():
            self.config[t][self._tenvs.option] = self._chk_regress(self.config[t][self._tenvs.option])
            self.config[t][self._tenvs.port] = self._chk_port(self.config[t][self._tenvs.port])
    def _chk_regress(self, section):
        ''' do_not_progress subsection under regression section
        it takes/returns the whole regress section
        '''
        if self._tenvr.regression_do_not_regress not in section.keys():
            return section
        # Normalize every entry's value to a list.
        section[self._tenvr.regression_do_not_regress] = dict([(k,force_list(v)) for k,v in section[self._tenvr.regression_do_not_regress].items()])
        return section
    def _chk_port(self, section):
        ''' prohibited, default_value '''
        for k,v in section.items():
            section[k][self._tenvtp.default_value] = self._chk_port_default(section[k])
            #TODO: validate prohibited
            #try:
            #  section[k][self._tenvtp.prohibited] = self._chk_port_prohibited(section[k])
            #except:
            #  pass
        return section
    def _chk_port_default(self, port):
        ''' prohibited, default_value '''
        # Analog ports take float default values, digital ports take int.
        ana_port = [self._tenvp.AnalogInput, self._tenvp.AnalogOutput]
        dtype = float if port[self._tenvtp.port_type] in ana_port else int
        return str2num(port[self._tenvtp.default_value], dtype)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Progress component
"""
from bowtie._component import Component
class Progress(Component):
    """This component is used by all visual components and
    is not meant to be used alone.
    By default, it is not visible.
    It is an opt-in feature and you can happily use Bowtie
    without using the progress indicators at all.
    It is useful for indicating progress to the user for long-running processes.
    It can be accessed through the ``.progress`` accessor.
    Examples
    --------
    >>> plotly = Plotly()
    >>> def callback(x):
    >>>     plotly.progress.do_visible(True)
    >>>     plotly.progress.do_percent(0)
    >>>     compute1()
    >>>     plotly.progress.do_inc(50)
    >>>     compute2()
    >>>     plotly.progress.do_visible(False)
    """
    _TEMPLATE = 'progress.jsx'
    _COMPONENT = 'CProgress'
    _PACKAGE = None
    _TAG = ('<CProgress '
            'socket={{socket}} '
            'uuid={{{uuid}}} '
            '>')
    def _instantiate(self):
        # Render the JSX tag with this component's uuid quoted inline.
        return self._TAG.format(
            uuid="'{}'".format(self._uuid)
        )
    # The do_* methods return their argument; the base Component
    # machinery presumably forwards that value to the client as the
    # command payload -- confirm in bowtie._component.
    # pylint: disable=no-self-use
    def do_percent(self, percent):
        """Set the percentage of the progress.
        Parameters
        ----------
        percent : number
            Sets the progress to this percentage.
        Returns
        -------
        None
        """
        return percent
    def do_inc(self, inc):
        """Increments the progress indicator.
        Parameters
        ----------
        inc : number
            Value to increment the progress.
        Returns
        -------
        None
        """
        return inc
    def do_visible(self, visible):
        """Hides and shows the progress indicator.
        Parameters
        ----------
        visible : bool
            If ``True`` shows the progress indicator
            otherwise it is hidden.
        Returns
        -------
        None
        """
        return visible
    def do_active(self):
        """Set the progress indicator to its 'active' state.
        NOTE(review): the original docstring was a copy-paste of
        do_visible's; the exact visual effect is defined client-side in
        progress.jsx -- confirm.
        Returns
        -------
        None
        """
        pass
    def do_success(self):
        """Set the progress indicator to its 'success' state.
        NOTE(review): the original docstring was a copy-paste of
        do_visible's; the exact visual effect is defined client-side in
        progress.jsx -- confirm.
        Returns
        -------
        None
        """
        pass
    def do_error(self):
        """Set the progress indicator to its 'error' state.
        NOTE(review): the original docstring was a copy-paste of
        do_visible's; the exact visual effect is defined client-side in
        progress.jsx -- confirm.
        Returns
        -------
        None
        """
        pass
|
nilq/baby-python
|
python
|
from rest_framework.test import APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from requests.auth import HTTPBasicAuth
from django.conf import settings
class JWTViewsTestCase(APITestCase):
    """Tests for the session-authenticated token endpoint."""

    def test_fails_when_logged_out(self):
        """Anonymous POSTs must be rejected."""
        self.client.logout()
        response = self.client.post(reverse('auth-api-token-session'), {})
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data["detail"], "Authentication credentials were not provided.")

    def test_fails_with_non_session_authentication(self):
        """Only session auth may mint a token; basic auth is refused."""
        # Will try HTTP Basic Authentication, make sure that's elected in the settings
        self.assertIn('rest_framework.authentication.BasicAuthentication',
                      settings.REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'])
        # NOTE(review): objects.create() stores the raw string in the
        # password field (unhashed), so these basic-auth credentials could
        # never validate anyway; use create_user() if real credential
        # checking is intended -- confirm. The returned user object was
        # previously bound to an unused local; dropped.
        get_user_model().objects.create(username='user', password='pass')
        self.client.auth = HTTPBasicAuth('user', 'pass')
        response = self.client.post(reverse('auth-api-token-session'), {})
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data["detail"], "Authentication credentials were not provided.")

    def test_succeeds_with_session_authentication(self):
        """A logged-in session gets a token back."""
        get_user_model().objects.create_user(username='user', password='pass')
        self.client.login(username='user', password='pass')
        response = self.client.post(reverse('auth-api-token-session'), {})
        self.assertEqual(response.status_code, 200)
        self.assertIn('token', response.data)
|
nilq/baby-python
|
python
|
from guhs.guhs_configuration import GuhsConfiguration
def from_guhs_configuration(configuration: GuhsConfiguration):
    """Serialize a GuhsConfiguration into a plain, JSON-friendly dict."""
    serialized_targets = [
        {'order_id': target.order_id, 'name': target.name}
        for target in configuration.targets
    ]
    return {
        'targets': serialized_targets,
        'boot_selection_timeout': configuration.boot_selection_timeout,
        'default_target': str(configuration.default_target.order_id),
    }
|
nilq/baby-python
|
python
|
from email.policy import default
from .base import print_done, finalize, SRC_PATH, CONFIG_PATH
import invoke
@invoke.task
def isort(context, src_path=SRC_PATH):
    """Sort imports in *src_path* with isort (vertical hanging indent)."""
    print('Running isort...')
    command = 'isort {src_path} -m VERTICAL_HANGING_INDENT --tc'.format(src_path=src_path)
    context.run(command)
    print_done(indent=4)
@invoke.task
def yapf(context, src_path=SRC_PATH, config_path=CONFIG_PATH):
    """Reformat *src_path* in place with yapf using the project style."""
    print('Running yapf...')
    style_file = config_path / '.style.yapf'
    command = 'yapf --style="{config_file}" {src_path} -r -i'.format(
        src_path=src_path, config_file=style_file)
    context.run(command)
    print_done(indent=4)
@invoke.task
def unify(context, src_path=SRC_PATH):
    """Normalize string-literal quoting in *src_path* with unify."""
    print('Running unify...')
    # BUGFIX: the original `--quote "\""` collapsed to the shell token
    # `"""` (an empty string followed by an unterminated quote), which is
    # a shell syntax error. Quote the argument so unify receives a single
    # double-quote character.
    context.run('unify {src_path} -r -i --quote \'"\''.format(src_path=src_path))
    print_done(indent=4)
@invoke.task(name='format', default=True, post=[isort, yapf, unify, ])
def format_task(_):
    # The body only prints a banner; the real work happens in the `post`
    # tasks that invoke runs afterwards (isort -> yapf -> unify).
    print("Running formatters...")
# Expose the tasks as an invoke collection named "format":
#   inv format.isort / format.yapf / format.unify / format.all
formatter = invoke.Collection('format')
formatter.add_task(isort, 'isort')
formatter.add_task(yapf, 'yapf')
formatter.add_task(unify, 'unify')
formatter.add_task(format_task, 'all')
|
nilq/baby-python
|
python
|
# Rental price calculator: a rented car costs R$60 per day plus
# R$0.15 per kilometre driven. Asks for the distance and the number of
# rental days, then prints the total.
km = float(input("Quantos km percorreu?: "))
dia = int(input("Quantos dias ele foi alugado?: "))
custo_total = dia * 60 + km * 0.15
print("O valor a ser pago é: R${:.2f}".format(custo_total))
|
nilq/baby-python
|
python
|
import os
# import pprint
import re
from datetime import datetime
from pathlib import Path
from nornir_napalm.plugins.tasks import napalm_get
from nornir_utils.plugins.functions import print_result
from nornir_utils.plugins.tasks.files import write_file
# from nornir_netmiko.tasks import netmiko_send_command, netmiko_send_config
from helpers import Helpers
from app.utils import write_cfg_on_db, get_last_config_for_device
from path_helper import search_configs_path
from differ import diff_get_change_state
from config import *
# nr_driver = Helpers()
# Shared nornir driver factory, configured with the credentials from config.
drivers = Helpers(username=username, password=password)
# NOTE(review): this rebinding shadows the imported function
# `search_configs_path`; it works because the function is only called
# once, but a distinct name would be clearer.
search_configs_path = search_configs_path()
configs_folder_path = f"{Path(__file__).parent.parent}/configs"
# Get time for configs name
timestamp = datetime.now()
# Cisco IOS keeps rewriting the "ntp clock-period" line on its own,
# which would make every backup look changed; strip it before diffing.
def clear_clock_period(config: str) -> str:
    """Return *config* with any `ntp clock-period <n>` line removed."""
    clock_period_re = r"ntp\sclock-period\s[0-9]{1,30}\n"
    return re.sub(clock_period_re, "", str(config))
# Start process backup configs
def backup_config(task, path):
    """
    This function starts to process backup config on the network devices
    and writes a .cfg file under *path* only when the running config
    differs from the last saved one.
    """
    # Get ip address in task
    ipaddress = task.host.hostname
    # Get Last config dict
    last_config = search_configs_path.get_lats_config_for_device(ipaddress=ipaddress)
    # Start task and get config on device
    device_config = task.run(task=napalm_get, getters=["config"])
    device_config = device_config.result["config"]["running"]
    # Strip the ever-changing "ntp clock-period" line on IOS so it does
    # not create spurious diffs.
    if task.host.platform == "ios" and fix_clock_period is True:
        device_config = clear_clock_period(device_config)
    # Open last config
    if last_config is not None:
        last_config = open(last_config["config_path"])
        # Get candidate config from nornir tasks
        candidate_config = device_config
        # Get diff result state if config equals pass
        result = diff_get_change_state(
            config1=candidate_config, config2=last_config.read()
        )
        # Close last config file
        # NOTE(review): a `with open(...)` block would guarantee closing
        # even if the diff raises.
        last_config.close()
    else:
        result = False
    # If configs not equals
    if result is False:
        # Create directory for configs
        if not os.path.exists(
            f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}"
        ):
            os.mkdir(f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}")
        # Start task for write cfg file
        task.run(
            task=write_file,
            content=device_config,
            filename=f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}/{task.host.hostname}.cfg",
        )
# Start process backup configs
def backup_config_on_db(task):
    """
    This function starts to process backup config on the network devices
    and stores it in the database only when it differs from the last
    saved configuration.
    Need for work nornir task
    """
    # Get ip address in task
    ipaddress = task.host.hostname
    # Get the latest configuration file from the database,
    # needed to compare configurations
    last_config = get_last_config_for_device(ipaddress=ipaddress)
    # Run the task to get the configuration from the device
    device_config = task.run(task=napalm_get, getters=["config"])
    device_config = device_config.result["config"]["running"]
    # Strip the ever-changing "ntp clock-period" line on IOS so it does
    # not create spurious diffs.
    if task.host.platform == "ios" and fix_clock_period is True:
        device_config = clear_clock_period(device_config)
    # Open last config
    if last_config is not None:
        last_config = last_config["last_config"]
        # Get candidate config from nornir tasks
        candidate_config = device_config
        # Get diff result state if config equals pass
        result = diff_get_change_state(config1=candidate_config, config2=last_config)
    else:
        result = False
    # If configs not equals
    if result is False:
        write_cfg_on_db(ipaddress=str(ipaddress), config=str(device_config))
def main():
    """
    Back up device configurations to .cfg files on disk.
    """
    # Start process
    with drivers.nornir_driver() as nr_driver:
        result = nr_driver.run(
            name="Backup configurations", path=configs_folder_path, task=backup_config
        )
        # Print task result
        print_result(result, vars=["stdout"])
        # if you have error uncomment this row, and you see all result
        # print_result(result)
def main2():
    """
    Back up device configurations to the database.
    """
    # Start process
    with drivers.nornir_driver() as nr_driver:
        result = nr_driver.run(name="Backup configurations", task=backup_config_on_db)
        # Print task result
        print_result(result, vars=["stdout"])
        # if you have error uncomment this row, and you see all result
        # print_result(result)
if __name__ == "__main__":
    # Default entry point backs up configurations to the database.
    main2()
|
nilq/baby-python
|
python
|
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import state_changes
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
class StateTestChange(state_changes._StateChangeState):
    """Three example states used by the state-machine tests below."""
    a = 1
    b = 2
    c = 3
class StateMachineTest(fixtures.TestBase):
    def test_single_change(self):
        """test single method that declares and invokes a state change"""
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            # Callable from state a or NO_CHANGE; must end in state b.
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.b
            )
            def move_to_b(self):
                self._state = StateTestChange.b
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        m.move_to_b()
        eq_(m._state, StateTestChange.b)
    def test_single_incorrect_change(self):
        """test single method that declares a state change but changes to the
        wrong state."""
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            # Declares destination b but actually moves to c -- the
            # framework must flag this as an illegal change.
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.b
            )
            def move_to_b(self):
                self._state = StateTestChange.c
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        with expect_raises_message(
            sa_exc.IllegalStateChangeError,
            r"Method 'move_to_b\(\)' "
            r"caused an unexpected state change to <StateTestChange.c: 3>",
        ):
            m.move_to_b()
    def test_single_failed_to_change(self):
        """test single method that declares a state change but didn't do
        the change."""
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            # Declares destination b but never assigns _state -- the
            # framework must detect the missing transition.
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.b
            )
            def move_to_b(self):
                pass
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        with expect_raises_message(
            sa_exc.IllegalStateChangeError,
            r"Method 'move_to_b\(\)' failed to change state "
            "to <StateTestChange.b: 2> as "
            "expected",
        ):
            m.move_to_b()
    def test_change_from_sub_method_with_declaration(self):
        """test successful state change by one method calling another that
        does the change.
        """
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.b
            )
            def _inner_move_to_b(self):
                self._state = StateTestChange.b
            # The outer method delegates via _expect_state(), which
            # requires the inner call to land in state b.
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.b
            )
            def move_to_b(self):
                with self._expect_state(StateTestChange.b):
                    self._inner_move_to_b()
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        m.move_to_b()
        eq_(m._state, StateTestChange.b)
    def test_method_and_sub_method_no_change(self):
        """test methods that declare the state should not change"""
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            # Both methods declare "callable in state a, no change".
            @state_changes._StateChange.declare_states(
                (StateTestChange.a,), _NO_CHANGE
            )
            def _inner_do_nothing(self):
                pass
            @state_changes._StateChange.declare_states(
                (StateTestChange.a,), _NO_CHANGE
            )
            def do_nothing(self):
                self._inner_do_nothing()
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        m._state = StateTestChange.a
        m.do_nothing()
        eq_(m._state, StateTestChange.a)
    def test_method_w_no_change_illegal_inner_change(self):
        """an inner method that changes state may not run inside an outer
        method declared as no-change."""
        _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
        class Machine(state_changes._StateChange):
            @state_changes._StateChange.declare_states(
                (StateTestChange.a, _NO_CHANGE), StateTestChange.c
            )
            def _inner_move_to_c(self):
                self._state = StateTestChange.c
            # Declared no-change, but calls a method declared to move to
            # c -- must be rejected before the inner method runs.
            @state_changes._StateChange.declare_states(
                (StateTestChange.a,), _NO_CHANGE
            )
            def do_nothing(self):
                self._inner_move_to_c()
        m = Machine()
        eq_(m._state, _NO_CHANGE)
        m._state = StateTestChange.a
        with expect_raises_message(
            sa_exc.IllegalStateChangeError,
            r"Method '_inner_move_to_c\(\)' can't be called here; "
            r"method 'do_nothing\(\)' is already in progress and this "
            r"would cause an unexpected state change to "
            "<StateTestChange.c: 3>",
        ):
            m.do_nothing()
        eq_(m._state, StateTestChange.a)
def test_change_from_method_sub_w_no_change(self):
    """test methods that declare the state should not change"""
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        @state_changes._StateChange.declare_states(
            (StateTestChange.a,), _NO_CHANGE
        )
        def _inner_do_nothing(self):
            pass

        @state_changes._StateChange.declare_states(
            (StateTestChange.a,), StateTestChange.b
        )
        def move_to_b(self):
            # Calling a declared no-change method does not interfere with
            # this method's own declared a -> b transition.
            self._inner_do_nothing()
            self._state = StateTestChange.b

    m = Machine()
    eq_(m._state, _NO_CHANGE)
    m._state = StateTestChange.a
    m.move_to_b()
    eq_(m._state, StateTestChange.b)
def test_invalid_change_from_declared_sub_method_with_declaration(self):
    """A method uses _expect_state() to call a sub-method, which must
    declare that state as its destination if no exceptions are raised.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        # method declares StateTestChange.c so can't be called under
        # expect_state(StateTestChange.b)
        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.c
        )
        def _inner_move_to_c(self):
            self._state = StateTestChange.c

        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def move_to_b(self):
            # The declared destination of the inner method (c) conflicts
            # with the expectation (b), so the call is rejected up front.
            with self._expect_state(StateTestChange.b):
                self._inner_move_to_c()

    m = Machine()
    eq_(m._state, _NO_CHANGE)
    with expect_raises_message(
        sa_exc.IllegalStateChangeError,
        r"Cant run operation '_inner_move_to_c\(\)' here; will move "
        r"to state <StateTestChange.c: 3> where we are "
        "expecting <StateTestChange.b: 2>",
    ):
        m.move_to_b()
def test_invalid_change_from_invalid_sub_method_with_declaration(self):
    """A method uses _expect_state() to call a sub-method, which must
    declare that state as its destination if no exceptions are raised.
    Test an error is raised if the sub-method doesn't change to the
    correct state.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        # method declares StateTestChange.b, but is doing the wrong
        # change, so should fail under expect_state(StateTestChange.b)
        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def _inner_move_to_c(self):
            # Deliberately violates its own declaration: moves to c.
            self._state = StateTestChange.c

        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def move_to_b(self):
            with self._expect_state(StateTestChange.b):
                self._inner_move_to_c()

    m = Machine()
    eq_(m._state, _NO_CHANGE)
    with expect_raises_message(
        sa_exc.IllegalStateChangeError,
        r"While method 'move_to_b\(\)' was running, method "
        r"'_inner_move_to_c\(\)' caused an unexpected state change "
        "to <StateTestChange.c: 3>",
    ):
        m.move_to_b()
def test_invalid_prereq_state(self):
    """test calling a method whose declared prerequisite states do not
    include the current state raises.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def move_to_b(self):
            self._state = StateTestChange.b

        # Only callable from state c; note a plain string is also accepted
        # as a destination state here.
        @state_changes._StateChange.declare_states(
            (StateTestChange.c,), "d"
        )
        def move_to_d(self):
            self._state = "d"

    m = Machine()
    eq_(m._state, _NO_CHANGE)
    m.move_to_b()
    eq_(m._state, StateTestChange.b)
    with expect_raises_message(
        sa_exc.IllegalStateChangeError,
        r"Can't run operation 'move_to_d\(\)' when "
        "Session is in state <StateTestChange.b: 2>",
    ):
        m.move_to_d()
def test_declare_only(self):
    """test _expect_state() may only be used inside a method that itself
    declared its states; an undeclared caller raises immediately.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        @state_changes._StateChange.declare_states(
            state_changes._StateChangeStates.ANY, StateTestChange.b
        )
        def _inner_move_to_b(self):
            self._state = StateTestChange.b

        def move_to_b(self):
            # move_to_b() itself is undeclared, so entering _expect_state()
            # raises before the inner call is attempted.
            with self._expect_state(StateTestChange.b):
                # Fixed: original referenced the nonexistent
                # ``self._move_to_b()``; the line is unreachable in this
                # test (the assertion fires on context entry), but the
                # name should still resolve.
                self._inner_move_to_b()

    m = Machine()
    eq_(m._state, _NO_CHANGE)
    with expect_raises_message(
        AssertionError,
        "Unexpected call to _expect_state outside of "
        "state-changing method",
    ):
        m.move_to_b()
def test_sibling_calls_maintain_correct_state(self):
    """test consecutive top-level calls each track the machine's state
    correctly and do not leak state between them.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        @state_changes._StateChange.declare_states(
            state_changes._StateChangeStates.ANY, StateTestChange.c
        )
        def move_to_c(self):
            self._state = StateTestChange.c

        @state_changes._StateChange.declare_states(
            state_changes._StateChangeStates.ANY, _NO_CHANGE
        )
        def do_nothing(self):
            pass

    m = Machine()
    m.do_nothing()
    # The no-change call left the state alone...
    eq_(m._state, _NO_CHANGE)
    m.move_to_c()
    # ...and the subsequent state-changing call still works.
    eq_(m._state, StateTestChange.c)
def test_change_from_sub_method_requires_declaration(self):
    """A method can't call another state-changing method without using
    _expect_state() to allow the state change to occur.
    """
    _NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE

    class Machine(state_changes._StateChange):
        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def _inner_move_to_b(self):
            self._state = StateTestChange.b

        @state_changes._StateChange.declare_states(
            (StateTestChange.a, _NO_CHANGE), StateTestChange.b
        )
        def move_to_b(self):
            # Missing the _expect_state() wrapper used by the passing
            # version of this test; the nested call must be rejected.
            self._inner_move_to_b()

    m = Machine()
    with expect_raises_message(
        sa_exc.IllegalStateChangeError,
        r"Method '_inner_move_to_b\(\)' can't be called here; "
        r"method 'move_to_b\(\)' is already in progress and this would "
        r"cause an unexpected state change to <StateTestChange.b: 2>",
    ):
        m.move_to_b()
|
nilq/baby-python
|
python
|
# Registers marketplace authorization predicates with django-rules.
import operator
import rules
from rules.predicates import is_authenticated
from marketplace.domain import marketplace

# Permission granted when the two user objects compare equal.
rules.add_perm('user.is_same_user', operator.eq)
rules.add_perm('user.is_authenticated', is_authenticated)
rules.add_rule('user.is_site_staff', marketplace.user.is_site_staff)
# NOTE(review): reuses the site-staff predicate for new-user review;
# confirm volunteer review is intentionally restricted to site staff.
rules.add_rule('volunteer.new_user_review', marketplace.user.is_site_staff)
|
nilq/baby-python
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test Omniglot dataset operators
"""
import mindspore.dataset as ds
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testOmniglot"
def test_omniglot_basic():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case basic")
    # Read the whole (4-row) dataset once.
    dataset = ds.OmniglotDataset(DATA_DIR).repeat(1)

    BASIC_EXPECTED_SHAPE = {"82386": 1, "61235": 1, "159109": 2}
    seen_shapes = {"82386": 0, "61235": 0, "159109": 0}
    label_hist = [0, 0, 0, 0]
    total = 0
    # Each row is a dict with "image" and "label" columns.
    for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        seen_shapes[str(row["image"].shape[0])] += 1
        label_hist[row["label"]] += 1
        total += 1
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4
    assert label_hist == [2, 2, 0, 0]
    assert seen_shapes == BASIC_EXPECTED_SHAPE
def test_omniglot_num_samples():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case numSamples")
    # num_samples larger than the dataset: all 4 rows still come back.
    data = ds.OmniglotDataset(
        DATA_DIR, num_samples=8, num_parallel_workers=2).repeat(1)
    total = sum(
        1 for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4

    # RandomSampler yields exactly num_samples rows, with or without
    # replacement.
    for with_replacement in (True, False):
        sampler = ds.RandomSampler(num_samples=3, replacement=with_replacement)
        data = ds.OmniglotDataset(DATA_DIR,
                                  num_parallel_workers=2,
                                  sampler=sampler)
        drawn = sum(1 for _ in data.create_dict_iterator(
            num_epochs=1, output_numpy=True))
        assert drawn == 3
def test_omniglot_num_shards():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case numShards")
    # Shard 2 of 4 holds exactly one sample.
    data = ds.OmniglotDataset(DATA_DIR, num_shards=4, shard_id=2).repeat(1)
    seen = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert row["image"].shape[0] == 82386
        assert row["label"] == 1
        seen += 1
    logger.info("Number of data in data1: {}".format(seen))
    assert seen == 1
def test_omniglot_shard_id():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case withShardID")
    # Shard 1 of 4 also holds exactly one sample.
    data = ds.OmniglotDataset(DATA_DIR, num_shards=4, shard_id=1).repeat(1)
    seen = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert row["image"].shape[0] == 159109
        assert row["label"] == 0
        seen += 1
    logger.info("Number of data in data1: {}".format(seen))
    assert seen == 1
def test_omniglot_no_shuffle():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case noShuffle")
    # With shuffle disabled row order is deterministic, so image sizes can
    # be checked positionally.
    expected_sizes = [159109, 159109, 82386, 61235]
    data = ds.OmniglotDataset(DATA_DIR, shuffle=False).repeat(1)
    label_hist = [0, 0, 0, 0]
    seen = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert row["image"].shape[0] == expected_sizes[seen]
        label_hist[row["label"]] += 1
        seen += 1
    assert seen == 4
    assert label_hist == [2, 2, 0, 0]
def test_omniglot_extra_shuffle():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case extraShuffle")
    # Dataset-level shuffle plus an explicit shuffle op, repeated twice.
    data = ds.OmniglotDataset(DATA_DIR, shuffle=True)
    data = data.shuffle(buffer_size=5).repeat(2)

    EXPECTED_SHAPE = {"82386": 2, "61235": 2, "159109": 4}
    seen_shapes = {"82386": 0, "61235": 0, "159109": 0}
    label_hist = [0, 0, 0, 0]
    total = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        seen_shapes[str(row["image"].shape[0])] += 1
        label_hist[row["label"]] += 1
        total += 1
    logger.info("Number of data in data1: {}".format(total))
    assert total == 8
    assert label_hist == [4, 4, 0, 0]
    assert seen_shapes == EXPECTED_SHAPE
def test_omniglot_decode():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case decode")
    # decode=True decompresses images inside the dataset op.
    data = ds.OmniglotDataset(DATA_DIR, decode=True).repeat(1)
    total = sum(
        1 for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4
def test_sequential_sampler():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case SequentialSampler")
    # Sequential order is deterministic, so image sizes can be checked
    # positionally.
    expected_sizes = [159109, 159109, 82386, 61235]
    data = ds.OmniglotDataset(
        DATA_DIR, sampler=ds.SequentialSampler(num_samples=8)).repeat(1)
    label_hist = [0, 0, 0, 0]
    seen = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert row["image"].shape[0] == expected_sizes[seen]
        label_hist[row["label"]] += 1
        seen += 1
    assert seen == 4
    assert label_hist == [2, 2, 0, 0]
def test_random_sampler():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case RandomSampler")
    # Order is random, so only aggregate counts are checked.
    data = ds.OmniglotDataset(DATA_DIR, sampler=ds.RandomSampler()).repeat(1)
    RANDOM_EXPECTED_SHAPE = {"82386": 1, "61235": 1, "159109": 2}
    seen_shapes = {"82386": 0, "61235": 0, "159109": 0}
    label_hist = [0, 0, 0, 0]
    total = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        seen_shapes[str(row["image"].shape[0])] += 1
        label_hist[row["label"]] += 1
        total += 1
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4
    assert label_hist == [2, 2, 0, 0]
    assert seen_shapes == RANDOM_EXPECTED_SHAPE
def test_distributed_sampler():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case DistributedSampler")
    # Shard 1 of 4 under the distributed sampler holds a single sample.
    data = ds.OmniglotDataset(
        DATA_DIR, sampler=ds.DistributedSampler(4, 1)).repeat(1)
    seen = 0
    for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert row["image"].shape[0] == 159109
        assert row["label"] == 0
        seen += 1
    logger.info("Number of data in data1: {}".format(seen))
    assert seen == 1
def test_pk_sampler():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case PKSampler")
    # One sample per class -> two rows for the two populated classes.
    data = ds.OmniglotDataset(DATA_DIR, sampler=ds.PKSampler(1)).repeat(1)
    total = sum(
        1 for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 2
def test_chained_sampler():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info(
        "Test Case Chained Sampler - Random and Sequential, with repeat")
    # Random sampler with a sequential child sampler chained underneath.
    parent = ds.RandomSampler()
    parent.add_child(ds.SequentialSampler())
    data = ds.OmniglotDataset(DATA_DIR, sampler=parent).repeat(count=3)

    # 4 base rows x 3 repeats.
    size = data.get_dataset_size()
    logger.info("dataset size is: {}".format(size))
    assert size == 12

    total = sum(
        1 for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 12
def test_omniglot_evaluation():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case usage")
    # Evaluation split (background=False); num_samples=6 caps above the
    # four rows actually present.
    data = ds.OmniglotDataset(DATA_DIR, background=False, num_samples=6)
    total = sum(
        1 for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4
def test_omniglot_zip():
    """
    Feature: load_omniglot.
    Description: load OmniglotDataset.
    Expectation: get data of OmniglotDataset.
    """
    logger.info("Test Case zip")
    # First branch is repeated; second is renamed to avoid column clashes.
    data1 = ds.OmniglotDataset(DATA_DIR, num_samples=8).repeat(2)
    data2 = ds.OmniglotDataset(DATA_DIR, num_samples=8).rename(
        input_columns=["image", "label"],
        output_columns=["image1", "label1"])
    zipped = ds.zip((data1, data2))
    # zip stops at the shorter branch (4 rows).
    total = sum(
        1 for _ in zipped.create_dict_iterator(num_epochs=1, output_numpy=True))
    logger.info("Number of data in data1: {}".format(total))
    assert total == 4
def test_omniglot_exception():
    """
    Feature: test_omniglot_exception.
    Description: test error cases for OmniglotDataset.
    Expectation: raise exception.
    """
    logger.info("Test omniglot exception")

    def exception_func(item):
        raise Exception("Error occur!")

    def exception_func2(image, label):
        raise Exception("Error occur!")

    expected_msg = "map operation: [PyFunc] failed. The corresponding data files"

    # Case 1: a one-column map op whose PyFunc always raises.
    try:
        data = ds.OmniglotDataset(DATA_DIR).map(
            operations=exception_func,
            input_columns=["image"],
            num_parallel_workers=1)
        for _ in data.__iter__():
            pass
        assert False
    except RuntimeError as e:
        assert expected_msg in str(e)

    # Case 2: a multi-column map op whose PyFunc always raises.
    try:
        data = ds.OmniglotDataset(DATA_DIR).map(
            operations=exception_func2,
            input_columns=["image", "label"],
            output_columns=["image", "label", "label1"],
            column_order=["image", "label", "label1"],
            num_parallel_workers=1)
        for _ in data.__iter__():
            pass
        assert False
    except RuntimeError as e:
        assert expected_msg in str(e)

    # Case 3: failure in a map op that follows a successful decode step.
    try:
        data = ds.OmniglotDataset(DATA_DIR).map(
            operations=vision.Decode(),
            input_columns=["image"],
            num_parallel_workers=1)
        data = data.map(
            operations=exception_func,
            input_columns=["image"],
            num_parallel_workers=1)
        for _ in data.__iter__():
            pass
        assert False
    except RuntimeError as e:
        assert expected_msg in str(e)
if __name__ == '__main__':
    # Run every case in the same order as the original script.
    for test_case in (
            test_omniglot_basic,
            test_omniglot_num_samples,
            test_sequential_sampler,
            test_random_sampler,
            test_distributed_sampler,
            test_chained_sampler,
            test_pk_sampler,
            test_omniglot_num_shards,
            test_omniglot_shard_id,
            test_omniglot_no_shuffle,
            test_omniglot_extra_shuffle,
            test_omniglot_decode,
            test_omniglot_evaluation,
            test_omniglot_zip,
            test_omniglot_exception,
    ):
        test_case()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
This module contains the Parameters class that is used to specify the input parameters of the tree.
"""
import numpy as np
class Parameters():
    """Input parameters for growing the fractal tree.

    Attributes:
        meshfile (str): path to the .obj surface mesh.
        filename (str): base name of the output files.
        init_node (numpy array): first node of the tree.
        second_node (numpy array): used only to compute the initial
            direction of the tree; not itself part of the tree. Avoid nodes
            connected to ``init_node`` by a single mesh edge, which causes
            numerical issues.
        init_length (float): length of the first branch.
        N_it (int): number of generations of branches.
        length (float): average length of the branches.
        std_length (float): standard deviation of the branch length; set to
            zero to disable random lengths.
        min_length (float): lower bound preventing randomly generated
            negative lengths.
        branch_angle (float): angle between a new branch and its parent.
        w (float): repulsivity parameter.
        l_segment (float): approximate length of the segments composing one
            branch (the branch length itself is random); comparable to an
            element size in a finite-element mesh.
        Fascicles (bool): include straight branches with their own lengths
            and angles off the initial branch, motivated by the fascicles of
            the left ventricle.
        fascicles_angles (list): one angle (rad) per fascicle, relative to
            the initial branch.
        fascicles_length (list): one length per fascicle; must match the
            size of ``fascicles_angles``.
        save (bool): write text files with the nodes, connectivity and end
            nodes of the tree.
        save_paraview (bool): write a .vtu ParaView file (requires tvtk).
    """

    def __init__(self):
        # Mesh input and output naming.
        self.meshfile = 'sphere.obj'
        self.filename = 'sphere-line'
        # Seed geometry: first node plus a direction-defining second point.
        self.init_node = np.array([-1.0, 0., 0.])
        self.second_node = np.array([-0.964, 0.00, 0.266])
        self.init_length = 0.5
        # Number of branch generations.
        self.N_it = 10
        # Branch-length statistics.
        self.length = .3
        self.std_length = np.sqrt(0.2) * self.length
        # Floor keeping randomly drawn lengths positive.
        self.min_length = self.length / 10.
        self.branch_angle = 0.15
        self.w = 0.1
        # Approximate segment length within a branch.
        self.l_segment = .01
        # Fascicle configuration.
        self.Fascicles = True
        self.fascicles_angles = [-1.5, .2]  # rad
        self.fascicles_length = [.5, .5]
        # Output switches.
        self.save = True
        self.save_paraview = True
|
nilq/baby-python
|
python
|
from .fixup_resnet_cifar import *
from .resnet_cifar import *
from .rezero_resnet_cifar import *
from .rezero_dpn import *
from .dpn import *
from .rezero_preact_resnet import *
from .preact_resnet import *
|
nilq/baby-python
|
python
|
import os.path
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ui.mainwindow import Ui_MainWindow
from ui.worldview import WorldView
from world import World
class PsychSimUI(QMainWindow, Ui_MainWindow):
    """Main window of the PsychSim GUI.

    Loads/saves scenario files, remembers the last opened file via
    QSettings, and drives the graphics scene that renders the world.
    """

    def __init__(self, parent=None):
        self.world = None
        super(PsychSimUI, self).__init__(parent)
        self.setupUi(self)
        # Scene that renders the PsychSim world inside the QGraphicsView.
        self.scene = WorldView(self.graphicsView)
        self.graphicsView.setScene(self.scene)

    @pyqtSlot() # signal with no arguments
    def on_actionOpen_triggered(self):
        filename = QFileDialog.getOpenFileName(self,"PsychSim -- Open File")
        if not filename.isEmpty():
            self.openScenario(str(filename))

    def openScenario(self,filename):
        """Load a scenario, record it as the last opened file, display it."""
        self.world = World(filename)
        settings = QSettings()
        settings.setValue('LastFile',os.path.abspath(filename))
        self.scene.displayWorld(self.world)

    @pyqtSlot() # signal with no arguments
    def on_actionSave_triggered(self):
        # Save back to the last-opened file recorded in QSettings.
        settings = QSettings()
        filename = settings.value('LastFile').toString()
        self.scene.world.save(str(filename))
        self.scene.unsetDirty()

    @pyqtSlot() # signal with no arguments
    def on_actionQuit_triggered(self):
        # Fixed: the original referenced the module-global ``app``, which
        # only exists when this file is run as a script; when the module is
        # imported the slot raised NameError.  Use the running application
        # instance instead.
        QApplication.instance().quit()

    @pyqtSlot() # signal with no arguments
    def on_actionAgent_triggered(self):
        self.scene.colorNodes('agent')

    @pyqtSlot() # signal with no arguments
    def on_actionLikelihood_triggered(self):
        self.scene.colorNodes('likelihood')

    @pyqtSlot() # signal with no arguments
    def on_actionStep_triggered(self):
        self.scene.step()

    def wheelEvent(self,event):
        # Exponential zoom: 240 wheel-degrees corresponds to one 1.41x step.
        factor = 1.41**(-event.delta()/240.)
        self.graphicsView.scale(factor,factor)
if __name__ == '__main__':
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('scenario',default=None,nargs='?',
                        help='File containing an exising PsychSim scenario')
    app = QApplication(sys.argv)
    # Organization/application names determine where QSettings stores state.
    app.setOrganizationName('USC ICT')
    app.setOrganizationDomain('ict.usc.edu')
    app.setApplicationName('PsychSim')
    # Parse the Qt-filtered command line (dropping the program name).
    args = parser.parse_args(args=[str(el) for el in app.arguments()][1:])
    win = PsychSimUI()
    if args.scenario is None:
        # No scenario given: reopen the last file recorded in QSettings,
        # if it still exists.
        settings = QSettings()
        filename = settings.value('LastFile').toString()
        if filename and QFile.exists(filename):
            win.openScenario(str(filename))
    else:
        win.openScenario(args.scenario)
    win.show()
    app.exec_()
|
nilq/baby-python
|
python
|
from torch.optim import Optimizer
class ReduceLROnLambda():
    """Reduce an optimizer's learning rate whenever a callback signals it.

    The callback ``func(metrics, history)`` receives the latest metric value
    and an arbitrary history object (initially ``None``) and returns a
    ``(should_reduce, new_history)`` pair.
    """

    def __init__(self, optimizer, func, factor=0.1,\
                 verbose=False, min_lr=0, eps=1e-8):
        if factor >= 1.0:
            raise ValueError('Factor should be < 1.0.')
        self.factor = factor

        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(\
                type(optimizer).__name__))
        self.optimizer = optimizer

        n_groups = len(optimizer.param_groups)
        if isinstance(min_lr, (list, tuple)):
            # One minimum per parameter group.
            if len(min_lr) != n_groups:
                raise ValueError("expected {} min_lrs, got {}".format(\
                    n_groups, len(min_lr)))
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * n_groups

        self.func = func
        self.verbose = verbose
        self.eps = eps
        # Opaque state threaded through successive callback invocations.
        self.history_data = None

    def step(self, metrics):
        """Feed the latest metric to the callback; reduce LR if it says so."""
        should_reduce, self.history_data = self.func(metrics, self.history_data)
        if should_reduce:
            self._reduce_lr()

    def _reduce_lr(self):
        for idx, group in enumerate(self.optimizer.param_groups):
            current_lr = float(group['lr'])
            reduced_lr = max(current_lr * self.factor, self.min_lrs[idx])
            # Skip changes smaller than eps to avoid pointless updates.
            if current_lr - reduced_lr > self.eps:
                group['lr'] = reduced_lr
                if self.verbose:
                    print('Reducing learning rate' \
                          ' of group {} to {:.4e}.'.format(idx, reduced_lr))

    def state_dict(self):
        # The optimizer and the callback are re-attached on load, so they
        # are excluded from the serialized state.
        return {k: v for k, v in self.__dict__.items()
                if k not in {'optimizer', 'func'}}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
|
nilq/baby-python
|
python
|
# --------------
# Forest Cover-Type exercise script: load, visualize, select features and
# fit one-vs-rest logistic regression.
import pandas as pd
from sklearn import preprocessing
#path : File path
# NOTE(review): `path` is not defined in this file; presumably injected by
# the hosting exercise platform — confirm before running standalone.
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five columns
print(dataset.head())
# Check if there's any column which is not useful and remove it like the column id
# NOTE(review): positional axis argument is deprecated; axis=1 intended here.
dataset = dataset.drop(["Id"],1)
# check the statistical description
print(dataset.info())
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
#x-axis has target attribute to distinguish between classes
x = dataset["Cover_Type"]
#y-axis shows values of an attribute
y = dataset.drop(["Cover_Type"],1)
size = y.columns
#Plot violin for all attributes
for i in size:
    sns.violinplot(x=x,y=y[i])
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
# Correlation among the first ten (continuous) columns.
subset_train = dataset.iloc[:,0:10]
data_corr = subset_train.corr()
sns.heatmap(data_corr,annot=True)
correlation = list(data_corr.unstack().sort_values(kind="quicksort"))
corr_var_list = []
# NOTE(review): this collects correlation *values*, not the variable-pair
# names; also i != 1 excludes the diagonal self-correlations only.
for i in correlation:
    if abs(i)>0.5 and i!=1:
        corr_var_list.append(i)
print(corr_var_list)
# Code ends here
# --------------
#Import libraries
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# the model_selection import below provides the same train_test_split.
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X = dataset.drop(["Cover_Type"],1)
Y = dataset["Cover_Type"]
X_train,X_test,Y_train,Y_test = cross_validation.train_test_split(X,Y,test_size=0.2,random_state=0)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
#Standardized
scaler = StandardScaler()
#Apply transform only for continuous data
X_train_temp = scaler.fit_transform(X_train.iloc[:,0:53])
# NOTE(review): fit_transform on the test split leaks test statistics;
# transform() with the training-fitted scaler is the usual practice.
X_test_temp = scaler.fit_transform(X_test.iloc[:,0:53])
#Concatenate scaled continuous data and categorical
# NOTE(review): columns 52 is included in both the scaled slice (0:53) and
# the categorical slice (52:) — confirm the overlap is intentional.
X_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,52:]),axis=1)
X_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,52:]),axis=1)
scaled_features_train_df = pd.DataFrame(X_train1)
scaled_features_train_df.columns = X_train.columns
scaled_features_train_df.index = X_train.index
scaled_features_test_df = pd.DataFrame(X_test1)
scaled_features_test_df.columns = X_test.columns
scaled_features_test_df.index = X_test.index
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
# Keep the top 20% of features ranked by ANOVA F-score.
skb = SelectPercentile(score_func=f_classif, percentile=20 )
predictors = skb.fit_transform(X_train1,Y_train)
scores = skb.scores_
Features = scaled_features_train_df.columns
dataframe = pd.DataFrame({"Features":Features,"scores":scores}).sort_values(ascending = False,by = "scores")
top_k_predictors = list(dataframe['Features'][:predictors.shape[1]])
print(top_k_predictors)
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
clf = OneVsRestClassifier(LogisticRegression())
clf1 = OneVsRestClassifier(LogisticRegression())
# Baseline: all features, unscaled.
model_fit_all_features = clf1.fit(X_train,Y_train)
predictions_all_features = model_fit_all_features.predict(X_test)
score_all_features = accuracy_score(Y_test,predictions_all_features)
print(score_all_features)
# Comparison: only the selected top-k features, scaled.
model_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors],Y_train)
predictions_top_features = model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])
score_top_features = accuracy_score(Y_test,predictions_top_features)
print(score_top_features)
|
nilq/baby-python
|
python
|
import errno
import os
from tqdm import tqdm
from urllib.request import urlretrieve
def maybe_makedir(path: str) -> None:
    """Create directory *path* (including parents) if it does not exist.

    Raises:
        OSError: if creation fails for any reason other than the path
            already existing.
    """
    try:
        # Create output directory if it does not exist
        os.makedirs(path)
    except FileExistsError:
        # Already there — nothing to do.  FileExistsError is the Python 3
        # OSError subclass carrying errno.EEXIST, so this is equivalent to
        # the old `e.errno != errno.EEXIST: raise` check but clearer.
        pass
def download_file(url: str, path: str, verbose: bool = False) -> None:
    """Fetch *url* into *path*, optionally with a tqdm progress bar."""
    if not verbose:
        urlretrieve(url, path)
        return

    def make_hook(bar):
        """Adapt a tqdm instance to the urlretrieve reporthook protocol.

        The tqdm instance must be closed by the caller (easiest via
        ``with``).
        """
        blocks_seen = [0]

        def hook(b=1, bsize=1, tsize=None):
            # b: blocks transferred so far; bsize: block size in (tqdm)
            # units; tsize: total size, when the server reports one.
            if tsize is not None:
                bar.total = tsize
            bar.update((b - blocks_seen[0]) * bsize)
            blocks_seen[0] = b

        return hook

    with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
              desc=url) as bar:
        urlretrieve(url, path, reporthook=make_hook(bar))
|
nilq/baby-python
|
python
|
from typing import Union
import numpy as np
import pandas as pd
from fedot.api.api_utils.data_definition import data_strategy_selector
from fedot.core.data.data import InputData
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.pipelines.pipeline import Pipeline
class ApiDataHelper:
    """Helpers that prepare input data for FEDOT pipeline composing and
    shape pipeline predictions according to the task type."""

    def define_data(self,
                    ml_task: Task,
                    features: Union[str, np.ndarray, pd.DataFrame, InputData, dict],
                    target: Union[str, np.ndarray, pd.Series] = None,
                    is_predict=False):
        """ Prepare data for fedot pipeline composing """
        try:
            data = data_strategy_selector(features=features,
                                          target=target,
                                          ml_task=ml_task,
                                          is_predict=is_predict)
        except Exception as ex:
            # Chain the original exception so the root cause is preserved
            # (the original swallowed it, hiding the real failure).
            raise ValueError('Please specify a features as path to csv file or as Numpy array') from ex
        return data

    def define_predictions(self,
                           task_type: TaskTypesEnum,
                           current_pipeline: Pipeline,
                           test_data: InputData):
        """Run the pipeline on *test_data* and post-process per task type.

        Classification returns class labels; time-series forecasts are
        flattened to one dimension; everything else is returned as-is.
        """
        if task_type == TaskTypesEnum.classification:
            # Labels rather than probabilities.
            prediction = current_pipeline.predict(test_data, output_mode='labels')
            output_prediction = prediction
        elif task_type == TaskTypesEnum.ts_forecasting:
            # Convert forecast into one-dimensional array
            prediction = current_pipeline.predict(test_data)
            forecast = np.ravel(np.array(prediction.predict))
            prediction.predict = forecast
            output_prediction = prediction
        else:
            prediction = current_pipeline.predict(test_data)
            output_prediction = prediction
        return output_prediction
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import pandas as pd
import os
import numpy as np
import SNPknock.fastphase as fp
from SNPknock import knockoffHMM
from joblib import Parallel, delayed
import utils_snpko as utils
logger = utils.logger
def make_knockoff(chromosome=None, grouped_by_chromosome=None, df_SNP=None,
                  df_geno_experiment=None, df_geno_ensembl=None,
                  SNP_to_wild_type=None, cache_dir=None, path_to_fp=None,
                  em_iterations=25, random_seed=123):
    """Fit (or load from cache) a fastPHASE HMM for one chromosome and
    sample knockoff copies of the experimental genotype matrix.

    Returns a tuple (X_knockoffs, X_experiment, SNPs_on_chromosome).
    NOTE(review): genotype values are assumed to be counts of non-wild-type
    alleles as produced by utils.genotype_to_nonwild_type_count — confirm.
    """
    # assert chromosome!=None and grouped_by_chromosome!=None and df_SNP!=None
    assert chromosome is not None
    assert grouped_by_chromosome is not None
    assert df_SNP is not None
    logger.debug("################")
    logger.debug("Chromosome %2d #" % chromosome)
    logger.debug("################")
    num_experiment_people = len(df_geno_experiment)
    num_ensembl_people = len(df_geno_ensembl)
    # SNPs must be ordered by genome position before HMM fitting.
    indices = grouped_by_chromosome.groups[chromosome]
    df_SNP_chromo = df_SNP.iloc[indices].sort_values('chromosome_position')
    SNPs_on_chromosome = df_SNP_chromo['SNP'].values
    # Build (people x SNP) matrices of non-wild-type allele counts for both
    # the experimental cohort and the ENSEMBL reference cohort.
    X_experiment = np.empty((num_experiment_people, len(SNPs_on_chromosome)))
    X_ensembl = np.empty((num_ensembl_people, len(SNPs_on_chromosome)))
    for X, df in [
            (X_experiment, df_geno_experiment),
            (X_ensembl, df_geno_ensembl)]:
        for j, SNP in enumerate(SNPs_on_chromosome):
            X[:, j] = utils.genotype_to_nonwild_type_count(
                df[SNP].values, SNP_to_wild_type[SNP])
    out_path = '%s/chrom_%d' % (cache_dir, chromosome)
    # If all relevant files are found in cache, skip EM recomputation; otherwise,
    # redo the whole thing.
    target_file_suffix_list = [
        'alphahat.txt', 'finallikelihoods', 'origchars', 'rhat.txt', 'thetahat.txt']
    already_in_cache = True
    for suffix in target_file_suffix_list:
        target_path = os.path.join(
            cache_dir, 'chrom_%d_%s' % (chromosome, suffix))
        if not os.path.exists(target_path):
            already_in_cache = False
            break
    if already_in_cache:
        logger.debug("Found chrom %d HMM in cache" % chromosome)
    else:
        # Write array to file
        Xfp_file = '%s/X_%d.inp' % (cache_dir, chromosome)
        fp.writeX(X_ensembl, Xfp_file)
        # Run fastPhase on data (which runs EM)
        fp.runFastPhase(path_to_fp, Xfp_file, out_path,
                        K=12, numit=em_iterations)
    # Read in fastPhase results (i.e., HMM parameters) from file:
    r_file = out_path + "_rhat.txt"
    alpha_file = out_path + "_alphahat.txt"
    theta_file = out_path + "_thetahat.txt"
    # Why is X_ensembl[0, :] in the function arguments below?
    hmm = fp.loadFit(r_file, theta_file, alpha_file, X_ensembl[0, :])
    # Actually produce the knockoffs
    knockoffs = knockoffHMM(hmm["pInit"], hmm["Q"], hmm[
        "pEmit"], seed=random_seed)
    X_knockoffs = knockoffs.sample(X_experiment)
    return(X_knockoffs, X_experiment, SNPs_on_chromosome)
def make_all_knockoffs(args):
    '''
    Fit an HMM per chromosome and generate knockoff copies of the
    experimental SNP data.

    For each chromosome, independently:
        Sort SNPs according to position on genome.
        Train HMM parameters with EM on ENSEMBL data.
        Generate knockoffs of experimental SNP data.

    For now, we ignore sex of persons, although that is
    available in ENSEMBL.

    Writes one CSV per knockoff trial into <working_dir>/knockoffs/.
    '''
    logger.info("####################################")
    logger.info("Fitting HMM and generating knockoffs")
    path_to_fp = os.path.join(args.fastPHASE_path, 'fastPHASE')
    if not os.path.exists(path_to_fp):
        logger.info("Cannot find fastPHASE at %s" % path_to_fp)
        # Raise with a message so the failure is self-explanatory in tracebacks.
        raise Exception("Cannot find fastPHASE at %s" % path_to_fp)
    cache_dir = os.path.join(args.working_dir, 'fastphase_cache')
    utils.safe_mkdir(cache_dir)
    df_geno_ensembl = pd.read_csv(os.path.join(
        args.working_dir, 'pruned_ensembl.csv'))
    # SNP,wild_type,chromosome,chromosome_position
    df_SNP = pd.read_csv(os.path.join(
        args.working_dir, 'pruned_SNP_facts.csv'))
    df_wild = pd.read_csv(os.path.join(args.working_dir, 'wild_types.csv'))
    SNP_to_wild_type = dict(
        zip(df_wild['SNP'].values, df_wild['wild_type'].values))
    chromosome_list = np.sort(np.unique(df_SNP['chromosome']))
    for chromosome in chromosome_list:
        # Human chromosomes are labelled 1..23 here (autosomes + X).
        assert chromosome in np.arange(1, 24)
    df_geno_experiment = pd.read_csv(os.path.join(
        args.working_dir, 'pruned_experiment.csv'))
    # Make sure we have the same SNPs everywhere.
    assert (set([c for c in df_geno_ensembl.columns if c.startswith('rs')]) ==
            set([c for c in df_geno_experiment.columns if c.startswith('rs')]))
    for SNP in df_SNP.SNP.values:
        assert SNP in df_geno_ensembl.columns
    grouped_by_chromosome = df_SNP.groupby('chromosome')
    num_experiment_people = len(df_geno_experiment)
    utils.safe_mkdir(os.path.join(args.working_dir, 'knockoffs'))
    em_iterations = 500
    logger.info('Number of EM iterations: %d' % em_iterations)
    for knockoff_trial_count in range(args.num_knockoff_trials):
        random_seed = knockoff_trial_count + args.random_seed
        # Log progress at most ~20 times regardless of trial count.
        if ((args.num_knockoff_trials <= 20) or
                knockoff_trial_count % ((args.num_knockoff_trials) // 20) == 0):
            logger.info("Knockoff sampling %d of %d" % (
                knockoff_trial_count, args.num_knockoff_trials))
        # Reset per trial so results from a previous trial can never leak
        # into this one (the serial debug branch appends in place).
        knockoff_SNP_list = []
        if False:
            # Serial version; code preserved for debugging purposes
            for chromosome in chromosome_list:
                knockoff_SNP_list.append(
                    make_knockoff(
                        chromosome=chromosome,
                        grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,
                        df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,
                        SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir,
                        path_to_fp=path_to_fp, em_iterations=em_iterations, random_seed=random_seed))
        else:
            knockoff_SNP_list = Parallel(n_jobs=args.num_workers)(
                delayed(make_knockoff)(
                    chromosome=i,
                    grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,
                    df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,
                    SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir, path_to_fp=path_to_fp,
                    em_iterations=em_iterations, random_seed=random_seed)
                for i in chromosome_list)
        # Stitch results for each chromosome back together into a single dataframe
        # Knockoff results
        SNP_columns = [
            x for x in df_geno_ensembl.columns if x.startswith('rs')]
        df_knockoffs = pd.DataFrame(
            columns=SNP_columns, index=np.arange(num_experiment_people))
        # Matched experimental observations + knockoffs in one dataframe
        matched_columns = []
        data_labels = []
        for field in df_geno_experiment.columns:
            if field.startswith('rs'):
                matched_columns.append(field)
                matched_columns.append(field + '_knockoff')
            elif field.startswith(args.data_prefix):
                data_labels.append(field)
            else:
                continue
        df_matched = pd.DataFrame(columns=matched_columns + data_labels,
                                  index=np.arange(num_experiment_people))
        for (X_knockoffs, X_experiment, SNPs_on_chromosome) in knockoff_SNP_list:
            for i in range(num_experiment_people):
                for j, SNP in enumerate(SNPs_on_chromosome):
                    df_knockoffs[SNP].values[i] = X_knockoffs[i, j]
                    df_matched[SNP].values[i] = int(X_experiment[i, j])
                    df_matched[
                        SNP + '_knockoff'].values[i] = int(X_knockoffs[i, j])
        for data_label in data_labels:
            df_matched[data_label] = df_geno_experiment[data_label]
        # Sanity check that all fields are filled in.
        for field in df_knockoffs:
            for i in range(num_experiment_people):
                assert pd.notnull(df_knockoffs[field].values[i])
        df_matched.to_csv(os.path.join(args.working_dir, 'knockoffs',
                                       'knockoffs_%03d.csv' % knockoff_trial_count),
                          index=False)
    logger.info("Done making knockoffs!!!")
if __name__ == '__main__':
    # CLI entry point: parse arguments, configure logging, then run the
    # full knockoff-generation pipeline.
    parsed_args = utils.parse_arguments()
    utils.initialize_logger(parsed_args)
    make_all_knockoffs(parsed_args)
|
nilq/baby-python
|
python
|
import datetime
import json
import time
from fate_manager.db.db_models import DeployComponent, FateSiteInfo, FateSiteCount, FateSiteJobInfo, ApplySiteInfo
from fate_manager.entity import item
from fate_manager.entity.types import SiteStatusType, FateJobEndStatus
from fate_manager.operation.db_operator import DBOperator
from fate_manager.settings import FATE_FLOW_SETTINGS, request_flow_logger, request_cloud_logger
from fate_manager.utils.request_cloud_utils import request_cloud_manager
from fate_manager.utils.request_fate_flow_utils import post_fate_flow
class CountJob:
    """Collects FATE-Flow job statistics per site and pushes them to the
    cloud manager, tracking which jobs have ended / been reported."""

    @staticmethod
    def count_fate_flow_job(account):
        """Query each joined site's FATE-Flow for new jobs since the last
        count, log/sync them, and record a new count checkpoint.

        Returns a dict mapping party_id -> FATE-Flow query-job URL.
        """
        request_flow_logger.info("start count fate flow job")
        site_list = DBOperator.query_entity(FateSiteInfo, status=SiteStatusType.JOINED)
        component_name = 'FATEFLOW'
        party_id_flow_url = {}
        for site in site_list:
            try:
                deploy_fate_flow = DBOperator.query_entity(DeployComponent, party_id=site.party_id,
                                                           component_name=component_name)
                if deploy_fate_flow:
                    query_job_url = "http://{}{}".format(deploy_fate_flow[0].address, FATE_FLOW_SETTINGS["QueryJob"])
                    party_id_flow_url[site.party_id] = query_job_url
                # NOTE(review): if deploy_fate_flow is empty, query_job_url is
                # never bound and the post_fate_flow call below raises
                # NameError (swallowed by the except) — confirm intended.
                fate_site_count = DBOperator.query_entity(FateSiteCount, reverse=True, order_by="version")
                now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                if fate_site_count:
                    if site.party_id in fate_site_count[0].party_id_list:
                        # Site already counted before: query the window since
                        # the previous checkpoint.
                        party_id_list = fate_site_count[0].party_id_list
                        time_list = [fate_site_count[0].strftime, now_time]
                    else:
                        # New site: query its entire history (from epoch 0).
                        party_id_list = fate_site_count[0].party_id_list
                        party_id_list.append(site.party_id)
                        time_list = [0, now_time]
                else:
                    # First count ever.
                    time_list = [0, now_time]
                    party_id_list = [site.party_id]
                request_flow_logger.info(time_list)
                job_list = post_fate_flow(query_job_url, data={"end_time": time_list})
                CountJob.log_job_info(account, job_list, party_id=site.party_id, site_name=site.site_name)
                request_flow_logger.info(f"start create fate site count: now_time{now_time}")
                DBOperator.create_entity(FateSiteCount, {"strftime": now_time, "party_id_list": party_id_list})
            except Exception as e:
                # Best-effort per site: one failing site must not stop the rest.
                request_flow_logger.exception(e)
        return party_id_flow_url

    @staticmethod
    def detector_no_end_job(account, party_id_flow_url):
        """Re-check jobs marked as not ended; mark them ended (FAILED unless
        FATE-Flow reports a non-terminal status) and sync to the cloud."""
        job_list = DBOperator.query_entity(FateSiteJobInfo, is_end=0)
        synchronization_job_list = []
        for job in job_list:
            try:
                update_status = FateJobEndStatus.FAILED
                if job.party_id in party_id_flow_url:
                    # NOTE(review): this rebinds the outer loop's job_list
                    # (shadowing) — harmless to the loop already in progress
                    # but confusing; consider renaming.
                    job_list = post_fate_flow(party_id_flow_url[job.party_id], data={"job_id": job.job_id})
                    if job_list:
                        if job_list[0]["f_status"] not in FateJobEndStatus.status_list():
                            # Still running according to FATE-Flow: leave it open.
                            update_status = None
                if update_status:
                    DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.job_id, "status":update_status, "is_end": 1})
                    job.status = update_status
                job = CountJob.job_adapter(job)
                if job:
                    synchronization_job_list.append(job)
            except Exception as e:
                request_flow_logger.exception(e)
        CountJob.job_synchronization(account, synchronization_job_list, m="no_end")

    @staticmethod
    def detector_no_report_job(account):
        """Retry cloud synchronization for jobs not yet reported."""
        job_list = DBOperator.query_entity(FateSiteJobInfo, is_report=0)
        synchronization_job_list = []
        for job in job_list:
            job = CountJob.job_adapter(job)
            if job:
                synchronization_job_list.append(job)
        CountJob.job_synchronization(account, synchronization_job_list, is_report=1, m='no_report')

    @staticmethod
    def log_job_info(account, job_list, party_id, site_name):
        """Persist raw FATE-Flow job records as FateSiteJobInfo rows and
        push the adapted records to the cloud manager."""
        request_flow_logger.info(job_list)
        apply_site_list = DBOperator.query_entity(ApplySiteInfo)
        # party_id (as string) -> institution name, used to resolve peers.
        all_institutions = {}
        for site in apply_site_list:
            all_institutions[str(site.party_id)] = site.institutions
        synchronization_job_list = []
        for job in job_list:
            try:
                if not CountJob.check_roles(job.get("f_roles")):
                    continue
                site_job = CountJob.save_site_job_item(job, party_id, all_institutions, site_name, account)
                site_job = CountJob.job_adapter(site_job)
                if site_job:
                    synchronization_job_list.append(site_job)
            except Exception as e:
                request_flow_logger.exception(e)
        CountJob.job_synchronization(account, synchronization_job_list, m='log_job')

    @staticmethod
    def check_roles(roles):
        # Placeholder filter: currently accepts every role set.
        return True

    @staticmethod
    def save_site_job_item(job, party_id, all_institutions, site_name, account):
        """Build and save a FateSiteJobInfo row from one raw FATE-Flow job.

        Returns the saved row, or None when the job involves a party that is
        not registered in all_institutions (marked need_report=0, not saved).
        """
        site_job = FateSiteJobInfo()
        site_job.job_id = job.get("f_job_id")
        site_job.institutions = account.institutions
        site_job.party_id = party_id
        site_job.site_name = site_name
        # Job id starts with a creation timestamp; convert to epoch millis.
        site_job.job_create_time = int(time.mktime(time.strptime(job.get("f_job_id")[:20], "%Y%m%d%H%M%S%f"))*1000)
        site_job.job_elapsed = job.get("f_elapsed")
        site_job.job_start_time = job.get("f_start_time")
        site_job.job_end_time = job.get("f_end_time")
        site_job.roles = job.get("f_roles")
        site_job.job_type = CountJob.get_job_type(job.get("f_dsl"))
        site_job.status = FateJobEndStatus.end_status(job.get("f_status"))
        site_job.is_end = 1 if site_job.status in FateJobEndStatus.status_list() else 0
        site_job.job_create_day = job.get("f_job_id")[:8]
        site_job.job_create_day_date = datetime.datetime.strptime(site_job.job_create_day, "%Y%m%d")
        site_job.job_info = job
        site_job.need_report = 1
        other_party_id = set()
        site_job.role = job.get("f_role")
        institutions_party_id_list = []
        if site_job.role == "local":
            # Local job: the only participant is this party.
            site_job.other_party_id = [party_id]
            institutions_party_id_list = [party_id]
        else:
            for role, party_id_list in job["f_roles"].items():
                for _party_id in party_id_list:
                    other_party_id.add(_party_id)
                    if str(_party_id) in all_institutions and all_institutions[str(_party_id)] == all_institutions[str(party_id)]:
                        institutions_party_id_list.append(_party_id)
                    if str(_party_id) not in all_institutions:
                        # Unknown participant: skip reporting this job entirely.
                        site_job.need_report = 0
                        return None
            site_job.other_party_id = list(set(other_party_id))
            if len(site_job.other_party_id) > 1 and party_id in site_job.other_party_id:
                site_job.other_party_id.remove(site_job.party_id)
        # set other institutions by other party id
        site_job.institutions_party_id = list(set(institutions_party_id_list))
        institutions_list = []
        for _party_id in site_job.other_party_id:
            if str(_party_id) in all_institutions.keys():
                institutions_list.append(all_institutions[str(_party_id)])
        site_job.other_institutions = list(set(institutions_list))
        if len(site_job.other_institutions) > 1 and site_job.institutions in site_job.other_institutions:
            site_job.other_institutions.remove(site_job.institutions)
        site_job.save(force_insert=True)
        return site_job

    @staticmethod
    def get_job_type(dsl):
        """Classify a job by the component names in its DSL:
        upload / download / intersect / modeling."""
        job_type = ''
        if isinstance(dsl, str):
            dsl = json.loads(dsl)
        cpn = dsl['components'].keys()
        cpn = list(cpn)[0]
        if 'upload' in cpn:
            job_type = 'upload'
        elif 'download' in cpn:
            job_type = 'download'
        elif 'intersect' in cpn:
            # Pure intersect pipelines are 'intersect'; any non-intersect
            # component makes the job 'modeling'.
            for j in dsl['components'].keys():
                if 'intersect' not in j:
                    job_type = 'modeling'
                    break
                else:
                    job_type = 'intersect'
        else:
            job_type = 'modeling'
        return job_type

    @staticmethod
    def job_adapter(site_job):
        # for cloud job
        """Convert a FateSiteJobInfo row into the JSON-safe dict expected by
        the cloud manager; returns None if the job should not be reported."""
        if not site_job or not site_job.need_report:
            return None
        # Strip fields the cloud API must not receive.
        site_job.job_info = None
        site_job.create_date = None
        site_job.update_date = None
        site_job.create_time = None
        site_job.job_create_day_date = datetime.datetime.strptime(site_job.job_create_day, "%Y%m%d")
        site_job.job_create_day_date = int(datetime.datetime.timestamp(site_job.job_create_day_date)) * 1000
        site_job.roles = json.dumps(site_job.roles, separators=(',', ':'))
        site_job.other_party_id = json.dumps(site_job.other_party_id, separators=(',', ':'))
        site_job.other_institutions = json.dumps(site_job.other_institutions, separators=(',', ':'))
        site_job = site_job.to_json()
        del site_job["need_report"], site_job["is_report"], site_job["is_end"], site_job["institutions_party_id"]
        return site_job

    @staticmethod
    def job_synchronization(account, synchronization_job_list, is_report=0, m='log_job'):
        """Push adapted jobs to the cloud manager in batches of 500 and
        update is_report flags according to what was actually sent."""
        piece = 0
        count_of_piece = 500
        try:
            while len(synchronization_job_list) > piece*count_of_piece:
                start = piece*count_of_piece
                end = piece*count_of_piece + count_of_piece
                institution_signature_item = item.InstitutionSignatureItem(fateManagerId=account.fate_manager_id,
                                                                           appKey=account.app_key,
                                                                           appSecret=account.app_secret).to_dict()
                resp = request_cloud_manager(uri_key="MonitorPushUri", data=institution_signature_item,
                                             body=synchronization_job_list[start:end],
                                             url=None)
                piece += 1
        except Exception as e:
            request_cloud_logger.exception(e)
        if piece*count_of_piece >= len(synchronization_job_list):
            # Every batch was pushed successfully.
            if is_report:
                for job in synchronization_job_list[:piece*count_of_piece]:
                    DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.get("job_id"), "is_report": is_report})
        else:
            # A batch failed part-way; flag the unsent remainder.
            if m in ["log_job", "no_end"]:
                for job in synchronization_job_list[piece * count_of_piece:]:
                    DBOperator.update_entity(FateSiteJobInfo, {"job_id": job.get("job_id"), "is_report": is_report})
|
nilq/baby-python
|
python
|
from . import ShapeNet, SetMNIST, SetMultiMNIST, ArCH
def get_datasets(args):
    """Build and return the dataset(s) selected by ``args.dataset_type``.

    Raises NotImplementedError for an unrecognized dataset type.
    """
    builders = {
        'shapenet15k': lambda a: ShapeNet.build(a),
        'mnist': lambda a: SetMNIST.build(a),
        'multimnist': lambda a: SetMultiMNIST.build(a),
        'arch': lambda a: ArCH.build(a),
    }
    build = builders.get(args.dataset_type)
    if build is None:
        raise NotImplementedError
    return build(args)
|
nilq/baby-python
|
python
|
# flake8: noqa: W291
# pylint: disable=too-many-lines,trailing-whitespace
"""
AbstractAnnoworkApiのヘッダ部分
Note:
このファイルはopenapi-generatorで自動生成される。詳細は generate/README.mdを参照
"""
from __future__ import annotations
import abc
import warnings # pylint: disable=unused-import
from typing import Any, Optional, Union # pylint: disable=unused-import
import annoworkapi # pylint: disable=unused-import
class AbstractAnnoworkApi(abc.ABC):
"""
AnnoworkApiクラスの抽象クラス
"""
    @abc.abstractmethod
    def _request_wrapper(
        self,
        http_method: str,
        url_path: str,
        *,
        query_params: Optional[dict[str, Any]] = None,
        header_params: Optional[dict[str, Any]] = None,
        request_body: Optional[Any] = None,
        log_response_with_error: bool = True,
    ) -> Any:
        """Send one HTTP request and return the decoded response.

        Concrete subclasses implement the actual transport; every public
        API method in this class funnels through here.
        """
        pass
#########################################
# Public Method : AccountApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def confirm_reset_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""パスワードリセットstep2(新しいパスワードに変更)
新しいパスワードに変更します。 本人確認のため、[パスワードリセットを要求](#operation/resetPassword)で受信したメールに記載された検証コードを使用します。 パスワードリセットプロセスの最終ステップです。
Args:
request_body (Any): Request Body
confirm_reset_password_request (ConfirmResetPasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/confirm-reset-password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def confirm_sign_up(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""サインアップstep2(本登録)
アカウントのサインアップの最後のステップとして、アカウントを本登録します。
Args:
request_body (Any): Request Body
confirm_sign_up_request (ConfirmSignUpRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/confirm-sign-up"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_account_external_linkage_info(self, user_id: str, **kwargs) -> Any:
"""アカウント外部連携情報取得
Args:
user_id (str): ユーザーID (required)
Returns:
InlineResponse2001
"""
url_path = f"/accounts/{user_id}/external-linkage-info"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_account_external_linkage_info(self, user_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
"""アカウント外部連携情報更新
Args:
user_id (str): ユーザーID (required)
request_body (Any): Request Body
put_account_external_linkage_info_request (PutAccountExternalLinkageInfoRequest): (required)
Returns:
InlineResponse2001
"""
url_path = f"/accounts/{user_id}/external-linkage-info"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def reset_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""パスワードリセットstep1(開始)
パスワードリセットに必要な確認コードをメールで送付します。 後続の[新しいパスワードに変更](#operation/confirmResetPassword)を実行することで、新しいパスワードに変更できます。
Args:
request_body (Any): Request Body
reset_password_request (ResetPasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/reset-password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def sign_up(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""サインアップstep1(仮登録)
アカウントのサインアップの最初のステップとして、アカウントを仮登録します。 AnnoWorkに未登録のメールアドレスであれば、新規アカウントが仮登録状態で作成され、本登録フローのためのメールが送信されます。 このメールには仮パスワードなどが記載されています。 指定したメールアドレスを使うユーザーが仮登録であれば、本登録フローのメールが再送信されます。 指定したメールアドレスを使うユーザーが本登録であれば、不正なリクエストとしてエラーを返します(本登録が仮登録に戻ることはありません)。
Args:
request_body (Any): Request Body
sign_up_request (SignUpRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/sign-up"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ActualWorkingTimeApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_actual_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, actual_working_time_id: str, **kwargs
) -> Any:
"""実績時間の削除
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
actual_working_time_id (str): 実績稼働時間ID (required)
Returns:
ActualWorkingTime
"""
url_path = (
f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times/{actual_working_time_id}"
)
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_actual_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""ワークスペース全体の実績時間の一括取得
Args:
workspace_id (str): ワークスペースID (required)
query_params (dict[str, Any]): Query Parameters
job_id (str): ジョブID
term_start (str): 日付での範囲検索で使用
term_end (str): 日付での範囲検索で使用
Returns:
[ActualWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_actual_working_times_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""ワークスペースメンバーに対する実績時間の一括取得
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): 取得する範囲の開始日時。日付での範囲検索で使用
term_end (str): 取得する範囲の終了日時。日付での範囲検索で使用
Returns:
[ActualWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_sum_of_actual_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""ワークスペース全体の実績時間の合計取得
Args:
workspace_id (str): ワークスペースID (required)
query_params (dict[str, Any]): Query Parameters
job_id (str): ジョブID
includes_archived_job (bool): アーカイブ化したジョブの合計も含めるかどうか
Returns:
SumOfTimes
"""
url_path = f"/workspaces/{workspace_id}/sum-of-actual-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_actual_working_time_by_workspace_member(
self,
workspace_id: str,
workspace_member_id: str,
actual_working_time_id: str,
request_body: Optional[Any] = None,
**kwargs,
) -> Any:
"""実績時間の更新
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
actual_working_time_id (str): 実績稼働時間ID (required)
request_body (Any): Request Body
put_actual_working_time_request (PutActualWorkingTimeRequest): (required)
Returns:
ActualWorkingTime
"""
url_path = (
f"/workspaces/{workspace_id}/members/{workspace_member_id}/actual-working-times/{actual_working_time_id}"
)
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ExpectedWorkingTimeApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_expected_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, date: str, **kwargs
) -> Any:
"""予定稼働時間の日付指定削除
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
date (str): 予定の対象日 (required)
Returns:
ExpectedWorkingTime
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times/{date}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_expected_working_times(
self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""予定稼働時間の一括取得
Args:
workspace_id (str): ワークスペースID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): 日付での範囲検索で使用
term_end (str): 日付での範囲検索で使用
Returns:
[ExpectedWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/expected-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_expected_working_times_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs
) -> Any:
"""予定稼働時間の一覧取得
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
query_params (dict[str, Any]): Query Parameters
term_start (str): 取得する範囲の開始日。日付での範囲検索で使用
term_end (str): 取得する範囲の終了日。日付での範囲検索で使用
Returns:
[ExpectedWorkingTime]
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_expected_working_time_by_workspace_member(
self, workspace_id: str, workspace_member_id: str, date: str, request_body: Optional[Any] = None, **kwargs
) -> Any:
"""予定稼働時間の日付指定更新
Args:
workspace_id (str): ワークスペースID (required)
workspace_member_id (str): ワークスペースメンバーID (required)
date (str): 予定の対象日 (required)
request_body (Any): Request Body
put_expected_working_time_request (PutExpectedWorkingTimeRequest): (required)
Returns:
ExpectedWorkingTime
"""
url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/expected-working-times/{date}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : JobApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_job(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""ジョブの削除
Args:
workspace_id (str): ワークスペースID (required)
job_id (str): ジョブID (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "DELETE"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_job(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""ジョブの取得
Args:
workspace_id (str): ワークスペースID (required)
job_id (str): ジョブID (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_job_children(self, workspace_id: str, job_id: str, **kwargs) -> Any:
"""子ジョブの一覧取得
Args:
workspace_id (str): ワークスペースID (required)
job_id (str): ジョブID (required)
Returns:
JobChildren
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}/children"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_jobs(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
"""ジョブの一覧取得
Args:
workspace_id (str): ワークスペースID (required)
query_params (dict[str, Any]): Query Parameters
sort (str): sort key(複数項目を利用したソートの場合は,(カンマ)区切りで指定してください。key(id or name)、降順にしたい場合は先頭に-(ハイフン)を付ける)
Returns:
[Job]
"""
url_path = f"/workspaces/{workspace_id}/jobs"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_job(self, workspace_id: str, job_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
"""ジョブの更新
Args:
workspace_id (str): ワークスペースID (required)
job_id (str): ジョブID (required)
request_body (Any): Request Body
put_job_request (PutJobRequest): (required)
Returns:
Job
"""
url_path = f"/workspaces/{workspace_id}/jobs/{job_id}"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : LoginApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def post_login(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""ログイン
Args:
request_body (Any): Request Body
login_request (LoginRequest): (required)
Returns:
LoginToken
"""
url_path = f"/login"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : MyApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def change_password(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""パスワード変更
パスワード変更
Args:
request_body (Any): Request Body
change_password_request (ChangePasswordRequest): (required)
Returns:
InlineResponse200
"""
url_path = f"/my/account/password"
http_method = "POST"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_account(self, **kwargs) -> Any:
"""ログイン中のアカウント情報を取得する
Args:
Returns:
Account
"""
url_path = f"/my/account"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_schedules(self, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
"""自身がアサインされているスケジュール一覧を取得する
Args:
query_params (dict[str, Any]): Query Parameters
workspace_id (str): ワークスペースIDを指定することで対象のワークスペースでアサインされているスケジュールのみを取得できる
term_start (str): 日付での範囲検索で使用
term_end (str): 日付での範囲検索で使用
Returns:
[Schedule]
"""
url_path = f"/my/schedules"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_workspace_members(self, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
"""自身のワークスペースメンバー情報一覧を取得する
Args:
query_params (dict[str, Any]): Query Parameters
workspace_id (str): ワークスペースIDを指定することで対象のワークスペースに所属しているワークスペースメンバー情報のみを取得できる
Returns:
[WorkspaceMember]
"""
url_path = f"/my/workspace-members"
http_method = "GET"
keyword_params: dict[str, Any] = {
"query_params": query_params,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_my_workspaces(self, **kwargs) -> Any:
"""自身の所属するワークスペース情報一覧を取得する
Args:
Returns:
[Workspace]
"""
url_path = f"/my/workspaces"
http_method = "GET"
keyword_params: dict[str, Any] = {}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
def put_my_account(self, request_body: Optional[Any] = None, **kwargs) -> Any:
"""アカウント情報更新
Args:
request_body (Any): Request Body
put_my_account_request (PutMyAccountRequest): (required)
Returns:
Account
"""
url_path = f"/my/account"
http_method = "PUT"
keyword_params: dict[str, Any] = {
"request_body": request_body,
}
keyword_params.update(**kwargs)
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : ScheduleApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_schedule(self, workspace_id: str, schedule_id: str, **kwargs) -> Any:
    """Delete a schedule (work plan).

    Args:
        workspace_id (str): Workspace ID (required)
        schedule_id (str): Schedule ID (required)

    Returns:
        Schedule
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
    return self._request_wrapper("DELETE", url_path, **keyword_params)
def get_schedule(self, workspace_id: str, schedule_id: str, **kwargs) -> Any:
    """Fetch a single schedule (work plan).

    Args:
        workspace_id (str): Workspace ID (required)
        schedule_id (str): Schedule ID (required)

    Returns:
        Schedule
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_schedules(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
    """List the schedules (work plans) in a workspace.

    Args:
        workspace_id (str): Workspace ID (required)
        query_params (dict[str, Any]): Query Parameters
            term_start (str): Start of the date-range filter
            term_end (str): End of the date-range filter
            job_id (str): Job ID

    Returns:
        [Schedule]
    """
    keyword_params: dict[str, Any] = {"query_params": query_params, **kwargs}
    url_path = f"/workspaces/{workspace_id}/schedules"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_sum_of_schedules(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
    """Get the total scheduled hours across the whole workspace.

    Args:
        workspace_id (str): Workspace ID (required)
        query_params (dict[str, Any]): Query Parameters
            job_id (str): Job ID
            includes_archived_job (bool): Whether totals for archived jobs are included

    Returns:
        SumOfTimes
    """
    keyword_params: dict[str, Any] = {"query_params": query_params, **kwargs}
    url_path = f"/workspaces/{workspace_id}/sum-of-schedules"
    return self._request_wrapper("GET", url_path, **keyword_params)
def put_schedule(self, workspace_id: str, schedule_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
    """Update a schedule (work plan).

    Args:
        workspace_id (str): Workspace ID (required)
        schedule_id (str): Schedule ID (required)
        request_body (Any): Request Body
            put_schedule_request (PutScheduleRequest): (required)

    Returns:
        Schedule
    """
    keyword_params: dict[str, Any] = {"request_body": request_body, **kwargs}
    url_path = f"/workspaces/{workspace_id}/schedules/{schedule_id}"
    return self._request_wrapper("PUT", url_path, **keyword_params)
#########################################
# Public Method : WorkspaceApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_workspace(self, workspace_id: str, **kwargs) -> Any:
    """Fetch a single workspace.

    Args:
        workspace_id (str): Workspace ID (required)

    Returns:
        Workspace
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_workspace_tag(self, workspace_id: str, workspace_tag_id: str, **kwargs) -> Any:
    """Fetch a single workspace tag.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_tag_id (str): Workspace tag ID (required)

    Returns:
        WorkspaceTag
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_workspace_tag_members(self, workspace_id: str, workspace_tag_id: str, **kwargs) -> Any:
    """List the workspace members linked to a workspace tag.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_tag_id (str): Workspace tag ID (required)

    Returns:
        WorkspaceTagMembers
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}/members"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_workspace_tags(self, workspace_id: str, **kwargs) -> Any:
    """List the tags of a workspace.

    Args:
        workspace_id (str): Workspace ID (required)

    Returns:
        [WorkspaceTag]
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/tags"
    return self._request_wrapper("GET", url_path, **keyword_params)
def put_workspace(self, workspace_id: str, request_body: Optional[Any] = None, **kwargs) -> Any:
    """Update a workspace.

    Args:
        workspace_id (str): Workspace ID (required)
        request_body (Any): Request Body
            put_workspace_request (PutWorkspaceRequest): (required)

    Returns:
        Workspace
    """
    keyword_params: dict[str, Any] = {"request_body": request_body, **kwargs}
    url_path = f"/workspaces/{workspace_id}"
    return self._request_wrapper("PUT", url_path, **keyword_params)
def put_workspace_tag(
    self, workspace_id: str, workspace_tag_id: str, request_body: Optional[Any] = None, **kwargs
) -> Any:
    """Update a workspace tag.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_tag_id (str): Workspace tag ID (required)
        request_body (Any): Request Body
            put_workspace_tag_request (PutWorkspaceTagRequest): (required)

    Returns:
        WorkspaceTag
    """
    keyword_params: dict[str, Any] = {"request_body": request_body, **kwargs}
    url_path = f"/workspaces/{workspace_id}/tags/{workspace_tag_id}"
    return self._request_wrapper("PUT", url_path, **keyword_params)
#########################################
# Public Method : WorkspaceMemberApi
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def delete_workspace_member(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
    """Delete a workspace member.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_member_id (str): Workspace member ID (required)

    Returns:
        WorkspaceMember
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
    return self._request_wrapper("DELETE", url_path, **keyword_params)
def get_workspace_member(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
    """Fetch a single workspace member.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_member_id (str): Workspace member ID (required)

    Returns:
        WorkspaceMember
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_workspace_member_tags(self, workspace_id: str, workspace_member_id: str, **kwargs) -> Any:
    """List the tags attached to a workspace member.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_member_id (str): Workspace member ID (required)

    Returns:
        WorkspaceMemberTags
    """
    keyword_params: dict[str, Any] = dict(kwargs)
    url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}/tags"
    return self._request_wrapper("GET", url_path, **keyword_params)
def get_workspace_members(self, workspace_id: str, query_params: Optional[dict[str, Any]] = None, **kwargs) -> Any:
    """List the members of a workspace.

    Args:
        workspace_id (str): Workspace ID (required)
        query_params (dict[str, Any]): Query Parameters
            sort (str): Sort key (prefix with "-" (hyphen) for descending order)
            includes_inactive_members (bool): Whether deactivated members are included

    Returns:
        [WorkspaceMember]
    """
    keyword_params: dict[str, Any] = {"query_params": query_params, **kwargs}
    url_path = f"/workspaces/{workspace_id}/members"
    return self._request_wrapper("GET", url_path, **keyword_params)
def put_workspace_member(
    self, workspace_id: str, workspace_member_id: str, request_body: Optional[Any] = None, **kwargs
) -> Any:
    """Update a workspace member.

    Args:
        workspace_id (str): Workspace ID (required)
        workspace_member_id (str): Workspace member ID (required)
        request_body (Any): Request Body
            put_workspace_member_request (PutWorkspaceMemberRequest): (required)

    Returns:
        WorkspaceMember
    """
    keyword_params: dict[str, Any] = {"request_body": request_body, **kwargs}
    url_path = f"/workspaces/{workspace_id}/members/{workspace_member_id}"
    return self._request_wrapper("PUT", url_path, **keyword_params)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Packaging script for PyBabel-json-md, a PyBabel plugin that extracts
gettext strings from JSON metadef (md) files."""
# BUG FIX: was `from setuptools import setup, os`, which pulled the stdlib
# `os` module out of setuptools' namespace -- that only works because
# setuptools happens to re-export it, and `os` was never used here anyway.
from setuptools import setup

setup(
    name='PyBabel-json-md',
    version='0.1.0',
    description='PyBabel json metadef (md) gettext strings extractor',
    author='Wayne Okuma',
    author_email='wayne.okuma@hpe.com',
    packages=['pybabel_json_md'],
    url="https://github.com/wkoathp/pybabel-json-md",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=[
        'babel',
    ],
    include_package_data=True,
    # Register the extractor in Babel's plugin entry-point group.
    entry_points="""
    [babel.extractors]
    json_md = pybabel_json_md.extractor:extract_json_md
    """,
)
|
nilq/baby-python
|
python
|
# Tai Sakuma <tai.sakuma@gmail.com>
import pytest
has_no_ROOT = False
try:
import ROOT
except ImportError:
has_no_ROOT = True
from alphatwirl.roottree import Events
if not has_no_ROOT:
from alphatwirl.roottree import BEvents as BEvents
##__________________________________________________________________||
# Event-wrapper classes under test; BEvents is only available with ROOT.
events_classes = [Events] + ([] if has_no_ROOT else [BEvents])
events_classes_ids = [cls.__name__ for cls in events_classes]

##__________________________________________________________________||
class MockFile(object):
    """Stand-in for the directory object that MockTree.GetDirectory() returns."""
class MockTree(object):
    """Minimal stand-in for a ROOT TTree.

    Tracks the last loaded entry in ``ievent`` (-1 when nothing is loaded)
    and records SetBranchStatus calls in ``branchstatus``.
    """

    def __init__(self, entries=100):
        self.entries = entries
        self.ievent = -1
        self.branchstatus = [ ]
        self.branch1 = 1111
        self.directory = MockFile()

    def GetDirectory(self):
        return self.directory

    def GetEntries(self):
        return self.entries

    def GetEntry(self, ientry):
        # Loading a valid entry "reads" 10 bytes; out of range reads 0
        # and resets the current-entry marker.
        if ientry >= self.entries:
            self.ievent = -1
            return 0
        self.ievent = ientry
        return 10

    def SetBranchStatus(self, bname, status):
        self.branchstatus.append((bname, status))
def test_mocktree():
    """Sanity-check the MockTree test double itself."""
    tree = MockTree(entries=3)
    assert isinstance(tree.GetDirectory(), MockFile)
    assert tree.GetEntries() == 3
    assert tree.ievent == -1
    for ientry in range(3):
        assert tree.GetEntry(ientry) == 10
        assert tree.ievent == ientry
    assert tree.GetEntry(3) == 0
    assert tree.ievent == -1
##__________________________________________________________________||
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_init(Events):
    """Construction with and without maxEvents keeps a reference to the tree."""
    tree = MockTree()
    Events(tree)
    events = Events(tree, 100)
    assert events.tree is tree
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_repr(Events):
    """repr() of an Events wrapper must not raise."""
    events = Events(MockTree())
    repr(events)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_nEvents_default(Events):
    """Without maxEvents, nEvents defaults to the tree's entry count."""
    events = Events(MockTree(entries=100))
    assert events.nEvents == 100
    assert len(events) == 100
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
@pytest.mark.parametrize('maxEvents, expected ', [
    pytest.param(-1, 100, id='default'),
    pytest.param(50, 50, id='less'),
    pytest.param(120, 100, id='more'),
    pytest.param(100, 100, id='exact'),
])
def test_nEvents(Events, maxEvents, expected):
    """nEvents is min(maxEvents, entries); -1 means all entries."""
    events = Events(MockTree(entries=100), maxEvents)
    assert events.nEvents == expected
    assert len(events) == expected
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
@pytest.mark.parametrize('maxEvents, start, expected ', [
    pytest.param(-1, 1, 99, id='all_events_start_2nd'),
    pytest.param(10, 1, 10, id='nEvents_equal_maxEvents'),
    pytest.param(-1, 99, 1, id='all_events_start_last'),
    pytest.param(20, 99, 1, id='nEvents_less_than_maxEvents'),
    pytest.param(-1, 100, 0, id='nEvents_zero_1'),
    pytest.param(-1, 110, 0, id='nEvents_zero_2'),
    pytest.param(10, 100, 0, id='nEvents_zero_3'),
])
def test_nEvents_start(Events, maxEvents, start, expected):
    """nEvents accounts for both the start offset and maxEvents."""
    events = Events(MockTree(entries=100), maxEvents=maxEvents, start=start)
    assert events.nEvents == expected
    assert len(events) == expected
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_nEvents_start_raise(Events):
    """A negative start index is rejected."""
    with pytest.raises(ValueError):
        Events(MockTree(entries=100), maxEvents=-1, start=-10)
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_iEvent(Events):
    """Iteration advances both the event counter and the tree entry."""
    tree = MockTree(entries=4)
    events = Events(tree)
    assert events.iEvent == -1
    it = iter(events)
    for expected in range(4):
        event = next(it)
        assert event.iEvent == expected
        assert tree.ievent == expected
    with pytest.raises(StopIteration):
        next(it)
    assert event.iEvent == -1
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_maxEvents(Events):
    """Iteration stops after maxEvents even when the tree has more entries."""
    tree = MockTree(entries=40)
    events = Events(tree, maxEvents=4)
    assert events.iEvent == -1
    it = iter(events)
    for expected in range(4):
        event = next(it)
        assert event.iEvent == expected
    with pytest.raises(StopIteration):
        next(it)
    assert event.iEvent == -1
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_iEvent_start(Events):
    """With a start offset, event index 0 maps to tree entry `start`."""
    tree = MockTree(entries=4)
    events = Events(tree, start=2)
    assert events.iEvent == -1
    it = iter(events)
    for expected in range(2):
        event = next(it)
        assert event.iEvent == expected
        assert tree.ievent == expected + 2
    with pytest.raises(StopIteration):
        next(it)
    assert event.iEvent == -1
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_iter_maxEvents_start(Events):
    """maxEvents and start combine: 4 events beginning at tree entry 2."""
    tree = MockTree(entries=40)
    events = Events(tree, maxEvents=4, start=2)
    assert events.iEvent == -1
    it = iter(events)
    for expected in range(4):
        event = next(it)
        assert event.iEvent == expected
        assert tree.ievent == expected + 2
    with pytest.raises(StopIteration):
        next(it)
    assert event.iEvent == -1
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_getitem(Events):
    """Indexing loads the matching tree entry; out-of-range raises IndexError."""
    tree = MockTree(entries=4)
    events = Events(tree)
    assert events.iEvent == -1
    for i in range(4):
        event = events[i]
        assert event.iEvent == i
        assert tree.ievent == i
    with pytest.raises(IndexError):
        events[4]
    assert events.iEvent == -1
@pytest.mark.parametrize('Events', events_classes, ids=events_classes_ids)
def test_getitem_start(Events):
    """Indexing with a start offset maps index 0 to tree entry `start`."""
    tree = MockTree(entries=4)
    events = Events(tree, start=2)
    assert events.iEvent == -1
    for i in range(2):
        event = events[i]
        assert event.iEvent == i
        assert tree.ievent == i + 2
    with pytest.raises(IndexError):
        events[4]
    assert events.iEvent == -1
##__________________________________________________________________||
|
nilq/baby-python
|
python
|
import qimpy as qp
import numpy as np
from scipy.special import sph_harm
from typing import Sequence, Any, List, Tuple
def get_harmonics_ref(l_max: int, r: np.ndarray) -> np.ndarray:
    """Reference real solid harmonics built from SciPy spherical harmonics.

    Returns an array whose leading axis stacks all (l, m) rows for
    l = 0..l_max, indexed by l*(l+1)+m, evaluated at the points `r`.
    """
    r_mag = np.linalg.norm(r, axis=-1)
    theta = np.arccos(r[..., 2] / r_mag)
    phi = np.arctan2(r[..., 1], r[..., 0])
    phi += np.where(phi < 0.0, 2 * np.pi, 0)
    root2 = np.sqrt(2)
    blocks = []
    for l in range(l_max + 1):
        block = np.zeros((2 * l + 1,) + r.shape[:-1])
        # m = 0 row has no sqrt(2) factor and (-1)**0 == 1:
        block[l] = ((r_mag ** l) * sph_harm(0, l, phi, theta)).real
        for m in range(1, l + 1):
            ylm = ((-1) ** m) * (r_mag ** l) * sph_harm(m, l, phi, theta)
            block[l + m] = root2 * ylm.real
            block[l - m] = root2 * ylm.imag
        blocks.append(block)
    return np.concatenate(blocks, axis=0)
def get_lm(l_max: int) -> List[Tuple[int, int]]:
    """Get list of all (l, m) in order up to (and including) l_max."""
    pairs: List[Tuple[int, int]] = []
    for l in range(l_max + 1):
        pairs.extend((l, m) for m in range(-l, l + 1))
    return pairs
def format_array(array: Sequence[Any], fmt: str) -> str:
    """Convert `array` to string with format `fmt` for each entry."""
    entries = (fmt.format(entry) for entry in array)
    return "[{}]".format(", ".join(entries))
def generate_harmonic_coefficients(l_max_hlf: int) -> None:
    """Generate tables of recursion coefficients for computing real
    solid harmonics up to l_max = 2 * l_max_hlf, as well as tables of
    product coefficients (Clebsch-Gordon coefficients) for real solid
    harmonics up to order l_max_hlf. Print results formatted as Python
    code that can be pasted into _spherical_harmonics_data.py."""
    l_max = 2 * l_max_hlf
    qp.log.info(
        "from typing import List, Tuple, Dict\n\n"
        f"L_MAX: int = {l_max} # Maximum l for harmonics\n"
        f"L_MAX_HLF: int = {l_max_hlf} # Maximum l for products"
    )
    # Calculate solid harmonics on a mesh covering unit cube:
    grids1d = 3 * (np.linspace(-1.0, 1.0, 2 * l_max),)  # avoids zero
    r = np.array(np.meshgrid(*grids1d)).reshape(3, -1).T
    r_sq = (r ** 2).sum(axis=-1)
    # Rows of ylm are indexed by the flattened index l*(l+1)+m.
    ylm = get_harmonics_ref(l_max, r)
    # Calculate recursion coefficients:
    ERR_TOL = 1e-14  # max allowed relative least-squares residual
    COEFF_TOL = 1e-8  # coefficients below this relative magnitude are dropped
    qp.log.info(
        "CooIndices = Tuple[List[int], List[int], List[float]]\n\n"
        "# Recursion coefficients for computing real harmonics at l>1\n"
        "# from products of those at l = 1 and l-1. The integers index\n"
        "# a sparse matrix with (2l+1) rows and 3*(2l-1) columns.\n"
        "YLM_RECUR: List[CooIndices] = ["
    )
    Y_00 = np.sqrt(0.25 / np.pi)
    Y_1m_prefac = np.sqrt(0.75 / np.pi)
    qp.log.info(f" ([], [], [{Y_00:.16f}]), ([], [], [{Y_1m_prefac:.16f}]),")
    for l in range(2, l_max + 1):
        # Products of every (l-1) harmonic with the three l=1 harmonics:
        l_minus_1_slice = slice((l - 1) ** 2, l ** 2)
        y_product = ylm[l_minus_1_slice, None, :] * ylm[None, 1:4, :]
        y_product = y_product.reshape((2 * l - 1) * 3, -1)
        index_row = []
        index_col = []
        values = []
        for m in range(-l, l + 1):
            # List pairs of m at l = 1 and l-1 that can add up to m:
            m_pairs_all = set(
                [
                    (sign * m + dsign * dm, dm)
                    for sign in (-1, 1)
                    for dsign in (-1, 1)
                    for dm in (-1, 0, 1)
                ]
            )
            m_pairs = [m_pair for m_pair in m_pairs_all if abs(m_pair[0]) < l]
            m_pair_indices = [3 * (l - 1 + m) + (1 + dm) for m, dm in m_pairs]
            # Solve for coefficients of the linear combination:
            for n_sel in range(1, len(m_pair_indices) + 1):
                # Try increasing numbers till we get one:
                y_product_allowed = y_product[m_pair_indices[:n_sel]]
                y_target = ylm[l * (l + 1) + m]
                coeff = np.linalg.lstsq(y_product_allowed.T, y_target, rcond=None)[0]
                residual = np.dot(coeff, y_product_allowed) - y_target
                err = np.linalg.norm(residual) / np.linalg.norm(y_target)
                if err < ERR_TOL:
                    break
            assert err < ERR_TOL
            # Select non-zero coefficients to form product expansion:
            sel = np.where(np.abs(coeff) > COEFF_TOL * np.linalg.norm(coeff))[0]
            indices = np.array(m_pair_indices)[sel]
            coeff = coeff[sel]
            # Sort by index and add to lists for current l:
            sort_index = indices.argsort()
            index_row += [l + m] * len(sort_index)
            index_col += list(indices[sort_index])
            values += list(coeff[sort_index])
        # Format as python code:
        qp.log.info(
            f" ("
            f"{format_array(index_row, '{:d}')}, "
            f"{format_array(index_col, '{:d}')}, "
            f"{format_array(values, '{:.16f}')}),"
        )
    qp.log.info("]\n")
    # Calculate Clebsch-Gordon coefficients:
    lm_hlf = get_lm(l_max_hlf)
    qp.log.info(
        "# Clebsch-Gordon coefficients for products of real harmonics.\n"
        "# The integer indices correspond to l*(l+1)+m for each (l,m).\n"
        "YLM_PROD: Dict[Tuple[int, int],"
        " Tuple[List[int], List[float]]] = {"
    )
    for ilm1, (l1, m1) in enumerate(lm_hlf):
        for ilm2, (l2, m2) in enumerate(lm_hlf[: ilm1 + 1]):
            # List (l,m) pairs allowed by angular momentum addition rules:
            m_allowed = {m1 + m2, m1 - m2, m2 - m1, -(m1 + m2)}
            l_allowed = range(l1 - l2, l1 + l2 + 1, 2)
            lm_all = np.array(
                [(l, m) for l in l_allowed for m in m_allowed if (abs(m) <= l)]
            )
            l_all = lm_all[:, 0]
            m_all = lm_all[:, 1]
            ilm = l_all * (l_all + 1) + m_all  # flattened index
            # Solve for coefficients of the linear combination:
            y_product = ylm[ilm1] * ylm[ilm2]
            y_terms = ylm[ilm] * (r_sq[None, :] ** ((l1 + l2 - l_all) // 2)[:, None])
            results = np.linalg.lstsq(y_terms.T, y_product, rcond=None)
            coeff = results[0]
            err = np.sqrt(results[1][0]) / np.linalg.norm(y_product)
            assert err < ERR_TOL
            # Select non-zero coefficients to form product expansion:
            sel = np.where(np.abs(coeff) > COEFF_TOL * np.linalg.norm(coeff))[0]
            ilm = ilm[sel]
            coeff = coeff[sel]
            # Sort by (l,m):
            sort_index = ilm.argsort()
            ilm = ilm[sort_index]
            coeff = coeff[sort_index]
            # Format as python code:
            qp.log.info(
                f" ({ilm1}, {ilm2}): ("
                f"{format_array(ilm, '{:d}')}, "
                f"{format_array(coeff, '{:.16f}')}),"
            )
    qp.log.info("}")
def main():
    """Entry point: initialize the qimpy runtime (serial only) and print
    the generated coefficient tables via qp.log."""
    qp.rc.init()
    assert qp.rc.n_procs == 1  # no MPI
    qp.utils.log_config()  # after rc to suppress header messages
    generate_harmonic_coefficients(l_max_hlf=3)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import asyncio
from typing import Set
from typing import List
from dateutil.parser import parse
from pytz import UTC
from packageurl import PackageURL
from univers.version_specifier import VersionSpecifier
from univers.versions import SemverVersion
from vulnerabilities.data_source import Advisory
from vulnerabilities.data_source import GitDataSource
from vulnerabilities.data_source import Reference
from vulnerabilities.package_managers import RubyVersionAPI
from vulnerabilities.helpers import load_yaml
from vulnerabilities.helpers import nearest_patched_package
class RubyDataSource(GitDataSource):
    """Imports Ruby gem advisories from a git checkout of YAML files under ./gems."""

    def __enter__(self):
        super(RubyDataSource, self).__enter__()
        # Scan the repository only once per session; file_changes() yields the
        # added and updated .yml advisory files under ./gems.
        if not getattr(self, "_added_files", None):
            self._added_files, self._updated_files = self.file_changes(
                recursive=True, file_ext="yml", subdir="./gems"
            )
        self.pkg_manager_api = RubyVersionAPI()
        self.set_api(self.collect_packages())

    def set_api(self, packages):
        # Pre-load version data for all package names in one async batch.
        asyncio.run(self.pkg_manager_api.load_api(packages))

    def updated_advisories(self) -> Set[Advisory]:
        # Build one Advisory per changed file that parses successfully.
        files = self._updated_files
        advisories = []
        for f in files:
            processed_data = self.process_file(f)
            if processed_data:
                advisories.append(processed_data)
        return self.batch_advisories(advisories)

    def added_advisories(self) -> Set[Advisory]:
        # Same shape as updated_advisories(), but over newly added files.
        files = self._added_files
        advisories = []
        for f in files:
            processed_data = self.process_file(f)
            if processed_data:
                advisories.append(processed_data)
        return self.batch_advisories(advisories)

    def collect_packages(self):
        # Distinct gem names mentioned by all changed advisory files.
        packages = set()
        files = self._updated_files.union(self._added_files)
        for f in files:
            data = load_yaml(f)
            if data.get("gem"):
                packages.add(data["gem"])
        return packages

    def process_file(self, path) -> List[Advisory]:
        # NOTE(review): despite the List[Advisory] annotation, this method
        # returns a single Advisory or None -- annotation looks wrong.
        record = load_yaml(path)
        package_name = record.get("gem")
        if not package_name:
            return
        # Only advisories that carry a CVE id are imported.
        if "cve" in record:
            cve_id = "CVE-{}".format(record["cve"])
        else:
            return
        publish_time = parse(record["date"]).replace(tzinfo=UTC)
        safe_version_ranges = record.get("patched_versions", [])
        # this case happens when the advisory contain only 'patched_versions' field
        # and it has value None(i.e it is empty :( ).
        if not safe_version_ranges:
            safe_version_ranges = []
        safe_version_ranges += record.get("unaffected_versions", [])
        safe_version_ranges = [i for i in safe_version_ranges if i]
        if not getattr(self, "pkg_manager_api", None):
            self.pkg_manager_api = RubyVersionAPI()
        # Versions known at publish time, split into safe vs affected:
        all_vers = self.pkg_manager_api.get(package_name, until=publish_time).valid_versions
        safe_versions, affected_versions = self.categorize_versions(all_vers, safe_version_ranges)
        impacted_purls = [
            PackageURL(
                name=package_name,
                type="gem",
                version=version,
            )
            for version in affected_versions
        ]
        resolved_purls = [
            PackageURL(
                name=package_name,
                type="gem",
                version=version,
            )
            for version in safe_versions
        ]
        references = []
        if record.get("url"):
            references.append(Reference(url=record.get("url")))
        return Advisory(
            summary=record.get("description", ""),
            affected_packages=nearest_patched_package(impacted_purls, resolved_purls),
            references=references,
            vulnerability_id=cve_id,
        )

    @staticmethod
    def categorize_versions(all_versions, unaffected_version_ranges):
        """Split all_versions into (safe, vulnerable) using semver range specs."""
        # NOTE(review): the loop variable 'id' shadows the builtin id(),
        # and the ranges list is mutated in place.
        for id, elem in enumerate(unaffected_version_ranges):
            unaffected_version_ranges[id] = VersionSpecifier.from_scheme_version_spec_string(
                "semver", elem
            )
        safe_versions = []
        vulnerable_versions = []
        for i in all_versions:
            vobj = SemverVersion(i)
            # NOTE(review): 'is_vulnerable' is set when the version matches a
            # SAFE (patched/unaffected) range -- the name is inverted, but the
            # resulting split is correct.
            is_vulnerable = False
            for ver_rng in unaffected_version_ranges:
                if vobj in ver_rng:
                    safe_versions.append(i)
                    is_vulnerable = True
                    break
            if not is_vulnerable:
                vulnerable_versions.append(i)
        return safe_versions, vulnerable_versions
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/app_ui.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
# Install prefix for the packaged UI assets (icon files referenced below).
SETUP_DIR="/usr/share/gnome-extensions-loader"


class Ui_MainWindow(object):
    """pyuic5-generated builder for the application's main window.

    Auto-generated from ui/app_ui.ui -- do not edit by hand; any manual
    change is lost when pyuic5 is run again.
    """

    def setupUi(self, MainWindow):
        """Create the central list widget, menus, toolbar and actions."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(250, 300)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/gnome-extensions-loader.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # Central widget: a single list inside a tight horizontal layout.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setContentsMargins(3, 3, 3, 3)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.listWidget = QtWidgets.QListWidget(self.centralwidget)
        self.listWidget.setObjectName("listWidget")
        self.horizontalLayout.addWidget(self.listWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar hosting the File / Layouts / Help menus.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 250, 22))
        self.menubar.setObjectName("menubar")
        self.menuLayouts = QtWidgets.QMenu(self.menubar)
        self.menuLayouts.setObjectName("menuLayouts")
        self.menu_Help = QtWidgets.QMenu(self.menubar)
        self.menu_Help.setObjectName("menu_Help")
        self.menu_File = QtWidgets.QMenu(self.menubar)
        self.menu_File.setObjectName("menu_File")
        MainWindow.setMenuBar(self.menubar)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions, each paired with an icon from SETUP_DIR.
        self.action_Add = QtWidgets.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_Add.setIcon(icon1)
        self.action_Add.setObjectName("action_Add")
        self.action_Remove = QtWidgets.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/remove.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_Remove.setIcon(icon2)
        self.action_Remove.setObjectName("action_Remove")
        self.action_Overwrite = QtWidgets.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/edit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_Overwrite.setIcon(icon3)
        self.action_Overwrite.setObjectName("action_Overwrite")
        self.action_About = QtWidgets.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/about.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_About.setIcon(icon4)
        self.action_About.setObjectName("action_About")
        self.action_Exit = QtWidgets.QAction(MainWindow)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/exit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_Exit.setIcon(icon5)
        self.action_Exit.setObjectName("action_Exit")
        self.action_Apply = QtWidgets.QAction(MainWindow)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(f"{SETUP_DIR}/ui/icons/apply.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.action_Apply.setIcon(icon6)
        self.action_Apply.setObjectName("action_Apply")
        # Menu population and toolbar layout; call order defines on-screen order.
        self.menuLayouts.addAction(self.action_Add)
        self.menuLayouts.addAction(self.action_Remove)
        self.menuLayouts.addAction(self.action_Overwrite)
        self.menu_Help.addAction(self.action_About)
        self.menu_File.addAction(self.action_Apply)
        self.menu_File.addAction(self.action_Exit)
        self.menubar.addAction(self.menu_File.menuAction())
        self.menubar.addAction(self.menuLayouts.menuAction())
        self.menubar.addAction(self.menu_Help.menuAction())
        self.toolBar.addAction(self.action_Apply)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.action_Add)
        self.toolBar.addAction(self.action_Remove)
        self.toolBar.addAction(self.action_Overwrite)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.action_About)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.action_Exit)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (titles, action texts, shortcuts)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Gnome Extensions Loader"))
        self.menuLayouts.setTitle(_translate("MainWindow", "&Layouts"))
        self.menu_Help.setTitle(_translate("MainWindow", "&Help"))
        self.menu_File.setTitle(_translate("MainWindow", "&File"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.action_Add.setText(_translate("MainWindow", "&Add"))
        self.action_Add.setShortcut(_translate("MainWindow", "Ctrl+N"))
        self.action_Remove.setText(_translate("MainWindow", "&Remove"))
        self.action_Remove.setShortcut(_translate("MainWindow", "Ctrl+R"))
        self.action_Overwrite.setText(_translate("MainWindow", "&Overwrite"))
        self.action_Overwrite.setShortcut(_translate("MainWindow", "Ctrl+O"))
        self.action_About.setText(_translate("MainWindow", "&About"))
        self.action_About.setShortcut(_translate("MainWindow", "Ctrl+I"))
        self.action_Exit.setText(_translate("MainWindow", "&Exit"))
        self.action_Exit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
        self.action_Apply.setText(_translate("MainWindow", "&Apply"))
        self.action_Apply.setShortcut(_translate("MainWindow", "Ctrl+A"))
|
nilq/baby-python
|
python
|
from cloudshell.shell.core.resource_driver_interface import ResourceDriverInterface
from cloudshell.shell.core.context import InitCommandContext, ResourceCommandContext
import cloudshell.api.cloudshell_api as api
from natsort import natsorted, ns
import ipcalc
import json
class IpcalcDriver(ResourceDriverInterface):
    """CloudShell driver that hands out IP Address resources from a container.

    A container resource owns child "IP Address" resources; this driver
    lists them and reserves consecutive free addresses large enough for a
    requested subnet prefix length.
    """

    # Total addresses consumed per prefix length (usable hosts plus the
    # network and broadcast addresses, hence the "+ 2"); /31 and /32 are
    # the point-to-point / host-route special cases.
    NetSizes = {
        "24": 254 + 2,
        "25": 126 + 2,
        "26": 62 + 2,
        "27": 30 + 2,
        "28": 14 + 2,
        "29": 6 + 2,
        "30": 2 + 2,
        "31": 2,
        "32": 1,
    }

    def cleanup(self):
        """
        Destroy the driver session, this function is called everytime a driver instance is destroyed
        This is a good place to close any open sessions, finish writing to log files
        """
        pass

    def __init__(self):
        """
        ctor must be without arguments, it is created with reflection at run time
        """
        pass

    def initialize(self, context):
        """
        Initialize the driver session, this function is called everytime a new instance of the driver is created
        This is a good place to load and cache the driver configuration, initiate sessions etc.
        :param InitCommandContext context: the context the command runs on
        """
        pass

    def printIPsInContainer(self, context, containerName):
        """Return a JSON-encoded, naturally sorted list of the names of every
        IP Address resource under *containerName*.

        :param ResourceCommandContext context: CloudShell command context.
        :param str containerName: container resource to inspect.
        :raises ValueError: if the container does not exist.
        """
        ApiSession = api.CloudShellAPISession(
            host=context.connectivity.server_address,
            token_id=context.connectivity.admin_auth_token,
            domain="Global")
        # Validate the container exists before scanning for addresses.
        try:
            ApiSession.GetResourceDetails(containerName)
        except Exception as exc:  # narrowed from a bare except; message kept
            raise ValueError("Specified container does not exist.") from exc
        rl = ApiSession.FindResources(resourceFamily="Address", resourceModel="IP Address", includeSubResources=True)
        cleanList = [address.Name for address in rl.Resources if containerName in address.FullName]
        cleanList = natsorted(cleanList, alg=ns.IGNORECASE)
        return json.dumps(cleanList)

    def getNextIP(self, context, containerName, CIDR):
        """Reserve and return the next block of free IPs for a /CIDR subnet.

        :param ResourceCommandContext context: CloudShell command context
            (must carry a reservation id).
        :param str containerName: container resource to allocate from.
        :param str CIDR: requested prefix length, e.g. "28".
        :return: JSON-encoded list of the reserved resource full paths.
        :raises ValueError: on unknown container, unsupported or oversized
            subnet, or an API failure while reserving.
        """
        ApiSession = api.CloudShellAPISession(
            host=context.connectivity.server_address,
            token_id=context.connectivity.admin_auth_token,
            domain="Global")
        # validate that the container to pull from exists in RM in this domain
        try:
            containerResource = ApiSession.GetResourceDetails(containerName)
        except Exception as exc:
            raise ValueError("Specified container does not exist.") from exc
        rl = ApiSession.FindResources(resourceFamily="Address", resourceModel="IP Address", includeSubResources=True)
        # Keep only addresses in this container that are free and not excluded.
        cleanList = [
            address.Name
            for address in rl.Resources
            if containerName in address.FullName
            and address.ReservedStatus == "Not In Reservations"
            and not address.Excluded
        ]
        cleanList = natsorted(cleanList, alg=ns.IGNORECASE)
        # cleanList is now the numerically sorted list of available IPs in
        # the container; rl still holds the raw resource objects.
        containerCidr = str(containerResource.ResourceAttributes[0].Value)
        # Confirm the requested size fits the allocated range we manage
        # (a smaller prefix length means a larger network).
        if int(CIDR) < int(containerCidr):
            raise ValueError("Requested network size is greater than allocated container has to offer.")
        try:
            numAddressesNeeded = self.NetSizes[CIDR]
        except KeyError as exc:
            raise ValueError("The subnet size requested cannot be converted into available IP space.") from exc
        # confirm that we still have enough addresses for the requested subnet
        if numAddressesNeeded > len(cleanList):
            raise ValueError("The requested number of IPs needed for this sandbox do not exist in this allocation range of " + containerName)
        # Reserve the first numAddressesNeeded free addresses.
        returnedAddresses = [containerName + "/" + name for name in cleanList[:numAddressesNeeded]]
        try:
            ApiSession.AddResourcesToReservation(
                reservationId=context.reservation.reservation_id,
                resourcesFullPath=returnedAddresses)
        except Exception as exc:
            raise ValueError("Something went wrong allocating the IPs.") from exc
        return json.dumps(returnedAddresses)
|
nilq/baby-python
|
python
|
"""
Routine to create the light cones shells
L1 L2 L3 u11 u12 u13 u21 u22 u23 u31 u32 u33 (periodicity)
C2 '2.2361', '1.0954', '0.4082', '2', '1', '0', '1', '0', '1', '1', '0', '0', '(1)'
C15 '1.4142', '1.0000', '0.7071', '1', '1', '0', '0', '0', '1', '1', '0', '0', '(12)'
C6 '5.9161', '0.4140', '0.4082', '5', '3', '1', '1', '1', '0', '0', '1', '0', '(1)'
C3 '2.4495', '0.7071', '0.5774', '2', '1', '1', '1', '1', '0', '0', '1', '0', '(1)'
python3 create_light_cone_shells.py 10 MD10 1000
python3 create_light_cone_shells.py 10 MD10 1000
import numpy as n
import os
for ii in n.arange(50,115,1)[::-1]:
comm="python3 create_light_cone_shells.py "+str(ii)+" MD10 1000"
print(comm)
os.system(comm)
"""
import sys

# Command line: snapshot index, environment-variable name of the simulation
# root, box size [Mpc/h], and the cuboid-remapping group to use.
ii = int(sys.argv[1])
env = sys.argv[2]  # 'MD10'
L_box = float(sys.argv[3]) / 0.6777  # convert to Mpc assuming h = 0.6777
positions_group_name = sys.argv[4]  # 'remaped_position_L3'

# For each known remapping: the HDF5 group to read and the observer
# position expressed as multiples of L_box (L15 edges: 1.4142, 1.0000, 0.7071).
_REMAP_SETUPS = {
    'remaped_position_L3': ('remaped_position_L3', (0., 0.7071 / 2., 0.5774 / 2.)),
    'remaped_position_L3_z1': ('remaped_position_L3', (-2.4495, 0.7071 / 2., 0.5774 / 2.)),
    'remaped_position_L2': ('remaped_position_L2', (2.2361 / 2., -1.5400, 0.4082 / 2.)),
    'remaped_position_L6': ('remaped_position_L6', (0., 0.4140 / 2., 0.4082 / 2.)),
    'remaped_position_L15': ('remaped_position_L15', (0., 1.0000 / 2., 0.7071 / 2.)),
}

# Like the original if-chain: an unknown group name leaves positions_group
# and the observer coordinates undefined.
if positions_group_name in _REMAP_SETUPS:
    positions_group, _frac = _REMAP_SETUPS[positions_group_name]
    x_obs, y_obs, z_obs = (_frac[0] * L_box, _frac[1] * L_box, _frac[2] * L_box)
import h5py  # HDF5 support
import os
import glob
import numpy as n
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u

# MultiDark / Planck-like cosmology used to turn redshifts into comoving distances.
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)

# Output directory for this remapping's light-cone shells; os.environ[env]
# points at the simulation root (env comes from the command line above).
h5_lc_dir = os.path.join(os.environ[env], 'h5_lc', 'shells_'+positions_group_name )
if os.path.isdir(h5_lc_dir)==False:
    os.mkdir(h5_lc_dir)

# Snapshot catalogs, sorted by scale factor (ascending a => descending redshift).
h5_dir = os.path.join(os.environ[env], 'h5' )
input_list_i = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????_emerge.hdf5")))
input_list_i.sort()

# removing snapshots that cannot be remapped ...
input_list = n.delete(input_list_i,n.array([
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08000_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08180_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.08360_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.13320_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.13620_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.15210_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.16620_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.17380_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.17770_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.18570_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.18990_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.19410_emerge.hdf5")), # Ms, LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.20750_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.21210_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.22170_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.22670_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.23690_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.24230_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.25320_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.25890_emerge.hdf5")), # LSAR issue 51
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.26470_emerge.hdf5")), # LSAR issue 52
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.27060_emerge.hdf5")), # LSAR + remap issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.28920_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.29570_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.30910_emerge.hdf5")), # LSAR issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.34530_emerge.hdf5")), # LSAR issue
    #n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.27060_emerge.hdf5")), # remap issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.43090_emerge.hdf5")), # remap issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.71730_emerge.hdf5")), # remap issue
    n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.93570_emerge.hdf5")) # remap issue
    ]) )

# creates the redshift list (one entry per surviving snapshot, same order)
redshifts = []
for file_1 in input_list :
    f1 = h5py.File(file_1, "r")
    redshifts.append(f1.attrs['redshift'])
    f1.close()

redshifts = n.array(redshifts)

# creates the shell list: boundaries halfway between consecutive snapshot
# comoving distances, so each snapshot covers the distance range it is
# closest to. Since files sort by ascending scale factor, Dcom decreases
# with index, making Dmax[ii] > Dmin[ii] for every shell.
Dcom = cosmoMD.comoving_distance(redshifts).value
Dmax = n.hstack((Dcom[0],(Dcom[1:]+Dcom[:-1])/2.))
Dmin = n.hstack(((Dcom[1:]+Dcom[:-1])/2., Dcom[-1]))
def copylc_data(ii, option=False):
    """
    Creates the selection array to obtain the shell in a snapshot to be added in the light cone
    Writes a lightcone shell for each snapshot

    :param ii: index into input_list / Dmin / Dmax selecting the snapshot.
    :param option: unused; kept for interface compatibility.
    """
    file_1 = input_list[ii]
    file_out = os.path.join(h5_lc_dir, 'shell_'+os.path.basename( input_list[ii] ) )
    print(file_1, "==>>", file_out)
    f1 = h5py.File(file_1, "r")
    print( "n halos=",f1['/halo_properties/'].attrs['N_halos'])
    # NOTE(review): '.value' is the deprecated h5py accessor (removed in h5py 3;
    # use ds[()]), and 'xyx_Lbox' looks like a typo for 'xyz_Lbox' -- confirm
    # both against the snapshot files before changing anything.
    x,y,z=f1[positions_group + '/xyx_Lbox'].value.T*L_box
    # Comoving distance of every halo from the observer position.
    distance = ((x-x_obs)**2.+(y-y_obs)**2.+(z-z_obs)**2.)**0.5
    # Halos that fall inside this snapshot's shell [Dmin, Dmax).
    selection = (distance>=Dmin[ii])&(distance<Dmax[ii])
    print( len(distance[selection])," halos in shell ", Dmin[ii], "<d comoving<",Dmax[ii])
    # Only write an output file when the shell is non-trivially populated.
    if len(distance[selection])>1:
        f = h5py.File(file_out, "a")
        f.attrs['file_name'] = os.path.basename(file_out)
        f.attrs['HDF5_Version'] = h5py.version.hdf5_version
        f.attrs['h5py_version'] = h5py.version.version
        # Positions (shifted into the light cone) and velocities.
        halo_data = f.create_group('halo_position')
        ds = halo_data.create_dataset('x', data = x[selection] )
        ds.attrs['units'] = 'Mpc/h'
        ds.attrs['long_name'] = 'x'
        ds = halo_data.create_dataset('y', data = y[selection] )
        ds.attrs['units'] = 'Mpc/h'
        ds.attrs['long_name'] = 'y'
        ds = halo_data.create_dataset('z', data = z[selection] )
        ds.attrs['units'] = 'Mpc/h'
        ds.attrs['long_name'] = 'z'
        ds = halo_data.create_dataset('vx', data = f1['/halo_position/vx'].value[selection] )
        ds.attrs['units'] = 'km/s'
        ds.attrs['long_name'] = 'vx'
        ds = halo_data.create_dataset('vy', data = f1['/halo_position/vy'].value[selection] )
        ds.attrs['units'] = 'km/s'
        ds.attrs['long_name'] = 'vy'
        ds = halo_data.create_dataset('vz', data = f1['/halo_position/vz'].value[selection] )
        ds.attrs['units'] = 'km/s'
        ds.attrs['long_name'] = 'vz'
        # Intrinsic halo properties, copied with the same selection mask.
        halo_data = f.create_group('halo_properties')
        ds = halo_data.create_dataset('id', data = f1['/halo_properties/id'].value[selection] )
        ds.attrs['units'] = '-'
        ds.attrs['long_name'] = 'halo identifier'
        ds = halo_data.create_dataset('pid', data = f1['/halo_properties/pid'].value[selection] )
        ds.attrs['units'] = '-'
        ds.attrs['long_name'] = 'parent identifier, -1 if distinct halo'
        ds = halo_data.create_dataset('mvir', data = f1['/halo_properties/mvir'].value[selection] )
        ds.attrs['units'] = r'$h^{-1} M_\odot$'
        ds.attrs['long_name'] = r'$M_{vir}$'
        ds = halo_data.create_dataset('rvir', data = f1['/halo_properties/rvir'].value[selection] )
        ds.attrs['units'] = r'$h^{-1} kpc$'
        ds.attrs['long_name'] = r'$r_{vir}$'
        ds = halo_data.create_dataset('rs', data = f1['/halo_properties/rs'].value[selection] )
        ds.attrs['units'] = r'$h^{-1} kpc$'
        ds.attrs['long_name'] = r'$r_{s}$'
        ds = halo_data.create_dataset('Vmax' , data = f1['/halo_properties/Vmax'].value[selection])
        ds.attrs['units'] = 'km/s'
        ds.attrs['long_name'] = r'$V_{max}$'
        ds = halo_data.create_dataset('Mpeak' , data = f1['/halo_properties/Mpeak'].value[selection])
        ds.attrs['units'] = r'$h^{-1} M_\odot$'
        ds.attrs['long_name'] = r'$M_{peak}$'
        # Derived galaxy / AGN quantities from the emerge model.
        moster_2013_data = f.create_group('moster_2013_data')
        ds = moster_2013_data.create_dataset('stellar_mass', data = f1['/moster_2013_data/stellar_mass'].value[selection])
        ds.attrs['units'] = r'$ M_\odot$'
        ds.attrs['long_name'] = 'stellar mass'
        agn_properties = f.create_group('agn_properties')
        ds = agn_properties.create_dataset('log_lambda_sar', data = f1['/agn_properties/log_lambda_sar'].value[selection])
        ds.attrs['units'] = r'log lambda SAR'
        ds.attrs['long_name'] = 'log lambda SAR'
        ds = agn_properties.create_dataset('agn_activity', data = f1['/agn_properties/agn_activity'].value[selection])
        emerge_data = f.create_group('emerge_data')
        ds = emerge_data.create_dataset('dMdt', data = f1['/emerge_data/dMdt'].value[selection])
        ds.attrs['units'] = r'$ M_\odot/yr$'
        ds.attrs['long_name'] = 'halo growth rate'
        ds = emerge_data.create_dataset('mvir_dot', data = f1['/emerge_data/mvir_dot'].value[selection] )
        ds.attrs['units'] = r'$ M_\odot/yr$'
        ds.attrs['long_name'] = 'mvir variation with respect to last snapshot'
        ds = emerge_data.create_dataset('rvir_dot', data = f1['/emerge_data/rvir_dot'].value[selection] )
        ds.attrs['units'] = r'$ kpc /yr $'
        ds.attrs['long_name'] = 'rvir variation with respect to last snapshot'
        # 4MOST cosmology target-selection flags (boolean per halo).
        c4 = f.create_group('cosmo_4most')
        ds = c4.create_dataset('is_BG_lz', data = f1['cosmo_4most/is_BG_lz'].value[selection])
        ds = c4.create_dataset('is_BG_hz', data = f1['cosmo_4most/is_BG_hz'].value[selection])
        ds = c4.create_dataset('is_ELG', data = f1['cosmo_4most/is_ELG'].value[selection])
        ds = c4.create_dataset('is_QSO', data = f1['cosmo_4most/is_QSO'].value[selection])
        ds = c4.create_dataset('is_Lya', data = f1['cosmo_4most/is_Lya'].value[selection])
        f.close()
    f1.close()

# Write the shell for the snapshot index given on the command line.
copylc_data(ii)
|
nilq/baby-python
|
python
|
# This module is avaible both in the Python and Transcrypt environments
# It is included in-between the __core__ and the __builtin__ module, so the latter can adapt __envir__
# In Transcrypt, __base__ is available inline, it isn't nested and cannot be imported in the normal way
class __Envir__:
    """Describes the runtime environment (interpreter vs. transpiled JS).

    Kept deliberately simple: this module is compiled by Transcrypt, which
    supports a restricted Python subset.  __builtin__ adapts its behavior
    based on these fields.
    """
    def __init__ (self):
        self.interpreter_name = 'python'        # running under CPython here
        self.transpiler_name = 'transcrypt'     # toolchain identifier
        self.transpiler_version = '3.6.92'      # must match the installed Transcrypt
        self.target_subdir = '__javascript__'   # where transpiled output lands

# Singleton instance consumed by __builtin__.
__envir__ = __Envir__ ()
|
nilq/baby-python
|
python
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# Locate and load the compiled SWIG extension (_param_X86System), using
# whichever import machinery this Python version provides.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Prefer a package-relative import; fall back to a top-level one.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_param_X86System')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_param_X86System')
    _param_X86System = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Python 2.6 path: use the (long-deprecated) imp module to find the
        # binary module sitting next to this file.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_param_X86System', [dirname(__file__)])
        except ImportError:
            import _param_X86System
            return _param_X86System
        try:
            _mod = imp.load_module('_param_X86System', fp, pathname, description)
        finally:
            # imp.find_module may leave an open file handle; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _param_X86System = swig_import_helper()
    del swig_import_helper
else:
    import _param_X86System
del _swig_python_version_info
# Compatibility shims for old interpreters.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.

# Unified builtins module name across Python 2 and 3.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute *name* on a SWIG proxy object.

    Routes through the generated C-level setter table when possible; with
    static=1 (the default), assigning an unknown attribute raises
    AttributeError instead of silently adding a Python-side attribute.
    """
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        # Only accept a real SWIG pointer object for the 'this' slot.
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    # Use the SWIG-generated setter when one exists for this attribute.
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: permits adding new Python-side attributes.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Fetch attribute *name* via the SWIG-generated getter table,
    raising AttributeError when no getter exists."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # Include the underlying C++ pointer's repr when available; degrade
    # gracefully if 'this' is missing or broken.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
    """Wrap a class's __setattr__ so only pre-existing attributes (plus the
    SWIG-internal 'this'/'thisown' slots) can be assigned."""
    def set_attr(self, name, value):
        if (name == "thisown"):
            return self.this.own(value)
        if hasattr(self, name) or (name == "this"):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import m5.internal.param_X86ACPIRSDP
import m5.internal.param_X86ACPIRSDT
import m5.internal.X86ACPISysDescTable_vector
import m5.internal.param_X86ACPISysDescTable
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
import m5.internal.param_X86ACPIXSDT
import m5.internal.param_X86IntelMPFloatingPointer
import m5.internal.param_X86IntelMPConfigTable
import m5.internal.X86IntelMPBaseConfigEntry_vector
import m5.internal.param_X86IntelMPBaseConfigEntry
import m5.internal.X86IntelMPExtConfigEntry_vector
import m5.internal.param_X86IntelMPExtConfigEntry
import m5.internal.param_X86SMBiosSMBiosTable
import m5.internal.X86SMBiosSMBiosStructure_vector
import m5.internal.param_X86SMBiosSMBiosStructure
import m5.internal.param_System
import m5.internal.enum_MemoryMode
import m5.internal.AddrRange_vector
import m5.internal.AbstractMemory_vector
import m5.internal.param_AbstractMemory
import m5.internal.param_MemObject
import m5.internal.param_ClockedObject
import m5.internal.param_ClockDomain
class X86System(m5.internal.param_System.System):
    """SWIG proxy for the C++ X86System SimObject (abstract: not directly
    constructible from Python)."""
    # 'thisown' tracks whether Python owns (and should delete) the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

# Register the proxy class with the C extension's type table.
X86System_swigregister = _param_X86System.X86System_swigregister
X86System_swigregister(X86System)
class X86SystemParams(m5.internal.param_System.SystemParams):
    """SWIG proxy for the X86SystemParams parameter struct."""
    # 'thisown' tracks whether Python owns (and should delete) the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def create(self):
        # Instantiate the C++ X86System simulation object from these params.
        return _param_X86System.X86SystemParams_create(self)

    # Parameter fields exposed as properties backed by C-level get/set pairs.
    acpi_description_table_pointer = _swig_property(_param_X86System.X86SystemParams_acpi_description_table_pointer_get, _param_X86System.X86SystemParams_acpi_description_table_pointer_set)
    intel_mp_pointer = _swig_property(_param_X86System.X86SystemParams_intel_mp_pointer_get, _param_X86System.X86SystemParams_intel_mp_pointer_set)
    intel_mp_table = _swig_property(_param_X86System.X86SystemParams_intel_mp_table_get, _param_X86System.X86SystemParams_intel_mp_table_set)
    smbios_table = _swig_property(_param_X86System.X86SystemParams_smbios_table_get, _param_X86System.X86SystemParams_smbios_table_set)

    def __init__(self):
        # Allocate the C++ params object and attach it to this proxy.
        this = _param_X86System.new_X86SystemParams()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _param_X86System.delete_X86SystemParams
    __del__ = lambda self: None

# Register the proxy class with the C extension's type table.
X86SystemParams_swigregister = _param_X86System.X86SystemParams_swigregister
X86SystemParams_swigregister(X86SystemParams)
|
nilq/baby-python
|
python
|
import hashlib
class HashUtils(object):
    """Static helpers that return hex digests of UTF-8 encoded strings."""

    @staticmethod
    def md5(string: str):
        """Hex MD5 digest of *string*."""
        return hashlib.md5(string.encode("utf-8")).hexdigest()

    @staticmethod
    def sha1(string: str):
        """Hex SHA-1 digest of *string*."""
        return hashlib.sha1(string.encode("utf-8")).hexdigest()

    @staticmethod
    def sha256(string: str):
        """Hex SHA-256 digest of *string*."""
        return hashlib.sha256(string.encode("utf-8")).hexdigest()
# Smoke test: print a sample SHA-1 digest when run as a script.
if __name__ == '__main__':
    print(HashUtils.sha1("wen"))
|
nilq/baby-python
|
python
|
#!/bin/python
# Solution for https://www.hackerrank.com/challenges/jumping-on-the-clouds-revisited
# Ported from Python 2 (raw_input / print-statement / lazy map made it a
# SyntaxError or TypeError under Python 3) and factored into a pure function.


def solve(n, k, c):
    """Return the energy left after jumping around the circle of clouds.

    Start on cloud 0 with 100 energy; each jump moves to (current + k) % n
    and costs 1 energy, plus 2 extra when landing on a thundercloud
    (c[i] == 1).  Stop upon returning to cloud 0.

    :param n: number of clouds (length of c).
    :param k: jump length.
    :param c: cloud types, each 0 (normal) or 1 (thundercloud).
    """
    energy = 100
    current = 0
    while True:
        current = (current + k) % n
        if c[current] == 0:
            energy -= 1
        elif c[current] == 1:
            energy -= 3
        if current == 0:  # back at the start after >= 1 jump: done
            return energy


def main():
    """Read n, k and the cloud array from stdin; print the remaining energy."""
    n, k = (int(v) for v in input().strip().split(' '))
    c = list(map(int, input().strip().split(' ')))
    print(solve(n, k, c))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import pickle
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import os
def feat_eng(df_fe):
    '''
    Selects and engineers the features used for classification, then
    grid-searches a random-forest classifier over chronological
    (time-series) splits.

    inputs: clean Data Frame (df_clean.pkl)
    outputs: Data Frame with the design-matrix columns for the model, the
    OHE matrix's row/column counts, the best estimator's OOB score, the
    grid-search wall time, and the best estimator's repr.
    '''
    # Transform facility_type and zip into coarser categorical features.
    # Frequency table of facility types, re-indexed 0..n-1 by popularity.
    tipo = pd.DataFrame(df_fe.facility_type.value_counts())
    tipo['name'] = tipo.index
    tipo.index = range(len(tipo.name))
    # NOTE(review): grupo1 = 4 most frequent types; grupo2 = specific rows
    # (5, 6, 7, 11) collapsed to 'daycare' -- confirm these positions still
    # match the intended facility types if the data distribution shifts.
    grupo1 = tipo.iloc[0:4,1].tolist()
    grupo2 = tipo.iloc[[5,6,7,11],1].tolist()
    df_fe['class'] = df_fe['facility_type'].apply(lambda x: x if x in grupo1 else ('daycare' if x in grupo2 else 'other'))
    # Map each zip code to a 'level' via the catalog CSV (unknown -> 'other').
    lev = pd.read_csv(os.path.realpath('src/utils/zip_catalog.csv'))
    lev['zip'] = lev['zip'].astype(str)
    lev.index = lev.zip
    dic = lev.level.to_dict()
    df_fe['level'] = df_fe['zip'].apply(lambda x: zips(x,lev,dic))
    # One-hot encode; rows kept in chronological order for the time splits.
    df_fe = df_fe.sort_values(by='inspection_date', ascending=True)
    df_input = pd.DataFrame(df_fe[['label_risk','label_results','level','class']])
    data_input_ohe = pd.get_dummies(df_input)
    etiqueta = data_input_ohe.label_results
    data_input_ohe= data_input_ohe.drop('label_results', axis = 1)
    variables_lista = list(data_input_ohe.columns)
    # Grid Search
    np.random.seed(20201124)
    # we use a random forest; oob_score gives a held-out estimate for free
    classifier = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=1234)
    #X_train, X_test, y_train, y_test = train_test_split(data_input_ohe, etiqueta, test_size=0.3)
    # hyperparameter grid to try
    hyper_param_grid = {'n_estimators': [300, 400], #'min_samples_leaf': [3,5,7],
                        'max_depth': [7, 10],
                        #'min_samples_split': [3],
                        'max_features': [3, 5, 6],
                        'criterion': ['gini']}
    # TimeSeriesSplit keeps splits in chronological order (no shuffling).
    tscv = TimeSeriesSplit(n_splits=3)
    # NOTE(review): each iteration overwrites X_train/X_test, so only the
    # last (largest) chronological split is used for the fit below --
    # confirm this is intentional.
    for train_index, test_index in tscv.split(data_input_ohe):
        X_train, X_test = data_input_ohe.iloc[train_index, :], data_input_ohe.iloc[test_index,:]
        y_train, y_test = etiqueta.iloc[train_index], etiqueta.iloc[test_index]
    # grid search over the hyperparameters, cross-validated with tscv
    gs = GridSearchCV(classifier,
                      hyper_param_grid,
                      scoring = 'precision', return_train_score=True,
                      cv = tscv)
    start_time = time.time()
    gs.fit(X_train, y_train)
    best_rf = gs.best_estimator_
    best_score = gs.best_estimator_.oob_score_
    # Rank features by importance from the winning forest.
    feature_importance = pd.DataFrame({'importance':\
                                       best_rf.feature_importances_,\
                                       'feature': variables_lista})
    feature_importance=feature_importance.sort_values(by="importance", ascending=False)
    #fi_out = feature_importance.head(10)
    time_exec = time.time() - start_time
    nrows_ohe = data_input_ohe.shape[0]
    ncols_ohe = data_input_ohe.shape[1]
    #print("execution time: ", time.time() - start_time)
    df_input = pd.DataFrame(df_fe[['aka_name','license','label_risk','label_results','level','class']])
    return df_input, nrows_ohe, ncols_ohe, float(best_score), time_exec, str(best_rf)
def zips(x, lev, dic):
    """Map a zip code to its catalog level, defaulting to 'other'.

    :param x: zip code (str) to look up.
    :param lev: zip-catalog DataFrame; kept for backward compatibility --
        membership in its ``zip`` column equals membership in ``dic``'s
        keys, since ``dic`` is built as ``lev.level.to_dict()`` with the
        zip codes as index.
    :param dic: mapping zip -> level.
    :return: the level for *x*, or 'other' when the zip is unknown.
    """
    # Single O(1) dict lookup; the original rebuilt lev.zip.to_list() and
    # scanned it (O(n)) on every call from a DataFrame.apply.
    return dic.get(x, 'other')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: transaction/v4/transaction_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Module-level symbol table the generated descriptors register into.
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
from common.v3 import model_pb2 as common_dot_v3_dot_model__pb2
from common.v4 import model_pb2 as common_dot_v4_dot_model__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='transaction/v4/transaction_service.proto',
package='kin.agora.transaction.v4',
syntax='proto3',
serialized_options=b'\n org.kin.agora.gen.transaction.v4ZEgithub.com/kinecosystem/agora-api/genproto/transaction/v4;transaction\242\002\020APBTransactionV4',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(transaction/v4/transaction_service.proto\x12\x18kin.agora.transaction.v4\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\x1a\x15\x63ommon/v3/model.proto\x1a\x15\x63ommon/v4/model.proto\"\x19\n\x17GetServiceConfigRequest\"\xe2\x01\n\x18GetServiceConfigResponse\x12@\n\x12subsidizer_account\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountId\x12\x45\n\rtoken_program\x18\x02 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12=\n\x05token\x18\x03 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\"\x1d\n\x1bGetMinimumKinVersionRequest\"/\n\x1cGetMinimumKinVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\r\"\x1b\n\x19GetRecentBlockhashRequest\"Y\n\x1aGetRecentBlockhashResponse\x12;\n\tblockhash\x18\x01 \x01(\x0b\x32\x1e.kin.agora.common.v4.BlockhashB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\"8\n(GetMinimumBalanceForRentExemptionRequest\x12\x0c\n\x04size\x18\x01 \x01(\x04\"=\n)GetMinimumBalanceForRentExemptionResponse\x12\x10\n\x08lamports\x18\x01 \x01(\x04\"\xf3\x01\n\x11GetHistoryRequest\x12\x42\n\naccount_id\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x30\n\x06\x63ursor\x18\x02 \x01(\x0b\x32 .kin.agora.transaction.v4.Cursor\x12H\n\tdirection\x18\x03 \x01(\x0e\x32\x35.kin.agora.transaction.v4.GetHistoryRequest.Direction\"\x1e\n\tDirection\x12\x07\n\x03\x41SC\x10\x00\x12\x08\n\x04\x44\x45SC\x10\x01\"\xbd\x01\n\x12GetHistoryResponse\x12\x43\n\x06result\x18\x01 \x01(\x0e\x32\x33.kin.agora.transaction.v4.GetHistoryResponse.Result\x12\x41\n\x05items\x18\x02 \x03(\x0b\x32%.kin.agora.transaction.v4.HistoryItemB\x0b\xfa\x42\x08\x92\x01\x05\x08\x00\x10\x80\x01\"\x1f\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\"\x91\x01\n\x16SignTransactionRequest\x12?\n\x0btransaction\x18\x01 \x01(\x0b\x32 
.kin.agora.common.v4.TransactionB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x36\n\x0cinvoice_list\x18\x02 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\"\x8f\x02\n\x17SignTransactionResponse\x12H\n\x06result\x18\x01 \x01(\x0e\x32\x38.kin.agora.transaction.v4.SignTransactionResponse.Result\x12<\n\tsignature\x18\x02 \x01(\x0b\x32).kin.agora.common.v4.TransactionSignature\x12\x39\n\x0einvoice_errors\x18\x04 \x03(\x0b\x32!.kin.agora.common.v3.InvoiceError\"1\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08REJECTED\x10\x03\x12\x11\n\rINVOICE_ERROR\x10\x04\"\x83\x02\n\x18SubmitTransactionRequest\x12?\n\x0btransaction\x18\x01 \x01(\x0b\x32 .kin.agora.common.v4.TransactionB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x36\n\x0cinvoice_list\x18\x02 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\x12\x33\n\ncommitment\x18\x03 \x01(\x0e\x32\x1f.kin.agora.common.v4.Commitment\x12\x1a\n\tdedupe_id\x18\x04 \x01(\x0c\x42\x07\xfa\x42\x04z\x02\x18@\x12\x1d\n\x15send_simulation_event\x18\x05 \x01(\x08\"\x8c\x03\n\x19SubmitTransactionResponse\x12J\n\x06result\x18\x01 \x01(\x0e\x32:.kin.agora.transaction.v4.SubmitTransactionResponse.Result\x12<\n\tsignature\x18\x02 \x01(\x0b\x32).kin.agora.common.v4.TransactionSignature\x12@\n\x11transaction_error\x18\x03 \x01(\x0b\x32%.kin.agora.common.v4.TransactionError\x12\x39\n\x0einvoice_errors\x18\x04 \x03(\x0b\x32!.kin.agora.common.v3.InvoiceError\"h\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x15\n\x11\x41LREADY_SUBMITTED\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0c\n\x08REJECTED\x10\x03\x12\x11\n\rINVOICE_ERROR\x10\x04\x12\x12\n\x0ePAYER_REQUIRED\x10\x05\"\x92\x01\n\x15GetTransactionRequest\x12\x44\n\x0etransaction_id\x18\x01 \x01(\x0b\x32\".kin.agora.common.v4.TransactionIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x33\n\ncommitment\x18\x02 \x01(\x0e\x32\x1f.kin.agora.common.v4.Commitment\"\xf9\x01\n\x16GetTransactionResponse\x12\x45\n\x05state\x18\x01 \x01(\x0e\x32\x36.kin.agora.transaction.v4.GetTransactionResponse.State\x12\x10\n\x04slot\x18\x02 
\x01(\x04\x42\x02\x30\x01\x12\x15\n\rconfirmations\x18\x03 \x01(\r\x12\x33\n\x04item\x18\x04 \x01(\x0b\x32%.kin.agora.transaction.v4.HistoryItem\":\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\n\n\x06\x46\x41ILED\x10\x02\x12\x0b\n\x07PENDING\x10\x03\"\xc1\x05\n\x0bHistoryItem\x12\x44\n\x0etransaction_id\x18\x01 \x01(\x0b\x32\".kin.agora.common.v4.TransactionIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x30\n\x06\x63ursor\x18\x02 \x01(\x0b\x32 .kin.agora.transaction.v4.Cursor\x12>\n\x12solana_transaction\x18\x03 \x01(\x0b\x32 .kin.agora.common.v4.TransactionH\x00\x12\x46\n\x13stellar_transaction\x18\x04 \x01(\x0b\x32\'.kin.agora.common.v4.StellarTransactionH\x00\x12@\n\x11transaction_error\x18\x05 \x01(\x0b\x32%.kin.agora.common.v4.TransactionError\x12?\n\x08payments\x18\x06 \x03(\x0b\x32-.kin.agora.transaction.v4.HistoryItem.Payment\x12\x36\n\x0cinvoice_list\x18\x07 \x01(\x0b\x32 .kin.agora.common.v3.InvoiceList\x12\x34\n\x10transaction_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xad\x01\n\x07Payment\x12>\n\x06source\x18\x01 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x43\n\x0b\x64\x65stination\x18\x02 \x01(\x0b\x32$.kin.agora.common.v4.SolanaAccountIdB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x03\x12\r\n\x05index\x18\x04 \x01(\rB\x11\n\x0fraw_transaction\"#\n\x06\x43ursor\x12\x19\n\x05value\x18\x01 
\x01(\x0c\x42\n\xfa\x42\x07z\x05\x10\x01\x18\x80\x01\x32\x94\x08\n\x0bTransaction\x12y\n\x10GetServiceConfig\x12\x31.kin.agora.transaction.v4.GetServiceConfigRequest\x1a\x32.kin.agora.transaction.v4.GetServiceConfigResponse\x12\x85\x01\n\x14GetMinimumKinVersion\x12\x35.kin.agora.transaction.v4.GetMinimumKinVersionRequest\x1a\x36.kin.agora.transaction.v4.GetMinimumKinVersionResponse\x12\x7f\n\x12GetRecentBlockhash\x12\x33.kin.agora.transaction.v4.GetRecentBlockhashRequest\x1a\x34.kin.agora.transaction.v4.GetRecentBlockhashResponse\x12\xac\x01\n!GetMinimumBalanceForRentExemption\x12\x42.kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest\x1a\x43.kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse\x12g\n\nGetHistory\x12+.kin.agora.transaction.v4.GetHistoryRequest\x1a,.kin.agora.transaction.v4.GetHistoryResponse\x12v\n\x0fSignTransaction\x12\x30.kin.agora.transaction.v4.SignTransactionRequest\x1a\x31.kin.agora.transaction.v4.SignTransactionResponse\x12|\n\x11SubmitTransaction\x12\x32.kin.agora.transaction.v4.SubmitTransactionRequest\x1a\x33.kin.agora.transaction.v4.SubmitTransactionResponse\x12s\n\x0eGetTransaction\x12/.kin.agora.transaction.v4.GetTransactionRequest\x1a\x30.kin.agora.transaction.v4.GetTransactionResponseB|\n org.kin.agora.gen.transaction.v4ZEgithub.com/kinecosystem/agora-api/genproto/transaction/v4;transaction\xa2\x02\x10\x41PBTransactionV4b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,common_dot_v3_dot_model__pb2.DESCRIPTOR,common_dot_v4_dot_model__pb2.DESCRIPTOR,])
# --- Generated enum descriptors for transaction/v4/transaction_service.proto.
# NOTE(review): protoc output ("DO NOT EDIT") — regenerate from the .proto
# source instead of editing these by hand.

# GetHistoryRequest.Direction: ASC(0) / DESC(1).
_GETHISTORYREQUEST_DIRECTION = _descriptor.EnumDescriptor(
  name='Direction',
  full_name='kin.agora.transaction.v4.GetHistoryRequest.Direction',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ASC', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='DESC', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=965,
  serialized_end=995,
)
_sym_db.RegisterEnumDescriptor(_GETHISTORYREQUEST_DIRECTION)

# GetHistoryResponse.Result: OK(0) / NOT_FOUND(1).
_GETHISTORYRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='kin.agora.transaction.v4.GetHistoryResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='NOT_FOUND', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1156,
  serialized_end=1187,
)
_sym_db.RegisterEnumDescriptor(_GETHISTORYRESPONSE_RESULT)

# SignTransactionResponse.Result: OK(0) / REJECTED(3) / INVOICE_ERROR(4)
# (wire numbers are sparse by design — they mirror SubmitTransactionResponse).
_SIGNTRANSACTIONRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='kin.agora.transaction.v4.SignTransactionResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='REJECTED', index=1, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='INVOICE_ERROR', index=2, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=1560,
  serialized_end=1609,
)
_sym_db.RegisterEnumDescriptor(_SIGNTRANSACTIONRESPONSE_RESULT)

# SubmitTransactionResponse.Result: OK(0) / ALREADY_SUBMITTED(1) / FAILED(2)
# / REJECTED(3) / INVOICE_ERROR(4) / PAYER_REQUIRED(5).
_SUBMITTRANSACTIONRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='kin.agora.transaction.v4.SubmitTransactionResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ALREADY_SUBMITTED', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FAILED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='REJECTED', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='INVOICE_ERROR', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PAYER_REQUIRED', index=5, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2166,
  serialized_end=2270,
)
_sym_db.RegisterEnumDescriptor(_SUBMITTRANSACTIONRESPONSE_RESULT)

# GetTransactionResponse.State: UNKNOWN(0) / SUCCESS(1) / FAILED(2) / PENDING(3).
_GETTRANSACTIONRESPONSE_STATE = _descriptor.EnumDescriptor(
  name='State',
  full_name='kin.agora.transaction.v4.GetTransactionResponse.State',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='FAILED', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PENDING', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2613,
  serialized_end=2671,
)
_sym_db.RegisterEnumDescriptor(_GETTRANSACTIONRESPONSE_STATE)
# --- Generated message descriptors (protoc output, "DO NOT EDIT").
# The b'\372B...' serialized_options blobs are validate.proto (PGV) field
# rules carried over from the .proto source.

# GetServiceConfigRequest: empty request message.
_GETSERVICECONFIGREQUEST = _descriptor.Descriptor(
  name='GetServiceConfigRequest',
  full_name='kin.agora.transaction.v4.GetServiceConfigRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=174,
  serialized_end=199,
)

# GetServiceConfigResponse: subsidizer_account(1), token_program(2), token(3).
_GETSERVICECONFIGRESPONSE = _descriptor.Descriptor(
  name='GetServiceConfigResponse',
  full_name='kin.agora.transaction.v4.GetServiceConfigResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='subsidizer_account', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.subsidizer_account', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='token_program', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.token_program', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='token', full_name='kin.agora.transaction.v4.GetServiceConfigResponse.token', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=202,
  serialized_end=428,
)

# GetMinimumKinVersionRequest: empty request message.
_GETMINIMUMKINVERSIONREQUEST = _descriptor.Descriptor(
  name='GetMinimumKinVersionRequest',
  full_name='kin.agora.transaction.v4.GetMinimumKinVersionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=430,
  serialized_end=459,
)

# GetMinimumKinVersionResponse: version(1, uint32).
_GETMINIMUMKINVERSIONRESPONSE = _descriptor.Descriptor(
  name='GetMinimumKinVersionResponse',
  full_name='kin.agora.transaction.v4.GetMinimumKinVersionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='kin.agora.transaction.v4.GetMinimumKinVersionResponse.version', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=461,
  serialized_end=508,
)

# GetRecentBlockhashRequest: empty request message.
_GETRECENTBLOCKHASHREQUEST = _descriptor.Descriptor(
  name='GetRecentBlockhashRequest',
  full_name='kin.agora.transaction.v4.GetRecentBlockhashRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=510,
  serialized_end=537,
)

# GetRecentBlockhashResponse: blockhash(1, message, PGV-required).
_GETRECENTBLOCKHASHRESPONSE = _descriptor.Descriptor(
  name='GetRecentBlockhashResponse',
  full_name='kin.agora.transaction.v4.GetRecentBlockhashResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='blockhash', full_name='kin.agora.transaction.v4.GetRecentBlockhashResponse.blockhash', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=539,
  serialized_end=628,
)

# GetMinimumBalanceForRentExemptionRequest: size(1, uint64).
_GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST = _descriptor.Descriptor(
  name='GetMinimumBalanceForRentExemptionRequest',
  full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='size', full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest.size', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=630,
  serialized_end=686,
)

# GetMinimumBalanceForRentExemptionResponse: lamports(1, uint64).
_GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE = _descriptor.Descriptor(
  name='GetMinimumBalanceForRentExemptionResponse',
  full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='lamports', full_name='kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse.lamports', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=688,
  serialized_end=749,
)

# GetHistoryRequest: account_id(1), cursor(2), direction(3, Direction enum).
_GETHISTORYREQUEST = _descriptor.Descriptor(
  name='GetHistoryRequest',
  full_name='kin.agora.transaction.v4.GetHistoryRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='account_id', full_name='kin.agora.transaction.v4.GetHistoryRequest.account_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='cursor', full_name='kin.agora.transaction.v4.GetHistoryRequest.cursor', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='direction', full_name='kin.agora.transaction.v4.GetHistoryRequest.direction', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETHISTORYREQUEST_DIRECTION,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=752,
  serialized_end=995,
)

# GetHistoryResponse: result(1, Result enum), items(2, repeated HistoryItem).
_GETHISTORYRESPONSE = _descriptor.Descriptor(
  name='GetHistoryResponse',
  full_name='kin.agora.transaction.v4.GetHistoryResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='kin.agora.transaction.v4.GetHistoryResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='items', full_name='kin.agora.transaction.v4.GetHistoryResponse.items', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\010\222\001\005\010\000\020\200\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETHISTORYRESPONSE_RESULT,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=998,
  serialized_end=1187,
)

# SignTransactionRequest: transaction(1, PGV-required), invoice_list(2).
_SIGNTRANSACTIONREQUEST = _descriptor.Descriptor(
  name='SignTransactionRequest',
  full_name='kin.agora.transaction.v4.SignTransactionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='transaction', full_name='kin.agora.transaction.v4.SignTransactionRequest.transaction', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='invoice_list', full_name='kin.agora.transaction.v4.SignTransactionRequest.invoice_list', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1190,
  serialized_end=1335,
)

# SignTransactionResponse: result(1), signature(2), invoice_errors(4, repeated).
_SIGNTRANSACTIONRESPONSE = _descriptor.Descriptor(
  name='SignTransactionResponse',
  full_name='kin.agora.transaction.v4.SignTransactionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='kin.agora.transaction.v4.SignTransactionResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='signature', full_name='kin.agora.transaction.v4.SignTransactionResponse.signature', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='invoice_errors', full_name='kin.agora.transaction.v4.SignTransactionResponse.invoice_errors', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SIGNTRANSACTIONRESPONSE_RESULT,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1338,
  serialized_end=1609,
)

# SubmitTransactionRequest: transaction(1), invoice_list(2), commitment(3),
# dedupe_id(4, bytes, PGV max-len), send_simulation_event(5, bool).
_SUBMITTRANSACTIONREQUEST = _descriptor.Descriptor(
  name='SubmitTransactionRequest',
  full_name='kin.agora.transaction.v4.SubmitTransactionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='transaction', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.transaction', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='invoice_list', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.invoice_list', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='commitment', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.commitment', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='dedupe_id', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.dedupe_id', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\004z\002\030@', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='send_simulation_event', full_name='kin.agora.transaction.v4.SubmitTransactionRequest.send_simulation_event', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1612,
  serialized_end=1871,
)

# SubmitTransactionResponse: result(1), signature(2), transaction_error(3),
# invoice_errors(4, repeated).
_SUBMITTRANSACTIONRESPONSE = _descriptor.Descriptor(
  name='SubmitTransactionResponse',
  full_name='kin.agora.transaction.v4.SubmitTransactionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='signature', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.signature', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='transaction_error', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.transaction_error', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='invoice_errors', full_name='kin.agora.transaction.v4.SubmitTransactionResponse.invoice_errors', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SUBMITTRANSACTIONRESPONSE_RESULT,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1874,
  serialized_end=2270,
)

# GetTransactionRequest: transaction_id(1, PGV-required), commitment(2, enum).
_GETTRANSACTIONREQUEST = _descriptor.Descriptor(
  name='GetTransactionRequest',
  full_name='kin.agora.transaction.v4.GetTransactionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='transaction_id', full_name='kin.agora.transaction.v4.GetTransactionRequest.transaction_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='commitment', full_name='kin.agora.transaction.v4.GetTransactionRequest.commitment', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2273,
  serialized_end=2419,
)

# GetTransactionResponse: state(1, State enum), slot(2, uint64 jstype option),
# confirmations(3, uint32), item(4, HistoryItem).
_GETTRANSACTIONRESPONSE = _descriptor.Descriptor(
  name='GetTransactionResponse',
  full_name='kin.agora.transaction.v4.GetTransactionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='state', full_name='kin.agora.transaction.v4.GetTransactionResponse.state', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='slot', full_name='kin.agora.transaction.v4.GetTransactionResponse.slot', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'0\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='confirmations', full_name='kin.agora.transaction.v4.GetTransactionResponse.confirmations', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='item', full_name='kin.agora.transaction.v4.GetTransactionResponse.item', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GETTRANSACTIONRESPONSE_STATE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2422,
  serialized_end=2671,
)
# Descriptor for the HistoryItem.Payment nested message
# (protoc-generated; any hand edits are lost when the .proto is recompiled).
_HISTORYITEM_PAYMENT = _descriptor.Descriptor(
  name='Payment',
  full_name='kin.agora.transaction.v4.HistoryItem.Payment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='source', full_name='kin.agora.transaction.v4.HistoryItem.Payment.source', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='destination', full_name='kin.agora.transaction.v4.HistoryItem.Payment.destination', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='amount', full_name='kin.agora.transaction.v4.HistoryItem.Payment.amount', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='index', full_name='kin.agora.transaction.v4.HistoryItem.Payment.index', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3187,
  serialized_end=3360,
)
# Descriptor for the HistoryItem message; carries the 'raw_transaction' oneof
# (solana_transaction | stellar_transaction) and nests the Payment message.
# (protoc-generated; any hand edits are lost when the .proto is recompiled).
_HISTORYITEM = _descriptor.Descriptor(
  name='HistoryItem',
  full_name='kin.agora.transaction.v4.HistoryItem',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='transaction_id', full_name='kin.agora.transaction.v4.HistoryItem.transaction_id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='cursor', full_name='kin.agora.transaction.v4.HistoryItem.cursor', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='solana_transaction', full_name='kin.agora.transaction.v4.HistoryItem.solana_transaction', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='stellar_transaction', full_name='kin.agora.transaction.v4.HistoryItem.stellar_transaction', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='transaction_error', full_name='kin.agora.transaction.v4.HistoryItem.transaction_error', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='payments', full_name='kin.agora.transaction.v4.HistoryItem.payments', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='invoice_list', full_name='kin.agora.transaction.v4.HistoryItem.invoice_list', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='transaction_time', full_name='kin.agora.transaction.v4.HistoryItem.transaction_time', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_HISTORYITEM_PAYMENT, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='raw_transaction', full_name='kin.agora.transaction.v4.HistoryItem.raw_transaction',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  serialized_start=2674,
  serialized_end=3379,
)
# Descriptor for the Cursor message (a single opaque bytes value used for
# history pagination). protoc-generated; hand edits are lost on regeneration.
_CURSOR = _descriptor.Descriptor(
  name='Cursor',
  full_name='kin.agora.transaction.v4.Cursor',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='kin.agora.transaction.v4.Cursor.value', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\007z\005\020\001\030\200\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3381,
  serialized_end=3416,
)
# Resolve cross-references between the descriptors declared above: attach
# message/enum types to fields, set containing types, and link the members of
# the HistoryItem 'raw_transaction' oneof. protoc-generated boilerplate.
_GETSERVICECONFIGRESPONSE.fields_by_name['subsidizer_account'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETSERVICECONFIGRESPONSE.fields_by_name['token_program'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETSERVICECONFIGRESPONSE.fields_by_name['token'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETRECENTBLOCKHASHRESPONSE.fields_by_name['blockhash'].message_type = common_dot_v4_dot_model__pb2._BLOCKHASH
_GETHISTORYREQUEST.fields_by_name['account_id'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_GETHISTORYREQUEST.fields_by_name['cursor'].message_type = _CURSOR
_GETHISTORYREQUEST.fields_by_name['direction'].enum_type = _GETHISTORYREQUEST_DIRECTION
_GETHISTORYREQUEST_DIRECTION.containing_type = _GETHISTORYREQUEST
_GETHISTORYRESPONSE.fields_by_name['result'].enum_type = _GETHISTORYRESPONSE_RESULT
_GETHISTORYRESPONSE.fields_by_name['items'].message_type = _HISTORYITEM
_GETHISTORYRESPONSE_RESULT.containing_type = _GETHISTORYRESPONSE
_SIGNTRANSACTIONREQUEST.fields_by_name['transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_SIGNTRANSACTIONREQUEST.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_SIGNTRANSACTIONRESPONSE.fields_by_name['result'].enum_type = _SIGNTRANSACTIONRESPONSE_RESULT
_SIGNTRANSACTIONRESPONSE.fields_by_name['signature'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONSIGNATURE
_SIGNTRANSACTIONRESPONSE.fields_by_name['invoice_errors'].message_type = common_dot_v3_dot_model__pb2._INVOICEERROR
_SIGNTRANSACTIONRESPONSE_RESULT.containing_type = _SIGNTRANSACTIONRESPONSE
_SUBMITTRANSACTIONREQUEST.fields_by_name['transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_SUBMITTRANSACTIONREQUEST.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_SUBMITTRANSACTIONREQUEST.fields_by_name['commitment'].enum_type = common_dot_v4_dot_model__pb2._COMMITMENT
_SUBMITTRANSACTIONRESPONSE.fields_by_name['result'].enum_type = _SUBMITTRANSACTIONRESPONSE_RESULT
_SUBMITTRANSACTIONRESPONSE.fields_by_name['signature'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONSIGNATURE
_SUBMITTRANSACTIONRESPONSE.fields_by_name['transaction_error'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONERROR
_SUBMITTRANSACTIONRESPONSE.fields_by_name['invoice_errors'].message_type = common_dot_v3_dot_model__pb2._INVOICEERROR
_SUBMITTRANSACTIONRESPONSE_RESULT.containing_type = _SUBMITTRANSACTIONRESPONSE
_GETTRANSACTIONREQUEST.fields_by_name['transaction_id'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONID
_GETTRANSACTIONREQUEST.fields_by_name['commitment'].enum_type = common_dot_v4_dot_model__pb2._COMMITMENT
_GETTRANSACTIONRESPONSE.fields_by_name['state'].enum_type = _GETTRANSACTIONRESPONSE_STATE
_GETTRANSACTIONRESPONSE.fields_by_name['item'].message_type = _HISTORYITEM
_GETTRANSACTIONRESPONSE_STATE.containing_type = _GETTRANSACTIONRESPONSE
_HISTORYITEM_PAYMENT.fields_by_name['source'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_HISTORYITEM_PAYMENT.fields_by_name['destination'].message_type = common_dot_v4_dot_model__pb2._SOLANAACCOUNTID
_HISTORYITEM_PAYMENT.containing_type = _HISTORYITEM
_HISTORYITEM.fields_by_name['transaction_id'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONID
_HISTORYITEM.fields_by_name['cursor'].message_type = _CURSOR
_HISTORYITEM.fields_by_name['solana_transaction'].message_type = common_dot_v4_dot_model__pb2._TRANSACTION
_HISTORYITEM.fields_by_name['stellar_transaction'].message_type = common_dot_v4_dot_model__pb2._STELLARTRANSACTION
_HISTORYITEM.fields_by_name['transaction_error'].message_type = common_dot_v4_dot_model__pb2._TRANSACTIONERROR
_HISTORYITEM.fields_by_name['payments'].message_type = _HISTORYITEM_PAYMENT
_HISTORYITEM.fields_by_name['invoice_list'].message_type = common_dot_v3_dot_model__pb2._INVOICELIST
_HISTORYITEM.fields_by_name['transaction_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_HISTORYITEM.oneofs_by_name['raw_transaction'].fields.append(
  _HISTORYITEM.fields_by_name['solana_transaction'])
_HISTORYITEM.fields_by_name['solana_transaction'].containing_oneof = _HISTORYITEM.oneofs_by_name['raw_transaction']
_HISTORYITEM.oneofs_by_name['raw_transaction'].fields.append(
  _HISTORYITEM.fields_by_name['stellar_transaction'])
_HISTORYITEM.fields_by_name['stellar_transaction'].containing_oneof = _HISTORYITEM.oneofs_by_name['raw_transaction']
# Register every top-level message descriptor on the file descriptor, then
# register the file with the default symbol database. protoc-generated.
DESCRIPTOR.message_types_by_name['GetServiceConfigRequest'] = _GETSERVICECONFIGREQUEST
DESCRIPTOR.message_types_by_name['GetServiceConfigResponse'] = _GETSERVICECONFIGRESPONSE
DESCRIPTOR.message_types_by_name['GetMinimumKinVersionRequest'] = _GETMINIMUMKINVERSIONREQUEST
DESCRIPTOR.message_types_by_name['GetMinimumKinVersionResponse'] = _GETMINIMUMKINVERSIONRESPONSE
DESCRIPTOR.message_types_by_name['GetRecentBlockhashRequest'] = _GETRECENTBLOCKHASHREQUEST
DESCRIPTOR.message_types_by_name['GetRecentBlockhashResponse'] = _GETRECENTBLOCKHASHRESPONSE
DESCRIPTOR.message_types_by_name['GetMinimumBalanceForRentExemptionRequest'] = _GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST
DESCRIPTOR.message_types_by_name['GetMinimumBalanceForRentExemptionResponse'] = _GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetHistoryRequest'] = _GETHISTORYREQUEST
DESCRIPTOR.message_types_by_name['GetHistoryResponse'] = _GETHISTORYRESPONSE
DESCRIPTOR.message_types_by_name['SignTransactionRequest'] = _SIGNTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SignTransactionResponse'] = _SIGNTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['SubmitTransactionRequest'] = _SUBMITTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SubmitTransactionResponse'] = _SUBMITTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetTransactionRequest'] = _GETTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['GetTransactionResponse'] = _GETTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['HistoryItem'] = _HISTORYITEM
DESCRIPTOR.message_types_by_name['Cursor'] = _CURSOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete, importable message classes from the descriptors above via
# the reflection metaclass, and register each with the symbol database.
# protoc-generated boilerplate; hand edits are lost on regeneration.
GetServiceConfigRequest = _reflection.GeneratedProtocolMessageType('GetServiceConfigRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETSERVICECONFIGREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetServiceConfigRequest)
  })
_sym_db.RegisterMessage(GetServiceConfigRequest)
GetServiceConfigResponse = _reflection.GeneratedProtocolMessageType('GetServiceConfigResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETSERVICECONFIGRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetServiceConfigResponse)
  })
_sym_db.RegisterMessage(GetServiceConfigResponse)
GetMinimumKinVersionRequest = _reflection.GeneratedProtocolMessageType('GetMinimumKinVersionRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETMINIMUMKINVERSIONREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumKinVersionRequest)
  })
_sym_db.RegisterMessage(GetMinimumKinVersionRequest)
GetMinimumKinVersionResponse = _reflection.GeneratedProtocolMessageType('GetMinimumKinVersionResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETMINIMUMKINVERSIONRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumKinVersionResponse)
  })
_sym_db.RegisterMessage(GetMinimumKinVersionResponse)
GetRecentBlockhashRequest = _reflection.GeneratedProtocolMessageType('GetRecentBlockhashRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETRECENTBLOCKHASHREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetRecentBlockhashRequest)
  })
_sym_db.RegisterMessage(GetRecentBlockhashRequest)
GetRecentBlockhashResponse = _reflection.GeneratedProtocolMessageType('GetRecentBlockhashResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETRECENTBLOCKHASHRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetRecentBlockhashResponse)
  })
_sym_db.RegisterMessage(GetRecentBlockhashResponse)
GetMinimumBalanceForRentExemptionRequest = _reflection.GeneratedProtocolMessageType('GetMinimumBalanceForRentExemptionRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionRequest)
  })
_sym_db.RegisterMessage(GetMinimumBalanceForRentExemptionRequest)
GetMinimumBalanceForRentExemptionResponse = _reflection.GeneratedProtocolMessageType('GetMinimumBalanceForRentExemptionResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetMinimumBalanceForRentExemptionResponse)
  })
_sym_db.RegisterMessage(GetMinimumBalanceForRentExemptionResponse)
GetHistoryRequest = _reflection.GeneratedProtocolMessageType('GetHistoryRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETHISTORYREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetHistoryRequest)
  })
_sym_db.RegisterMessage(GetHistoryRequest)
GetHistoryResponse = _reflection.GeneratedProtocolMessageType('GetHistoryResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETHISTORYRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetHistoryResponse)
  })
_sym_db.RegisterMessage(GetHistoryResponse)
SignTransactionRequest = _reflection.GeneratedProtocolMessageType('SignTransactionRequest', (_message.Message,), {
  'DESCRIPTOR' : _SIGNTRANSACTIONREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SignTransactionRequest)
  })
_sym_db.RegisterMessage(SignTransactionRequest)
SignTransactionResponse = _reflection.GeneratedProtocolMessageType('SignTransactionResponse', (_message.Message,), {
  'DESCRIPTOR' : _SIGNTRANSACTIONRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SignTransactionResponse)
  })
_sym_db.RegisterMessage(SignTransactionResponse)
SubmitTransactionRequest = _reflection.GeneratedProtocolMessageType('SubmitTransactionRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBMITTRANSACTIONREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SubmitTransactionRequest)
  })
_sym_db.RegisterMessage(SubmitTransactionRequest)
SubmitTransactionResponse = _reflection.GeneratedProtocolMessageType('SubmitTransactionResponse', (_message.Message,), {
  'DESCRIPTOR' : _SUBMITTRANSACTIONRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.SubmitTransactionResponse)
  })
_sym_db.RegisterMessage(SubmitTransactionResponse)
GetTransactionRequest = _reflection.GeneratedProtocolMessageType('GetTransactionRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETTRANSACTIONREQUEST,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetTransactionRequest)
  })
_sym_db.RegisterMessage(GetTransactionRequest)
GetTransactionResponse = _reflection.GeneratedProtocolMessageType('GetTransactionResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETTRANSACTIONRESPONSE,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.GetTransactionResponse)
  })
_sym_db.RegisterMessage(GetTransactionResponse)
HistoryItem = _reflection.GeneratedProtocolMessageType('HistoryItem', (_message.Message,), {
  # Payment is exposed as HistoryItem.Payment, mirroring the nested proto type.
  'Payment' : _reflection.GeneratedProtocolMessageType('Payment', (_message.Message,), {
    'DESCRIPTOR' : _HISTORYITEM_PAYMENT,
    '__module__' : 'transaction.v4.transaction_service_pb2'
    # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.HistoryItem.Payment)
    })
  ,
  'DESCRIPTOR' : _HISTORYITEM,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.HistoryItem)
  })
_sym_db.RegisterMessage(HistoryItem)
_sym_db.RegisterMessage(HistoryItem.Payment)
Cursor = _reflection.GeneratedProtocolMessageType('Cursor', (_message.Message,), {
  'DESCRIPTOR' : _CURSOR,
  '__module__' : 'transaction.v4.transaction_service_pb2'
  # @@protoc_insertion_point(class_scope:kin.agora.transaction.v4.Cursor)
  })
_sym_db.RegisterMessage(Cursor)
# Clear the eagerly-built options caches so option values are deserialized on
# demand from the serialized_options bytes set above. protoc-generated.
DESCRIPTOR._options = None
_GETSERVICECONFIGRESPONSE.fields_by_name['token_program']._options = None
_GETSERVICECONFIGRESPONSE.fields_by_name['token']._options = None
_GETRECENTBLOCKHASHRESPONSE.fields_by_name['blockhash']._options = None
_GETHISTORYREQUEST.fields_by_name['account_id']._options = None
_GETHISTORYRESPONSE.fields_by_name['items']._options = None
_SIGNTRANSACTIONREQUEST.fields_by_name['transaction']._options = None
_SUBMITTRANSACTIONREQUEST.fields_by_name['transaction']._options = None
_SUBMITTRANSACTIONREQUEST.fields_by_name['dedupe_id']._options = None
_GETTRANSACTIONREQUEST.fields_by_name['transaction_id']._options = None
_GETTRANSACTIONRESPONSE.fields_by_name['slot']._options = None
_HISTORYITEM_PAYMENT.fields_by_name['source']._options = None
_HISTORYITEM_PAYMENT.fields_by_name['destination']._options = None
_HISTORYITEM.fields_by_name['transaction_id']._options = None
_CURSOR.fields_by_name['value']._options = None
# Service descriptor for the kin.agora.transaction.v4.Transaction gRPC service
# (eight unary RPC methods). protoc-generated; edits are lost on regeneration.
_TRANSACTION = _descriptor.ServiceDescriptor(
  name='Transaction',
  full_name='kin.agora.transaction.v4.Transaction',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=3419,
  serialized_end=4463,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetServiceConfig',
    full_name='kin.agora.transaction.v4.Transaction.GetServiceConfig',
    index=0,
    containing_service=None,
    input_type=_GETSERVICECONFIGREQUEST,
    output_type=_GETSERVICECONFIGRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetMinimumKinVersion',
    full_name='kin.agora.transaction.v4.Transaction.GetMinimumKinVersion',
    index=1,
    containing_service=None,
    input_type=_GETMINIMUMKINVERSIONREQUEST,
    output_type=_GETMINIMUMKINVERSIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetRecentBlockhash',
    full_name='kin.agora.transaction.v4.Transaction.GetRecentBlockhash',
    index=2,
    containing_service=None,
    input_type=_GETRECENTBLOCKHASHREQUEST,
    output_type=_GETRECENTBLOCKHASHRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetMinimumBalanceForRentExemption',
    full_name='kin.agora.transaction.v4.Transaction.GetMinimumBalanceForRentExemption',
    index=3,
    containing_service=None,
    input_type=_GETMINIMUMBALANCEFORRENTEXEMPTIONREQUEST,
    output_type=_GETMINIMUMBALANCEFORRENTEXEMPTIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetHistory',
    full_name='kin.agora.transaction.v4.Transaction.GetHistory',
    index=4,
    containing_service=None,
    input_type=_GETHISTORYREQUEST,
    output_type=_GETHISTORYRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='SignTransaction',
    full_name='kin.agora.transaction.v4.Transaction.SignTransaction',
    index=5,
    containing_service=None,
    input_type=_SIGNTRANSACTIONREQUEST,
    output_type=_SIGNTRANSACTIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='SubmitTransaction',
    full_name='kin.agora.transaction.v4.Transaction.SubmitTransaction',
    index=6,
    containing_service=None,
    input_type=_SUBMITTRANSACTIONREQUEST,
    output_type=_SUBMITTRANSACTIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetTransaction',
    full_name='kin.agora.transaction.v4.Transaction.GetTransaction',
    index=7,
    containing_service=None,
    input_type=_GETTRANSACTIONREQUEST,
    output_type=_GETTRANSACTIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_TRANSACTION)
DESCRIPTOR.services_by_name['Transaction'] = _TRANSACTION
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict
import braintree
from airbyte_protocol import SyncMode
from base_python import AirbyteLogger
from base_singer import BaseSingerSource, SyncModeInfo
from braintree.exceptions.authentication_error import AuthenticationError
from dateutil import parser
from dateutil.relativedelta import relativedelta
class SourceBraintreeSinger(BaseSingerSource):
    """Airbyte source connector wrapping the ``tap-braintree`` Singer tap.

    The base class drives the Singer lifecycle; this subclass supplies the tap
    command, credential checking, config normalization, and the shell commands
    used for discovery and reading.
    """

    tap_cmd = "tap-braintree"
    tap_name = "BrainTree API"
    # Exception type the base class treats as an authentication failure.
    api_error = AuthenticationError
    force_full_refresh = True

    def transform_config(self, raw_config: Dict) -> Dict:
        """Return a copy of *raw_config* with ``start_date`` normalized.

        ``start_date`` is parsed (or defaulted to now), shifted forward by one
        month, and re-serialized as ``%Y-%m-%dT%H:%M:%SZ``. The input dict is
        not mutated.

        NOTE(review): the one-month *forward* shift is preserved from the
        original code but is surprising — confirm against tap-braintree's
        sync-window semantics.
        """
        config = dict(raw_config)  # copy: don't mutate the caller's config
        if "start_date" in raw_config:
            start = parser.parse(raw_config["start_date"])
        else:
            start = datetime.now()
        config["start_date"] = (start + relativedelta(months=+1)).strftime("%Y-%m-%dT%H:%M:%SZ")
        return config

    def try_connect(self, logger: AirbyteLogger, config: Dict):
        """Test provided credentials, raises self.api_error if something goes wrong"""
        client = braintree.BraintreeGateway(
            braintree.Configuration(
                environment=getattr(braintree.Environment, config["environment"]),
                merchant_id=config["merchant_id"],
                public_key=config["public_key"],
                private_key=config["private_key"],
            )
        )
        # Cheap authenticated call: search the last day of transactions just to
        # force an API round-trip; AuthenticationError propagates on bad creds.
        client.transaction.search(braintree.TransactionSearch.created_at.between(datetime.now() + relativedelta(days=-1), datetime.now()))

    def get_sync_mode_overrides(self) -> Dict[str, SyncModeInfo]:
        """Force the ``transactions`` stream to advertise incremental sync only."""
        return {"transactions": SyncModeInfo(supported_sync_modes=[SyncMode.incremental])}

    def discover_cmd(self, logger: AirbyteLogger, config_path: str) -> str:
        """Build the shell pipeline that turns tap discovery output into a catalog.

        The tap's first SCHEMA record is extracted with grep/head and reshaped
        by jq into Airbyte's ``{"streams": [...]}`` catalog format.
        """
        return (
            f"{self.tap_cmd} -c {config_path} --discover"
            + ' | grep "\\"type\\": \\"SCHEMA\\"" | head -1'
            + '| jq -c "{\\"streams\\":[{\\"stream\\": .stream, \\"schema\\": .schema}]}"'
        )

    def read_cmd(self, logger: AirbyteLogger, config_path: str, catalog_path: str, state_path: str = None) -> str:
        """Build the tap invocation for a read, passing state only when present."""
        state_option = f"--state {state_path}" if state_path else ""
        return f"{self.tap_cmd} -c {config_path} -p {catalog_path} {state_option}"
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.