id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6428142 | <reponame>wesferr/Zombicide
# Copyright (c) 2018 by <NAME>. All Rights Reserved.
from pygame import *
from pygame.locals import *
class spriteButton(sprite.Sprite):
    """Clickable image button drawn onto a pygame surface.

    BUG FIX: the original signature used Python 2 tuple-parameter
    unpacking (``def __init__(self, screen, link, (x, y), (wid, hei))``),
    which is a SyntaxError on Python 3. Positions and sizes are now
    passed as plain tuples, so call sites are unchanged.
    """

    def __init__(self, screen, link, pos, size):
        """Load the image at *link*, scale it to *size* = (wid, hei),
        store its position *pos* = (x, y) and blit it onto *screen* once.
        """
        sprite.Sprite.__init__(self)
        self.imgButton = image.load(link).convert_alpha()
        self.posButton = pos
        self.imgButton = transform.scale(self.imgButton, size)
        screen.blit(self.imgButton, self.posButton)

    def show(self, screen):
        """Redraw the button at its stored position."""
        screen.blit(self.imgButton, self.posButton)

    def collide(self, pos):
        """Return True if *pos* = (x, y) falls inside the button rect."""
        return self.imgButton.get_rect(topleft=self.posButton).collidepoint(pos)
class spriteSobrevivente(sprite.Sprite):
    """Survivor sprite: a scaled image with a fixed screen position.

    BUG FIX: the original signature used Python 2 tuple-parameter
    unpacking (``(x, y)`` / ``(w, h)`` parameters), which is a
    SyntaxError on Python 3; plain tuple parameters keep call sites
    unchanged.
    """

    def __init__(self, screen, pos, imgSobrevivente, size):
        """Load *imgSobrevivente*, scale it to *size* = (w, h) and store
        its position *pos* = (x, y). *screen* is accepted for signature
        compatibility but not drawn to here (see show()).
        """
        sprite.Sprite.__init__(self)
        self.imgSobrevivente = image.load(imgSobrevivente).convert_alpha()
        self.imgSobrevivente = transform.scale(self.imgSobrevivente, size)
        self.posSobrevivente = pos

    def show(self, screen):
        """Draw the survivor at its stored position."""
        screen.blit(self.imgSobrevivente, self.posSobrevivente)

    def collide(self, pos):
        """Return True if *pos* = (x, y) falls inside the sprite rect."""
        return self.imgSobrevivente.get_rect(
            topleft=self.posSobrevivente).collidepoint(pos)
class spriteEquip(sprite.Sprite):
    """Equipment-card sprite: a scaled card image with a black backdrop.

    BUG FIX: the original signature used Python 2 tuple-parameter
    unpacking (``(x, y)`` parameter), which is a SyntaxError on
    Python 3; a plain tuple parameter keeps call sites unchanged.
    """

    def __init__(self, screen, pos, imgCard, scale):
        """Load *imgCard* and scale the nominal 150x200 card by
        *scale*/200; *pos* = (x, y) is the card's top-left corner.
        """
        sprite.Sprite.__init__(self)
        self.imgEquip = image.load(imgCard).convert_alpha()
        self.posEquip = pos
        x, y = pos
        proporcao = 200 / scale
        # Backdrop rect is the scaled card plus a 5-px border on each side.
        self.background = (x - 5, y - 5,
                           int(150 / proporcao) + 10, int(200 / proporcao) + 10)
        self.imgEquip = transform.scale(
            self.imgEquip, (int(150 / proporcao), int(200 / proporcao)))

    def show(self, screen):
        """Draw the black backdrop, then the card on top of it."""
        draw.rect(screen, (0, 0, 0), self.background)
        screen.blit(self.imgEquip, self.posEquip)

    def collide(self, pos):
        """Return True if *pos* = (x, y) falls inside the card rect."""
        return self.imgEquip.get_rect(topleft=self.posEquip).collidepoint(pos)
| StarcoderdataPython |
3564386 | <gh_stars>0
import torch
import numpy as np
from utils.functions import evaluation
from utils.re_ranking import re_ranking, re_ranking_gpu
from model.lmbn_n_fused import LMBN_n_Fused
from dgnet.utils import get_all_data_loaders, prepare_sub_folder, write_loss, get_config, write_2images, Timer
from dgnet.trainer import DGNet_Trainer
# from model.cacenet import CACENET
# from model.rga_modules import RGA_Module
class Engine():
    """Training/evaluation engine for a person re-identification model.

    Owns the data loaders, model, loss, optimizer and scheduler, and
    drives the train/test loops, logging through *ckpt*.
    """

    def __init__(self, args, model, optimizer, scheduler, loss, loader, loader_head, ckpt):
        self.args = args
        # Full-body crops.
        self.train_loader = loader.train_loader
        # Head crops, iterated in lockstep with the body crops.
        self.head_loader = loader_head.train_loader
        self.test_loader = loader.test_loader
        self.query_loader = loader.query_loader
        self.testset = loader.galleryset
        self.queryset = loader.queryset
        self.ckpt = ckpt
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.loss = loss
        self.lr = 0.  # last learning rate that was logged
        self.device = torch.device('cpu' if args.cpu else 'cuda')
        if torch.cuda.is_available():
            self.ckpt.write_log('[INFO] GPU: ' + torch.cuda.get_device_name(0))
        self.ckpt.write_log(
            '[INFO] Starting from epoch {}'.format(self.scheduler.last_epoch + 1))

    def train(self):
        """Run one training epoch over the zipped body/head loaders."""
        epoch = self.scheduler.last_epoch
        lr = self.scheduler.get_last_lr()[0]
        if lr != self.lr:
            self.ckpt.write_log(
                '[INFO] Epoch: {}\tLearning rate: {:.2e} '.format(epoch + 1, lr))
            self.lr = lr
        self.loss.start_log()
        self.model.train()
        # zip() stops at the shorter loader, so both loaders bound the
        # number of batches per epoch.
        for batch, d in enumerate(zip(self.train_loader, self.head_loader)):
            inputs, labels = self._parse_data_for_train(d[0])
            # Head-crop batch is parsed and moved to the device but not
            # consumed yet; kept for the (currently disabled) head/fused
            # training branches.
            inputs_head, labels_head = self._parse_data_for_train(d[1])
            inputs = inputs.to(self.device)
            inputs_head = inputs_head.to(self.device)
            labels = labels.to(self.device)
            self.optimizer.zero_grad()
            # BUG FIX: the original called self.model_cace(inputs, labels),
            # but that attribute's assignment was commented out in
            # __init__, so the first batch raised AttributeError; train the
            # configured model instead.
            outputs = self.model(inputs)
            loss_1 = self.loss.compute(outputs, labels)
            loss_1.backward()
            self.optimizer.step()
            self.ckpt.write_log('\r[INFO] [{}/{}]\t{}/{}\t{}'.format(
                epoch + 1, self.args.epochs,
                batch + 1, len(self.train_loader),
                self.loss.display_loss(batch)),
                end='' if batch + 1 != len(self.train_loader) else '\n')
        self.scheduler.step()
        self.loss.end_log(len(self.train_loader))

    def test(self):
        """Extract query/gallery features, compute distances and log CMC/mAP."""
        epoch = self.scheduler.last_epoch
        self.ckpt.write_log('\n[INFO] Test:')
        self.model.eval()
        # One log row per test: [epoch, mAP, rank1, rank3, rank5, rank10].
        self.ckpt.add_log(torch.zeros(1, 6))
        with torch.no_grad():
            qf, query_ids, query_cams = self.extract_feature(
                self.query_loader, self.args)
            gf, gallery_ids, gallery_cams = self.extract_feature(
                self.test_loader, self.args)
        if self.args.re_rank:
            # k-reciprocal re-ranking on GPU (k1=20, k2=6, lambda=0.3).
            dist = re_ranking_gpu(qf, gf, 20, 6, 0.3)
        else:
            # Cosine distance (features are L2-normalized in extract_feature).
            dist = 1 - torch.mm(qf, gf.t()).cpu().numpy()
        r, m_ap = evaluation(
            dist, query_ids, gallery_ids, query_cams, gallery_cams, 50)
        self.ckpt.log[-1, 0] = epoch
        self.ckpt.log[-1, 1] = m_ap
        self.ckpt.log[-1, 2] = r[0]
        self.ckpt.log[-1, 3] = r[2]
        self.ckpt.log[-1, 4] = r[4]
        self.ckpt.log[-1, 5] = r[9]
        best = self.ckpt.log.max(0)
        self.ckpt.write_log(
            '[INFO] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} (Best: {:.4f} @epoch {})'.format(
                m_ap,
                r[0], r[2], r[4], r[9],
                best[0][1], self.ckpt.log[best[1][1], 0]
            ), refresh=True
        )
        if not self.args.test_only:
            self._save_checkpoint(epoch, r[0], self.ckpt.dir, is_best=(
                self.ckpt.log[best[1][1], 0] == epoch))
        self.ckpt.plot_map_rank(epoch)

    def fliphor(self, inputs):
        """Return a horizontally flipped copy of an N x C x H x W batch."""
        inv_idx = torch.arange(inputs.size(
            3) - 1, -1, -1).long()  # N x C x H x W
        return inputs.index_select(3, inv_idx)

    def extract_feature(self, loader, args):
        """Extract L2-normalized features (original + flipped view) for *loader*.

        Returns:
            (features, pids, camids) — a [N, D] float tensor and two
            numpy arrays of identity and camera ids.
        """
        features = torch.FloatTensor()
        pids, camids = [], []
        for d in loader:
            inputs, pid, camid = self._parse_data_for_eval(d)
            input_img = inputs.to(self.device)
            outputs = self.model(input_img)
            f1 = outputs.data.cpu()
            # Flip horizontally and sum the two embeddings for robustness.
            inputs = inputs.index_select(
                3, torch.arange(inputs.size(3) - 1, -1, -1))
            input_img = inputs.to(self.device)
            outputs = self.model(input_img)
            f2 = outputs.data.cpu()
            ff = f1 + f2
            if ff.dim() == 3:
                # Multi-branch output: normalize per branch, then flatten.
                fnorm = torch.norm(
                    ff, p=2, dim=1, keepdim=True)  # * np.sqrt(ff.shape[2])
                ff = ff.div(fnorm.expand_as(ff))
                ff = ff.view(ff.size(0), -1)
            else:
                fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
                ff = ff.div(fnorm.expand_as(ff))
            features = torch.cat((features, ff), 0)
            pids.extend(pid)
            camids.extend(camid)
        return features, np.asarray(pids), np.asarray(camids)

    def terminate(self):
        """Return True when the run should stop; runs test() in test-only mode."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.scheduler.last_epoch + 1
            return epoch > self.args.epochs

    # tools for reid datamanager data_v2
    def _parse_data_for_train(self, data):
        """Unpack a training batch into (imgs, pids); camids are unused here."""
        imgs = data[0]
        pids = data[1]
        return imgs, pids

    def _parse_data_for_eval(self, data):
        """Unpack an evaluation batch into (imgs, pids, camids)."""
        imgs = data[0]
        pids = data[1]
        camids = data[2]
        return imgs, pids, camids

    def _save_checkpoint(self, epoch, rank1, save_dir, is_best=False):
        """Persist model/optimizer state and the metric log via ckpt."""
        self.ckpt.save_checkpoint(
            {
                'state_dict': self.model.state_dict(),
                'epoch': epoch,
                'rank1': rank1,
                'optimizer': self.optimizer.state_dict(),
                'log': self.ckpt.log,
            },
            save_dir,
            is_best=is_best
        )
| StarcoderdataPython |
12849305 | # Copyright 2018 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
# This function is modified from https://github.com/Kyubyong/transformer/blob/master/modules.py
# with Apache License V2
def positional_encoding(inputs,
                        num_units,
                        scope="positional_encoding"):
    """Positional encoding as described in https://arxiv.org/abs/1706.03762.

    Args:
      inputs: A 2-d tensor with shape [B, L]. B->Batch size, L->Time steps
      num_units: The model's dimension
      scope: Variable scope

    Returns:
      A tensor with shape [B,L,D]. D->Model's dimension
    """
    # Static shapes are used, so both batch size and sequence length must
    # be known at graph-construction time.
    batch_size, time_steps = inputs.get_shape().as_list()
    with tf.variable_scope(scope):
        # [B, L] grid of position indices 0..L-1, one row per batch element.
        position_index = tf.tile(
            tf.expand_dims(tf.range(time_steps), 0), [batch_size, 1])
        # NOTE(review): the exponent uses the raw channel index i
        # (2.*i/num_units), not 2*(i//2) as in the paper, so sin/cos pairs
        # do not share a wavelength — presumably inherited from the
        # upstream implementation; confirm before reuse.
        position_encoding = np.array([
            [pos / np.power(10000, 2. * i / num_units) for i in range(num_units)]
            for pos in range(time_steps)])
        position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])  # dim 2i
        position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2])  # dim 2i+1
        # Convert to a tensor and gather one encoding row per position.
        lookup_table = tf.convert_to_tensor(position_encoding)
        outputs = tf.nn.embedding_lookup(lookup_table, position_index)
    return outputs
def layer_norm(inputs, epsilon=1e-8, scope="layer_norm"):
    """Layer normalization.

    norm = gamma * (inputs - mean) / sqrt(variance + epsilon)

    Args:
      inputs: Input tensor, shape is [B,L,D]. B->Batch size, L->Time steps, D->Model's dim
      epsilon: A very small float number to avoid zero division error
      scope: Variable scope or name

    Returns:
      The normalized tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope):
        inputs_shape = inputs.get_shape()
        # Normalize over the last (feature) axis only.
        params_shape = inputs_shape[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift/scale, one value per feature channel.
        beta = tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)
        outputs = gamma * normalized + beta
    return outputs
def scaled_dot_product_attention(q, k, v, scale=None, mask=None, dropout=0.2):
    """Scaled dot-product attention.

    Args:
      q: Query tensor, with shape [h*B, L, D/h]. h->num_heads
      k: Key tensor, with shape [h*B, L, D/h]
      v: Value tensor, with shape [h*B, L, D/h]
      scale: Optional scalar multiplied into the attention logits
        (typically 1/sqrt(D/h)).
      mask: Optional attention mask, with shape [h*B, L, L]; positions
        where the mask equals 0 are blinded.
      dropout: A scalar, dropout *rate* applied to the attention weights.

    Returns:
      An output tensor and a attention tensor
    """
    dot = tf.matmul(q, k, transpose_b=True)  # [h*B,L,L]
    # BUG FIX: the original tested `if scale:` / `if mask:`; evaluating
    # the truthiness of a Tensor raises a TypeError in TensorFlow, so the
    # optional arguments must be compared against None.
    if scale is not None:
        dot = dot * scale
    if mask is not None:
        # Fill masked-out logits with the most negative representable
        # value so softmax sends them to ~0.
        padding = tf.ones_like(dot) * dot.dtype.min
        dot = tf.where(tf.equal(mask, 0), padding, dot)
    attention = tf.nn.softmax(dot)
    # BUG FIX: tf.nn.dropout's second positional argument is keep_prob,
    # not the drop rate; passing `dropout` directly kept only 20% of the
    # attention weights instead of dropping 20%.
    attention = tf.nn.dropout(attention, keep_prob=1.0 - dropout)
    output = tf.matmul(attention, v)
    return output, attention
def multihead_attention(queries,
                        keys,
                        values,
                        num_heads=8,
                        dropout=0.2,
                        mask=None,
                        scope="multihead_attention"):
    """Multi-head attention mechanism.

    Args:
      queries: Query tensor, with shape [B, L, D]
      keys: Key tensor, with shape [B, L, D]
      values: Value tensor, with shape [B, L, D]
      num_heads: A scalar, number of heads to split; must divide D
      dropout: A scalar, dropout rate.
      mask: Masking tensor, with shape [h*B, L, L]
      scope: A string, variable scope name.

    Returns:
      An output tensor [B, L, D] and a attention tensor
    """
    with tf.variable_scope(scope) as scope:
        model_dim = queries.get_shape()[-1]
        # Linear projections for Q, K and V.
        q = tf.layers.dense(
            queries, model_dim, activation=tf.nn.relu)  # (B, L_q, D]
        k = tf.layers.dense(
            keys, model_dim, activation=tf.nn.relu)
        v = tf.layers.dense(
            values, model_dim, activation=tf.nn.relu)
        # Split the feature axis into heads and stack them on the batch axis.
        q = tf.concat(tf.split(q, num_heads, axis=2), 0)  # [h*B, L_q, D/h]
        k = tf.concat(tf.split(k, num_heads, axis=2), 0)
        v = tf.concat(tf.split(v, num_heads, axis=2), 0)
        # 1/sqrt(d_k) scaling of the attention logits.
        scale = (model_dim // num_heads) ** -0.5
        output, attention = scaled_dot_product_attention(
            q, k, v, scale, mask, dropout)
        # Merge the heads back onto the feature axis and project.
        output = tf.concat(tf.split(output, num_heads, axis=0), 2)
        output = tf.layers.dense(output, model_dim)
        # BUG FIX: tf.nn.dropout's second positional argument is keep_prob,
        # not the drop rate; the original kept only 20% of the units.
        output = tf.nn.dropout(output, keep_prob=1.0 - dropout)
        # residual
        output += queries
        # layer norm (post-norm, as in the original Transformer)
        output = layer_norm(output)
    return output, attention
def positional_wise_feed_forward_network(inputs,
                                         model_dim=512,
                                         ffn_dim=2048,
                                         dropout=0.2,
                                         scope="ffn"):
    """Position-wise feed forward network.

    FFN(x) = Conv1x1(ReLU(Conv1x1(x))): expand to ffn_dim, project back
    to model_dim, then dropout, residual connection and layer norm.

    Args:
      inputs: Input tensor with shape [B,L,D] (D == model_dim)
      model_dim: Model's dimension
      ffn_dim: FFN's inner dimension
      dropout: A scalar, dropout rate
      scope: Variable's scope or name

    Returns:
      An output tensor with shape [B,L,D]
    """
    with tf.variable_scope(scope) as scope:
        # BUG FIX: the original used filters=model_dim for the inner layer
        # and filters=ffn_dim for the readout. That inverts the Transformer
        # expand/project order and leaves the output with ffn_dim channels,
        # so the residual add below shape-mismatches whenever
        # ffn_dim != model_dim.
        params = {"inputs": inputs, "filters": ffn_dim, "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Readout layer: project back to the model dimension.
        params = {"inputs": outputs, "filters": model_dim, "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.layers.dropout(outputs, dropout)
        # residual and layer norm
        outputs += inputs
        outputs = layer_norm(outputs)
    return outputs
def padding_mask(seq_k, seq_q, num_heads):
    """Padding mask.

    Marks key positions whose feature vectors are all zero (padding) so
    attention can ignore them.

    Args:
      seq_k: Keys tensor with shape [B,L,D]
      seq_q: Queries tensor with shape [B,L,D]
      num_heads: A scalar, number of heads

    Returns:
      A masking tensor with shape [h*B,L,L] (0 at padded key positions).
    """
    # 1 where the key vector is non-zero, 0 where it is all-zero padding.
    mask = tf.sign(tf.abs(tf.reduce_sum(seq_k, axis=-1)))  # [B,L]
    mask = tf.tile(mask, [num_heads, 1])  # [h*B,L]
    # Broadcast the key mask across every query position.
    mask = tf.tile(tf.expand_dims(mask, 1), [1, tf.shape(seq_q)[1], 1])  # [h*B,L,L]
    return mask
def sequence_mask(seq, num_heads, dtype=tf.float32):
    """Sequence (look-ahead) mask to blind future time steps.

    Args:
      seq: Input tensor with shape [B,L,D]
      num_heads: A scalar, number of heads
      dtype: Data type

    Returns:
      A masking tensor with shape [h*B,L,L]: lower-triangular ones, so
      position t can only attend to positions <= t.
    """
    batch_size = tf.shape(seq)[0]
    length = tf.shape(seq)[1]
    diag = tf.ones(shape=[length, length], dtype=dtype)  # [L,L]
    # Keep only the lower triangle (causal mask).
    tril = tf.linalg.LinearOperatorLowerTriangular(diag).to_dense()  # [L,L]
    mask = tf.tile(tf.expand_dims(tril, 0), [num_heads * batch_size, 1, 1])  # [h*B,L,L]
    return mask
| StarcoderdataPython |
4974485 | """
Created by Sayem on 18 April, 2021
All rights reserved. Copyright © 2020.
"""
from Crypto import Random
from Crypto.Cipher import AES
import base64
from hashlib import md5, sha256
__author__ = "Sayem"
class AESCipher(object):
    """AES helper mixing two different container formats.

    NOTE(review): encrypt() produces base64(iv || AES-256-CBC ciphertext)
    keyed by sha256 of the constructor key, while decrypt() expects the
    OpenSSL "Salted__" layout and derives its key from the *passphrase*
    argument via EVP_BytesToKey (md5). Output of encrypt() can therefore
    NOT be fed back into decrypt() — confirm which format callers need.
    """

    def __init__(self, key):
        # AES block size (16 bytes), used by the padding helpers.
        self.bs = AES.block_size
        # 256-bit key derived from the key string; used by encrypt() only.
        self.key = sha256(key.encode()).digest()

    @staticmethod
    def unpad(data):
        # Strip PKCS#7-style padding; the last element gives the pad
        # length. Works on bytes (int elements) and str alike.
        return data[:-(data[-1] if type(data[-1]) == int else ord(data[-1]))]

    @staticmethod
    def bytes_to_key(data, salt, output=48):
        # OpenSSL EVP_BytesToKey key derivation: iterated md5 over
        # (passphrase || salt) until *output* bytes are available.
        assert len(salt) == 8, len(salt)
        data += salt
        key = md5(data).digest()
        final_key = key
        while len(final_key) < output:
            key = md5(key + data).digest()
            final_key += key
        return final_key[:output]

    def _pad(self, s):
        # Pad the string up to the next multiple of the block size with
        # chr(pad_length) characters (PKCS#7 style, applied pre-encode).
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)

    def encrypt(self, raw):
        """Return base64(iv || AES-256-CBC(raw)) for the str *raw*."""
        raw = self._pad(raw)
        # Fresh random IV per message, prepended to the ciphertext.
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw.encode()))

    def decrypt(self, encrypted, passphrase):
        """Decrypt OpenSSL-compatible ("Salted__") base64 data using
        *passphrase* (bytes); ignores the key stored on this instance."""
        encrypted = base64.b64decode(encrypted)
        assert encrypted[0:8] == b"Salted__"
        salt = encrypted[8:16]
        # 32 key bytes + 16 IV bytes, as openssl enc -aes-256-cbc does.
        key_iv = self.bytes_to_key(passphrase, salt, 32 + 16)
        key = key_iv[:32]
        iv = key_iv[32:]
        aes = AES.new(key, AES.MODE_CBC, iv)
        return self.unpad(aes.decrypt(encrypted[16:]))
| StarcoderdataPython |
8172201 | <filename>dashboard/urls.py
from django.urls import path, include
from . import views
# URL routes for the dashboard app. FIX: the original closing-bracket line
# had dataset-export junk fused onto it, which made the module invalid.
urlpatterns = [
    # Landing page: the notifications view.
    path('', views.notifications, name="dashboard"),
    # path("class", views.classupdates, name="classupdates")
]
4820588 | from fabric.api import env, task
from envassert import detect, package, port, process, service
from hot.utils.test import get_artifacts, http_check
@task
def check():
    """Fabric task: assert that a LAMP/Drupal node is fully provisioned."""
    env.platform_family = detect.detect()

    site = "http://localhost/"
    string = "example.com"  # marker text expected in the served page
    apache_process = 'apache2'
    php_package = 'php5'
    mysql_process = 'mysql'

    assert port.is_listening(80), 'Port 80 is not listening.'
    assert package.installed(php_package), 'PHP is not installed.'
    assert process.is_up(apache_process), 'Apache is not running.'
    assert process.is_up(mysql_process), 'MySQL is not running.'
    assert service.is_enabled(apache_process), 'Apache is disabled at boot.'
    assert service.is_enabled(mysql_process), 'MySQL is disabled at boot.'
    assert http_check(site, string), 'Drupal is not responding as expected.'
@task
def artifacts():
    """Fabric task: detect the platform and collect test artifacts."""
    env.platform_family = detect.detect()
    get_artifacts()
| StarcoderdataPython |
3546231 | #/u/GoldenSights
import praw
import time
import traceback
import sqlite3
''' USER CONFIG '''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
# This is a short description of what the bot does.
# For example "/u/GoldenSights' Newsletter bot to notify of new posts"
SUBREDDIT = "GoldTesting"
# This is the sub or list of subs to scan for new posts. For a single sub, use "sub1".
# For multiple subs, use "sub1+sub2+sub3+...". For all use "all"
WIKI_PAGE_PREFIX = ''
# This text will prefix the user's name in their wiki page URL.
# If the prefix has a slash at the end, it will become a "folder" of pages.
# Take a look at the "bios" folder here: https://www.reddit.com/r/goldtesting/wiki/pages
# This is done with a prefix of 'bios/'
MESSAGE_INITIAL_SUBJECT = 'Welcome to /r/_subreddit_, _author_!'
MESSAGE_INITIAL_BODY = '''
Hey _author_,
This is the first time we've seen you post in /r/_subreddit_, welcome!
Your [first submission](_permalink_) has been added to your new bio page
at /r/_subreddit_/wiki/_author_.
'''
MESSAGE_UPDATE_SUBJECT = 'Your /r/_subreddit_ bio has been updated'
MESSAGE_UPDATE_BODY = '''
Hey _author_,
Your [submission](_permalink_) to /r/_subreddit_ has been added
to the bottom of your bio at /r/_subreddit_/wiki/_author_.
'''
MESSAGE_FULL_SUBJECT = 'Your /r/_subreddit_ bio is full!'
MESSAGE_FULL_BODY = '''
Hey _author_,
I attempted to update your bio page at /r/_subreddit_/wiki/_author_,
but found that it was too full for me to add more text!
'''
# The subject and body of the messages you will to send to users.
# If you put _author_ in either one of these texts, it will be automatically
# replaced with their username.
# Feel free to send me a message if you want more injectors
WIKI_PAGE_INITIAL_TEXT = '''
This is the bio page for /u/_author_
'''
# When creating a user's wiki page, put this text at the top.
WIKI_POST_FORMAT = '''
---
**[_title_](_permalink_)**
_text_
'''
# The format used when putting the submission text into user's wiki page
# If it's a linkpost, then _text_ will be the link they submitted.
# This one puts a horizontal line above each post to separate them
# Available injectors are _title_, _permalink_, _text_
WIKI_PERMLEVEL = 1
# Who can edit this page?
# 0 - Use global wiki settings
# 1 - Use a whitelist of names
# 2 - Only mods can read and see this page
MAXPOSTS = 10
# How many submissions / how many comments to get on each run
# PRAW can get up to 100 in a single call
MAX_MAILTRIES = 15
# The maximum number of times to attempt sending mail
# in the event of server outage etc.
WAIT = 30
# How many seconds to wait between runs.
# The bot is completely inactive during this time.
''' All done! '''
# Prefer a user-agent from a local bot.py module when available; fall back
# to the USERAGENT configured above.
try:
    import bot
    USERAGENT = bot.aG
except ImportError:
    pass

# Local cache of seen users and the submission fullnames already processed
# for each (comma-separated in the `submissions` column).
sql = sqlite3.connect('biowiki.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(name TEXT, submissions TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS userindex on users(name)')
sql.commit()

# OAuth2 login (see the linked how-to near the credentials above).
print('Logging in')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)

# Submissions older than this timestamp are ignored.
START_TIME = time.time()
def get_page_content(pagename):
    """Return the markdown body of a subreddit wiki page, or '' if absent."""
    subreddit = r.get_subreddit(SUBREDDIT)
    try:
        return subreddit.get_wiki_page(pagename).content_md
    except praw.errors.NotFound:
        # Page has never been created yet.
        return ''
def send_message(recipient, subject, body):
    """Send a reddit PM, retrying up to MAX_MAILTRIES times on HTTP errors.

    Returns the praw message object on success, or None when the recipient
    is gone/blocking (NotFound/Forbidden) or all retries are exhausted.

    FIX: the original had an unreachable bare `return` directly after the
    `return r.send_message(...)` statement; it has been removed.
    """
    for x in range(MAX_MAILTRIES):
        try:
            print('\tSending mail')
            return r.send_message(recipient, subject, body)
        except praw.errors.HTTPException as e:
            # NotFound/Forbidden are permanent (deleted or blocking user);
            # retrying cannot help.
            if isinstance(e, praw.errors.NotFound):
                return
            if isinstance(e, praw.errors.Forbidden):
                return
            # Transient server error: back off and retry.
            time.sleep(20)
def update_wikipage(author, submission, newuser=False):
    '''
    Given a username and Submission object, publish a wiki page
    under their name containing the selftext of the post.
    If the wikipage already exists just put the text underneath
    the current content.

    Returns True on success and the string 'full' when reddit rejects the
    page for being too large.
    '''
    print('\tChecking current page')
    pagename = WIKI_PAGE_PREFIX + author
    content = get_page_content(pagename)
    if content == '':
        # First entry for this user: start from the bio template.
        content = WIKI_PAGE_INITIAL_TEXT.replace('_author_', author)
    newtext = WIKI_POST_FORMAT
    newtext = newtext.replace('_title_', submission.title)
    newtext = newtext.replace('_permalink_', submission.short_link)
    if submission.is_self:
        newtext = newtext.replace('_text_', submission.selftext)
    else:
        newtext = newtext.replace('_text_', submission.url)
    # Skip the append if this exact entry is already on the page (e.g.
    # after a crash between the wiki edit and the DB commit).
    if newtext not in content:
        complete = content + newtext
    else:
        complete = content
    print('\tUpdating page text')
    subreddit = r.get_subreddit(SUBREDDIT)
    try:
        subreddit.edit_wiki_page(pagename, complete)
    except praw.errors.PRAWException as e:
        # 413 = payload too large; 500 observed for oversized pages too.
        if e._raw.status_code in [500, 413]:
            # BUG FIX: the original printed the literal '%s' because the
            # format argument was missing.
            print('\tThe bio page for %s is too full!' % author)
            return 'full'
        else:
            raise e
    if newuser is True:
        # Grant the user edit rights on their own bio page.
        print('\tAssigning permission')
        page = subreddit.get_wiki_page(pagename)
        page.edit_settings(permlevel=WIKI_PERMLEVEL, listed=True)
        page.add_editor(author)
    return True
def biowikibot():
    '''
    - watch /new queue
    - If a new user is found:
        - Create his wiki page
        - Add the Submission's text as the page text
        - Set permissions for him to edit
        - PM him with a link to the page
    - If an existing user is found:
        - Add permalink to the Submission at the bottom of his wiki page.
        - PM him to notify of the update.
    '''
    print('Checking /r/%s/new' % SUBREDDIT)
    subreddit = r.get_subreddit(SUBREDDIT)
    new = list(subreddit.get_new(limit=MAXPOSTS))
    # Process oldest first so wiki entries appear in submission order.
    new.sort(key=lambda x: x.created_utc)
    for submission in new:
        if submission.author is None:
            # Post is deleted. Ignore
            continue
        if submission.created_utc < START_TIME:
            # Post made before the bot started. Ignore
            continue
        author = submission.author.name
        cur.execute('SELECT * FROM users WHERE name=?', [author])
        fetch = cur.fetchone()
        if fetch is None:
            # First time we see this author: create their bio page.
            print('New user: %s' % author)
            posts = submission.fullname
            cur.execute('INSERT INTO users VALUES(?, ?)', [author, posts])
            result = update_wikipage(author, submission, newuser=True)
            subject = MESSAGE_INITIAL_SUBJECT
            body = MESSAGE_INITIAL_BODY
        else:
            # Known author: fetch[1] is a comma-separated list of already
            # processed submission fullnames.
            posts = fetch[1].split(',')
            if submission.fullname in posts:
                # Already processed this post. Ignore
                continue
            print('Returning user: %s' % author)
            posts.append(submission.fullname)
            posts = ','.join(posts)
            cur.execute('UPDATE users SET submissions=? WHERE name=?',
                        [posts, author])
            result = update_wikipage(author, submission, newuser=False)
            subject = MESSAGE_UPDATE_SUBJECT
            body = MESSAGE_UPDATE_BODY
        if result == 'full':
            # The page hit reddit's size limit; switch to the "full" notice.
            subject = MESSAGE_FULL_SUBJECT
            body = MESSAGE_FULL_BODY
        # Fill in the message templates.
        subject = subject.replace('_author_', author)
        subject = subject.replace('_subreddit_', SUBREDDIT)
        body = body.replace('_author_', author)
        body = body.replace('_permalink_', submission.short_link)
        body = body.replace('_subreddit_', SUBREDDIT)
        if result is not None:
            send_message(author, subject, body)
        # Commit per submission so a crash does not reprocess earlier posts.
        sql.commit()
# Main loop: run one pass, log any error, and sleep WAIT seconds between
# passes.
while True:
    try:
        biowikibot()
    # BUG FIX: the original used a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit, making the bot impossible to stop
    # with Ctrl-C.
    except Exception:
        traceback.print_exc()
    print('Running again in %d seconds' % WAIT)
    time.sleep(WAIT)
104765 | <reponame>kampelmuehler/synthesizing_human_like_sketches
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
class PSim_Alexnet(nn.Module):
    """AlexNet variant for single-channel (sketch) inputs.

    Serves either as a classifier (train=True and/or with_classifier=True)
    or as a perceptual-similarity feature extractor that returns the five
    post-ReLU conv activations.
    """

    def __init__(self, num_classes=125, train=True, with_classifier=False):
        super(PSim_Alexnet, self).__init__()
        self.train_mode = train
        self.with_classifier = with_classifier
        alexnet_model = torchvision.models.alexnet(pretrained=True)
        feature_layers = list(alexnet_model.features.children())
        # 1-channel stem, initialised from the first input channel of the
        # pretrained RGB conv1 - narrow(dim, start, length).
        self.Conv1 = nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2)
        self.Conv1.weight.data.copy_(feature_layers[0].weight.data.narrow(1, 0, 1))
        self.Conv1.bias.data.copy_(feature_layers[0].bias.data)
        # Remaining conv layers are shared with the pretrained model.
        self.Conv2 = feature_layers[3]
        self.Conv3 = feature_layers[6]
        self.Conv4 = feature_layers[8]
        self.Conv5 = feature_layers[10]
        # Take the pretrained classifier head, replacing the last layer.
        if train is True:
            linear = nn.Linear(4096, num_classes)
            # Init with the first num_classes rows of the pretrained
            # 1000-way classifier.
            linear.weight.data.copy_(alexnet_model.classifier.state_dict()['6.weight'].narrow(0, 0, num_classes))
            linear.bias.data.copy_(alexnet_model.classifier.state_dict()['6.bias'].narrow(0, 0, num_classes))
            alexnet_classifier = list(alexnet_model.classifier.children())
            self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                            *alexnet_classifier[1:3],
                                            nn.Dropout(p=0.5),
                                            *alexnet_classifier[4:-1],
                                            linear)

    def forward(self, x):
        """Run the conv trunk; depending on configuration, return conv
        activations, log-softmax class scores, or both."""
        conv1_activation = F.relu(self.Conv1(x))
        x = F.max_pool2d(conv1_activation, kernel_size=3, stride=2)
        conv2_activation = F.relu(self.Conv2(x))
        x = F.max_pool2d(conv2_activation, kernel_size=3, stride=2)
        conv3_activation = F.relu(self.Conv3(x))
        conv4_activation = F.relu(self.Conv4(conv3_activation))
        conv5_activation = F.relu(self.Conv5(conv4_activation))
        if self.with_classifier is True:
            # Both the activations and the class scores.
            x = F.max_pool2d(conv5_activation, kernel_size=3, stride=2)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            net_outputs = namedtuple("AlexnetActivations", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
            return net_outputs(conv1_activation,
                               conv2_activation,
                               conv3_activation,
                               conv4_activation,
                               conv5_activation), F.log_softmax(x, dim=1)
        elif self.train_mode is True:
            # Classification only.
            x = F.max_pool2d(conv5_activation, kernel_size=3, stride=2)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return F.log_softmax(x, dim=1)
        else:
            # Feature-extractor mode: the five post-ReLU activations.
            net_outputs = namedtuple("AlexnetActivations", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
            return net_outputs(conv1_activation, conv2_activation, conv3_activation, conv4_activation, conv5_activation)

    def load_weights(self, state_dict):
        """Load *state_dict*, ignoring keys this model does not have."""
        model_dict = self.state_dict()
        # 1. filter out unnecessary keys
        state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(state_dict)
        # 3. BUG FIX: load the merged dict. The original loaded the
        # filtered state_dict directly, which fails under strict loading
        # whenever the checkpoint is missing keys (e.g. classifier weights
        # when train=False) and made steps 1-2 pointless.
        self.load_state_dict(model_dict)
8035449 | <filename>serving/video/common/steps.py
import time
from itertools import cycle
import cv2
import numpy as np
from .meters import MovingAverageMeter
from .pipeline import PipelineStep, AsyncPipeline
from .queue import Signal
from .models import AsyncWrapper
def preprocess_frame(frame, input_height, input_width):
    """Resize *frame* to the network input size and convert it to NCHW.

    Returns the 1-image batch and the (x, y) scale factors that map
    original-frame coordinates to network coordinates.
    """
    src_h, src_w, _ = frame.shape
    scale = (input_width / src_w, input_height / src_h)
    resized = cv2.resize(frame, (0, 0), fx=scale[0], fy=scale[1],
                         interpolation=cv2.INTER_CUBIC)
    normalized = (resized - 128) / 255.0           # shift/scale pixel values
    chw = normalized.transpose((2, 0, 1))          # HWC -> CHW
    return chw[np.newaxis, ...], scale
def run_pipeline(video, estimator, render_obj):
    """Wire data -> estimator -> render into a pipeline and run it to completion."""
    pipeline = AsyncPipeline()
    pipeline.add_step('Data', DataStep(video), parallel=False)
    pipeline.add_step('Estimator', EstimatorStep(estimator), parallel=False)
    pipeline.add_step('Render', RenderStep(render_obj, fps=30), parallel=False)
    pipeline.run()
    pipeline.close()
    pipeline.print_statistics()
class DataStep(PipelineStep):
    """Pipeline source step: yields frames from a (possibly looping) video list."""

    def __init__(self, video_list, loop=True):
        super().__init__()
        self.video_list = video_list
        self.cap = None  # cv2.VideoCapture for the currently open source
        if loop:
            # Cycle through the playlist forever.
            self._video_cycle = cycle(self.video_list)
        else:
            self._video_cycle = iter(self.video_list)

    def setup(self):
        self._open_video()

    def process(self, item):
        """Return the next frame, or Signal.STOP when input is exhausted.

        *item* is ignored: this step is the pipeline's source.
        """
        if not self.cap.isOpened() and not self._open_video():
            return Signal.STOP
        status, frame = self.cap.read()
        if not status:
            return Signal.STOP
        return frame

    def end(self):
        self.cap.release()

    def _open_video(self):
        """Open the next playlist entry; return True on success."""
        next_video = next(self._video_cycle)
        try:
            # Purely numeric entries are treated as camera device indices.
            next_video = int(next_video)
        except ValueError:
            pass
        self.cap = cv2.VideoCapture(next_video)
        if not self.cap.isOpened():
            return False
        return True
class EstimatorStep(PipelineStep):
    """Pipeline step that runs asynchronous inference on each frame."""

    def __init__(self, estimator):
        super(EstimatorStep, self).__init__()
        self.estimator = estimator
        # Wrap the model so multiple inference requests can be in flight.
        self.async_model = AsyncWrapper(self.estimator, self.estimator.num_requests)

    def process(self, frame):
        """Submit *frame* for inference; return (frame, outputs, scale, timings).

        Returns None while the async wrapper has not yet produced its
        first completed result.
        """
        # TODO: Extract shape from self.estimator
        preprocessed, scale = preprocess_frame(frame, 368, 368)
        outputs, frame = self.async_model.infer(preprocessed, frame)
        if outputs is None:
            return None
        return frame, outputs, scale, {'estimation': self.own_time.last}
class RenderStep(PipelineStep):
    """Passes inference result to render function"""

    def __init__(self, renderer, fps):
        super().__init__()
        self.renderer = renderer
        self.render = renderer.render_frame
        self.meta = renderer.meta
        self.fps = fps  # target playback rate used for frame pacing
        self._frames_processed = 0
        self._t0 = None  # wall-clock time when the first frame was paced
        # Smoothed render duration, used to compensate the pacing sleep.
        self._render_time = MovingAverageMeter(0.9)

    def process(self, item):
        """Pace to self.fps and render *item* (frame, outputs, scale, meta)."""
        # None means the async estimator has not produced a result yet.
        if item is None:
            return
        self._sync_time()
        render_start = time.time()
        status = self.render(*item, self._frames_processed)
        self._render_time.update(time.time() - render_start)
        self._frames_processed += 1
        # A negative status from the renderer (e.g. user quit) aborts
        # the whole pipeline.
        if status is not None and status < 0:
            return Signal.STOP_IMMEDIATELY
        return status

    def end(self):
        cv2.destroyAllWindows()
        if hasattr(self.renderer, 'writer'):
            self.renderer.writer.release()

    def _sync_time(self):
        """Sleep just enough to hold the output at self.fps."""
        now = time.time()
        if self._t0 is None:
            self._t0 = now
        expected_time = self._t0 + (self._frames_processed + 1) / self.fps
        # Start early by the average render duration so the frame lands
        # on schedule.
        if self._render_time.avg:
            expected_time -= self._render_time.avg
        if expected_time > now:
            time.sleep(expected_time - now)
6570323 | <reponame>Abraham-Xu/TF2
# Copyright 2019 Inspur Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from numpy import mat
""" 1D Winograd, the filter should be transformed by the winograd filter transformation matrix. 1D winograd math:: A^T[(Gg)*(B^Td)], the filter transformation is 'Gg', where 'g' is the filter and the G is the filter transformation matrix, the '^T' means the transpose of the matrix. The 1D filter transformation implementation includes F(2,3), F(4,3), F(5,3), for the F(a,b), where the first number 'a' called 'trans_size'.
2D Winograd, the filter should be transformed by the winograd filter transformation matrix. 2D winograd math:: A^T[(GgG^T)*(B^TdB)]A, the filter transformation is 'GgG^T', where 'g' is the filter and the G and G^T is the filter transformation matrix and its transpose, respectively.The 2D filter transformation implementation includes F(2x2,3x3), F(4x4,3x3), F(5x5,3x3), for the F(axa,bxb), where the first number 'a' called 'trans_size'. """
def FilterTransform(filter_shape, filter_raw, filter_trans, trans_size, trans_dim):
    """Transform 3-tap (or 3x3) convolution filters into the Winograd domain.

    Computes ``G g^T`` transposed (1-D) or ``G g^T G^T`` (2-D) for every
    filter in ``filter_raw`` and stores the result in ``filter_trans``.

    Args:
        filter_shape: (rows, cols) — how many filters ``filter_raw`` holds.
        filter_raw: 2-D object array; ``filter_raw[i, j]`` is one raw filter
            (a 1x3 row vector for 1-D, a 3x3 kernel for 2-D).
        filter_trans: 2-D object array that receives the transformed filters.
        trans_size: 2, 4 or 5, selecting F(2,3), F(4,3) or F(5,3)
            (resp. their 2-D F(axa,3x3) counterparts).
        trans_dim: 1 for 1-D Winograd, 2 for 2-D Winograd.

    Returns:
        ``filter_trans``, filled in place.

    Raises:
        ValueError: if ``trans_size`` or ``trans_dim`` is unsupported.
    """
    # Winograd filter-transformation matrices G (shape h x 3) for width-3
    # kernels.  Bug fix: the original used `=` instead of `==` in these
    # tests, which is a syntax error; it also left G/h undefined for any
    # other trans_size — now rejected explicitly.
    if trans_size == 2:      # F(2,3) / F(2x2,3x3), h = 4
        G = np.asmatrix([[1.0, 0.0, 0.0],
                         [0.5, 0.5, 0.5],
                         [0.5, -0.5, 0.5],
                         [0.0, 0.0, 1.0]])
    elif trans_size == 4:    # F(4,3) / F(4x4,3x3), h = 6
        G = np.asmatrix([[6.0, 0.0, 0.0],
                         [-4.0, -4.0, -4.0],
                         [-4.0, 4.0, -4.0],
                         [1.0, 2.0, 4.0],
                         [1.0, -2.0, 4.0],
                         [0.0, 0.0, 24.0]])
    elif trans_size == 5:    # F(5,3) / F(5x5,3x3), h = 7
        G = np.asmatrix([[10.0, 0.0, 0.0],
                         [10.0, 10.0, 10.0],
                         [5.0, -5.0, 5.0],
                         [-5.0, -10.0, -20.0],
                         [-1.0, 2.0, -4.0],
                         [1.0, 3.0, 9.0],
                         [0.0, 0.0, 120.0]])
    else:
        raise ValueError("trans_size must be 2, 4 or 5, got %r" % (trans_size,))

    if trans_dim not in (1, 2):
        raise ValueError("trans_dim must be 1 or 2, got %r" % (trans_dim,))

    for i in range(filter_shape[0]):
        for j in range(filter_shape[1]):
            # Unlike the original, do not overwrite filter_raw[i, j] in place.
            g = np.asmatrix(filter_raw[i, j])
            if trans_dim == 1:
                # 1-D: (G g^T)^T — a 1 x h row vector.
                filter_trans[i, j] = (G * g.T).T
            else:
                # 2-D: G g^T G^T — an h x h matrix (keeps the original's
                # g^T convention).
                filter_trans[i, j] = G * g.T * G.T
    return filter_trans
| StarcoderdataPython |
8064520 | <filename>blog/models.py<gh_stars>1-10
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import QuerySet
from auth.models import CustomUser
class Post(models.Model):
    """A (possibly anonymous) user post with likes, comments and reports."""

    # Status codes stored in the single-char `status` field.
    # NOTE(review): "NORMAl" (lowercase l) is a typo, kept as-is because
    # renaming it would touch every reference.
    NORMAl = 'N'
    HIDDEN = 'H'
    DELETED = 'D'
    # Statuses visible to everyone (only normal posts).
    PUBLIC = (
        NORMAl,
    )
    STATUS_CHOICES = (
        (NORMAl, 'Normal'),
        (HIDDEN, 'Masqué'),
        (DELETED, 'Supprimé'),
    )
    text = models.TextField(max_length=500)
    author: CustomUser = models.ForeignKey(CustomUser, on_delete=models.CASCADE,
                                           related_name='posts', verbose_name='Auteur')
    created_at = models.DateTimeField("date de création", auto_now_add=True)
    updated_at = models.DateTimeField("dernière modification", auto_now=True)
    status = models.CharField("état", max_length=1, choices=STATUS_CHOICES, default=NORMAl)
    is_anonymous = models.BooleanField("post anonyme", default=True)
    # Reverse-relation annotations (populated by Django via related_name).
    likes: QuerySet["Like"]
    reports: QuerySet["PostReport"]  # fixed annotation: the model is PostReport
    comments: QuerySet["Comment"]

    class Meta:
        verbose_name = "post"
        verbose_name_plural = "posts"
        permissions = (
            ('edit_other_users_posts', 'Can edit other users\' posts'),
            ('delete_other_users_posts', 'Can delete other users\' posts'),
            ('hide_unhide_posts', 'Can hide/unhide posts'),
            ('view_hidden_posts', 'Can view hidden posts'),
            ('show_author_name_when_anonymous', 'Can show author name when anonymous'),
        )

    @property
    def nb_of_likes(self) -> int:
        """Number of likes on this post (one query per access)."""
        return self.likes.count()

    @property
    def nb_of_comments(self) -> int:
        """Number of comments on this post (one query per access)."""
        return self.comments.count()

    @property
    def nb_of_reports(self) -> int:
        """Number of reports filed against this post (one query per access)."""
        return self.reports.count()

    def is_liked_by(self, user: CustomUser) -> bool:
        """ Returns True if the user has liked the post. """
        return self.likes.filter(user=user).exists()

    def is_reported_by(self, user: CustomUser) -> bool:
        """ Returns True if the user has reported the post. """
        return self.reports.filter(user=user).exists()

    @property
    def short_text(self) -> str:
        """ Returns the first 40 characters of the post's text. """
        MAX_LENGTH = 40
        if len(self.text) > MAX_LENGTH:
            return self.text[:MAX_LENGTH] + '...'
        else:
            return self.text

    def __str__(self) -> str:
        return f"{self.author.username} - {self.created_at.strftime('%d/%m/%Y %H:%M')}"
class Comment(models.Model):
    """A (possibly anonymous) user comment attached to a :class:`Post`."""

    post: Post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="comments", verbose_name="post")
    author: CustomUser = models.ForeignKey(CustomUser, on_delete=models.CASCADE,
                                           related_name="comments", verbose_name='auteur')
    text = models.TextField(max_length=300)
    created_at = models.DateTimeField("date de creation", auto_now_add=True)
    is_anonymous = models.BooleanField("commentaire anonyme", default=True)

    class Meta:
        verbose_name = "commentaire"
        verbose_name_plural = "commentaires"
        # NOTE(review): hide/unhide permissions are declared but Comment has
        # no status/hidden field in this view — confirm where they are used.
        permissions = (
            ('delete_other_users_comments', 'Can delete other users\' comments'),
            ('edit_other_users_comments', 'Can edit other users\' comments'),
            ('hide_unhide_comments', 'Can hide/unhide comments'),
            ('view_hidden_comments', 'Can view hidden comments'),
        )

    @property
    def short_text(self) -> str:
        """ Returns the first 40 characters of the post's text. """
        MAX_LENGTH = 40
        if len(self.text) > MAX_LENGTH:
            return self.text[:MAX_LENGTH] + '...'
        else:
            return self.text

    def __str__(self) -> str:
        return f"{self.author.username} - {self.created_at.strftime('%d/%m/%Y %H:%M')}"
class PostReport(models.Model):
    """A user's report (flag) against a post.

    A post is automatically hidden once it accumulates
    ``MAX_REPORT_COUNT`` reports (see :meth:`save`).
    """

    MAX_REPORT_COUNT = 3

    post: Post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="reports", verbose_name="post signalé")
    user: CustomUser = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                         related_name="reports", verbose_name="auteur du signalement")
    created_at = models.DateTimeField("date du signalement", auto_now_add=True)

    class Meta:
        constraints = [
            models.UniqueConstraint(fields=['post', 'user'], name='unique_report_post_user'),
        ]
        verbose_name = "signalement"
        verbose_name_plural = "signalements"
        permissions = (
            ('reset_reports', 'Can reset reports'),
            ('view_reports', 'Can view reports'),
        )

    def validate_unique(self, exclude: iter = None) -> None:
        """ Check you can't report the same post twice """
        if PostReport.objects.filter(post=self.post, user=self.user).exists():
            raise ValidationError("Vous avez déjà signalé ce post")
        super().validate_unique(exclude)

    def clean(self) -> None:
        """ Check you can't report your own post """
        if self.post.author == self.user:
            raise ValidationError("Vous ne pouvez pas signaler votre propre post")

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """Save the report, then hide the post once it has enough reports.

        Bug fix: the original compared a QuerySet directly to an int
        (``reports >= self.MAX_REPORT_COUNT``); the row count is what must
        be compared.
        """
        super().save(force_insert, force_update, using, update_fields)
        report_count = PostReport.objects.filter(post=self.post).count()
        if report_count >= self.MAX_REPORT_COUNT:
            self.post.status = self.post.HIDDEN
            self.post.save()

    def __repr__(self) -> str:
        return f'{self.post} - {self.user.username}'
class Like(models.Model):
    """A single user's like on a post; unique per (post, user) pair."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="likes", verbose_name="post aimé")
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                             related_name="likes", verbose_name="utilisateur aimant")
    created_at = models.DateTimeField("date", auto_now_add=True)

    class Meta:
        # Database-level guarantee mirroring validate_unique() below.
        constraints = [
            models.UniqueConstraint(fields=['post', 'user'], name='unique_like_post_user')
        ]
        verbose_name = "like"
        verbose_name_plural = "likes"
        permissions = (
            ('reset_likes', 'Can reset likes'),
            ('view_likes', 'Can view likes'),
        )

    def validate_unique(self, exclude: iter = None) -> None:
        """ Check you can't like the same post twice """
        if Like.objects.filter(post=self.post, user=self.user).exists():
            raise ValidationError("Vous avez déjà liké ce post")
        super().validate_unique(exclude)

    def __str__(self) -> str:
        return f'{self.post} - {self.user.username}'
| StarcoderdataPython |
9753590 | <filename>Chapter13.whileandforLoops/continue.py
#!/usr/bin/env python3
#encoding=utf-8
#--------------------------------
# Usage: python3 continue.py
# Description: example for continue
#--------------------------------
# Demonstrate `continue`: skip odd values, printing only the even ones.
x = 10
print('even result: ', end='')
while x:
    x -= 1 # x = x - 1
    if x % 2 != 0:
        # Odd value: jump straight back to the loop condition.
        continue
    print(x, end=' ')
else:
    # The while/else branch runs because the loop ended without `break`.
    print()
# similar implementation with the followed code
# (inverts the condition instead of using `continue`)
y = 10
print('even result: ', end='')
while y:
    y -= 1
    if y % 2 == 0:
        print(y, end=' ')
else:
    print()
| StarcoderdataPython |
import hashlib

# Print the MD5 digest of test.txt and its length.
# Fixes: the file handle is now closed deterministically via `with`, and the
# digest is printed in full — the original sliced `[1:33]`, which silently
# dropped the first of hexdigest()'s 32 hex characters.
with open("test.txt", "rb") as file:
    data = file.read()
data_2 = hashlib.md5(data).hexdigest()
print(data_2)
print(len(data_2))
| StarcoderdataPython |
9795197 | # Standard library imports
from scrapy import Spider
from scrapy.loader import ItemLoader
# Local application imports
from quotes.items import QuotesItem
# NOTE(review): this module-level item appears unused — parse() builds its own
# items via ItemLoader and rebinds a local `quote_item`. Confirm and remove.
quote_item = QuotesItem()

class QuotesSpider(Spider):
    """ Web Scraping Spider for Goodreads website. """

    # Class attributes
    name = "quotes"
    start_urls = ["https://www.goodreads.com/quotes?page=1"]

    def parse(self, response):
        """ Parsing function for the spider's requests.

        Yields one QuotesItem per quote on the page, then follows the
        "next page" link until pagination ends.
        """
        # Remove the line breaks on the html
        response = response.replace(body=response.body.replace(b"<br>", b""))
        for quote in response.css(".quoteDetails"):
            # Create an item loader with the quote data and add it as a new quote_item
            self.logger.info("Creating quote_item")
            loader = ItemLoader(item=QuotesItem(), selector=quote)
            loader.add_css("quote_content", ".quoteText::text")
            loader.add_css("author_name", ".quoteText .authorOrTitle::text")
            loader.add_css("author_image", ".leftAlignedImage img::attr(src)")
            loader.add_css("tags", ".greyText.smallText.left a::text")
            quote_item = loader.load_item()
            yield quote_item
        # Scrape the next page
        next_page = response.css("a.next_page::attr(href)").get()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
| StarcoderdataPython |
11206452 | <gh_stars>0
#!/usr/bin/python2.6
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
import operator
import os
import sys
import time
import traceback
import getopt
import getpass
import socket
import struct
from libsoat import *
from soat_config import *
sys.path.append("../../")
try:
import TorCtl.TorUtil
except ImportError:
from os import getcwd, path
print "TorCtl not found in %s. Did you run torflow.git/add_torctl.sh?" % path.abspath(getcwd()+'../..')
print "Exiting..."
exit()
TorCtl.TorUtil.loglevel="INFO"
if TorCtl.TorUtil.loglevels[TorCtl.TorUtil.loglevel] > TorCtl.TorUtil.loglevels["INFO"]:
# Kill stderr (jsdiffer and exception noise) if our loglevel is above INFO
sys.stderr = file("/dev/null", "w")
def usage(argv=None):
    """Print the command-line help (all supported filters) and exit(1).

    Python 2 code.  NOTE(review): the `argv=None` default would crash on
    `argv[0]` below; every call site in this file passes argv — confirm
    before relying on the default.
    """
    print "Usage: "+argv[0]+" with 0 or more of the following filters: "
    print "  --dir <datadir>"
    print "  --file <.result file>"
    print "  --exit <idhex>"
    print "  --after <timestamp as string (eg. \"Thu Jan 1 00:00:00 1970\")>"
    print "  --before <timestamp as string (eg. \"Mon Jan 19 03:14:07 2038\")>"
    print "  --finishedafter <timestamp as string>"
    print "  --finishedbefore <timestamp as string>"
    print "  --reason <soat failure reason>    # may be repeated"
    print "  --noreason <soat failure reason>  # may be repeated"
    print "  --proto <protocol>"
    print "  --resultfilter <TestResult class name>"
    print "  --statuscode <'Failure' or 'Inconclusive'>"
    print "  --siterate <integer n; print result if <n% of exits failed that site>"
    print "  --exitrate <integer n; print result if the exit failed >n% of sites>"
    print "  --sortby <'proto' or 'url' or 'exit' or 'reason'>"
    print "  --falsepositives"
    print "  --verbose"
    sys.exit(1)
class SIConf(object):
    """Command-line configuration for the snippet inspector (Python 2).

    Attributes start from permissive defaults (match everything) and are
    narrowed by the filters parsed from argv in :meth:`getargs`.
    """

    def __init__(self, argv=None):
        # FIXME: make all these repeatable
        # Data location / single-file mode.
        self.use_dir="./data/"
        self.use_file=None
        # Exit-node and failure-reason filters.
        self.node=None
        self.reasons=[]
        self.noreasons=[]
        # 2 == "Failure" in RESULT_CODES (see libsoat).
        self.statuscode=2
        self.verbose=1
        self.proto=None
        self.resultfilter=None
        # Time-window filters (epoch seconds); defaults accept everything.
        self.before = 0xffffffff
        self.after = 0
        self.finishedbefore = 0xffffffff
        self.finishedafter = 0
        self.finished = False
        self.sortby="proto"
        # Percentage thresholds for site/exit failure rates.
        self.siterate = 100
        self.exitrate = 0
        self.falsepositives=False
        self.send_email = False
        self.confirmed = False
        # When set (via --croninterval, hours -> seconds), only report
        # results newer than the interval.
        self.cron_interval = 0
        if argv:
            self.getargs(argv)

    def getargs(self, argv):
        """Parse argv with getopt and update the attributes above.

        Invalid options or an unknown --sortby value print usage and exit.
        """
        try:
            opts,args = getopt.getopt(argv[1:],"d:f:x:r:n:a:b:t:p:o:s:h:Fmcv",
                 ["dir=", "file=", "exit=", "reason=", "resultfilter=", "proto=",
                  "verbose", "statuscode=", "siterate=", "exitrate=", "sortby=",
                  "noreason=", "after=", "before=", "finishedafter=",
                  "finishedbefore=", "croninterval=", "falsepositives",
                  "email", "confirmed","help"])
        except getopt.GetoptError,err:
            print str(err)
            usage(argv)
        for o,a in opts:
            if o == '-h' or o == '--help':
                usage(argv)
            elif o == '-d' or o == '--dir':
                self.use_dir = a
            elif o == '-f' or o == '--file':
                self.use_file = a
            elif o == '-x' or o == '--exit':
                self.node = a
            elif o == '-r' or o == '--reason':
                self.reasons.append(a)
            elif o == '-n' or o == '--noreason':
                self.noreasons.append(a)
            elif o == '-a' or o == '--after':
                # Timestamps are parsed from asctime-style strings.
                self.after = time.mktime(time.strptime(a))
            elif o == '-b' or o == '--before':
                self.before = time.mktime(time.strptime(a))
            elif o == '--finishedbefore':
                self.finishedbefore = time.mktime(time.strptime(a))
                self.finished = True
            elif o == '--finishedafter':
                self.finishedafter = time.mktime(time.strptime(a))
                self.finished = True
            elif o == '--croninterval':
                # Hours on the command line, stored as seconds.
                self.cron_interval = int(a)*3600
            elif o == '-t' or o == '--resultfilter':
                self.resultfilter = a
            elif o == '-p' or o == '--proto':
                self.proto = a
            elif o == '--siterate':
                self.siterate = int(a)
            elif o == '--exitrate':
                self.exitrate = int(a)
            elif o == '-F' or o == '--falsepositives':
                self.falsepositives = True
            elif o == '-m' or o == '--email':
                self.send_email = True
            elif o == '-c' or o == '--confirmed':
                self.confirmed = True
            elif o == '-v' or o == '--verbose':
                self.verbose += 1
            elif o == '-o' or o == '--sortby':
                if a not in ["proto", "site", "exit", "reason"]:
                    usage(argv)
                else:
                    self.sortby = a
            elif o == '-s' or o == '--statuscode':
                # Accept either a numeric code or a symbolic name.
                try:
                    self.statuscode = int(a)
                except ValueError:
                    self.statuscode = RESULT_CODES[a]
def send_mail(fro, to, subject, text, files=[]):
assert type(to)==list
assert type(files)==list
msg = MIMEMultipart()
msg['From'] = fro
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"'
% os.path.basename(f))
msg.attach(part)
if mail_auth and not (mail_tls or mail_starttls):
print "You've requested authentication but have not set"
print "mail_tls or mail_starttls to True. As a friend,"
print "I just can't let you do that to yourself."
return
try:
if mail_tls:
if sys.version_info >= (2, 6):
smtp = smtplib.SMTP_SSL(host=mail_server)
else:
print "mail_tls requires Python >= 2.6"
else:
smtp = smtplib.SMTP(host=mail_server)
if mail_starttls:
smtp.starttls()
if mail_auth:
passwd = <PASSWORD>_password or <PASSWORD>()
smtp.login(mail_user, passwd)
smtp.sendmail(fro, to, msg.as_string() )
smtp.close()
except smtplib.SMTPException, e:
print e
def main(argv):
    """Load SoaT results, apply the configured filters, print or email them.

    Python 2 code.  Filtering walks every stored result and skips those that
    fail any active filter; with --email the survivors are grouped by reason
    and mailed one message per result instead of printed.
    """
    now = time.time()
    conf=SIConf(argv)
    dh = DataHandler(conf.use_dir)
    # Choose the result set: one file, one exit node, or everything.
    if conf.use_file:
        results = [dh.getResult(conf.use_file)]
    elif conf.node:
        results = dh.filterByNode(dh.getAll(), conf.node)
    else:
        results = dh.getAll()

    # Optional sort (default "proto" leaves the natural order).
    if conf.sortby == "url":
        results.sort(lambda x, y: cmp(x.site, y.site))
    elif conf.sortby == "reason":
        results.sort(lambda x, y: cmp(x.reason, y.reason))
    elif conf.sortby == "exit":
        results.sort(lambda x, y: cmp(x.exit_node, y.exit_node))

    by_reason = {}
    for r in results:
        r.verbose = conf.verbose
        # Reason include/exclude filters.
        if r.reason in conf.noreasons: continue
        if conf.reasons and r.reason not in conf.reasons: continue
        # Start/finish time windows.
        if r.timestamp < conf.after or conf.before < r.timestamp: continue
        if conf.finished:
            if not r.finish_timestamp: continue
            if r.finish_timestamp < conf.finishedafter: continue
            if conf.finishedbefore < r.finish_timestamp: continue
        # XOR: show only false positives with -F, only real ones without.
        if (conf.falsepositives) ^ r.false_positive: continue
        # XXX: r.confirmed is not used. The variable to use here is r.from_rescan.
        # We should fix this in libsoat.py and soat.py someday.
        if conf.confirmed != r.from_rescan: continue
        # With --croninterval, only results from the last interval (+60s slack).
        if conf.confirmed:
            if conf.cron_interval and r.finish_timestamp < now-conf.cron_interval-60:
                continue
        else:
            if conf.cron_interval and r.timestamp < now-conf.cron_interval-60:
                continue
        # Don't display sites that either do not have enough tests
        # or have resulted in too many positive results.
        if r.site_result_rate[1] != 0 and \
           conf.siterate < (100.0*r.site_result_rate[0])/r.site_result_rate[1]:
            continue
        if r.exit_result_rate[1] != 0 and \
           conf.exitrate > (100.0*r.exit_result_rate[0])/r.exit_result_rate[1]:
            continue
        # Final status/protocol/class-name filters; survivors are emitted.
        if (not conf.statuscode or r.status == conf.statuscode) and \
           (not conf.proto or r.proto == conf.proto) and \
           (not conf.resultfilter or r.__class__.__name__ == conf.resultfilter):
            if conf.send_email:
                by_reason.setdefault(r.reason, []).append(r)
                continue
            try:
                print r
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except (Exception, IOError), e:
                traceback.print_exc()
                sys.stderr.write("\n-----------------------------\n")
            else:
                print "\n-----------------------------\n"

    if conf.send_email:
        for rsn in by_reason.iterkeys():
            for r in by_reason[rsn]:
                print "Mailing result..."
                # Subject: reason + dotted-quad exit IP + nickname.
                subject = rsn+" on "+socket.inet_ntoa(struct.pack(">I",r.exit_ip))+" ("+r.exit_name+")"
                text = ""
                try:
                    # Body: full result plus ready-to-paste torrc /
                    # approved-routers badexit lines.
                    text += str(r)
                    text += "\n# torrc:\n"
                    text += "authdirbadexit "+socket.inet_ntoa(struct.pack(">I",r.exit_ip))+" # "+r.exit_name+"\n"
                    text += "\n# approved-routers\n"
                    text += "!badexit "+r.exit_node+" # "+r.exit_name+"\n"
                    text += "\n------------------------------------------\n"
                except Exception, e:
                    text += traceback.format_exc()
                # TODO: Attach files? Or is that too much.. Maybe serve
                # them via http and include links?
                if isinstance(r, HttpTestResult) or \
                   isinstance(r, JsTestResult) or \
                   isinstance(r, HtmlTestResult):
                    attach = []
                    if r.content:
                        attach.append(r.content)
                    if r.content_old:
                        attach.append(r.content_old)
                    if r.content_exit:
                        attach.append(r.content_exit)
                    send_mail(mail_from_email, mail_to_email, subject, text,
                              attach)
                else:
                    send_mail(mail_from_email, mail_to_email, subject, text)
if __name__ == "__main__":
main(sys.argv)
| StarcoderdataPython |
1650163 | <filename>tests/test_utils/test_helpers.py
import unittest
class TestHelpers(unittest.TestCase):
    """Tests for the helper utilities in ``plenario.utils.helpers``."""

    def test_slugify(self):
        """slugify should lowercase the input and turn dashes into underscores."""
        # Imported inside the test so the module is only loaded when it runs.
        from plenario.utils.helpers import slugify

        expected = "a_awef_basdf_123"
        self.assertEqual(slugify("A-Awef-Basdf-123"), expected)
| StarcoderdataPython |
3407387 | import torch
import numpy as np
# import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as functional
from torchvision import transforms, datasets
# Smoke-test basic numpy and torch tensor construction.
a = np.array([[0, 1], [1, 2]])

# Two one-element float tensors.
scalar1 = torch.tensor([1.])
print(scalar1)
scalar2 = torch.tensor([3.])
print(scalar2)

# Element-wise addition of the two tensors.
add_scalar = scalar1 + scalar2
print(add_scalar)
4857977 | <gh_stars>0
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .. import settings
# Declarative base shared by all model classes in this package.
_base = declarative_base()
# Per-database engine and scoped-session registries, keyed by settings name.
_engines = dict()
_sessions = dict()
def get_connection_string(engine, user, password, host, port, name, **options):
    """Build a SQLAlchemy database URL.

    Only the ``postgresql`` engine is supported; any extra keyword
    arguments are appended as ``key=value`` query-string options.

    Raises:
        NotImplementedError: for any engine other than ``postgresql``.
    """
    if engine != 'postgresql':
        raise NotImplementedError('Engine type not supported.')
    # NOTE(review): user/password are interpolated verbatim (no URL quoting),
    # matching the original behaviour; credentials containing special
    # characters would need quoting.
    conn = f"postgresql://{user}:{password}@{host}:{port}/{name}"
    if options:
        # Bug fix: the original concatenated options without '&' separators,
        # producing an invalid query string when more than one was given.
        conn += '?' + '&'.join(f'{key}={value}' for key, value in options.items())
    return conn
# Build one engine and one scoped session per configured database.
# NOTE(review): the loop variable `settings` shadows the imported `settings`
# module; it works because the iterator is captured before rebinding, but a
# different name would be safer — confirm before renaming.
for db, settings in settings.DATABASES.items():
    extra_config = dict()
    connection_string = get_connection_string(
        engine=settings['ENGINE'],
        host=settings['HOST'],
        name=settings['NAME'],
        port=settings['PORT'],
        user=settings['USER'],
        password=settings['PASSWORD'],
        **settings['OPTIONS']
    )
    engine = create_engine(connection_string, **extra_config)
    # Thread-local session registry bound to this engine.
    session = scoped_session(sessionmaker())
    session.configure(bind=engine)
    _engines[db] = engine
    _sessions[db] = session
| StarcoderdataPython |
1911145 | #!/usr/bin/python3
# First comment
print("hello,word") # second comment
# Multi-line comments can be written with triple-quoted strings;
# the originals (in Chinese) were translated and converted to `#` comments.
print('hello,python')

# A statement can span multiple lines with a trailing backslash.
# Renamed from `sum` so the built-in sum() is not shadowed.
total = 1 + 2 \
    + 3
print(total)

# Number types: Python has four numeric types —
#   int (e.g. 1; a single arbitrary-precision type, no Python-2 long),
#   bool (e.g. True), float (e.g. 1.23, 3E-2), complex (e.g. 1 + 2j).
print(type(1000))
print(type(True))
print(type(1.23))
print(type(1 + 2j))

# Strings: single and double quotes are equivalent; triple quotes allow
# multi-line strings; backslash escapes (r"..." disables them); strings are
# immutable and sliced as variable[start:stop:step], with negative indexes
# counting from the right.
# Renamed from `str` so the built-in str() is not shadowed.
text = '123456'
print(text[1:3])

# Wait for user input.
print(input("\n\n按下 enter 键后退出。\n"))

# print() ends with a newline by default; pass end="" to suppress it.
print('张三', end="")
print('李四', end="")

# import / from...import bring modules or individual names into scope:
#   import somemodule
#   from somemodule import somefunction
#   from somemodule import firstfunc, secondfunc, thirdfunc
#   from somemodule import *
import sys
print('================Python import mode==========================')
print('命令行参数为:')
for i in sys.argv:
    print(i)
print('\n python 路径为', sys.path)
8182524 | <filename>openkongqi/exceptions.py<gh_stars>0
# -*- coding: utf-8 -*-
class OpenKongqiError(Exception):
    """Base class for all openkongqi errors."""


class ConfigError(OpenKongqiError):
    """Raised for configuration-related failures."""


class CacheError(OpenKongqiError):
    """Raised for cache-related failures."""


class SourceError(OpenKongqiError):
    """Raised for data-source-related failures."""


class FeedError(OpenKongqiError):
    """Raised for feed-related failures."""


class UUIDNotFoundError(OpenKongqiError):
    """Raised when a requested UUID cannot be found."""


class APIKeyNotFoundError(OpenKongqiError):
    """Raised when a required API key cannot be found."""
| StarcoderdataPython |
3401880 | <filename>SourceCode/Module5/global_variables1.py
"""
Demonstrates a global variable
"""
# Module-level (global) variable read by showvalue().
my_value = 10


def showvalue():
    """Print the current value of the global ``my_value``."""
    print(my_value)


# Demonstrate reading the global from inside a function.
showvalue()
| StarcoderdataPython |
3226922 | from indra.tools import assemble_corpus as ac
# Registry of path-filter callables, keyed by function name.
filter_functions = {}


def register_filter(function):
    """Decorator registering *function* as a path filter for tests.

    The decorated function should take an agent and return True when the
    agent is allowed to be in a path, False otherwise.  It is stored in
    ``filter_functions`` under its ``__name__`` and returned unchanged.
    """
    name = function.__name__
    filter_functions[name] = function
    return function
@register_filter
def filter_chem_mesh_go(agent):
    """Return True if the agent is allowed in a path.

    An agent passes unless its primary grounding namespace is MESH, CHEBI
    or GO, or it is ungrounded (namespace None).

    NOTE(review): the original docstring claimed agents "also grounded to
    HMDB" were exempt, but no HMDB check exists in the code — confirm intent.
    """
    gr = agent.get_grounding()
    return gr[0] not in {'MESH', 'CHEBI', 'GO', None}
| StarcoderdataPython |
3539936 | <reponame>shouvikch97/Hacktoberfest-2k20
import numpy as np
class Function:
    """A composable differentiable function node (chain of responsibility).

    Each node wraps an optional predecessor ``before``; forward passes feed
    the predecessor's output into this node, and backward passes apply the
    chain rule, letting each node update its own parameters.
    """

    def __init__(self, before=None):
        self.before = before
        if before is None:
            # No predecessor: identity forward, derivative 1.
            self.before_forward = lambda x: x
            self.before_diff = lambda _: 1
        else:
            self.before_forward = before.forward
            self.before_diff = before.diff

    def _forward(self, x):
        """Node-local forward computation (subclass hook)."""
        pass

    def _diff(self, x):
        """Node-local derivative w.r.t. its input (subclass hook)."""
        pass

    def _backward(self, diff):
        """Apply a parameter update from the scaled gradient (subclass hook)."""
        pass

    def _str(self):
        """One-line description of this node (subclass hook)."""
        pass

    def forward(self, x):
        """Evaluate the whole chain up to and including this node."""
        return self._forward(self.before_forward(x))

    def diff(self, x):
        """Chain rule: this node's derivative times the predecessor's."""
        return self._diff(self.before_forward(x)) * self.before_diff(x)

    def backward(self, x, diff):
        """Propagate the scaled error back through the chain, updating params."""
        diff = diff * self._diff(self.before_forward(x))
        self._backward(diff)
        if self.before is not None:
            self.before.backward(x, diff)

    def __str__(self):
        # Render the chain front-to-back with arrows between nodes.
        ret = ''
        if self.before is not None:
            ret += str(self.before) + '\n ↓\n'
        else:
            ret += '[Input]\n ↓\n'
        ret += self._str()
        return ret
class Sigmoid(Function):
    """Logistic activation node: sigma(x) = 1 / (1 + e^-x)."""

    def _forward(self, x):
        # Same arithmetic as the classic 1 / (1 + exp(-x)).
        z = np.exp(-x)
        return 1 / (1 + z)

    def _diff(self, x):
        # sigma'(x) written as e^x / (1 + e^x)^2.
        e = np.exp(x)
        return e / (1 + e) ** 2

    def _backward(self, diff):
        # No trainable parameters; nothing to update.
        pass

    def _str(self):
        return '[Sigmoid Function]'
class Neuron(Function):
    """A single linear unit: forward(x) = x . param (no bias term)."""

    def __init__(self, size, before=None):
        super().__init__(before)
        self.size = size
        # Weights initialised uniformly in [0, 1).
        self.param = np.random.random_sample(size)

    def _forward(self, x):
        # Dot product of the input with the weight vector.
        return np.dot(x, self.param)

    def _diff(self, x):
        # d(x . w)/dw = x.
        return x

    def _backward(self, diff):
        # Accumulate the (already learning-rate-scaled) update in place.
        self.param += diff

    def _str(self):
        return '[Neuron] = ' + str(self.param)
if __name__ == '__main__':
    # Tiny demo: train a single sigmoid neuron on two 3-feature samples.
    data_size = 2
    train_input = np.array([[1, 2, 3], [2, 3, 4]])
    train_output = np.array([0.1, 0.9])
    learning_rate = 0.6
    total_step = 100000
    # Network = Neuron(3 weights) followed by a Sigmoid activation.
    network = Sigmoid(Neuron(3))
    tot_loss = 0
    for i in range(total_step):
        tot_loss = 0
        for j in range(data_size):
            output = network.forward(train_input[j])
            # Signed error (target - prediction); summed as the epoch "loss".
            loss = train_output[j] - output
            tot_loss += loss
            # Backprop the learning-rate-scaled error through the chain.
            network.backward(train_input[j], loss * learning_rate)
        if i % 100 == 0:
            print('total loss on iter {}: {}'.format(i, tot_loss))
    print('Result Network')
    print(network)
    print('loss = {}'.format(tot_loss))
| StarcoderdataPython |
329415 | <gh_stars>1-10
import os
import datetime
import logging
import re
import requests
import json
from requests.auth import HTTPBasicAuth
from dateutil.parser import parse as parse_datetime
from typing import List
import azure.functions as func
from .sentinel_connector import AzureSentinelConnector
from .state_manager import StateManager
# Silence Azure's verbose per-request HTTP logging.
logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.ERROR)

# Required settings, read from the Function App's environment (raise KeyError
# at import time if missing).
CISCO_SE_API_API_HOST = os.environ['CISCO_SE_API_API_HOST']
CISCO_SE_API_CLIENT_ID = os.environ['CISCO_SE_API_CLIENT_ID']
CISCO_SE_API_KEY = os.environ['CISCO_SE_API_KEY']
WORKSPACE_ID = os.environ['WORKSPACE_ID']
SHARED_KEY = os.environ['SHARED_KEY']
FILE_SHARE_CONN_STRING = os.environ['AzureWebJobsStorage']
LOG_TYPE = 'CiscoSecureEndpoint'

# Log Analytics ingestion endpoint: optional override, defaulting to the
# workspace's standard .ods.opinsights.azure.com URI, then validated.
LOG_ANALYTICS_URI = os.environ.get('logAnalyticsUri')
if not LOG_ANALYTICS_URI or str(LOG_ANALYTICS_URI).isspace():
    LOG_ANALYTICS_URI = 'https://' + WORKSPACE_ID + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$'
match = re.match(pattern, str(LOG_ANALYTICS_URI))
if not match:
    raise Exception("Invalid Log Analytics Uri.")
def main(mytimer: func.TimerRequest) -> None:
    """Timer-triggered entry point: pull Cisco SE audit logs and events
    into Azure Sentinel.

    Each stream keeps its own high-watermark timestamp in file-share state
    so the next run resumes where this one stopped.
    """
    logging.info('Script started.')
    sentinel = AzureSentinelConnector(LOG_ANALYTICS_URI, WORKSPACE_ID, SHARED_KEY, LOG_TYPE, queue_size=1000)
    # Separate checkpoint files per stream.
    audit_logs_state_manager = StateManager(FILE_SHARE_CONN_STRING, file_path='cisco_se_auditlogs_last_ts')
    events_state_manager = StateManager(FILE_SHARE_CONN_STRING, file_path='cisco_se_events_last_ts')
    cli = CiscoAMPClient(CISCO_SE_API_API_HOST, CISCO_SE_API_CLIENT_ID, CISCO_SE_API_KEY)

    # --- Audit logs ---
    audit_logs_last_ts = audit_logs_state_manager.get()
    audit_logs_last_ts = parse_date_from(audit_logs_last_ts)
    logging.info(f'Getting audit logs from {audit_logs_last_ts}')
    for events in cli.get_audit_logs(audit_logs_last_ts):
        for event in events:
            sentinel.send(event)
        sentinel.flush()
        # Advance and persist the checkpoint after every page.
        audit_logs_last_ts = get_last_event_ts(events=events, last_ts=audit_logs_last_ts, field_name='created_at')
        if isinstance(audit_logs_last_ts, datetime.datetime):
            audit_logs_state_manager.post(audit_logs_last_ts.isoformat())

    # --- Events ---
    events_last_ts = events_state_manager.get()
    events_last_ts = parse_date_from(events_last_ts)
    logging.info(f'Getting events from {events_last_ts}')
    for events in cli.get_events(events_last_ts):
        for event in events:
            sentinel.send(event)
        sentinel.flush()
        events_last_ts = get_last_event_ts(events=events, last_ts=events_last_ts, field_name='date')
        if isinstance(events_last_ts, datetime.datetime):
            events_state_manager.post(events_last_ts.isoformat())

    logging.info(f'Script finished. Total sent records: {sentinel.successfull_sent_events_number}')
class CiscoAMPClient:
    """Thin client for the Cisco Secure Endpoint (AMP) v1 REST API.

    Handles host validation, HTTP basic auth and cursor pagination for the
    audit-log and event endpoints.  The two public generators previously
    duplicated the request/pagination code; it now lives in ``_paginate``.
    """

    def __init__(self, host, client_id, api_key):
        host = host.lower()
        self._validate_host(host)
        self.host = host
        self._client_id = client_id
        self._api_key = api_key

    def _validate_host(self, host):
        """Raise ValueError unless *host* is a known AMP API endpoint."""
        hosts_list = [
            'api.amp.cisco.com',
            'api.apjc.amp.cisco.com',
            'api.eu.amp.cisco.com'
        ]
        if host not in hosts_list:
            raise ValueError(f'Host {host} is not correct. Use one of {hosts_list}.')

    @staticmethod
    def _start_param(start_time):
        """Serialize *start_time* to ISO-8601 for an exclusive lower bound.

        Returns None when *start_time* is not a datetime (no filter).
        """
        if isinstance(start_time, datetime.datetime):
            # Nudge by 1 microsecond so the last-seen record is not re-fetched.
            return (start_time + datetime.timedelta(microseconds=1)).isoformat()
        return None

    def _paginate(self, url, params):
        """Yield each page's 'data' list, following metadata.links.next.

        Raises a generic Exception on any non-2xx response (same error
        contract as the original code).
        """
        auth = HTTPBasicAuth(self._client_id, self._api_key)
        res = requests.get(url, params=params, auth=auth, timeout=30)
        while True:
            if not res.ok:
                raise Exception(f'Error while calling Cisco API. Response code: {res.status_code}')
            payload = json.loads(res.text)
            yield payload['data']
            next_link = payload['metadata']['links'].get('next')
            if not next_link:
                return
            # Cursor links already embed all query parameters.
            res = requests.get(next_link, auth=auth, timeout=30)

    def get_audit_logs(self, start_time: datetime.datetime):
        """Yield pages (lists) of audit-log entries created after *start_time*."""
        params = {'limit': 500}
        start = self._start_param(start_time)
        if start is not None:
            params['start_time'] = start
        yield from self._paginate(f'https://{self.host}/v1/audit_logs', params)

    def get_events(self, start_time: datetime.datetime):
        """Yield pages (lists) of events dated after *start_time*."""
        params = {'limit': 500}
        start = self._start_param(start_time)
        if start is not None:
            params['start_date'] = start
        yield from self._paginate(f'https://{self.host}/v1/events', params)
def parse_date_from(date_from: str) -> datetime.datetime:
    """Best-effort conversion of *date_from* to a datetime.

    Tries dateutil's parser on the input; anything that cannot be parsed
    (None, garbage strings, ...) falls back to "24 hours ago" in UTC.
    A datetime passed in is returned unchanged.
    """
    try:
        date_from = parse_datetime(date_from)
    except Exception:
        # Narrowed from a bare `except:`; parsing is best-effort by design.
        pass
    if not isinstance(date_from, datetime.datetime):
        date_from = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc) - datetime.timedelta(days=1)
    return date_from
def get_last_event_ts(events: List[dict], last_ts: datetime.datetime, field_name: str) -> datetime.datetime:
    """Return the newest timestamp among the *field_name* values of *events*.

    Each value is parsed with dateutil when possible; missing or unparseable
    values are skipped.  *last_ts* is only ever advanced (never lowered) and
    is returned unchanged when it is not itself a datetime.
    """
    for event in events:
        event_ts = event.get(field_name)
        try:
            event_ts = parse_datetime(event_ts)
        except Exception:
            # Narrowed from a bare `except:`; the value may already be a
            # datetime or be unparseable — both cases are handled below.
            pass
        if isinstance(event_ts, datetime.datetime):
            if isinstance(last_ts, datetime.datetime) and event_ts > last_ts:
                last_ts = event_ts
    return last_ts
| StarcoderdataPython |
6571926 | <filename>web/JPS_EMISSIONS/python/latest/post_process_ccs.py
# ### 1.5 function that post process the ccs results
import numpy as np
import pandas as pd
# define a function that could calculate the overall annual emission and lock emission
def bau_ccs_post(df):
    """Post-process CCS results: add cumulative annual and lock-in emissions.

    For each fuel (coal, gas, oil) the frame must provide, per year-row:
      * ``<fuel>_power_annual_emission_existing`` and ``..._new``
      * ``<fuel>_power_lock_emission_existing`` and ``..._new``

    Adds ``<fuel>_annual_emission`` (existing plus cumulative new capacity)
    and ``<fuel>_lock_emission`` (existing plus new capacity decayed by
    2.5 % per year of age: factor ``1 - 0.025 * (i - j)`` for capacity
    added in year j, evaluated in year i).

    Returns the same DataFrame with the six new columns filled in.
    The per-fuel logic was previously triplicated and hard-coded to
    36 rows; it is now generalized to ``len(df)``.
    """
    n = len(df)

    for fuel in ('coal', 'gas', 'oil'):
        existing_annual = df[f'{fuel}_power_annual_emission_existing'].values
        new_annual = df[f'{fuel}_power_annual_emission_new'].values
        existing_lock = df[f'{fuel}_power_lock_emission_existing'].values
        new_lock = df[f'{fuel}_power_lock_emission_new'].values

        # Annual: existing plus the running total of new additions
        # (cumsum reproduces the original j <= i accumulation order).
        df[f'{fuel}_annual_emission'] = existing_annual + np.cumsum(new_annual)

        # Lock-in: each vintage j decays linearly with its age (i - j).
        lock = np.zeros(n)
        for i in range(n):
            added = 0.0
            for j in range(i + 1):
                added += new_lock[j] * (1 - 0.025 * (i - j))
            lock[i] = existing_lock[i] + added
        df[f'{fuel}_lock_emission'] = lock

    return df
# define a function that could select the useful columns from the table
def ccs_results (ccs):
    """Return *ccs* narrowed down to the year / capacity / emission columns."""
    # NOTE(review): several "..._power_annual_emission_newN" names below appear
    # twice on the same line, so the returned frame carries duplicated columns.
    # This looks like a copy-paste slip for the matching lock-emission columns;
    # confirm the intended names before changing them.
    selected = ['year',
                'coal_power_capacity_GW','coal_power_capacity_existing','coal_power_capacity_new',
                'coal_annual_emission','coal_power_annual_emission_existing','coal_power_annual_emission_new',
                'coal_lock_emission','coal_power_lock_emission_existing','coal_power_lock_emission_new',
                'gas_power_capacity_GW','gas_power_capacity_existing','gas_power_capacity_new',
                'gas_annual_emission','gas_power_annual_emission_existing','gas_power_annual_emission_new',
                'gas_lock_emission','gas_power_lock_emission_existing','gas_power_lock_emission_new',
                'oil_power_capacity_GW','oil_power_capacity_existing','oil_power_capacity_new',
                'oil_annual_emission','oil_power_annual_emission_existing','oil_power_annual_emission_new',
                'oil_lock_emission','oil_power_lock_emission_existing','oil_power_lock_emission_new',
                'coal_power_capacity_new1','coal_power_annual_emission_new1','coal_power_annual_emission_new1',
                'coal_power_capacity_new2','coal_power_annual_emission_new2','coal_power_annual_emission_new2',
                'gas_power_capacity_new1','gas_power_annual_emission_new1','gas_power_annual_emission_new1',
                'gas_power_capacity_new2','gas_power_annual_emission_new2','gas_power_annual_emission_new2',
                'oil_power_capacity_new1','oil_power_annual_emission_new1','oil_power_annual_emission_new1',
                'oil_power_capacity_new2','oil_power_annual_emission_new2','oil_power_annual_emission_new2']
    return ccs[selected]
6413873 | <reponame>adh/appshell
from appshell.skins import Skin
from appshell.assets import assets, appshell_components_css
from flask_assets import Bundle
from appshell import current_appshell
from subprocess import check_output
from markupsafe import Markup
# JS bundle: AdminLTE's slimscroll plugin plus the AdminLTE app core.
adminlte_js = Bundle('appshell/adminlte/plugins/jquery.slimscroll.min.js',
                     'appshell/adminlte/dist/js/app.min.js',
                     output="appshell/adminlte.js")
assets.register("appshell_adminlte_js", adminlte_js)
# Base CSS bundle: compiled from the AdminLTE LESS sources.
adminlte_css = Bundle('appshell/adminlte/build/less/AdminLTE.less',
                      filters='less', output='appshell/adminlte.css')
assets.register("appshell_adminlte_css", adminlte_css)
# Full CSS bundle: AdminLTE base + appshell components + local fixups.
adminlte_full_css = Bundle(adminlte_css, appshell_components_css,
                           'appshell/adminlte/appshell-fixups.css',
                           output='appshell/adminlte-full.css')
assets.register("appshell_adminlte_full_css", adminlte_full_css)
def get_version():
    """Return the current git version (``git describe --always --tags``).

    Fixes two issues in the original:
    - the command was passed as a single string with ``shell=True``; an
      argument list with no shell avoids shell parsing entirely, and
    - git's trailing newline leaked into the returned string (it ended up in
      the rendered footer); it is stripped here.
    """
    return check_output(["git", "describe", "--always",
                         "--tags"]).decode("utf-8").strip()
class BaseAdminLTESkin(Skin):
    """Common behaviour for the AdminLTE-based skins.

    Handles colour-scheme / skin-file selection, footer configuration and the
    logo and base-template lookups shared by the sidebar and top-nav variants.
    """

    # Pixels subtracted from the viewport height when sizing page content.
    height_decrement = 290
    footer_right = None
    footer_left = Markup(" ")

    def __init__(self, colorscheme='blue',
                 skin_filename=None,
                 skin_class=None,
                 footer=True,
                 footer_right=None,
                 footer_left=None,
                 favicon=None,
                 get_version=get_version,
                 top_nav_add=[]):
        # BUG FIX: `top_nav_add` previously defaulted to a mutable list
        # literal, which is created once and shared by every instance built
        # with the default; copy it so each instance owns its own list.
        self.top_nav_add = list(top_nav_add)
        self.favicon = favicon
        if skin_filename is not None:
            # An explicit skin file (and css class) overrides the colour scheme.
            self.skin_less_file = skin_filename
            self.skin_css_class = skin_class
        else:
            self.colorscheme = colorscheme
            self.skin_less_file = "appshell/adminlte/build/less/skins/skin-{}.less"\
                                  .format(self.colorscheme)
            self.skin_css_class = "skin-{}".format(self.colorscheme)

        self.want_footer = footer
        if not footer:
            # Without a footer bar the content area gains that height back.
            self.height_decrement -= 45
        else:
            # Default right-hand footer text is the application version.
            if footer_right is None:
                self.footer_right = "v. " + get_version()
            else:
                self.footer_right = footer_right
            if footer_left is not None:
                self.footer_left = footer_left

    @property
    def footer_data(self):
        # No extra footer data by default; subclasses may override.
        pass

    def get_extra_head(self):
        """Return extra <head> markup (the favicon link, when configured)."""
        if self.favicon:
            return (Markup('<link rel="shortcut icon" href="{}" />')
                    .format(self.favicon))
        return ''

    def get_base_template(self, module):
        return "appshell/adminlte/base.html"

    def get_small_logo(self):
        return current_appshell.app_name

    def get_large_logo(self):
        return current_appshell.app_name
class AdminLTESkin(BaseAdminLTESkin):
    """AdminLTE variant with a fixed left sidebar hosting the primary menu."""

    def initialize(self, appshell):
        super(AdminLTESkin, self).initialize(appshell)
        appshell.default_menu_position = 'primary'
        self.menu_positions = ['left', 'right', 'primary']
        self.want_sidebar = True

    def body_classes(self, page_layout=None):
        # AdminLTE's "fixed" layout keeps the sidebar pinned while scrolling.
        return "fixed"

    def build_sidebar_menus(self):
        """Return [(menu, extra_classes)] pairs for the sidebar."""
        primary = current_appshell.menu['primary'].build_real_menu()
        return [(primary, '')]
class NavbarAdminLTESkin(BaseAdminLTESkin):
    """AdminLTE variant with a top navigation bar instead of a sidebar."""

    height_decrement = 300

    def initialize(self, appshell, boxed=True):
        super(NavbarAdminLTESkin, self).initialize(appshell)
        self.want_sidebar = False
        self.boxed = boxed

    def body_classes(self, page_layout=None):
        # Fluid pages (and non-boxed skins) never use the boxed layout.
        if page_layout == 'fluid' or not self.boxed:
            return "layout-top-nav"
        return "layout-top-nav layout-boxed"
| StarcoderdataPython |
324134 | <reponame>inovex/multi2convai<gh_stars>1-10
from abc import abstractmethod
from enum import Enum
from pathlib import Path
from multi2convai.data.label import Label
from multi2convai.pipelines.base import BasePipeline, BasePipelineConfig
from multi2convai.pipelines.multilingual_domain_mappings import Multi2ConvAIMapping
class ClassificationConfig(BasePipelineConfig):
    """Configuration for inference classification pipelines.

    Bundles the language/domain a pipeline supports together with the label
    mapping file and the model-specific configuration.

    Args:
        language (str): language supported by pipeline
        domain (str): domain supported by pipeline
        label_dict_file (Path): path to file storing label mapping
        model_config (:class:`~multi2convai.main.pipelines.base.BasePipelineConfig`): model specifics
    """
    def __init__(
        self,
        language: str,
        domain: str,
        label_dict_file: Path,
        model_config: BasePipelineConfig,
    ):
        super().__init__()
        self.language = language
        self.domain = domain
        self.label_dict_file = label_dict_file
        self.model_config = model_config
class ClassificationPipeline(BasePipeline):
    """Base class for inference classification pipelines.

    Args:
        config (:class:`~ClassificationConfig`): pipeline configuration
        multilingual_domain_mapping (Enum): mapping from domain to language group for multilingual models.
    """
    def __init__(
        self,
        config: ClassificationConfig,
        multilingual_domain_mapping: Enum = Multi2ConvAIMapping,
    ):
        super().__init__(config)
        self.multilingual_domain_mapping = multilingual_domain_mapping

    def is_language_supported(self, language: str) -> bool:
        """Checks whether given language code belongs to one of the supported languages.

        Args:
            language (str): check if this language is supported by the pipeline

        Returns:
            bool: indicating if the given language is supported by the loaded model
        """
        if self.config.language.lower() == "ml":
            # Multilingual setting: a domain maps to a whole language group.
            supported = self.multilingual_domain_mapping[
                self.config.domain.upper()
            ].value
        else:
            # Monolingual setting: only the configured language is supported.
            supported = [self.config.language.lower()]
        return language.lower() in supported

    @abstractmethod
    def run(self, text: str, *args, **kwargs) -> Label:
        """Runs a classification on the given text.

        Args:
            text (str): text to be classified

        Returns:
            :class:`~multi2convai.main.data.label.Label`: label with confidence score assigned to the given text
        """
        raise NotImplementedError
| StarcoderdataPython |
1679397 | <filename>src/clims/services/transition.py
from __future__ import absolute_import
from django.db import transaction
from clims.models.transition import Transition as TransitionModel
from clims.models.transition import TransitionType
from clims.services.container import IndexOutOfBounds, PlateIndex
class TransitionService:
    """Creates Transition records that move or spawn substances between container positions."""
    def __init__(self, app):
        # `app` exposes the container service used to resolve positions.
        self._app = app
    def parse_position(self, transition, containers, prefix):
        """Resolve the ``<prefix>_position`` entry of *transition*.

        Looks up the referenced container in *containers* and the substance
        currently at the given index. Raises ``IndexOutOfBounds`` (via the
        container lookup) when the index is invalid.
        """
        position = transition['{}_position'.format(prefix)]
        container_id = position["container_id"]
        container = next((c for c in containers if c.id == container_id), None)
        index = position["index"]
        # This will throw an IndexOutOfBounds if the position is invalid
        substance = container[index]
        return ({
            "container": container,
            "index": index,
            "substance": substance
        })
    def batch_create(self, work_batch_id, transitions):
        """Create every transition in *transitions*; returns their ids.

        All involved containers are batch-fetched up front so each individual
        create() call can resolve positions without extra lookups.
        """
        container_ids = set()
        for transition in transitions:
            container_ids.add(transition["source_position"]["container_id"])
            container_ids.add(transition["target_position"]["container_id"])
        containers = self._app.containers.batch_get(container_ids)
        transition_ids = []
        for transition in transitions:
            tid = self.create(transition, containers, work_batch_id)
            transition_ids.append(tid)
        return transition_ids
    # TODO: CLIMS-401 - ensure atomic transaction only commits after plugin logic runs
    @transaction.atomic
    def create(self, transition, containers, work_batch_id):
        """
        Creates a new Transition.

        Validates source and target positions, moves the source substance (or
        a newly spawned child, for SPAWN transitions) to the target position,
        and persists a Transition record tying both locations to the batch.
        Returns the new transition's id.
        """
        try:
            source = self.parse_position(transition, containers, 'source')
        except IndexOutOfBounds:
            raise AssertionError("Source position invalid: '{}'".format(transition))
        try:
            target = self.parse_position(transition, containers, 'target')
        except IndexOutOfBounds:
            raise AssertionError("Target position invalid: '{}'".format(transition))
        source_substance = source["substance"]
        if source_substance is None:
            raise AssertionError("Source substance not found: '{}'".format(source))
        # Capture the source location before the substance is moved below.
        source_location = source_substance.raw_location()
        transition_type = TransitionType.from_str(transition["type"])
        if not TransitionType.valid(transition_type):
            raise AssertionError("Invalid transition type: '{}'".format(transition["type"]))
        # If transition type is SPAWN, create a child substance
        substance = source_substance
        if transition_type == TransitionType.SPAWN:
            substance = source_substance.create_child()
        # Move substance regardless of whether this is a "spawn" or "move"
        target_loc = PlateIndex.from_string(target["container"], target["index"])
        substance.move(target["container"], (target_loc.x, target_loc.y, target_loc.z))
        substance.save()
        # The target location only exists once the move has been saved.
        target_location = substance.raw_location()
        # 3. create transition record
        transition = TransitionModel(
            work_batch_id=work_batch_id,
            source_location=source_location,
            target_location=target_location,
            transition_type=transition_type,
        )
        transition.save()
        return transition.id
| StarcoderdataPython |
6704399 | import pytest
import autofit as af
from autofit import graphical as g
@pytest.fixture(name="prior")
def make_prior():
    """A Gaussian prior centred on 100 with sigma 10."""
    gaussian = af.GaussianPrior(100, 10)
    return gaussian
@pytest.fixture(name="hierarchical_factor")
def make_hierarchical_factor(prior):
    """A hierarchical Gaussian factor with *prior* registered as a drawn variable."""
    mean_prior = af.GaussianPrior(mean=100, sigma=10)
    sigma_prior = af.GaussianPrior(mean=10, sigma=1)
    hierarchical = g.HierarchicalFactor(
        af.GaussianPrior,
        mean=mean_prior,
        sigma=sigma_prior,
    )
    hierarchical.add_drawn_variable(prior)
    return hierarchical
| StarcoderdataPython |
3379243 | import os
import threading
import time
import yarntf
def cluster_spec_test():
    """Spawn three worker threads building cluster specs.

    Assumes ClusterSpecGeneratorServer is running on localhost:50052.
    """
    workers = []
    os.environ['TENSORBOARD'] = 'true'
    for index in range(0, 3):
        # NOTE(review): environment variables are process-global; the sleep
        # below presumably gives each thread time to read TB_DIR before it is
        # overwritten on the next iteration — confirm.
        os.environ['TB_DIR'] = 'tensorboard_' + str(index)
        worker = threading.Thread(target=yarntf.createClusterSpec,
                                  args=('localhost:50052', '(appId)', 'worker', index))
        worker.start()
        workers.append(worker)
        time.sleep(2)
    for worker in workers:
        worker.join()
# Run the smoke test when executed as a script.
if __name__ == '__main__':
    cluster_spec_test()
| StarcoderdataPython |
1905595 | from copy import copy
from math import pow
from decimal import Decimal
from dateutil.parser import parse
from vnpy.api.oanda.const import OandaOrderState, OandaOrderType, OandaOrderPositionFill
from vnpy.api.oanda.utils import str2num
from vnpy.trader.vtObject import VtOrderData, VtPositionData, VtAccountData, VtContractData, VtTickData, VtBarData
from vnpy.trader.vtConstant import *
# Public API of this module.
__all__ = [
    "OandaData", "OandaVnpyConvertableData", "OandaAccountProperties",
    "OandaAccountSummary", "OandaOrder", "OandaMarketOrder", "OandaLimitOrder",
    "OandaPositionSide", "OandaPosition", "OandaClientExtensions", "OandaInstrument",
    "OandaTick", "OandaCandlesTick",
]
class OandaData(object):
    """Base class for Oanda API payload objects.

    Subclasses declare their serialisable attribute names in ``KEYS``;
    ``from_dict``/``to_dict`` round-trip exactly those attributes.
    """
    KEYS = []

    @classmethod
    def from_dict(cls, dct):
        """Build an instance from *dct*, copying only the keys listed in KEYS."""
        obj = cls()
        for key in cls.KEYS:
            if key in dct:
                obj.__dict__[key] = dct[key]
        return obj

    def to_dict(self, drop_none=False):
        """Serialise the KEYS attributes; nested OandaData values are recursed.

        With ``drop_none=True``, attributes whose value is None are omitted.
        """
        result = {}
        for key in self.KEYS:
            value = self.__dict__[key]
            if drop_none and value is None:
                continue
            result[key] = value.to_dict() if isinstance(value, OandaData) else value
        return result

    def __repr__(self):
        return "%s.from_dict(%s)" % (self.__class__.__name__, self.__dict__)
class OandaVnpyConvertableData(OandaData):
    """OandaData that can additionally be rendered as vn.py gateway objects."""
    KEYS = []
    def to_vnpy(self, gateway):
        # Subclasses must override and return the vn.py object(s) for `gateway`.
        raise NotImplementedError
class OandaClientExtensions(OandaData):
    """Client-supplied extensions (id/tag/comment) attached to orders or trades."""
    KEYS = ["id", "tag", "comment"]

    def __init__(self):
        # BUG FIX: the original called `super(OandaClientExtensions).__init__()`,
        # which builds an *unbound* super object and never initialises the base
        # class; every sibling class in this module passes `self` as the
        # second argument, as done here.
        super(OandaClientExtensions, self).__init__()
        self.id = None
        self.tag = None
        self.comment = None
class OandaAccountProperties(OandaData):
    """Properties of an Oanda account (id, MT4 id, tags)."""
    KEYS = ["id", "mt4AccountID", "tags"]

    def __init__(self):
        super(OandaAccountProperties, self).__init__()
        # Every serialisable field starts unset until filled by from_dict.
        for name in self.KEYS:
            setattr(self, name, None)
class OandaInstrument(OandaVnpyConvertableData):
    """A tradeable Oanda instrument, convertible to a VtContractData."""
    KEYS = ["name", "type", "displayName", "pipLocation", "displayPrecision",
            "tradeUnitsPrecision", "minimumTradeSize", "maximumTrailingStopDistance",
            "maximumPositionSize", "maximumOrderUnits", "marginRate", "commission"]

    def __init__(self):
        super(OandaInstrument, self).__init__()
        # Every serialisable field starts unset until filled by from_dict.
        for name in self.KEYS:
            setattr(self, name, None)

    def to_vnpy(self, gateway):
        """Build the vn.py contract description for this instrument."""
        contract = VtContractData()
        contract.gatewayName = gateway.gatewayName
        contract.symbol = self.name
        contract.exchange = EXCHANGE_OANDA
        contract.vtSymbol = VN_SEPARATOR.join([contract.symbol, contract.gatewayName])
        contract.name = self.displayName
        contract.productClass = PRODUCT_FOREX
        contract.size = pow(10, self.tradeUnitsPrecision)
        # NOTE: https://www.oanda.com/lang/cns/forex-trading/learn/getting-started/pips
        # priceTick equal to one tenth of a pips.
        contract.priceTick = pow(10, self.pipLocation - 1)
        return {VtContractData: [contract]}
class OandaAccountSummary(OandaVnpyConvertableData):
    """Account summary payload, convertible to a VtAccountData snapshot."""
    KEYS = ["id", "alias", "balance", "createdByUserId", "currency", "hedgingEnabled",
            "lastTransactionID", "marginAvailable", "marginCloseoutMarginUsed", "marginCloseoutNAV",
            "marginCloseoutPercent", "marginCloseoutPositionValue", "marginCloseoutUnrealizedPL",
            "marginRate", "marginUsed", "openPositionCount", "openTradeCount", "pendingOrderCount",
            "pl", "positionValue", "resettablePL", "unrealizedPL", "withdrawalLimit", "NAV"]

    def __init__(self):
        super(OandaAccountSummary, self).__init__()
        # NOTE(review): "createdTime" is initialised here but absent from KEYS,
        # so from_dict/to_dict never carry it — confirm whether that is intended.
        for name in ("id", "alias", "balance", "createdByUserId", "createdTime",
                     "currency", "hedgingEnabled", "lastTransactionID",
                     "marginAvailable", "marginCloseoutMarginUsed",
                     "marginCloseoutNAV", "marginCloseoutPercent",
                     "marginCloseoutPositionValue", "marginCloseoutUnrealizedPL",
                     "marginRate", "marginUsed", "openPositionCount",
                     "openTradeCount", "pendingOrderCount", "pl", "positionValue",
                     "resettablePL", "unrealizedPL", "withdrawalLimit", "NAV"):
            setattr(self, name, None)

    def to_vnpy(self, gateway):
        """Build a vn.py account snapshot for this summary."""
        account = VtAccountData()
        account.accountID = self.id
        account.gatewayName = gateway.gatewayName
        account.vtAccountID = VN_SEPARATOR.join([account.gatewayName, account.accountID])
        account.preBalance = None # NOTE: available?
        account.balance = self.balance # NOTE: or NAV?
        account.available = self.marginAvailable
        account.commission = None # NOTE: available?
        account.margin = self.marginUsed # NOTE: or marginCloseOut?
        account.closeProfit = self.pl # NOTE: or marginCloseOut?
        account.positionProfit = self.unrealizedPL
        return {VtAccountData: [account]}
class OandaOrder(OandaVnpyConvertableData):
    """Common state of an Oanda order; base of the market/limit variants."""
    KEYS = ["id", "type", "createTime", "state",
            "instrument", "units", "timeInForce", "positionFill",
            "fillingTransactionID", "filledTime", "tradeOpenedID", "tradeReducedID",
            "tradeClosedIDs", "cancellingTransactionID", "cancelledTime",
            "clientExtensions", "tradeClientExtensions"]

    def __init__(self, type=None):
        super(OandaOrder, self).__init__()
        # Identity / lifecycle.
        self.id = None
        self.type = type
        self.createTime = None
        self.state = None
        # What is being traded and how.
        self.instrument = None
        self.units = None
        self.timeInForce = None
        self.positionFill = None
        # Fill bookkeeping.
        self.fillingTransactionID = None
        self.filledTime = None
        self.tradeOpenedID = None
        self.tradeReducedID = None
        self.tradeClosedIDs = []
        # Cancellation bookkeeping.
        self.cancellingTransactionID = None
        self.cancelledTime = None
        # Client-supplied extensions.
        self.clientExtensions = None
        self.tradeClientExtensions = None

    @classmethod
    def from_dict(cls, dct):
        """Build an order, parsing the nested client-extension payloads."""
        obj = cls()
        obj.__dict__ = super(OandaOrder, cls).from_dict(dct).__dict__
        ext = obj.clientExtensions
        obj.clientExtensions = ext and OandaClientExtensions.from_dict(ext)
        trade_ext = obj.tradeClientExtensions
        obj.tradeClientExtensions = trade_ext and OandaClientExtensions.from_dict(trade_ext)
        return obj

    def to_vnpy(self, gateway):
        """Render this order as a VtOrderData for the given gateway."""
        order = VtOrderData()
        order.orderID = gateway.getClientOrderID(self.id, self.clientExtensions)
        order.exchangeOrderID = self.id
        order.exchange = EXCHANGE_OANDA
        order.gatewayName = gateway.gatewayName
        order.status = OandaOrderState(self.state).to_vnpy()
        if self.cancellingTransactionID:
            order.cancelDatetime = parse(self.cancelledTime)
            order.cancelTime = order.cancelDatetime.strftime("%H:%M:%S")
        order.orderDatetime = parse(self.createTime)
        order.orderTime = order.orderDatetime.strftime("%H:%M:%S")
        # Oanda encodes direction in the sign of `units`.
        signed_units = str2num(self.units)
        order.direction = DIRECTION_LONG if signed_units > 0 else DIRECTION_SHORT
        order.totalVolume = abs(signed_units)
        order.symbol = self.instrument
        order.offset = OandaOrderPositionFill(self.positionFill).to_vnpy()
        return order
class OandaMarketOrder(OandaOrder):
    """Market order; `priceBound` caps the worst acceptable fill price."""
    KEYS = OandaOrder.KEYS + ["priceBound"]
    def __init__(self):
        super(OandaMarketOrder, self).__init__(type=OandaOrderType.MARKET)
        self.priceBound = None
    def to_vnpy(self, gateway):
        # Map Oanda's price bound onto the vn.py order price field.
        order = super(OandaMarketOrder, self).to_vnpy(gateway)
        order.price = self.priceBound
        return order
class OandaLimitOrder(OandaOrder):
    """Limit order with a trigger price and optional GTD expiry."""
    KEYS = OandaOrder.KEYS + ["price", "gtdTime", "replacesOrderID", "replacedByOrderID"]

    def __init__(self):
        super(OandaLimitOrder, self).__init__(type=OandaOrderType.LIMIT)
        self.price = None
        self.gtdTime = None
        self.replacesOrderID = None
        # BUG FIX: this was `self.replaceByOrderID` (missing "d"), which left
        # the KEYS entry "replacedByOrderID" uninitialised — to_dict() raised
        # KeyError on a freshly constructed limit order.
        self.replacedByOrderID = None

    def to_vnpy(self, gateway):
        # A limit order's vn.py price is its trigger price.
        order = super(OandaLimitOrder, self).to_vnpy(gateway)
        order.price = self.price
        return order
class OandaPositionSide(OandaData):
    """
    Oanda position side data retrieved from exchange's api.
    NOTE: All fields are string, for percision.
    """
    KEYS = ["units", "averagePrice", "tradeIDs", "pl", "unrealizedPL",
            "resettablePL", "financing", "guaranteedExecutionFees"]

    def __init__(self):
        super(OandaPositionSide, self).__init__()
        for name in self.KEYS:
            # tradeIDs collects trade ids into a list; other fields start unset.
            setattr(self, name, [] if name == "tradeIDs" else None)
class OandaPosition(OandaVnpyConvertableData):
    """
    Oanda position data retrieved from the exchange's api.
    NOTE: All fields are strings, for precision, including long and short side.
    """
    KEYS = ["instrument", "pl", "unrealizedPL", "marginUsed", "resettablePL",
            "financing", "commission", "guaranteedExecutionFees", "long", "short"]

    def __init__(self):
        super(OandaPosition, self).__init__()
        self.instrument = None
        self.pl = None
        # BUG FIX: this was `self.unrealizePL` (missing "d"), so the KEYS entry
        # "unrealizedPL" was never initialised — to_dict() raised KeyError on a
        # freshly constructed (not from_dict-built) position.
        self.unrealizedPL = None
        self.marginUsed = None
        self.resettablePL = None
        self.financing = None
        self.commission = None
        self.guaranteedExecutionFees = None
        self.long = None
        self.short = None

    @classmethod
    def from_dict(cls, dct):
        """Build a position, parsing the nested long/short side payloads."""
        obj = cls()
        obj.__dict__ = super(OandaPosition, cls).from_dict(dct).__dict__
        obj.long = OandaPositionSide.from_dict(obj.long)
        obj.short = OandaPositionSide.from_dict(obj.short)
        return obj

    def to_vnpy(self, gateway):
        """Render this position as one VtPositionData per side (long and short)."""
        pos = VtPositionData()
        pos.gatewayName = gateway.gatewayName
        pos.symbol = self.instrument
        pos.vtSymbol = VN_SEPARATOR.join([pos.symbol, pos.gatewayName])
        pos.exchange = EXCHANGE_OANDA
        pos_long = copy(pos)
        pos_long.direction = DIRECTION_LONG
        pos_long.position = abs(str2num(self.long.units))
        pos_long.price = (self.long.averagePrice or 0) and float(self.long.averagePrice)
        pos_long.frozen = pos_long.position
        pos_long.positionProfit = self.long.unrealizedPL
        pos_long.vtPositionName = VN_SEPARATOR.join([pos.vtSymbol, pos_long.direction])
        pos_short = copy(pos)
        pos_short.direction = DIRECTION_SHORT
        pos_short.position = abs(str2num(self.short.units))
        pos_short.price = (self.short.averagePrice or 0) and float(self.short.averagePrice)
        pos_short.frozen = pos_short.position
        pos_short.positionProfit = self.short.unrealizedPL
        pos_short.vtPositionName = VN_SEPARATOR.join([pos.vtSymbol, pos_short.direction])
        return {
            VtPositionData: [pos_long, pos_short],
        }
def parse_datetime_str(ts):
    """Split an ISO-8601 timestamp into (naive datetime, YYYYMMDD date, time).

    The trailing "Z" (UTC designator) is stripped from the time component and
    the parsed datetime is made timezone-naive.
    """
    naive = parse(ts).replace(tzinfo=None)
    date_part, time_part = ts.split("T")
    return naive, date_part.replace("-", ""), time_part.strip("Z")
class OandaTick(OandaVnpyConvertableData):
    """Streaming price tick; converts to a VtTickData with depth-of-book fields."""
    KEYS = ["type", "time", "bids", "asks", "closeoutBid", "closeoutAsk", "status",
        "tradeable", "instrument"]
    def __init__(self):
        self.type = None
        self.time = None
        self.bids = None
        self.asks = None
        self.closeoutBid = None
        self.closeoutAsk = None
        self.status = None
        self.tradeable = None
        self.instrument = None
    @classmethod
    def from_dict(cls, dct):
        """Bypass KEYS filtering: adopt the raw payload dict wholesale."""
        obj = cls()
        obj.__dict__ = dct
        return obj
    def to_vnpy(self, gateway):
        # Build a vn.py tick with numbered bid/ask price and volume levels.
        tick = VtTickData()
        tick.symbol = self.instrument
        tick.exchange = EXCHANGE_OANDA
        tick.gatewayName = gateway.gatewayName
        tick.vtSymbol = VN_SEPARATOR.join([tick.symbol, tick.gatewayName])
        tick.datetime, tick.date, tick.time = parse_datetime_str(self.time)
        ibids = list(range(len(self.bids)))
        iasks = list(range(len(self.asks)))
        # Depth levels become attributes bidPrice1..N / askPrice1..N etc.
        # NOTE(review): prices are converted to float but the liquidity values
        # are stored as-is — confirm whether volumes should also be numeric.
        bids = {"bidPrice%s" % (i + 1): float(v["price"]) for i, v in zip(ibids, self.bids)}
        bid_volumes = {"bidVolume%s" % (i + 1): v["liquidity"] for i, v in zip(ibids, self.bids)}
        asks = {"askPrice%s" % (i + 1): float(v["price"]) for i, v in zip(iasks, self.asks)}
        ask_volumes = {"askVolume%s" % (i + 1) : v['liquidity'] for i, v in zip(iasks, self.asks)}
        tick.__dict__.update(bids)
        tick.__dict__.update(bid_volumes)
        tick.__dict__.update(asks)
        tick.__dict__.update(ask_volumes)
        # Mid price, quantized via Decimal to the same precision as askPrice1.
        tick.lastPrice = float(Decimal(str((tick.askPrice1 + tick.bidPrice1) / 2.0)).quantize(Decimal(str(tick.askPrice1))))
        return {
            VtTickData: [tick],
        }
class OandaCandlesTickData(OandaData):
    """OHLC values (open/high/low/close) of a single candle."""
    KEYS = ["o", "h", "l", "c"]

    def __init__(self):
        # All four OHLC fields start unset until filled by from_dict.
        for name in self.KEYS:
            setattr(self, name, None)
class OandaCandlesTick(OandaData):
    """A single candlestick, optionally carrying bid/ask/mid OHLC sets."""
    KEYS = ["time", "bid", "ask", "mid", "volume", "complete"]

    def __init__(self):
        for name in self.KEYS:
            setattr(self, name, None)

    @classmethod
    def from_dict(cls, dct):
        """Build a candle, parsing whichever OHLC sides the payload carries."""
        obj = cls()
        obj.__dict__ = super(OandaCandlesTick, cls).from_dict(dct).__dict__
        for side in ("bid", "ask", "mid"):
            raw = getattr(obj, side)
            setattr(obj, side, raw and OandaCandlesTickData.from_dict(raw))
        return obj

    @property
    def data(self):
        """Preferred OHLC set: mid, falling back to bid and then ask."""
        return self.mid or self.bid or self.ask

    def to_vnpy_bar(self):
        """Convert this candle to a VtBarData using the preferred OHLC set."""
        bar = VtBarData()
        bar.datetime, bar.date, bar.time = parse_datetime_str(self.time)
        ohlc = self.data
        bar.open = ohlc.o
        bar.high = ohlc.h
        bar.low = ohlc.l
        bar.close = ohlc.c
        return bar
3427914 | <reponame>zidarsk8/aoc2020
test_data = """
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
""".strip()
test_data2 = """
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[9] = 11
mem[10] = 102
mem[9] = 0
""".strip()
test_data3 = """
mask = X10X11X1000101X1XX001100001X101X0111
mem[27041] = 56559
mem[43069] = 56082467
mem[55125] = 25536
mem[13313] = 3968
mask = 0X01110110X10101X01100110000X0X010X0
mem[51810] = 586700041
mem[5546] = 73582083
mem[64563] = 1709385
mask = X10X1X01000X1XXX1011010X0101001X1100
mem[55081] = 164951
mem[57902] = 941479
mem[64412] = 168227
mem[38991] = 7285
mem[32442] = 4026389
mem[13462] = 11389
""".strip()
test_data4 = """
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
""".strip()
data = """
mask = X10X11X1000101X1XX001100001X101X0111
mem[27041] = 56559
mem[43069] = 56082467
mem[55125] = 25536
mem[13313] = 3968
mask = 0X01110110X10101X01100110000X0X010X0
mem[51810] = 586700041
mem[5546] = 73582083
mem[64563] = 1709385
mask = X10X1X01000X1XXX1011010X0101001X1100
mem[55081] = 164951
mem[57902] = 941479
mem[64412] = 168227
mem[38991] = 7285
mem[32442] = 4026389
mem[13462] = 11389
mask = 100XX0010X000001X1X1000110011X000011
mem[30898] = 16273
mem[58759] = 129155
mem[32283] = 16275
mem[40822] = 5428787
mask = 1101110X1101011XX10100111101XX1X0011
mem[49575] = 64412
mem[27128] = 4116
mem[44802] = 1524861
mem[64022] = 21246
mem[43630] = 40764
mask = 1X0X110111010XX111010111X101X0011011
mem[48341] = 23417
mem[53479] = 235442
mem[18336] = 94965893
mem[57351] = 162640
mem[57629] = 1482328
mem[26659] = 30905751
mask = XX0010001X0000011111X1X111X10X11X0X1
mem[24841] = 117268
mem[40922] = 620521271
mem[15784] = 9554135
mem[34841] = 73813
mem[26214] = 1046646796
mem[51219] = 342
mask = 01011X010001X11111010110X1000000011X
mem[30384] = 10767
mem[30180] = 3038
mem[41792] = 1794799
mem[22454] = 141484
mem[8961] = 138657
mask = 1X01010100001X11100100X000X00000X1X0
mem[62834] = 21881
mem[32225] = 113149539
mem[50218] = 84165
mem[39558] = 216497715
mask = 01X0010XX00011X1X0011X001000XX110011
mem[58102] = 854
mem[30215] = 3351
mem[33733] = 166
mem[5725] = 12102
mem[40925] = 663163
mem[4700] = 11609
mem[46222] = 247699901
mask = XX110X11010101101101001XXX1X1X100011
mem[25668] = 1311480
mem[15110] = 43047
mem[20494] = 621
mem[42552] = 885
mem[676] = 440298427
mem[47591] = 1439872
mem[44891] = 239995
mask = 11011XXX0100X001X10X10010110010X11X1
mem[7814] = 84184
mem[54268] = 2433599
mem[50873] = 11428
mem[20156] = 42428
mem[4576] = 13692368
mask = 11010X01100001X110X1X10001X101101X10
mem[40524] = 798
mem[47191] = 3260486
mem[18798] = 86012101
mem[41247] = 21300057
mem[54268] = 74197872
mem[53415] = 68475
mask = 010X1100100X1100X1010011X0X0X0011101
mem[15154] = 7956
mem[43012] = 897
mem[58152] = 25845
mem[766] = 172082371
mem[52128] = 31341204
mask = 1X00X00X110000X111X110XX0111001X0X11
mem[23567] = 917031
mem[35639] = 1135
mem[46408] = 26253
mem[51984] = 269805971
mem[18161] = 145505
mask = 010X100101010X1110X10XX00X11001X0110
mem[39575] = 59052182
mem[44818] = 1753773
mem[54049] = 9062079
mem[7021] = 23033
mask = 1101110111000X0111000010X1010100XX00
mem[1943] = 61503738
mem[61496] = 4688484
mem[12496] = 3724
mem[43497] = 64222
mem[53687] = 7996
mask = 11XXX0010X00001X10XX0000010X0111011X
mem[435] = 14192652
mem[3765] = 846967
mem[61166] = 13875665
mask = 0100010110001X111XXXX100000001010111
mem[3750] = 1921
mem[40035] = 7716582
mem[59566] = 993057
mem[57732] = 1389
mask = 0100000110X00110X100011001XX1X011010
mem[40412] = 246706
mem[63492] = 18123
mem[25668] = 53187
mem[21553] = 25476
mem[30692] = 36784
mem[3183] = 32438
mask = 110X0101X000X11110X11000X010100XX11X
mem[38615] = 506
mem[24603] = 737880
mem[21866] = 390846
mem[4108] = 2001313
mem[44257] = 31442716
mem[41468] = 7966
mem[57394] = 783646
mask = 0X011001100X00111X0110110001X1011101
mem[24387] = 10182
mem[31736] = 115136274
mem[28984] = 2353
mem[2475] = 109336
mask = 110X100X0X00000X1001100X100010000101
mem[51542] = 8298665
mem[57871] = 101098400
mem[50403] = 10935230
mem[22710] = 1959
mem[37641] = 114119
mask = 11X1010X100001X110X110001100X0110011
mem[57394] = 29625020
mem[11168] = 1029
mem[9347] = 1826
mem[11334] = 594
mem[62531] = 2201943
mask = 0X1X01010101X11X1101111X1X00X1010111
mem[41378] = 58920
mem[27145] = 554
mem[60014] = 7131
mask = 11XXX1110X00000X0101100101X0011001X1
mem[28366] = 290247
mem[63097] = 177936
mem[18333] = 14036
mask = 10011X0010X110111011010010X10X001100
mem[52923] = 65816388
mem[23341] = 9880982
mem[15175] = 22538
mask = X101X0110X0100111X10111110100XX10X01
mem[62215] = 13184563
mem[13801] = 49239763
mem[12496] = 892859
mem[37788] = 7710
mem[60433] = 8951271
mask = X11100010X00X010X0000X00000001110110
mem[18584] = 1767
mem[1250] = 44242023
mem[54337] = 1202
mem[16284] = 7015
mask = 11X1X1011000X11X10011010X1X0X00100X1
mem[63358] = 13288368
mem[42217] = 107319
mem[27988] = 123244922
mem[33860] = 182239
mem[40015] = 857415
mem[9829] = 1552
mask = 0101X101100X110X101110X1XX000X0101XX
mem[5707] = 623141
mem[63170] = 3540
mem[49303] = 5946356
mem[34003] = 31894390
mask = 0X010000010001XX1X0101000XXXX000X011
mem[65195] = 9789
mem[49527] = 35294934
mem[61780] = 27702
mem[15175] = 12753075
mem[19444] = 312835
mem[30215] = 26235
mem[56607] = 1184
mask = X101110X1001X00X1011XX01111X00000100
mem[11529] = 202692
mem[7212] = 12087618
mem[13649] = 4152
mem[54165] = 36475777
mem[3587] = 59730191
mask = X10111011101X0X11101111110X100100101
mem[32820] = 988109
mem[58886] = 774558
mem[19222] = 37482
mask = 11010XX1X000011X10110000100001011010
mem[56490] = 41313
mem[60014] = 24800
mem[60019] = 3393
mask = X1000101100011X1X001X1101000000101X1
mem[59526] = 266367133
mem[4762] = 740776
mem[2900] = 137498
mem[19079] = 61895
mem[33262] = 4863
mask = X1011X0XX0X0X11010011X00100011X01X10
mem[48337] = 1047
mem[7814] = 11885727
mem[30993] = 170945
mem[39628] = 1313283
mask = 110X01011000011X100X1101X100X00X0001
mem[46408] = 156741
mem[52622] = 3088962
mem[15423] = 518000
mem[64701] = 514808438
mask = 110101X110000X1X10X11010X110011X1111
mem[43254] = 387
mem[58713] = 8724789
mem[26335] = 544255113
mem[11424] = 47914958
mem[36656] = 224193
mask = 010000XXX0X0011110X10110000X01001X01
mem[63579] = 52105299
mem[37046] = 1134
mask = 11111X0110001X10X001X111X10100111111
mem[61324] = 31303686
mem[64060] = 1610
mem[39793] = 1958162
mem[46164] = 668042
mem[16116] = 13803
mem[35215] = 448312344
mask = X10101X1100011111001101001X0110001X1
mem[7844] = 760372
mem[44693] = 16675
mem[11334] = 16652
mask = 011101X1010101111XX110010101X01X0011
mem[47181] = 37171
mem[57629] = 49275889
mem[18322] = 500908019
mem[46549] = 834444470
mem[13951] = 574868
mem[26538] = 787853
mask = XX011X011100X00111X1X0X001X011X11010
mem[64949] = 162355279
mem[57407] = 3350
mem[43929] = 203041
mask = 1101101100X100110X010001101000110XX1
mem[32532] = 32712949
mem[51621] = 1546586
mem[22570] = 251922029
mem[10937] = 3154069
mem[25790] = 49139
mask = 01011101XX0111X110111011XXX01001010X
mem[24482] = 117679
mem[60433] = 714
mem[23257] = 69062735
mem[28676] = 15016259
mem[32334] = 4194426
mask = 0X000101100001101001XX101111010101X1
mem[31956] = 796
mem[28984] = 1446991
mem[39810] = 27358954
mem[9234] = 19734706
mem[45123] = 247705194
mem[34003] = 35251
mem[16180] = 804679
mask = 1101XXX00000011010X0111010X010010000
mem[13595] = 284
mem[39575] = 14805
mem[41940] = 27710
mem[48667] = 118527
mem[43689] = 5996
mem[55758] = 3036117
mask = 11011XX10101010111011110000100010X0X
mem[10468] = 8121
mem[45166] = 970006437
mask = 11011001X10000X11XX1XXX00XX011000011
mem[18583] = 2551
mem[32334] = 2009
mem[27128] = 56668016
mem[52052] = 750760
mem[49527] = 46604
mem[49214] = 1061
mem[1791] = 11556
mask = 1X0X0101X000011X1X0100110X001001X011
mem[12346] = 178717508
mem[52270] = 311954237
mem[49864] = 7470000
mask = X101010110000X1X10011X01XX101XX11010
mem[37530] = 80
mem[32958] = 28577227
mem[40585] = 8221
mem[57579] = 1646
mem[50218] = 7908
mask = 010011011X0101X100X10001001X00000111
mem[50694] = 457
mem[13360] = 62444656
mem[4404] = 1266571
mask = X101110X00X110X11X1110X1X011101001XX
mem[23104] = 253935942
mem[14852] = 2270
mem[34981] = 1108
mem[53840] = 146384561
mem[47509] = 60247
mem[23085] = 51902845
mem[58655] = 9691
mask = 1X0XXX1100X1X0110X01111X1110000001X1
mem[60433] = 128268
mem[57082] = 816691399
mem[45011] = 80982
mem[39218] = 1071426371
mask = 11010X011000XX101001X01100001101010X
mem[35350] = 8809868
mem[9433] = 65247
mem[38801] = 2086
mask = 11011101X10XX00X110XX00001X0X110X101
mem[54049] = 8667
mem[46876] = 435193
mem[43949] = 15176016
mem[44664] = 26159
mem[23255] = 895
mask = 01X1X1X1010101111101001111X1X101X1X1
mem[61166] = 504606
mem[61956] = 20721
mem[39929] = 1403
mask = X10X010110000X1110011010110X11110001
mem[40015] = 700480
mem[36436] = 4893
mem[32266] = 25534
mem[41902] = 2988
mem[39810] = 28
mem[41370] = 57925
mem[12356] = 297319
mask = X101111101010XX111X1X01X1X0101X1010X
mem[37641] = 9726
mem[59076] = 168552
mem[45749] = 1746884
mem[17712] = 69612
mem[49214] = 867128713
mem[61637] = 40856300
mem[33365] = 1428019
mask = 1101110X1000X1111X0110001X0001010100
mem[13072] = 10041
mem[53702] = 773477
mem[44645] = 2404420
mem[45439] = 5964
mem[58658] = 208590
mem[44798] = 19402244
mask = 110X11X1100X0111100110001X00X10000X1
mem[10421] = 171265821
mem[12356] = 680301
mem[22675] = 21609725
mask = 011X0111X101011X1X010XX100X010X001X1
mem[32543] = 354353
mem[46511] = 2359
mem[19545] = 27624
mem[27128] = 48860350
mem[4352] = 97594900
mem[7423] = 256
mask = 100XX001010000X11X111X0010X00XX10X10
mem[9234] = 14199
mem[20857] = 105139600
mem[8485] = 397519358
mem[56314] = 191575
mem[56707] = 14956
mem[53089] = 289600
mem[21001] = 13079705
mask = 010111011001XX01X0111001X0XX1001XX0X
mem[106] = 21541596
mem[12371] = 3644659
mem[50177] = 205339532
mem[1812] = 473555543
mem[22675] = 1216737
mask = 1X011001010000X1101100001X1X01X101XX
mem[27858] = 4008
mem[51566] = 49154
mem[4762] = 16827
mem[53603] = 472698
mem[23186] = 5880
mem[53599] = 989
mask = X10X0101100000111001X1XX0X00X1000110
mem[49118] = 921
mem[34859] = 802
mem[55335] = 213213436
mem[12356] = 90675624
mem[34362] = 1372
mem[30455] = 1338
mask = 010100110000001XX001X101011101X1X100
mem[12601] = 13719557
mem[6055] = 1675
mem[22570] = 918
mem[52622] = 2656587
mem[45642] = 5143
mem[16015] = 11484862
mask = 1101X0010100X011X1X100X0X100XX11X111
mem[19222] = 424
mem[59273] = 29846629
mem[17113] = 7689
mem[48356] = 4606
mask = 010X1101X00101010011X00100XXXX1X0X10
mem[15158] = 3693
mem[4535] = 32872078
mem[42138] = 9199883
mem[61166] = 328197677
mem[25980] = 210
mem[38567] = 238946499
mask = X10XX0000X00011X100X1011000111X1X11X
mem[56158] = 7060
mem[1355] = 4692
mem[15605] = 13671929
mem[22570] = 1896780
mem[25653] = 62047211
mem[49303] = 143257122
mask = 1XX11100X01110111011X0X11X11X01000X0
mem[47139] = 5173
mem[33745] = 500428082
mem[55823] = 311
mem[1250] = 296582
mem[63703] = 949
mask = X1011101000X1X0110110X1X0X0110X00110
mem[51810] = 18035546
mem[53428] = 3886
mem[64949] = 645
mem[52996] = 41172825
mem[49452] = 1367
mask = 110XXX011X000X011111110XX010X10X0011
mem[17565] = 16592
mem[49575] = 3981067
mem[4250] = 300364
mem[35350] = 8178
mem[22007] = 21898575
mask = 010X01X110000X101011X001011X111X1X10
mem[18950] = 13300
mem[4487] = 612
mem[12388] = 412719717
mem[44693] = 3118
mask = 1101X10110X0011010010X0100101010X000
mem[48867] = 17605927
mem[8397] = 1860772
mem[39018] = 550228
mem[38250] = 47809475
mem[2450] = 116013203
mem[49776] = 393349
mask = 1101X10110000X11100111X101101100X010
mem[53450] = 656535467
mem[6408] = 21122
mem[40154] = 1342486
mask = 1101100X010X101X0111000101000X101111
mem[4324] = 686588689
mem[64022] = 1565401
mem[35893] = 9127
mem[63924] = 5346
mask = X1011X01XX0X0X111X0111101111X00X0111
mem[18336] = 33319783
mem[9347] = 7493509
mem[6227] = 1816
mem[31665] = 5514016
mem[51408] = 4879
mem[53450] = 114937
mask = 010100XX000000XX10011X0X0011XX011000
mem[46925] = 103
mem[57394] = 965124
mem[15985] = 980924
mem[52270] = 110464
mem[15394] = 1276
mask = XX000100X0X01101X10100XX0XX000001011
mem[13230] = 1399497
mem[64578] = 3309
mem[25149] = 7933
mask = 01010101100X101110X1X00XX00X1111X0X1
mem[16284] = 27584023
mem[46579] = 360009595
mem[30484] = 677717
mem[11059] = 52513896
mem[38700] = 683
mask = 110110X1000X0X11X1XXX11X1000000X0111
mem[16164] = 177
mem[8961] = 6185342
mem[53026] = 72900939
mask = 1XX11X0111001XX11111010X010011111X00
mem[59825] = 37712
mem[42879] = 746543
mem[16391] = 54454690
mem[43036] = 346868
mem[25030] = 38931
mem[5215] = 890634260
mask = 1101110X00001001101111X0XX11001XX100
mem[19490] = 5726
mem[17000] = 16738
mem[61693] = 7616
mem[2295] = 1627
mem[44236] = 269
mem[4389] = 1030099
mem[11170] = 4961
mask = 010X1101101X01011X1110X1X10010X0001X
mem[27835] = 16102
mem[12301] = 8157
mem[38801] = 32685079
mask = X1011101000100111X01011X101110X00111
mem[64949] = 21205
mem[56917] = 416029376
mem[50979] = 2487904
mem[64022] = 2045
mem[13313] = 804618
mem[55628] = 17126
mask = X1010101100X110110X1111X0X00XX0X010X
mem[39628] = 924
mem[39127] = 3978399
mem[36741] = 1917293
mem[62950] = 6997
mask = 01110101010101111X01101XX111X1X0010X
mem[20491] = 500
mem[36269] = 22639420
mem[44179] = 725
mem[48503] = 520056
mem[7514] = 28103
mem[54438] = 410686
mask = 0101XX0X10001X01X01110101100000111X1
mem[45749] = 494350
mem[44246] = 3578
mem[15900] = 1654
mem[32209] = 218393668
mem[30484] = 213583441
mem[25092] = 55738
mask = 01000X011000011X1X0X010100XXXX011110
mem[39783] = 25297
mem[60998] = 35841298
mem[54570] = 2329
mem[41435] = 43307534
mem[55264] = 375757
mem[63681] = 112957
mask = 1101010XX000X1101001101000XX00X1X001
mem[55707] = 16304353
mem[14995] = 11351
mem[12346] = 746754510
mem[11376] = 339
mem[2762] = 51886
mem[26983] = 4316816
mask = 0100X101100001XX1011100101X010111100
mem[25172] = 183861621
mem[5742] = 14878506
mask = X10X01011000X1X010X110X0XX1X01010110
mem[51566] = 2828
mem[18567] = 265
mem[16314] = 423578
mem[5215] = 239
mem[42065] = 2230191
mask = 010X1X010X0X0111X00X1110X001XX010111
mem[10367] = 9431
mem[23257] = 10057
mem[37558] = 789
mem[2683] = 83395426
mem[19984] = 148832616
mask = 110X0101110X0X0111111000001001XX10XX
mem[47509] = 998
mem[41953] = 20146419
mem[44664] = 302148902
mem[63696] = 1160792
mem[17933] = 98120645
mem[49199] = 931033
mem[7279] = 20085
mask = 11X10101X0XX111X101110100X10XX001010
mem[34951] = 7290
mem[40015] = 39139
mem[36460] = 53336927
mask = 0100X10X1000110X1X01X1110X1X0X1100X1
mem[45422] = 1972
mem[54204] = 3976851
mem[53302] = 100688
mem[39162] = 179175
mem[46756] = 354581686
mem[30570] = 4338666
mask = XX01X0X1000X0X11X001011001100XX10101
mem[57138] = 9547723
mem[32548] = 17740188
mem[62284] = 300055571
mask = 010X01011000XXXX1001101X00X01001XX01
mem[21470] = 78562
mem[44450] = 77626
mem[19490] = 33608425
mem[4762] = 21717
mem[26613] = 40
""".strip()
import sys
import warnings
from dataclasses import dataclass, field
from difflib import SequenceMatcher
from typing import Dict, List, NamedTuple, Optional
import numpy as np
from .thermodynamic_restrictions import ComplexFormation, DuplicateError, ThermodynamicRestrictions
# Suggestion produced when a line's rule keyword is not registered:
# ``expected`` is the closest registered rule word (or None when nothing is
# similar enough) and ``original`` is the substring of the user's line that
# resembled it.
UnregisteredRule = NamedTuple(
    "UnregisteredRule",
    [("expected", Optional[str]), ("original", Optional[str])],
)
# Trailing prepositions are trimmed from clause fragments before rule-word
# matching (see ReactionRules._remove_prepositions) so that e.g.
# "dissociates to" and "dissociates" are recognized as the same keyword.
PREPOSITIONS: List[str] = (
    "to for from up down in on at off into around among between "
    "of over above below under through across along near by beside"
).split()
@dataclass
class ReactionRules(ThermodynamicRestrictions):
"""Create an executable biochemical model from text.
.. list-table:: Available reaction rules
:widths: 25 50 25
:header-rows: 1
* - Rule
- Example sentence
- Parameters (optional)
* - :func:`~pasmopy.construction.reaction_rules.dimerize`
- *A* dimerizes <--> *AA*
- .. math:: kf, kr
* - :func:`~pasmopy.construction.reaction_rules.bind`
- *A* binds *B* <--> *AB*
- .. math:: kf, kr
* - :func:`~pasmopy.construction.reaction_rules.dissociate`
- *AB* dissociates to *A* and *B*
- .. math:: kf, kr
* - :func:`~pasmopy.construction.reaction_rules.is_phosphorylated`
- *uA* is phosphorylated <--> *pA*
- .. math:: kf, kr
* - :func:`~pasmopy.construction.reaction_rules.is_dephosphorylated`
- *pA* is dephosphorylated --> *uA*
- .. math:: V, K
* - :func:`~pasmopy.construction.reaction_rules.phosphorylate`
- *B* phosphorylates *uA* --> *pA*
- .. math:: V, K
* - :func:`~pasmopy.construction.reaction_rules.dephosphorylate`
- *B* dephosphorylates *pA* --> *uA*
- .. math:: V, K
* - :func:`~pasmopy.construction.reaction_rules.transcribe`
- *B* transcribes *a*
- .. math:: V, K, n, (KF, nF)
* - :func:`~pasmopy.construction.reaction_rules.is_translated`
- *a* is translated into *A*
- .. math:: kf
* - :func:`~pasmopy.construction.reaction_rules.synthesize`
- *B* synthesizes *A*
- .. math:: kf
* - :func:`~pasmopy.construction.reaction_rules.is_synthesized`
- *A* is synthesized
- .. math:: kf
* - :func:`~pasmopy.construction.reaction_rules.degrade`
- *B* degrades *A*
- .. math:: kf
* - :func:`~pasmopy.construction.reaction_rules.is_degraded`
- *A* is degraded
- .. math:: kf
* - :func:`~pasmopy.construction.reaction_rules.translocate`
- *Acyt* translocates from cytoplasm to nucleus (Vcyt, Vnuc) <--> *Anuc*
- .. math:: kf, kr, (V_{pre}, V_{post})
Attributes
----------
input_txt : str
Model description file (*.txt), e.g.,
`Kholodenko_JBC_1999.txt <https://github.com/pasmopy/pasmopy/blob/master/tests/text_files/Kholodenko_JBC_1999.txt>`_.
parameters : list of strings
``x`` : model parameters.
species : list of strings
``y`` : model species.
reactions : list of strings
``v`` : flux vector.
differential_equations : list of strings
``dydt`` : right-hand side of the differential equation.
obs_desc : list of List[str]
Description of observables.
param_info : list of strings
Information about parameter values.
init_info : list of strings
Information about initial values.
param_constraints : list of strings
Information about parameter constraints.
param_excluded : list of strings
List of parameters excluded from search params because of parameter constraints.
fixed_species : list of strings
List of species which should be held fixed (never consumed) during simulation.
sim_tspan : list of strings ['t0', 'tf']
Interval of integration.
sim_conditions : list of List[str]
Simulation conditions with stimulation.
sim_unperturbed : str
Untreated conditions to get steady state.
rule_words : dict
Words to identify reaction rules.
complex_formations : list
List of ComplexFormation to detect duplicate binding-dissociation.
"""
input_txt: str
parameters: List[str] = field(
default_factory=list,
init=False,
)
species: List[str] = field(
default_factory=list,
init=False,
)
reactions: List[str] = field(
default_factory=list,
init=False,
)
differential_equations: List[str] = field(
default_factory=list,
init=False,
)
obs_desc: List[List[str]] = field(
default_factory=list,
init=False,
)
param_info: List[str] = field(
default_factory=list,
init=False,
)
init_info: List[str] = field(
default_factory=list,
init=False,
)
param_constraints: List[str] = field(
default_factory=list,
init=False,
)
param_excluded: List[str] = field(
default_factory=list,
init=False,
)
fixed_species: List[str] = field(
default_factory=list,
init=False,
)
# Information about simulation
sim_tspan: List[str] = field(
default_factory=list,
init=False,
)
sim_conditions: List[List[str]] = field(
default_factory=list,
init=False,
)
sim_unperturbed: str = field(
default_factory=str,
init=False,
)
# Words to identify reaction rules
rule_words: Dict[str, List[str]] = field(
default_factory=lambda: dict(
dimerize=[
" dimerizes",
" homodimerizes",
" forms a dimer",
" forms dimers",
],
bind=[
" binds",
" forms complexes with",
],
dissociate=[
" is dissociated into",
" dissociates to",
],
is_phosphorylated=[
" is phosphorylated",
],
is_dephosphorylated=[
" is dephosphorylated",
],
phosphorylate=[
" phosphorylates",
],
dephosphorylate=[
" dephosphorylates",
],
transcribe=[
" transcribe",
" transcribes",
],
is_translated=[
" is translated into",
],
synthesize=[
" synthesizes",
" promotes synthesis of",
" increases",
],
is_synthesized=[
" is synthesized",
],
degrade=[
" degrades",
" promotes degradation of",
" decreases",
],
is_degraded=[
" is degraded",
],
translocate=[
" translocates",
" is translocated",
],
),
init=False,
)
@staticmethod
def _isfloat(string: str) -> bool:
"""
Checking if a string can be converted to float.
"""
try:
float(string)
return True
except ValueError:
return False
@staticmethod
def _remove_prefix(text: str, prefix: str) -> str:
"""
Remove prefix from a text.
"""
if text.startswith(prefix):
return text[len(prefix) :]
assert False
def _set_params(self, line_num: int, *args: str) -> None:
"""
Set model parameters.
"""
for p_name in args:
if p_name + f"{line_num:d}" not in self.parameters:
self.parameters.append(p_name + f"{line_num:d}")
def _set_species(self, *args: str) -> None:
"""
Set model species.
"""
for s_name in args:
if s_name not in self.species:
self.species.append(s_name)
    def _preprocessing(
        self,
        func_name: str,
        line_num: int,
        line: str,
        *args: str,
    ) -> List[str]:
        """
        Extract the information about parameter and/or initial values
        if '|' in the line and find a keyword to identify reaction rules.

        Parameters
        ----------
        func_name : str
            Name of the rule function.
        line_num : int
            Line number.
        line : str
            Each line of the input text.
        *args : str
            Names of the kinetic parameters this rule uses (e.g. 'kf', 'kr').

        Returns
        -------
        description : list of strings
            The line split on the (longest) matched rule keyword.

        Raises
        ------
        ValueError
            On malformed parameter/initial-value expressions.
        NameError
            When an initial value refers to a species not on this line.
        """
        self._set_params(line_num, *args)
        # Text after the first '|' carries parameter settings; text after a
        # second '|' carries initial values.
        if "|" in line:
            if line.split("|")[1].strip():
                param_values = line.split("|")[1].strip().split(",")
                # Explicit numeric assignments, e.g. 'kf=1.0' or 'const kf=1.0'.
                if all("=" in pval for pval in param_values):
                    for pval in param_values:
                        base_param = pval.split("=")[0].strip(" ")
                        if base_param.startswith("const "):
                            # Parameter names with 'const' will be added to param_excluded.
                            base_param = base_param.split("const ")[-1]
                            fixed = True
                        else:
                            fixed = False
                        if base_param in args:
                            if self._isfloat(pval.split("=")[1].strip(" ")):
                                self.param_info.append(
                                    "x[C."
                                    + base_param
                                    + f"{line_num:d}] = "
                                    + pval.split("=")[1].strip(" ")
                                )
                                # If a parameter value is initialized to 0.0 or fixed,
                                # then add it to param_excluded.
                                if float(pval.split("=")[1].strip(" ")) == 0.0 or fixed:
                                    self.param_excluded.append(base_param + f"{line_num:d}")
                            else:
                                raise ValueError(
                                    f"line{line_num:d}: Parameter value must be int or float."
                                )
                        else:
                            raise ValueError(
                                f"line{line_num:d}: '{pval.split('=')[0].strip(' ')}'\n"
                                f"Available parameters are: {', '.join(args)}."
                            )
                elif param_values[0].strip(" ").isdecimal():
                    # Parameter constraints: a bare line number (e.g. '|1') ties
                    # every parameter of this line to the same-named parameter
                    # of that earlier line.
                    for param_name in args:
                        if f"{param_name}{int(param_values[0]):d}" not in self.parameters:
                            raise ValueError(
                                f"Line {line_num:d} and {int(param_values[0]):d} : "
                                "Different reaction rules in parameter constraints."
                            )
                        else:
                            self.param_excluded.append(f"{param_name}{line_num:d}")
                            self.param_info.append(
                                f"x[C.{param_name}"
                                f"{line_num:d}] = "
                                f"x[C.{param_name}"
                                f"{int(param_values[0]):d}]"
                            )
                            self.param_constraints.append(
                                f"x[C.{param_name}"
                                f"{line_num:d}] = "
                                f"x[C.{param_name}"
                                f"{int(param_values[0]):d}]"
                            )
                else:
                    raise ValueError(
                        f"line{line_num:d}: {line}\nInvalid expression in the input parameter."
                    )
            if line.count("|") > 1 and line.split("|")[2].strip():
                initial_values = line.split("|")[2].strip().split(",")
                for ival in initial_values:
                    if ival.startswith("fixed "):
                        # 'fixed ' marks the species as held constant during simulation.
                        ival = ival.split("fixed ")[-1]
                        self.fixed_species.append(ival.split("=")[0].strip(" "))
                    # NOTE(review): substring containment, not exact species-name
                    # equality — a name that is a substring of another would pass.
                    if ival.split("=")[0].strip(" ") in line.split("|")[0]:
                        if self._isfloat(ival.split("=")[1].strip(" ")):
                            self.init_info.append(
                                "y0[V."
                                + ival.split("=")[0].strip(" ")
                                + "] = "
                                + ival.split("=")[1].strip(" ")
                            )
                        else:
                            raise ValueError(
                                f"line{line_num:d}: Initial value must be int or float."
                            )
                    else:
                        raise NameError(
                            f"line{line_num:d}: "
                            f"Name'{ival.split('=')[0].strip(' ')}' is not defined."
                        )
            line = line.split("|")[0]
        hit_words: List[str] = []
        for word in self.rule_words[func_name]:
            # Choose the longest matching phrase (e.g. ' homodimerizes' over ' dimerizes').
            if word in line:
                hit_words.append(word)
        return line.strip().split(max(hit_words, key=len))
@staticmethod
def _word2scores(word: str, sentence: str) -> List[float]:
"""
Calculate similarity scores between word and sentence.
Parameters
----------
word : str
User-defined word.
sentence : str
Textual unit consisting of two or more words.
returns
-------
ratio : list
List containing similarity scores.
"""
ratio = [
SequenceMatcher(None, word, sentence[i : i + len(word)]).ratio()
for i in range(len(sentence) - len(word) + 1)
]
return ratio
def _get_partial_similarity(
self,
line: str,
similarity_threshold: float = 0.7,
) -> UnregisteredRule:
"""
Suggest similar rule word when user-defined word is not registered
in rule_words.
Parameters
----------
line : str
Each line of the input text.
similarity_threshold : float (default: 0.7)
if all match_scores are below this value, expected_word will not
be returned.
Returns
-------
unregistered_rule : UnregisteredRule
Rule word with the highest similarity score.
"""
match_words = []
match_scores = []
str_subset = []
for rules in self.rule_words.values():
for word in rules:
ratio = self._word2scores(word, line)
if ratio:
match_words.append(word)
match_scores.append(max(ratio))
str_subset.append(line[np.argmax(ratio) : np.argmax(ratio) + len(word)])
expected_word = (
None
if all([score < similarity_threshold for score in match_scores])
else match_words[np.argmax(match_scores)]
)
original_word = (
None if expected_word is None else str_subset[match_words.index(expected_word)]
)
unregistered_rule = UnregisteredRule(expected_word, original_word)
return unregistered_rule
@staticmethod
def _remove_prepositions(sentence: str) -> str:
"""
Remove preposition from text not to use it for identifying reaction rules.
"""
for preposition in PREPOSITIONS:
if sentence.endswith(f" {preposition}"):
return sentence[: -len(preposition) - 1]
return sentence
def dimerize(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'A dimerizes <--> AA'
>>> 'A homodimerizes <--> AA'
>>> 'A forms a dimer <--> AA'
>>> 'A forms dimers <--> AA'
Notes
-----
* Parameters
.. math:: kf, kr
* Rate equation
.. math:: v = kf * [A] * [A] - kr * [AA]
* Differential equation
.. math::
d[A]]/dt = - 2 * v
d[AA]/dt = + v
"""
description = self._preprocessing(
sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
)
monomer = description[0].strip(" ")
if " <--> " in description[1]:
dimer = description[1].split(" <--> ")[1].strip(" ")
elif " --> " in description[1]:
warnings.warn(
f"line{line_num:d}: Use '<-->' instead of '-->' for reversible reaction rules.",
FutureWarning,
)
dimer = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(f"line{line_num:d}: Use '<-->' to specify the name of the dimer.")
if monomer == dimer:
raise ValueError(f"{dimer} <- Use a different name.")
self._set_species(monomer, dimer)
self.complex_formations.append(ComplexFormation(line_num, set(monomer), dimer, True))
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.kf{line_num:d}] * y[V.{monomer}] * y[V.{monomer}] - "
f"x[C.kr{line_num:d}] * y[V.{dimer}]"
)
counter_monomer, counter_dimer = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{monomer}]" in eq:
counter_monomer += 1
self.differential_equations[i] = eq + f" - 2 * v[{line_num:d}]"
elif f"dydt[V.{dimer}]" in eq:
counter_dimer += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_monomer == 0:
self.differential_equations.append(f"dydt[V.{monomer}] = - v[{line_num:d}]")
if counter_dimer == 0:
self.differential_equations.append(f"dydt[V.{dimer}] = + v[{line_num:d}]")
    def bind(self, line_num: int, line: str) -> None:
        """
        Examples
        --------
        >>> 'A binds B <--> AB'
        >>> 'A forms complexes with B <--> AB'

        Notes
        -----
        * Parameters
            .. math:: kf, kr
        * Rate equation
            .. math:: v = kf * [A] * [B] - kr * [AB]
        * Differential equation
            .. math::
                d[A]/dt = - v
                d[B]/dt = - v
                d[AB]/dt = + v
        """
        description = self._preprocessing(
            sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
        )
        component1 = description[0].strip(" ")
        if " <--> " in description[1]:
            # Specify name of the complex
            component2 = description[1].split(" <--> ")[0].strip(" ")
            complex = description[1].split(" <--> ")[1].strip(" ")
        elif " --> " in description[1]:
            # '-->' is accepted for now but deprecated for reversible rules.
            warnings.warn(
                f"line{line_num:d}: Use '<-->' instead of '-->' for reversible reaction rules.",
                FutureWarning,
            )
            component2 = description[1].split(" --> ")[0].strip(" ")
            complex = description[1].split(" --> ")[1].strip(" ")
        else:
            raise ValueError(
                f"line{line_num:d}: Use '<-->' to specify the name of the protein complex."
            )
        if component1 == complex or component2 == complex:
            raise ValueError(f"line{line_num:d}: {complex} <- Use a different name.")
        elif component1 == component2:
            # A binding itself is homodimerization; delegate to dimerize().
            self.dimerize(line_num, line)
        else:
            self._set_species(component1, component2, complex)
            # Register the association so duplicate binding/dissociation lines
            # can be flagged by the thermodynamic-restriction checks.
            self.complex_formations.append(
                ComplexFormation(line_num, set([component1, component2]), complex, True)
            )
            self.reactions.append(
                f"v[{line_num:d}] = "
                f"x[C.kf{line_num:d}] * y[V.{component1}] * y[V.{component2}] - "
                f"x[C.kr{line_num:d}] * y[V.{complex}]"
            )
            # Fold this flux into existing ODEs; species seen for the first
            # time get a fresh 'dydt[...]' assignment after the loop.
            counter_component1, counter_component2, counter_complex = (0, 0, 0)
            for i, eq in enumerate(self.differential_equations):
                if f"dydt[V.{component1}]" in eq:
                    counter_component1 += 1
                    self.differential_equations[i] = eq + f" - v[{line_num:d}]"
                elif f"dydt[V.{component2}]" in eq:
                    counter_component2 += 1
                    self.differential_equations[i] = eq + f" - v[{line_num:d}]"
                elif f"dydt[V.{complex}]" in eq:
                    counter_complex += 1
                    self.differential_equations[i] = eq + f" + v[{line_num:d}]"
            if counter_component1 == 0:
                self.differential_equations.append(f"dydt[V.{component1}] = - v[{line_num:d}]")
            if counter_component2 == 0:
                self.differential_equations.append(f"dydt[V.{component2}] = - v[{line_num:d}]")
            if counter_complex == 0:
                self.differential_equations.append(f"dydt[V.{complex}] = + v[{line_num:d}]")
def dissociate(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'AB dissociates to A and B'
>>> 'AB is dissociated into A and B'
Notes
-----
* Parameters
.. math:: kf, kr
* Rate equation
.. math:: v = kf * [AB] - kr * [A] * [B]
* Differential equation
.. math::
d[A]/dt = + v
d[B]/dt = + v
d[AB]/dt = - v
"""
description = self._preprocessing(
sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
)
complex = description[0].strip(" ")
if " and " not in description[1]:
raise ValueError(
f"Use 'and' in line{line_num:d}:\ne.g., AB is dissociated into A and B"
)
else:
component1 = description[1].split(" and ")[0].strip(" ")
component2 = description[1].split(" and ")[1].strip(" ")
self._set_species(complex, component1, component2)
self.complex_formations.append(
ComplexFormation(line_num, set([component1, component2]), complex, False)
)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.kf{line_num:d}] * y[V.{complex}] - "
f"x[C.kr{line_num:d}] * y[V.{component1}] * y[V.{component2}]"
)
counter_complex, counter_component1, counter_component2 = (0, 0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{complex}]" in eq:
counter_complex += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
elif f"dydt[V.{component1}]" in eq:
counter_component1 += 1
self.differential_equations[i] = (
eq + f" + v[{line_num:d}]"
if component1 != component2
else eq + f" + 2 * v[{line_num:d}]"
)
elif f"dydt[V.{component2}]" in eq:
counter_component2 += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_complex == 0:
self.differential_equations.append(f"dydt[V.{complex}] = - v[{line_num:d}]")
if counter_component1 == 0:
self.differential_equations.append(f"dydt[V.{component1}] = + v[{line_num:d}]")
if counter_component2 == 0:
self.differential_equations.append(f"dydt[V.{component2}] = + v[{line_num:d}]")
def is_phosphorylated(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'uA is phosphorylated <--> pA'
Notes
-----
* Parameters
.. math:: kf, kr
* Rate equation
.. math:: v = kf * [uA] - kr * [pA]
* Differential equation
.. math::
d[uA]/dt = - v
d[pA]/dt = + v
"""
description = self._preprocessing(
sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
)
unphosphorylated_form = description[0].strip(" ")
if " <--> " in description[1]:
phosphorylated_form = description[1].split(" <--> ")[1].strip(" ")
elif " --> " in description[1]:
warnings.warn(
f"line{line_num:d}: Use '<-->' instead of '-->' for reversible reaction rules.",
FutureWarning,
)
phosphorylated_form = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(
f"line{line_num:d}: "
"Use '<-->' to specify the name of the phosphorylated protein."
)
self._set_species(unphosphorylated_form, phosphorylated_form)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.kf{line_num:d}] * y[V.{unphosphorylated_form}] - "
f"x[C.kr{line_num:d}] * y[V.{phosphorylated_form}]"
)
counter_unphosphorylated_form, counter_phosphorylated_form = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{unphosphorylated_form}]" in eq:
counter_unphosphorylated_form += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
elif "dydt[V.{phosphorylated_form}]" in eq:
counter_phosphorylated_form += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_unphosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{unphosphorylated_form}] = - v[{line_num:d}]"
)
if counter_phosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{phosphorylated_form}] = + v[{line_num:d}]"
)
def is_dephosphorylated(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'pA is dephosphorylated --> uA'
Notes
-----
* Parameters
.. math:: V, K
* Rate equation
.. math:: v = V * [pA] / (K + [pA])
* Differential equation
.. math::
d[uA]/dt = + v
d[pA]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "V", "K")
phosphorylated_form = description[0].strip(" ")
if " --> " in description[1]:
unphosphorylated_form = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(
f"line{line_num:d}: "
"Use '-->' to specify the name of the dephosphorylated protein."
)
self._set_species(phosphorylated_form, unphosphorylated_form)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.V{line_num:d}] * y[V.{phosphorylated_form}] / "
f"(x[C.K{line_num:d}] + y[V.{phosphorylated_form}])"
)
counter_unphosphorylated_form, counter_phosphorylated_form = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{unphosphorylated_form}]" in eq:
counter_unphosphorylated_form += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
elif f"dydt[V.{phosphorylated_form}]" in eq:
counter_phosphorylated_form += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
if counter_unphosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{unphosphorylated_form}] = + v[{line_num:d}]"
)
if counter_phosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{phosphorylated_form}] = - v[{line_num:d}]"
)
def phosphorylate(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B phosphorylates uA --> pA'
Notes
-----
* Parameters
.. math:: V, K
* Rate equation
.. math:: v = V * [B] * [uA] / (K + [uA])
* Differential equation
.. math::
d[uA]/dt = - v
d[pA]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "V", "K")
kinase = description[0].strip(" ")
if " --> " in description[1]:
unphosphorylated_form = description[1].split(" --> ")[0].strip(" ")
phosphorylated_form = description[1].split(" --> ")[1].strip(" ")
else:
raise ValueError(
f"line{line_num:d}: "
"Use '-->' to specify the name of the phosphorylated "
"(or activated) protein."
)
if unphosphorylated_form == phosphorylated_form:
raise ValueError(f"line{line_num:d}: {phosphorylated_form} <- Use a different name.")
self._set_species(kinase, unphosphorylated_form, phosphorylated_form)
self.reactions.append(
f"v[{line_num:d}] = "
f"x[C.V{line_num:d}] * y[V.{kinase}] * y[V.{unphosphorylated_form}] / "
f"(x[C.K{line_num:d}] + y[V.{unphosphorylated_form}])"
)
counter_unphosphorylated_form, counter_phosphorylated_form = (0, 0)
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{unphosphorylated_form}]" in eq:
counter_unphosphorylated_form += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
elif f"dydt[V.{phosphorylated_form}]" in eq:
counter_phosphorylated_form += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_unphosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{unphosphorylated_form}] = - v[{line_num:d}]"
)
if counter_phosphorylated_form == 0:
self.differential_equations.append(
f"dydt[V.{phosphorylated_form}] = + v[{line_num:d}]"
)
    def dephosphorylate(self, line_num: int, line: str) -> None:
        """
        Examples
        --------
        >>> 'B dephosphorylates pA --> uA'

        Notes
        -----
        * Parameters
            .. math:: V, K
        * Rate equation
            .. math:: v = V * [B] * [pA] / (K + [pA])
        * Differential equation
            .. math::
                d[uA]/dt = + v
                d[pA]/dt = - v
        """
        description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "V", "K")
        phosphatase = description[0].strip(" ")
        if " --> " in description[1]:
            phosphorylated_form = description[1].split(" --> ")[0].strip(" ")
            unphosphorylated_form = description[1].split(" --> ")[1].strip(" ")
        else:
            raise ValueError(
                f"line{line_num:d}: "
                "Use '-->' to specify the name of the dephosphorylated "
                "(or deactivated) protein."
            )
        if phosphorylated_form == unphosphorylated_form:
            raise ValueError(f"line{line_num:d}: {unphosphorylated_form} <- Use a different name.")
        self._set_species(phosphatase, phosphorylated_form, unphosphorylated_form)
        # Michaelis-Menten form with the phosphatase level as a multiplier.
        self.reactions.append(
            f"v[{line_num:d}] = "
            f"x[C.V{line_num:d}] * y[V.{phosphatase}] * y[V.{phosphorylated_form}] / "
            f"(x[C.K{line_num:d}] + y[V.{phosphorylated_form}])"
        )
        # Fold this flux into existing ODEs; species seen for the first time
        # get a fresh 'dydt[...]' assignment after the loop.
        counter_phosphorylated_form, counter_unphosphorylated_form = (0, 0)
        for i, eq in enumerate(self.differential_equations):
            if f"dydt[V.{phosphorylated_form}]" in eq:
                counter_phosphorylated_form += 1
                self.differential_equations[i] = eq + f" - v[{line_num:d}]"
            elif f"dydt[V.{unphosphorylated_form}]" in eq:
                counter_unphosphorylated_form += 1
                self.differential_equations[i] = eq + f" + v[{line_num:d}]"
        if counter_phosphorylated_form == 0:
            self.differential_equations.append(
                f"dydt[V.{phosphorylated_form}] = - v[{line_num:d}]"
            )
        if counter_unphosphorylated_form == 0:
            self.differential_equations.append(
                f"dydt[V.{unphosphorylated_form}] = + v[{line_num:d}]"
            )
    def transcribe(self, line_num: int, line: str) -> None:
        """
        Examples
        --------
        >>> 'B transcribes a'
        >>> 'B1 & B2 transcribe a' # (AND-gate)
        >>> 'B transcribes a, repressed by C' # (Negative regulation)

        Notes
        -----
        * Parameters
            .. math:: V, K, n, (KF, nF)
        * Rate equation
            .. math::
                v = V * [B] ^ {n} / (K ^ {n} + [B] ^ {n})
                v = V * ([B1] * [B2]) ^ {n} / (K ^ {n} + ([B1] * [B2]) ^ {n})
                v = V * [B] ^ {n} / (K ^ {n} + [B] ^ {n} + ([C] / KF) ^ {nF})
        * Differential equation
            .. math:: d[a]/dt = + v
        """
        description = self._preprocessing(
            sys._getframe().f_code.co_name, line_num, line, "V", "K", "n", "KF", "nF"
        )
        repressor: Optional[str] = None
        # An exact ', repressed by' clause yields a similarity score of 1.0 at
        # some window position; any lower score is treated as "no repressor".
        ratio = self._word2scores(", repressed by", description[1])
        if not ratio or max(ratio) < 1.0:
            # No repressor: the repression parameters KF/nF registered above
            # are unused on this line, so withdraw them again.
            self.parameters.remove(f"KF{line_num:d}")
            self.parameters.remove(f"nF{line_num:d}")
            mRNA = description[1].strip()
            if " " in mRNA:
                # A space inside the mRNA name usually means a misspelled
                # repressor clause rather than a real species name.
                raise ValueError(
                    f"line{line_num:d}: "
                    "Add ', repressed by XXX' to describe negative regulation from XXX."
                )
        else:
            # Add negative regulation from repressor
            mRNA = description[1].split(", repressed by")[0].strip()
            repressor = description[1].split(", repressed by")[1].strip()
        if " & " not in description[0]:
            # Single transcription factor drives a Hill-type rate law.
            TF = description[0].strip(" ")
            self._set_species(mRNA, TF)
            if repressor is not None:
                self._set_species(repressor)
            self.reactions.append(
                f"v[{line_num:d}] = "
                f"x[C.V{line_num:d}] * y[V.{TF}] ** x[C.n{line_num:d}] / "
                f"(x[C.K{line_num:d}] ** x[C.n{line_num:d}] + "
                f"y[V.{TF}] ** x[C.n{line_num:d}]"
                + (
                    ")"
                    if repressor is None
                    else f" + (y[V.{repressor}] / x[C.KF{line_num:d}]) ** x[C.nF{line_num:d}])"
                )
            )
        else:
            # AND-gate: the product of all listed TFs drives the Hill term.
            TFs = [TF.strip(" ") for TF in description[0].split(" & ")]
            self._set_species(mRNA, *TFs)
            if repressor is not None:
                self._set_species(repressor)
            self.reactions.append(
                f"v[{line_num:d}] = "
                f"x[C.V{line_num:d}] * ({'y[V.' + '] * y[V.'.join(TFs) + ']'}) ** x[C.n{line_num:d}] / "
                f"(x[C.K{line_num:d}] ** x[C.n{line_num:d}] + "
                f"({'y[V.' + '] * y[V.'.join(TFs) + ']'}) ** x[C.n{line_num:d}]"
                + (
                    ")"
                    if repressor is None
                    else f" + (y[V.{repressor}] / x[C.KF{line_num:d}]) ** x[C.nF{line_num:d}])"
                )
            )
        counter_mRNA = 0
        for i, eq in enumerate(self.differential_equations):
            if f"dydt[V.{mRNA}]" in eq:
                counter_mRNA += 1
                self.differential_equations[i] = eq + f" + v[{line_num:d}]"
        if counter_mRNA == 0:
            self.differential_equations.append(f"dydt[V.{mRNA}] = + v[{line_num:d}]")
def is_translated(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'a is translated into A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [a]
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
mRNA = description[0].strip(" ")
protein = description[1].strip(" ")
self._set_species(mRNA, protein)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{mRNA}]")
counter_protein = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{protein}]" in eq:
counter_protein += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_protein == 0:
self.differential_equations.append(f"dydt[V.{protein}] = + v[{line_num:d}]")
def synthesize(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B synthesizes A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [B]
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
catalyst = description[0].strip(" ")
product = description[1].strip(" ")
self._set_species(catalyst, product)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{catalyst}]")
counter_product = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{product}]" in eq:
counter_product += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_product == 0:
self.differential_equations.append(f"dydt[V.{product}] = + v[{line_num:d}]")
def is_synthesized(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'A is synthesized'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf
* Differential equation
.. math:: d[A]/dt = + v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
chemical_species = description[0].strip(" ")
self._set_species(chemical_species)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}]")
counter_chemical_species = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{chemical_species}]" in eq:
counter_chemical_species += 1
self.differential_equations[i] = eq + f" + v[{line_num:d}]"
if counter_chemical_species == 0:
self.differential_equations.append(f"dydt[V.{chemical_species}] = + v[{line_num:d}]")
def degrade(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'B degrades A'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [B]
* Differential equation
.. math:: d[A]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
protease = description[0].strip(" ")
protein = description[1].strip(" ")
self._set_species(protease, protein)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{protease}]")
counter_protein = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{protein}]" in eq:
counter_protein += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
if counter_protein == 0:
self.differential_equations.append(f"dydt[V.{protein}] = - v[{line_num:d}]")
def is_degraded(self, line_num: int, line: str) -> None:
"""
Examples
--------
>>> 'A is degraded'
Notes
-----
* Parameters
.. math:: kf
* Rate equation
.. math:: v = kf * [A]
* Differential equation
.. math:: d[A]/dt = - v
"""
description = self._preprocessing(sys._getframe().f_code.co_name, line_num, line, "kf")
chemical_species = description[0].strip(" ")
self._set_species(chemical_species)
self.reactions.append(f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{chemical_species}]")
counter_chemical_species = 0
for i, eq in enumerate(self.differential_equations):
if f"dydt[V.{chemical_species}]" in eq:
counter_chemical_species += 1
self.differential_equations[i] = eq + f" - v[{line_num:d}]"
if counter_chemical_species == 0:
self.differential_equations.append(f"dydt[V.{chemical_species}] = - v[{line_num:d}]")
    def translocate(self, line_num: int, line: str) -> None:
        r"""
        Examples
        --------
        >>> 'A_at_cyt translocates from cytoplasm to nucleus (V_cyt, V_nuc) <--> A_at_nuc'
        >>> 'A_at_cyt is translocated from cytoplasm to nucleus (V_cyt, V_nuc) <--> A_at_nuc'

        Notes
        -----
        * Parameters
            .. math:: kf, kr, (V_{pre}, V_{post})
        * Rate equation
            .. math:: v = kf * [A\_at\_pre] - kr * (V_{post} / V_{pre}) * [A\_at\_post]
        * Differential equation
            .. math::
                d[A\_at\_pre]/dt = - v

                d[A\_at\_post]/dt = + v * (V_{pre} / V_{post})
        """
        # Two rate parameters: forward (kf) and reverse (kr).
        description = self._preprocessing(
            sys._getframe().f_code.co_name, line_num, line, "kf", "kr"
        )
        pre_translocation = description[0].strip(" ")
        # The species name after translocation must be given via '<-->'.
        if " <--> " in description[1]:
            post_translocation = description[1].split(" <--> ")[1].strip(" ")
        elif " --> " in description[1]:
            # '-->' is still accepted here but deprecated for reversible rules.
            warnings.warn(
                f"line{line_num:d}: Use '<-->' instead of '-->' for reversible reaction rules.",
                FutureWarning,
            )
            post_translocation = description[1].split(" --> ")[1].strip(" ")
        else:
            raise ValueError(
                f"line{line_num:d}: "
                "Use '<-->' to specify the name of the species after translocation."
            )
        if pre_translocation == post_translocation:
            raise ValueError(f"line{line_num:d}: {post_translocation} <- Use a different name.")
        # Information about compartment volumes, e.g. '(V_cyt, V_nuc)'.
        # When not supplied, both volumes default to 1 (equal compartments).
        if "(" in description[1] and ")" in description[1]:
            [pre_volume, post_volume] = description[1].split("(")[-1].split(")")[0].split(",")
            if not self._isfloat(pre_volume.strip(" ")) or not self._isfloat(
                post_volume.strip(" ")
            ):
                raise ValueError("pre_volume and post_volume must be float or int.")
        else:
            [pre_volume, post_volume] = ["1", "1"]
        self._set_species(pre_translocation, post_translocation)
        # Reversible mass-action rate; rewritten below with a volume-corrected
        # reverse term when the two compartment volumes differ.
        self.reactions.append(
            f"v[{line_num:d}] = x[C.kf{line_num:d}] * y[V.{pre_translocation}] - "
            f"x[C.kr{line_num:d}] * y[V.{post_translocation}]"
        )
        if float(pre_volume.strip(" ")) != float(post_volume.strip(" ")):
            self.reactions[-1] = (
                f"v[{line_num:d}] = "
                f"x[C.kf{line_num:d}] * y[V.{pre_translocation}] - "
                f"x[C.kr{line_num:d}] * "
                f"({post_volume.strip()} / {pre_volume.strip()}) * "
                f"y[V.{post_translocation}]"
            )
        # Wire the flux into existing ODEs (- v leaves the source compartment,
        # + v enters the destination), or create the ODEs if they are new.
        counter_pre_translocation, counter_post_translocation = (0, 0)
        for i, eq in enumerate(self.differential_equations):
            if f"dydt[V.{pre_translocation}]" in eq:
                counter_pre_translocation += 1
                self.differential_equations[i] = eq + f" - v[{line_num:d}]"
            elif f"dydt[V.{post_translocation}]" in eq:
                counter_post_translocation += 1
                self.differential_equations[i] = eq + f" + v[{line_num:d}]"
                if float(pre_volume.strip(" ")) != float(post_volume.strip(" ")):
                    # Influx into the destination is scaled by the volume ratio.
                    self.differential_equations[
                        i
                    ] += f" * ({pre_volume.strip()} / {post_volume.strip()})"
        if counter_pre_translocation == 0:
            self.differential_equations.append(f"dydt[V.{pre_translocation}] = - v[{line_num:d}]")
        if counter_post_translocation == 0:
            self.differential_equations.append(f"dydt[V.{post_translocation}] = + v[{line_num:d}]")
            if float(pre_volume.strip(" ")) != float(post_volume.strip(" ")):
                self.differential_equations[
                    -1
                ] += f" * ({pre_volume.strip()} / {post_volume.strip()})"
def create_ode(self) -> None:
"""
Find a keyword in each line to identify the reaction rule and
construct an ODE model.
"""
with open(self.input_txt, encoding="utf-8") as f:
lines = f.readlines()
for line_num, line in enumerate(lines, start=1):
# Remove double spaces
while True:
if " " not in line:
break
else:
line = line.replace(" ", " ")
# Comment out
line = line.split("#")[0].rstrip(" ")
if not line.strip():
# Skip blank lines
continue
elif lines.count(line) > 1:
# Find duplicate lines
raise DuplicateError(
f"Reaction '{line}' is duplicated in lines "
+ ", ".join([str(i + 1) for i, rxn in enumerate(lines) if rxn == line])
)
# About observables
elif line.startswith("@obs "):
line = self._remove_prefix(line, "@obs ")
if line.count(":") != 1:
raise SyntaxError(
f"line{line_num:d}: Missing colon\n"
"Should be `@obs <observable name>: <expression>`."
)
else:
self.obs_desc.append(line.split(":"))
# About simulation info.
elif line.startswith("@sim "):
line = self._remove_prefix(line, "@sim ")
if line.count(":") != 1:
raise SyntaxError(f"line{line_num:d}: Missing colon")
else:
if line.startswith("tspan"):
t_info = line.split(":")[-1].strip()
if "[" in t_info and "]" in t_info:
[t0, tf] = t_info.split("[")[-1].split("]")[0].split(",")
if t0.strip(" ").isdecimal() and tf.strip(" ").isdecimal():
self.sim_tspan.append(t0)
self.sim_tspan.append(tf)
else:
raise TypeError("@sim tspan: [t0, tf] must be a list of integers.")
else:
raise ValueError(
"`tspan` must be a two element vector [t0, tf] "
"specifying the initial and final times."
)
elif line.startswith("unperturbed"):
self.sim_unperturbed += line.split(":")[-1].strip()
elif line.startswith("condition "):
self.sim_conditions.append(
self._remove_prefix(line, "condition ").split(":")
)
else:
raise ValueError(
f"(line{line_num:d}) Available options are: "
"'@sim tspan:', '@sim unperturbed:', or '@sim condition XXX:'."
)
# Detect reaction rule
else:
for reaction_rule, words in self.rule_words.items():
if any([self._remove_prepositions(word) in line for word in words]):
exec("self." + reaction_rule + "(line_num, line)")
break
else:
unregistered_rule = self._get_partial_similarity(line)
raise ValueError(
f"Unregistered words in line{line_num:d}: {line}"
+ (
f"\nMaybe: '{unregistered_rule.expected}'."
if unregistered_rule.expected is not None
else ""
)
)
| StarcoderdataPython |
326590 | from utils.db import *
from infiniti.params import *
import os
from utils.helpers import *
class Address(object):
    """A wallet address plus its cached value flows and transaction outputs.

    ``incoming_value`` / ``outgoing_value`` accumulate received and spent
    value; ``utxo`` / ``stxo`` hold unspent and spent transaction outputs.
    """

    def __init__(self, address=None, public_key=None, wallet_name=None):
        # State is created per instance.  These used to be *class*
        # attributes, so the mutable ``utxo``/``stxo`` lists were shared by
        # every Address object -- appending outputs on one address silently
        # leaked into all others.
        self.incoming_value = 0
        self.outgoing_value = 0
        self.utxo = []
        self.stxo = []
        self.pubkey = public_key
        self.address = address
        self.wallet = wallet_name

    def current_balance(self):
        """Return the net balance: total received minus total spent."""
        return self.incoming_value - self.outgoing_value

    def _save_addr_db(self):
        # Persist "wallet|incoming|outgoing" keyed by the address string.
        path = join_path(DATA_PATH, NETWORK)
        addrdb = open_db(join_path(path, 'addresses'))
        addrdb.put(self.address, "{0}|{1}|{2}".format(self.wallet, str(self.incoming_value), str(self.outgoing_value)))

    def _save_utxo_db(self):
        # Batch-delete spent outputs and (re)write the unspent ones.
        wb = writebatch()
        if len(self.stxo) > 0:
            for stxo in self.stxo:
                wb.delete(stxo)
        for utxo in self.utxo:
            # Each utxo looks like (index, amount, txid) -- TODO confirm
            # against the code that populates self.utxo.
            wb.put("{0}.{1}".format(utxo[2], utxo[0]), "{0}|{1}".format(self.address, str(utxo[1])))
        path = join_path(DATA_PATH, NETWORK)
        utxodb = open_db(join_path(path, 'utxo'))
        utxodb.write(wb)

    def save(self):
        """Persist both the address record and its UTXO set."""
        self._save_addr_db()
        self._save_utxo_db()

    def __repr__(self):
        return self.address
3226396 | from utlis.rank import setrank,isrank,remrank,remsudos,setsudo, GPranks
from utlis.send import Name,Glang
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re,json
import importlib
def delete(client, message, redis):
    """Moderation handler: enforce this group's content locks.

    Deletes messages that violate any enabled lock (links, usernames,
    hashtags, media types, flood, ...) and, when the lock's ``:res`` flag
    is also set for the chat, strips the sender's send permissions.
    Globally banned users are kicked; globally restricted users are muted.
    """
    userID = message.from_user.id
    userFN = message.from_user.first_name
    chatID = message.chat.id
    rank = isrank(redis, userID, chatID)
    if message.text:
        text = message.text
    elif message.caption:
        text = message.caption
    else:
        text = 0
    c = importlib.import_module("lang.arcmd")
    r = importlib.import_module("lang.arreply")

    def _restrict(target_id):
        # Remove all send/media permissions from the target user.
        # NOTE: the original code referenced the undefined name `userId`
        # here, raising NameError whenever any restriction was triggered.
        Bot("restrictChatMember", {"chat_id": chatID, "user_id": target_id,
            "can_send_messages": 0, "can_send_media_messages": 0,
            "can_send_other_messages": 0, "can_send_polls": 0,
            "can_change_info": 0, "can_add_web_page_previews": 0,
            "can_pin_messages": 0, "can_invite_users": 0})

    def _delete_message():
        Bot("deleteMessage", {"chat_id": chatID, "message_id": message.message_id})

    def _enforce(lock):
        # Delete the offending message; if "<lock>:res" is also enabled for
        # this chat, restrict the sender as well.
        _delete_message()
        if redis.sismember("{}Nbot:{}:res".format(BOT_ID, lock), chatID):
            _restrict(userID)

    # Globally restricted / banned users.
    if redis.sismember("{}Nbot:restricteds".format(BOT_ID), userID):
        _restrict(userID)
    if redis.sismember("{}Nbot:bans".format(BOT_ID), userID):
        Bot("kickChatMember", {"chat_id": chatID, "user_id": userID})
    # Per-chat muted users (only non-ranked members are silenced).
    if redis.sismember(f"{BOT_ID}Nbot:{chatID}:muteusers", userID) and (rank is False or rank == 0):
        message.delete()
    if text:
        # "kick me" self-service command (inline confirmation keyboard).
        if text == c.kickme and not redis.sismember("{}Nbot:kickme".format(BOT_ID), chatID):
            GetGprank = GPranks(userID, chatID)
            if GetGprank == "member":
                reply_markup = InlineKeyboardMarkup([[
                    InlineKeyboardButton(r.yes, callback_data=json.dumps(["kickme-yes", "", userID])),
                    InlineKeyboardButton(r.no, callback_data=json.dumps(["kickme-no", "", userID])),
                ]])
                Bot("sendMessage", {"chat_id": chatID, "text": r.kickme,
                    "reply_to_message_id": message.message_id,
                    "parse_mode": "html", "reply_markup": reply_markup})
        # Links.
        if re.findall("[Hh][Tt][Tt][Pp][Ss]:/|[Hh][Tt][Tt][Pp]://|.[Ii][Rr]|.[Cc][Oo][Mm]|.[Oo][Rr][Gg]|.[Ii][Nn][Ff][Oo]|[Ww][Ww][Ww]|.[Tt][Kk]|.[Mm][Ee]", text):
            if redis.sismember("{}Nbot:Llink".format(BOT_ID), chatID):
                _enforce("Llink")
        # Usernames.
        if re.findall('@', text):
            if redis.sismember("{}Nbot:Lusername".format(BOT_ID), chatID):
                _enforce("Lusername")
        # Forwarded messages (also checked again below for non-text media).
        if message.forward_date:
            if redis.sismember("{}Nbot:Lfwd".format(BOT_ID), chatID):
                _enforce("Lfwd")
        # Hashtags.
        if re.findall('#', text):
            if redis.sismember("{}Nbot:Ltag".format(BOT_ID), chatID):
                _enforce("Ltag")
        # Latin text.
        if re.findall("[a-zA-Z0-9$@$!%*?&#^-_. +]+", text):
            if redis.sismember("{}Nbot:Lenglish".format(BOT_ID), chatID):
                _enforce("Lenglish")
        # Arabic text.
        if re.findall("[ا-ي٠-٩]", text):
            if redis.sismember("{}Nbot:Larabic".format(BOT_ID), chatID):
                _enforce("Larabic")
        # Over-long messages.  int() is required: redis returns a string,
        # and comparing len(text) >= "250" raises TypeError.
        Nlongtext = int(redis.get("{}Nbot:Nlongtext".format(BOT_ID)) or 250)
        if len(text) >= Nlongtext:
            if redis.sismember("{}Nbot:Llongtext".format(BOT_ID), chatID):
                _enforce("Llongtext")
        # Per-chat blocked words/patterns.
        li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID, chatID))
        for word in li:
            if re.findall(word, text):
                _delete_message()
                break
    if message.entities:
        if redis.sismember("{}Nbot:Lmarkdown".format(BOT_ID), chatID):
            for entitie in message.entities:
                # `==` instead of the original `is "text_link"`:
                # string identity is implementation-dependent.
                if entitie.type == "text_link":
                    _enforce("Lmarkdown")
                    break
    if message.via_bot:
        if redis.sismember("{}Nbot:Linline".format(BOT_ID), chatID):
            _enforce("Linline")
    if message.reply_markup:
        if redis.sismember("{}Nbot:Linline".format(BOT_ID), chatID):
            _enforce("Linline")
    if message.sticker:
        if redis.sismember("{}Nbot:Lsticker".format(BOT_ID), chatID):
            _enforce("Lsticker")
        elif redis.sismember("{}Nbot:{}:blockSTICKERs".format(BOT_ID, chatID), message.sticker.file_id):
            _delete_message()
    if message.animation:
        if redis.sismember("{}Nbot:Lgifs".format(BOT_ID), chatID):
            _enforce("Lgifs")
        elif redis.sismember("{}Nbot:{}:blockanimations".format(BOT_ID, chatID), message.animation.file_id):
            _delete_message()
    if message.audio:
        if redis.sismember("{}Nbot:Lmusic".format(BOT_ID), chatID):
            _enforce("Lmusic")
    if message.voice:
        if redis.sismember("{}Nbot:Lvoice".format(BOT_ID), chatID):
            _enforce("Lvoice")
    if message.video:
        if redis.sismember("{}Nbot:Lvideo".format(BOT_ID), chatID):
            _enforce("Lvideo")
    if message.document:
        if redis.sismember("{}Nbot:Lfiles".format(BOT_ID), chatID):
            _enforce("Lfiles")
    if message.photo:
        if redis.sismember("{}Nbot:Lphoto".format(BOT_ID), chatID):
            _enforce("Lphoto")
        # NOTE(review): message.photo.file_id assumes a Photo object; in
        # some pyrogram versions message.photo is a list -- confirm.
        elif redis.sismember("{}Nbot:{}:blockphotos".format(BOT_ID, chatID), message.photo.file_id):
            _delete_message()
    if message.contact:
        if redis.sismember("{}Nbot:Lcontact".format(BOT_ID), chatID):
            _enforce("Lcontact")
    if message.new_chat_members:
        if message.new_chat_members[0].is_bot:
            if redis.sismember("{}Nbot:Lbots".format(BOT_ID), chatID):
                first_name = message.new_chat_members[0].first_name
                username = message.new_chat_members[0].username
                Bot("kickChatMember", {"chat_id": chatID, "user_id": message.new_chat_members[0].id})
                Bot("sendMessage", {"chat_id": chatID, "text": r.kickbotadd.format(username, first_name),
                    "reply_to_message_id": message.message_id, "parse_mode": "html"})
        if redis.sismember("{}Nbot:Ljoin".format(BOT_ID), chatID):
            _delete_message()
    if message.forward_date:
        if redis.sismember("{}Nbot:Lfwd".format(BOT_ID), chatID):
            _enforce("Lfwd")
    if message.video_note:
        if redis.sismember("{}Nbot:Lnote".format(BOT_ID), chatID):
            _enforce("Lnote")
    # Flood control: count messages within a sliding window (setex TTL).
    if redis.sismember("{}Nbot:Lflood".format(BOT_ID), chatID):
        Max_msg = int((redis.hget("{}Nbot:max_msg".format(BOT_ID), chatID) or 10))
        Time_ck = int((redis.hget("{}Nbot:time_ck".format(BOT_ID), chatID) or 3))
        User_msg = int((redis.get("{}Nbot:{}:{}:flood".format(BOT_ID, chatID, userID)) or 1))
        if User_msg > Max_msg:
            GetGprank = GPranks(userID, chatID)
            if GetGprank == "member":
                # Punishment mode for this chat: restrict (default) or ban.
                if redis.hexists("{}Nbot:floodset".format(BOT_ID), chatID):
                    get = redis.hget("{}Nbot:floodset".format(BOT_ID), chatID)
                else:
                    get = "res"
                if get == "res":
                    _restrict(userID)
                if get == "ban":
                    Bot("kickChatMember", {"chat_id": chatID, "user_id": userID})
                redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID, chatID), userID)
                BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID, Name(userFN))
                Bot("sendMessage", {"chat_id": chatID, "text": r.TKflood.format(BY, Max_msg, Time_ck), "parse_mode": "html"})
        redis.setex("{}Nbot:{}:{}:flood".format(BOT_ID, chatID, userID), Time_ck, User_msg + 1)
| StarcoderdataPython |
11232422 | from os.path import join, abspath, dirname
from setuptools import setup, find_packages
_here = abspath(dirname(__file__))

# Read the long description for PyPI.  The encoding is pinned so the build
# does not depend on the platform default (e.g. cp1252 on Windows would
# fail on non-ASCII characters in the README).
with open(join(_here, "./README.md"), encoding="utf-8") as f:
    readme = f.read()

setup(
    name="jac_format",
    version="0.1.4",
    description="JAC (JSON as CSV) Format Conversion",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="UniversalDataTool",
    author_email="<EMAIL>",
    url="https://github.com/UniversalDataTool/jac-format",
    license="MIT",
    packages=["jac_format"],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.6",
    ],
)
| StarcoderdataPython |
11222485 | # Copyright © 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Splunk Search service
Use the Search service in Splunk Cloud Services to dispatch, review, and manage searches and search jobs. You can finalize or cancel jobs, retrieve search results, and request search-related configurations from the Metadata Catalog service in Splunk Cloud Services.
OpenAPI spec version: v3alpha1
Generated by: https://openapi-generator.tech
"""
from datetime import datetime
from typing import List, Dict
from splunk_sdk.common.sscmodel import SSCModel
from splunk_sdk.base_client import dictify, inflate
from enum import Enum
class Dataset(SSCModel):
    # Registry mapping a `kind` discriminator string to a factory for the
    # matching Dataset variant; presumably populated elsewhere in the SDK
    # (not visible in this file section).
    from_dict_handlers = dict()

    @staticmethod
    def _from_dict(model: dict) -> "Dataset":
        """Inflate a raw response dict into a Dataset.

        Dispatches on ``model['kind']`` via ``from_dict_handlers``; unknown
        kinds fall back to a plain Dataset wrapping the raw dict.
        """
        def default_handler(model: dict) -> "Dataset":
            # Fallback: wrap the raw dict without interpreting it.
            # __new__ bypasses __init__'s per-field filtering.
            instance = Dataset.__new__(Dataset)
            instance._attrs = model
            return instance
        kind = model['kind']
        handler = Dataset.from_dict_handlers.get(kind, default_handler)
        return handler(model)
    def __init__(self, appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, created: "str" = None, createdby: "str" = None, description: "str" = None, id: "str" = None, modified: "str" = None, modifiedby: "str" = None, name: "str" = None, namespace: "str" = None, owner: "str" = None, resourcename: "str" = None, summary: "str" = None, title: "str" = None, **extra):
        """Dataset

        All field values live in the private ``_attrs`` dict; only
        arguments that are not ``None`` are recorded, and any unrecognized
        keyword arguments (``**extra``) are stored verbatim.
        """
        self._attrs = dict()
        if created is not None:
            self._attrs["created"] = created
        if createdby is not None:
            self._attrs["createdby"] = createdby
        if id is not None:
            self._attrs["id"] = id
        if modified is not None:
            self._attrs["modified"] = modified
        if modifiedby is not None:
            self._attrs["modifiedby"] = modifiedby
        if name is not None:
            self._attrs["name"] = name
        if owner is not None:
            self._attrs["owner"] = owner
        if resourcename is not None:
            self._attrs["resourcename"] = resourcename
        if appclientidcreatedby is not None:
            self._attrs["appclientidcreatedby"] = appclientidcreatedby
        if appclientidmodifiedby is not None:
            self._attrs["appclientidmodifiedby"] = appclientidmodifiedby
        if description is not None:
            self._attrs["description"] = description
        if namespace is not None:
            self._attrs["namespace"] = namespace
        if summary is not None:
            self._attrs["summary"] = summary
        if title is not None:
            self._attrs["title"] = title
        # Preserve any extra fields the service returned.
        for k, v in extra.items():
            self._attrs[k] = v
    @property
    def created(self) -> "str":
        """ Gets the created of this Dataset.
        The date and time the object was created.
        """
        return self._attrs.get("created")

    @created.setter
    def created(self, created: "str"):
        """Sets the created of this Dataset.
        The date and time the object was created.
        :param created: The created of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if created is None:
            raise ValueError("Invalid value for `created`, must not be `None`")
        self._attrs["created"] = created
    @property
    def createdby(self) -> "str":
        """ Gets the createdby of this Dataset.
        The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
        """
        return self._attrs.get("createdby")

    @createdby.setter
    def createdby(self, createdby: "str"):
        """Sets the createdby of this Dataset.
        The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
        :param createdby: The createdby of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if createdby is None:
            raise ValueError("Invalid value for `createdby`, must not be `None`")
        self._attrs["createdby"] = createdby
    @property
    def id(self) -> "str":
        """ Gets the id of this Dataset.
        A unique dataset ID.
        """
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        """Sets the id of this Dataset.
        A unique dataset ID.
        :param id: The id of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._attrs["id"] = id
    @property
    def modified(self) -> "str":
        """ Gets the modified of this Dataset.
        The date and time the object was modified.
        """
        return self._attrs.get("modified")

    @modified.setter
    def modified(self, modified: "str"):
        """Sets the modified of this Dataset.
        The date and time the object was modified.
        :param modified: The modified of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if modified is None:
            raise ValueError("Invalid value for `modified`, must not be `None`")
        self._attrs["modified"] = modified
    @property
    def modifiedby(self) -> "str":
        """ Gets the modifiedby of this Dataset.
        The name of the user who most recently modified the object.
        """
        return self._attrs.get("modifiedby")

    @modifiedby.setter
    def modifiedby(self, modifiedby: "str"):
        """Sets the modifiedby of this Dataset.
        The name of the user who most recently modified the object.
        :param modifiedby: The modifiedby of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if modifiedby is None:
            raise ValueError("Invalid value for `modifiedby`, must not be `None`")
        self._attrs["modifiedby"] = modifiedby
    @property
    def name(self) -> "str":
        """ Gets the name of this Dataset.
        The dataset name. Dataset names must be unique within each module.
        """
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Sets the name of this Dataset.
        The dataset name. Dataset names must be unique within each module.
        :param name: The name of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name
    @property
    def owner(self) -> "str":
        """ Gets the owner of this Dataset.
        The name of the object's owner.
        """
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        """Sets the owner of this Dataset.
        The name of the object's owner.
        :param owner: The owner of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if owner is None:
            raise ValueError("Invalid value for `owner`, must not be `None`")
        self._attrs["owner"] = owner
    @property
    def resourcename(self) -> "str":
        """ Gets the resourcename of this Dataset.
        The dataset name qualified by the module name.
        """
        return self._attrs.get("resourcename")

    @resourcename.setter
    def resourcename(self, resourcename: "str"):
        """Sets the resourcename of this Dataset.
        The dataset name qualified by the module name.
        :param resourcename: The resourcename of this Dataset.
        :type: str
        """
        # Required field: None is rejected.
        if resourcename is None:
            raise ValueError("Invalid value for `resourcename`, must not be `None`")
        self._attrs["resourcename"] = resourcename
    @property
    def appclientidcreatedby(self) -> "str":
        """ Gets the appclientidcreatedby of this Dataset.
        AppClientId of the creator app of the dataset.
        """
        return self._attrs.get("appclientidcreatedby")

    @appclientidcreatedby.setter
    def appclientidcreatedby(self, appclientidcreatedby: "str"):
        """Sets the appclientidcreatedby of this Dataset.
        AppClientId of the creator app of the dataset.
        :param appclientidcreatedby: The appclientidcreatedby of this Dataset.
        :type: str
        """
        # Optional field: None is allowed.
        self._attrs["appclientidcreatedby"] = appclientidcreatedby
    @property
    def appclientidmodifiedby(self) -> "str":
        """ Gets the appclientidmodifiedby of this Dataset.
        AppClientId of the modifier app of the dataset.
        """
        return self._attrs.get("appclientidmodifiedby")

    @appclientidmodifiedby.setter
    def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
        """Sets the appclientidmodifiedby of this Dataset.
        AppClientId of the modifier app of the dataset.
        :param appclientidmodifiedby: The appclientidmodifiedby of this Dataset.
        :type: str
        """
        # Optional field: None is allowed.
        self._attrs["appclientidmodifiedby"] = appclientidmodifiedby
@property
def description(self) -> "str":
""" Gets the description of this Dataset.
Detailed description of the dataset.
"""
return self._attrs.get("description")
@description.setter
def description(self, description: "str"):
"""Sets the description of this Dataset.
Detailed description of the dataset.
:param description: The description of this Dataset.
:type: str
"""
self._attrs["description"] = description
@property
def namespace(self) -> "str":
""" Gets the namespace of this Dataset.
The name of the namespace that contains the dataset.
"""
return self._attrs.get("namespace")
@namespace.setter
def namespace(self, namespace: "str"):
"""Sets the namespace of this Dataset.
The name of the namespace that contains the dataset.
:param namespace: The namespace of this Dataset.
:type: str
"""
self._attrs["namespace"] = namespace
@property
def summary(self) -> "str":
""" Gets the summary of this Dataset.
Summary of the dataset's purpose.
"""
return self._attrs.get("summary")
@summary.setter
def summary(self, summary: "str"):
"""Sets the summary of this Dataset.
Summary of the dataset's purpose.
:param summary: The summary of this Dataset.
:type: str
"""
self._attrs["summary"] = summary
@property
def title(self) -> "str":
""" Gets the title of this Dataset.
The title of the dataset. Does not have to be unique.
"""
return self._attrs.get("title")
@title.setter
def title(self, title: "str"):
"""Sets the title of this Dataset.
The title of the dataset. Does not have to be unique.
:param title: The title of this Dataset.
:type: str
"""
self._attrs["title"] = title
    def to_dict(self):
        # NOTE(review): appears intentionally abstract — sibling models in this
        # file implement their own to_dict; confirm this base is never serialized.
        raise NotImplementedError()
class DatasetPATCH(SSCModel):
    """PATCH body for updating a dataset's module, name, or owner."""

    @staticmethod
    def _from_dict(model: dict) -> "DatasetPATCH":
        """Wrap an attribute dict in a DatasetPATCH without copying it."""
        instance = DatasetPATCH.__new__(DatasetPATCH)
        instance._attrs = model
        return instance

    def __init__(self, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """Create a DatasetPATCH; only the arguments that are not None are stored."""
        self._attrs = dict()
        for key, value in (("module", module), ("name", name), ("owner", owner)):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def module(self) -> "str":
        """The name of the module to reassign the dataset into."""
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        """Set the name of the module to reassign the dataset into."""
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """The dataset name; must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the dataset name."""
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The dataset owner's name; obtained from the bearer token."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        """Set the dataset owner's name."""
        self._attrs["owner"] = owner

    def to_dict(self):
        """Serialize, dropping attributes whose value is None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class FieldPOST(SSCModel):
    """POST body describing a single field to attach to a dataset."""

    @staticmethod
    def _from_dict(model: dict) -> "FieldPOST":
        """Wrap an attribute dict in a FieldPOST without copying it."""
        instance = FieldPOST.__new__(FieldPOST)
        instance._attrs = model
        return instance

    def __init__(self, datatype: "FieldDataType" = None, description: "str" = None, fieldtype: "FieldType" = None, indexed: "bool" = None, name: "str" = None, prevalence: "FieldPrevalence" = None, summary: "str" = None, title: "str" = None, **extra):
        """Create a FieldPOST; only the arguments that are not None are stored."""
        self._attrs = dict()
        for key, value in (
            ("datatype", datatype),
            ("description", description),
            ("fieldtype", fieldtype),
            ("indexed", indexed),
            ("name", name),
            ("prevalence", prevalence),
            ("summary", summary),
            ("title", title),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def datatype(self) -> "FieldDataType":
        """The field's data type, as a FieldDataType member."""
        return FieldDataType.from_value(self._attrs.get("datatype"))

    @datatype.setter
    def datatype(self, datatype: "FieldDataType"):
        """Set the data type; accepts a FieldDataType member or a raw string."""
        self._attrs["datatype"] = datatype.value if isinstance(datatype, Enum) else datatype

    @property
    def description(self) -> "str":
        """The field description."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        """Set the field description."""
        self._attrs["description"] = description

    @property
    def fieldtype(self) -> "FieldType":
        """The field's type, as a FieldType member."""
        return FieldType.from_value(self._attrs.get("fieldtype"))

    @fieldtype.setter
    def fieldtype(self, fieldtype: "FieldType"):
        """Set the field type; accepts a FieldType member or a raw string."""
        self._attrs["fieldtype"] = fieldtype.value if isinstance(fieldtype, Enum) else fieldtype

    @property
    def indexed(self) -> "bool":
        """Whether or not the field has been indexed."""
        return self._attrs.get("indexed")

    @indexed.setter
    def indexed(self, indexed: "bool"):
        """Set whether the field has been indexed."""
        self._attrs["indexed"] = indexed

    @property
    def name(self) -> "str":
        """The field name."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the field name."""
        self._attrs["name"] = name

    @property
    def prevalence(self) -> "FieldPrevalence":
        """The field's prevalence, as a FieldPrevalence member."""
        return FieldPrevalence.from_value(self._attrs.get("prevalence"))

    @prevalence.setter
    def prevalence(self, prevalence: "FieldPrevalence"):
        """Set the prevalence; accepts a FieldPrevalence member or a raw string."""
        self._attrs["prevalence"] = prevalence.value if isinstance(prevalence, Enum) else prevalence

    @property
    def summary(self) -> "str":
        """The field summary."""
        return self._attrs.get("summary")

    @summary.setter
    def summary(self, summary: "str"):
        """Set the field summary."""
        self._attrs["summary"] = summary

    @property
    def title(self) -> "str":
        """The field title."""
        return self._attrs.get("title")

    @title.setter
    def title(self, title: "str"):
        """Set the field title."""
        self._attrs["title"] = title

    def to_dict(self):
        """Serialize, dropping attributes whose value is None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class FieldDataType(str, Enum):
    """Data type of a catalog field."""
    DATE = "DATE"
    NUMBER = "NUMBER"
    OBJECT_ID = "OBJECT_ID"
    STRING = "STRING"
    UNKNOWN = "UNKNOWN"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto a member; returns None for unrecognized values."""
        try:
            return FieldDataType(value)
        except ValueError:
            return None
class FieldType(str, Enum):
    """Kind of a catalog field (dimension vs. measure)."""
    DIMENSION = "DIMENSION"
    MEASURE = "MEASURE"
    UNKNOWN = "UNKNOWN"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto a member; returns None for unrecognized values."""
        try:
            return FieldType(value)
        except ValueError:
            return None
class FieldPrevalence(str, Enum):
    """How widely a field occurs across events."""
    ALL = "ALL"
    SOME = "SOME"
    UNKNOWN = "UNKNOWN"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto a member; returns None for unrecognized values."""
        try:
            return FieldPrevalence(value)
        except ValueError:
            return None
class DatasetPOST(SSCModel):
    """POST body for creating a dataset.

    `name` is required; `fields`, `id`, and `module` are optional. Attributes
    are stored in the `_attrs` dict and serialized by `to_dict`.
    """

    @staticmethod
    def _from_dict(model: dict) -> "DatasetPOST":
        """Wrap an attribute dict in a DatasetPOST without copying it."""
        instance = DatasetPOST.__new__(DatasetPOST)
        instance._attrs = model
        return instance

    def __init__(self, name: "str", fields: "List[FieldPOST]" = None, id: "str" = None, module: "str" = None, **extra):
        """Create a DatasetPOST; only the arguments that are not None are stored."""
        self._attrs = dict()
        if name is not None:
            self._attrs["name"] = name
        if fields is not None:
            self._attrs["fields"] = fields
        if id is not None:
            self._attrs["id"] = id
        if module is not None:
            self._attrs["module"] = module
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def name(self) -> "str":
        """The dataset name; must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the dataset name (required; may not be None)."""
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def fields(self) -> "List[FieldPOST]":
        """The fields to be associated with this dataset, as FieldPOST models.

        Returns None when no fields were provided. (Bug fix: the previous
        implementation iterated over None and raised TypeError when the
        'fields' attribute was unset.)
        """
        raw = self._attrs.get("fields")
        if raw is None:
            return None
        return [FieldPOST._from_dict(i) for i in raw]

    @fields.setter
    def fields(self, fields: "List[FieldPOST]"):
        """Set the fields to be associated with this dataset."""
        self._attrs["fields"] = fields

    @property
    def id(self) -> "str":
        """A unique dataset ID; the service assigns a random ID if not provided."""
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        """Set the unique dataset ID."""
        self._attrs["id"] = id

    @property
    def module(self) -> "str":
        """The name of the module to create the new dataset in."""
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        """Set the name of the module to create the new dataset in."""
        self._attrs["module"] = module

    def to_dict(self):
        """Serialize, dropping attributes whose value is None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class TypeEnum(str, Enum):
    """Severity of a search-job message."""
    INFO = "INFO"
    DEBUG = "DEBUG"
    FATAL = "FATAL"
    ERROR = "ERROR"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto a member; returns None for unrecognized values."""
        try:
            return TypeEnum(value)
        except ValueError:
            return None
class Message(SSCModel):
    """A message (text plus severity) attached to a search job."""

    @staticmethod
    def _from_dict(model: dict) -> "Message":
        """Wrap an attribute dict in a Message without copying it."""
        instance = Message.__new__(Message)
        instance._attrs = model
        return instance

    def __init__(self, text: "str" = None, type: "str" = None, **extra):
        """Create a Message; only the arguments that are not None are stored."""
        self._attrs = dict()
        for key, value in (("text", text), ("type", type)):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def text(self) -> "str":
        """The message text."""
        return self._attrs.get("text")

    @text.setter
    def text(self, text: "str"):
        """Set the message text."""
        self._attrs["text"] = text

    @property
    def type(self) -> "TypeEnum":
        """The message severity, as a TypeEnum member."""
        return TypeEnum.from_value(self._attrs.get("type"))

    @type.setter
    def type(self, type: "str"):
        """Set the message severity; accepts a TypeEnum member or a raw string."""
        self._attrs["type"] = type.value if isinstance(type, Enum) else type

    def to_dict(self):
        """Serialize, dropping attributes whose value is None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class QueryParameters(SSCModel):
    """Time-related parameters ('earliest', 'latest', anchor, timezone) for a search job."""

    @staticmethod
    def _from_dict(model: dict) -> "QueryParameters":
        """Wrap an attribute dict in a QueryParameters without copying it."""
        instance = QueryParameters.__new__(QueryParameters)
        instance._attrs = model
        return instance

    def __init__(self, earliest: "str" = '-24h@h', latest: "str" = 'now', relative_time_anchor: "datetime" = None, timezone: "object" = None, **extra):
        """Create QueryParameters; only the arguments that are not None are stored."""
        self._attrs = dict()
        for key, value in (
            ("earliest", earliest),
            ("latest", latest),
            ("relativeTimeAnchor", relative_time_anchor),
            ("timezone", timezone),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def earliest(self) -> "str":
        """Earliest event time to retrieve, in absolute (UNIX / ISO-8601 UTC) or relative format."""
        return self._attrs.get("earliest")

    @earliest.setter
    def earliest(self, earliest: "str"):
        """Set the earliest event time (absolute or relative format)."""
        self._attrs["earliest"] = earliest

    @property
    def latest(self) -> "str":
        """Latest event time to retrieve, in absolute (UNIX / ISO-8601 UTC) or relative format."""
        return self._attrs.get("latest")

    @latest.setter
    def latest(self, latest: "str"):
        """Set the latest event time (absolute or relative format)."""
        self._attrs["latest"] = latest

    @property
    def relative_time_anchor(self) -> "datetime":
        """Anchor time that relative 'earliest'/'latest' values snap against.

        Defaults to the search job's creation time when not specified.
        """
        return self._attrs.get("relativeTimeAnchor")

    @relative_time_anchor.setter
    def relative_time_anchor(self, relative_time_anchor: "datetime"):
        """Set the anchor used when resolving relative time specifiers."""
        self._attrs["relativeTimeAnchor"] = relative_time_anchor

    @property
    def timezone(self) -> "object":
        """Timezone applied to relative time literals; ignored for absolute times."""
        return self._attrs.get("timezone")

    @timezone.setter
    def timezone(self, timezone: "object"):
        """Set the timezone used when resolving relative time literals."""
        self._attrs["timezone"] = timezone

    def to_dict(self):
        """Serialize, dropping attributes whose value is None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class SearchStatus(str, Enum):
    """Lifecycle state of a search job."""
    RUNNING = "running"
    DONE = "done"
    CANCELED = "canceled"
    FAILED = "failed"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto a member; returns None for unrecognized values."""
        try:
            return SearchStatus(value)
        except ValueError:
            return None
class DeleteSearchJob(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "DeleteSearchJob":
instance = DeleteSearchJob.__new__(DeleteSearchJob)
instance._attrs = model
return instance
def __init__(self, index: "str", module: "str", predicate: "str", allow_side_effects: "bool" = True, collect_event_summary: "bool" = False, collect_field_summary: "bool" = False, collect_time_buckets: "bool" = False, completion_time: "str" = None, dispatch_time: "str" = None, enable_preview: "bool" = False, extract_all_fields: "bool" = False, extract_fields: "str" = '', max_time: "int" = 3600, messages: "List[Message]" = None, name: "str" = None, percent_complete: "int" = 0, preview_available: "str" = 'false', query: "str" = None, query_parameters: "QueryParameters" = None, required_freshness: "int" = 0, resolved_earliest: "str" = None, resolved_latest: "str" = None, results_available: "int" = 0, results_preview_available: "int" = 0, sid: "str" = None, status: "SearchStatus" = None, **extra):
"""DeleteSearchJob"""
self._attrs = dict()
if index is not None:
self._attrs["index"] = index
if module is not None:
self._attrs["module"] = module
if predicate is not None:
self._attrs["predicate"] = predicate
if allow_side_effects is not None:
self._attrs["allowSideEffects"] = allow_side_effects
if collect_event_summary is not None:
self._attrs["collectEventSummary"] = collect_event_summary
if collect_field_summary is not None:
self._attrs["collectFieldSummary"] = collect_field_summary
if collect_time_buckets is not None:
self._attrs["collectTimeBuckets"] = collect_time_buckets
if completion_time is not None:
self._attrs["completionTime"] = completion_time
if dispatch_time is not None:
self._attrs["dispatchTime"] = dispatch_time
if enable_preview is not None:
self._attrs["enablePreview"] = enable_preview
if extract_all_fields is not None:
self._attrs["extractAllFields"] = extract_all_fields
if extract_fields is not None:
self._attrs["extractFields"] = extract_fields
if max_time is not None:
self._attrs["maxTime"] = max_time
if messages is not None:
self._attrs["messages"] = messages
if name is not None:
self._attrs["name"] = name
if percent_complete is not None:
self._attrs["percentComplete"] = percent_complete
if preview_available is not None:
self._attrs["previewAvailable"] = preview_available
if query is not None:
self._attrs["query"] = query
if query_parameters is not None:
self._attrs["queryParameters"] = query_parameters.to_dict()
if required_freshness is not None:
self._attrs["requiredFreshness"] = required_freshness
if resolved_earliest is not None:
self._attrs["resolvedEarliest"] = resolved_earliest
if resolved_latest is not None:
self._attrs["resolvedLatest"] = resolved_latest
if results_available is not None:
self._attrs["resultsAvailable"] = results_available
if results_preview_available is not None:
self._attrs["resultsPreviewAvailable"] = results_preview_available
if sid is not None:
self._attrs["sid"] = sid
if status is not None:
self._attrs["status"] = status
for k, v in extra.items():
self._attrs[k] = v
@property
def index(self) -> "str":
""" Gets the index of this DeleteSearchJob.
The index to delete events from.
"""
return self._attrs.get("index")
@index.setter
def index(self, index: "str"):
"""Sets the index of this DeleteSearchJob.
The index to delete events from.
:param index: The index of this DeleteSearchJob.
:type: str
"""
if index is None:
raise ValueError("Invalid value for `index`, must not be `None`")
self._attrs["index"] = index
@property
def module(self) -> "str":
""" Gets the module of this DeleteSearchJob.
The module to run the delete search job in. The default module is used if module field is empty.
"""
return self._attrs.get("module")
@module.setter
def module(self, module: "str"):
"""Sets the module of this DeleteSearchJob.
The module to run the delete search job in. The default module is used if module field is empty.
:param module: The module of this DeleteSearchJob.
:type: str
"""
if module is None:
raise ValueError("Invalid value for `module`, must not be `None`")
self._attrs["module"] = module
@property
def predicate(self) -> "str":
""" Gets the predicate of this DeleteSearchJob.
The predicate expression that identifies the events to delete from the index. This expression must return true or false. To delete all events from the index, specify \"true\" instead of an expression.
"""
return self._attrs.get("predicate")
@predicate.setter
def predicate(self, predicate: "str"):
"""Sets the predicate of this DeleteSearchJob.
The predicate expression that identifies the events to delete from the index. This expression must return true or false. To delete all events from the index, specify \"true\" instead of an expression.
:param predicate: The predicate of this DeleteSearchJob.
:type: str
"""
if predicate is None:
raise ValueError("Invalid value for `predicate`, must not be `None`")
self._attrs["predicate"] = predicate
@property
def allow_side_effects(self) -> "bool":
""" Gets the allow_side_effects of this DeleteSearchJob.
Specifies that the delete search job will contain side effects, with possible security risks.
"""
return self._attrs.get("allowSideEffects")
@allow_side_effects.setter
def allow_side_effects(self, allow_side_effects: "bool"):
"""Sets the allow_side_effects of this DeleteSearchJob.
Specifies that the delete search job will contain side effects, with possible security risks.
:param allow_side_effects: The allow_side_effects of this DeleteSearchJob.
:type: bool
"""
self._attrs["allowSideEffects"] = allow_side_effects
@property
def collect_event_summary(self) -> "bool":
""" Gets the collect_event_summary of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
"""
return self._attrs.get("collectEventSummary")
@collect_event_summary.setter
def collect_event_summary(self, collect_event_summary: "bool"):
"""Sets the collect_event_summary of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
:param collect_event_summary: The collect_event_summary of this DeleteSearchJob.
:type: bool
"""
self._attrs["collectEventSummary"] = collect_event_summary
@property
def collect_field_summary(self) -> "bool":
""" Gets the collect_field_summary of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
"""
return self._attrs.get("collectFieldSummary")
@collect_field_summary.setter
def collect_field_summary(self, collect_field_summary: "bool"):
"""Sets the collect_field_summary of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
:param collect_field_summary: The collect_field_summary of this DeleteSearchJob.
:type: bool
"""
self._attrs["collectFieldSummary"] = collect_field_summary
@property
def collect_time_buckets(self) -> "bool":
""" Gets the collect_time_buckets of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
"""
return self._attrs.get("collectTimeBuckets")
@collect_time_buckets.setter
def collect_time_buckets(self, collect_time_buckets: "bool"):
"""Sets the collect_time_buckets of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
:param collect_time_buckets: The collect_time_buckets of this DeleteSearchJob.
:type: bool
"""
self._attrs["collectTimeBuckets"] = collect_time_buckets
@property
def completion_time(self) -> "str":
""" Gets the completion_time of this DeleteSearchJob.
The time, in GMT, that the search job is finished. Empty if the search job has not completed.
"""
return self._attrs.get("completionTime")
@completion_time.setter
def completion_time(self, completion_time: "str"):
"""Sets the completion_time of this DeleteSearchJob.
The time, in GMT, that the search job is finished. Empty if the search job has not completed.
:param completion_time: The completion_time of this DeleteSearchJob.
:type: str
"""
self._attrs["completionTime"] = completion_time
@property
def dispatch_time(self) -> "str":
""" Gets the dispatch_time of this DeleteSearchJob.
The time, in GMT, that the search job is dispatched.
"""
return self._attrs.get("dispatchTime")
@dispatch_time.setter
def dispatch_time(self, dispatch_time: "str"):
"""Sets the dispatch_time of this DeleteSearchJob.
The time, in GMT, that the search job is dispatched.
:param dispatch_time: The dispatch_time of this DeleteSearchJob.
:type: str
"""
self._attrs["dispatchTime"] = dispatch_time
@property
def enable_preview(self) -> "bool":
""" Gets the enable_preview of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
"""
return self._attrs.get("enablePreview")
@enable_preview.setter
def enable_preview(self, enable_preview: "bool"):
"""Sets the enable_preview of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
:param enable_preview: The enable_preview of this DeleteSearchJob.
:type: bool
"""
self._attrs["enablePreview"] = enable_preview
@property
def extract_all_fields(self) -> "bool":
""" Gets the extract_all_fields of this DeleteSearchJob.
Specifies whether the Search service should extract all of the available fields in the data, including fields not mentioned in the SPL for the search job. Set to 'false' for better search peformance. The 'extractAllFields' parameter is deprecated as of version v3alpha1. Although this parameter continues to function, it might be removed in a future version. Use the 'extractFields' parameter instead.
"""
return self._attrs.get("extractAllFields")
@extract_all_fields.setter
def extract_all_fields(self, extract_all_fields: "bool"):
"""Sets the extract_all_fields of this DeleteSearchJob.
Specifies whether the Search service should extract all of the available fields in the data, including fields not mentioned in the SPL for the search job. Set to 'false' for better search peformance. The 'extractAllFields' parameter is deprecated as of version v3alpha1. Although this parameter continues to function, it might be removed in a future version. Use the 'extractFields' parameter instead.
:param extract_all_fields: The extract_all_fields of this DeleteSearchJob.
:type: bool
"""
self._attrs["extractAllFields"] = extract_all_fields
@property
def extract_fields(self) -> "str":
""" Gets the extract_fields of this DeleteSearchJob.
Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' will extract all fields, 'indexed' will extract only indexed fields, and 'none' will extract only the default fields. This parameter overwrites the value of the 'extractAllFields' parameter. Set to 'none' for better search performance.
"""
return self._attrs.get("extractFields")
@extract_fields.setter
def extract_fields(self, extract_fields: "str"):
"""Sets the extract_fields of this DeleteSearchJob.
Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' will extract all fields, 'indexed' will extract only indexed fields, and 'none' will extract only the default fields. This parameter overwrites the value of the 'extractAllFields' parameter. Set to 'none' for better search performance.
:param extract_fields: The extract_fields of this DeleteSearchJob.
:type: str
"""
self._attrs["extractFields"] = extract_fields
@property
def max_time(self) -> "int":
""" Gets the max_time of this DeleteSearchJob.
The amount of time, in seconds, to run the delete search job before finalizing the search. The maximum value is 3600 seconds (1 hour).
"""
return self._attrs.get("maxTime")
@max_time.setter
def max_time(self, max_time: "int"):
"""Sets the max_time of this DeleteSearchJob.
The amount of time, in seconds, to run the delete search job before finalizing the search. The maximum value is 3600 seconds (1 hour).
:param max_time: The max_time of this DeleteSearchJob.
:type: int
"""
self._attrs["maxTime"] = max_time
@property
def messages(self) -> "List[Message]":
""" Gets the messages of this DeleteSearchJob.
"""
return [Message._from_dict(i) for i in self._attrs.get("messages")]
@messages.setter
def messages(self, messages: "List[Message]"):
"""Sets the messages of this DeleteSearchJob.
:param messages: The messages of this DeleteSearchJob.
:type: List[Message]
"""
self._attrs["messages"] = messages
@property
def name(self) -> "str":
""" Gets the name of this DeleteSearchJob.
The name of the created search job.
"""
return self._attrs.get("name")
@name.setter
def name(self, name: "str"):
"""Sets the name of this DeleteSearchJob.
The name of the created search job.
:param name: The name of this DeleteSearchJob.
:type: str
"""
self._attrs["name"] = name
@property
def percent_complete(self) -> "int":
""" Gets the percent_complete of this DeleteSearchJob.
An estimate of the percent of time remaining before the delete search job completes.
"""
return self._attrs.get("percentComplete")
@percent_complete.setter
def percent_complete(self, percent_complete: "int"):
"""Sets the percent_complete of this DeleteSearchJob.
An estimate of the percent of time remaining before the delete search job completes.
:param percent_complete: The percent_complete of this DeleteSearchJob.
:type: int
"""
self._attrs["percentComplete"] = percent_complete
@property
def preview_available(self) -> "str":
""" Gets the preview_available of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
"""
return self._attrs.get("previewAvailable")
@preview_available.setter
def preview_available(self, preview_available: "str"):
"""Sets the preview_available of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to false.
:param preview_available: The preview_available of this DeleteSearchJob.
:type: str
"""
self._attrs["previewAvailable"] = preview_available
@property
def query(self) -> "str":
""" Gets the query of this DeleteSearchJob.
The SPL search string that is generated based on index, module and predicate that are specified.
"""
return self._attrs.get("query")
@query.setter
def query(self, query: "str"):
"""Sets the query of this DeleteSearchJob.
The SPL search string that is generated based on index, module and predicate that are specified.
:param query: The query of this DeleteSearchJob.
:type: str
"""
self._attrs["query"] = query
@property
def query_parameters(self) -> "QueryParameters":
""" Gets the query_parameters of this DeleteSearchJob.
Represents parameters on the search job such as 'earliest' and 'latest'.
"""
return QueryParameters._from_dict(self._attrs["queryParameters"])
@query_parameters.setter
def query_parameters(self, query_parameters: "QueryParameters"):
"""Sets the query_parameters of this DeleteSearchJob.
Represents parameters on the search job such as 'earliest' and 'latest'.
:param query_parameters: The query_parameters of this DeleteSearchJob.
:type: QueryParameters
"""
self._attrs["queryParameters"] = query_parameters.to_dict()
@property
def required_freshness(self) -> "int":
""" Gets the required_freshness of this DeleteSearchJob.
This field does not apply to delete search jobs and is set to 0.
"""
return self._attrs.get("requiredFreshness")
@required_freshness.setter
def required_freshness(self, required_freshness: "int"):
"""Sets the required_freshness of this DeleteSearchJob.
This field does not apply to delete search jobs and is set to 0.
:param required_freshness: The required_freshness of this DeleteSearchJob.
:type: int
"""
self._attrs["requiredFreshness"] = required_freshness
@property
def resolved_earliest(self) -> "str":
""" Gets the resolved_earliest of this DeleteSearchJob.
The earliest time speciifed as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
"""
return self._attrs.get("resolvedEarliest")
@resolved_earliest.setter
def resolved_earliest(self, resolved_earliest: "str"):
"""Sets the resolved_earliest of this DeleteSearchJob.
The earliest time speciifed as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
:param resolved_earliest: The resolved_earliest of this DeleteSearchJob.
:type: str
"""
self._attrs["resolvedEarliest"] = resolved_earliest
@property
def resolved_latest(self) -> "str":
""" Gets the resolved_latest of this DeleteSearchJob.
The latest time specified as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
"""
return self._attrs.get("resolvedLatest")
@resolved_latest.setter
def resolved_latest(self, resolved_latest: "str"):
"""Sets the resolved_latest of this DeleteSearchJob.
The latest time specified as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
:param resolved_latest: The resolved_latest of this DeleteSearchJob.
:type: str
"""
self._attrs["resolvedLatest"] = resolved_latest
@property
def results_available(self) -> "int":
""" Gets the results_available of this DeleteSearchJob.
The number of results produced so far by the delete search job that are going to be deleted.
"""
return self._attrs.get("resultsAvailable")
@results_available.setter
def results_available(self, results_available: "int"):
"""Sets the results_available of this DeleteSearchJob.
The number of results produced so far by the delete search job that are going to be deleted.
:param results_available: The results_available of this DeleteSearchJob.
:type: int
"""
self._attrs["resultsAvailable"] = results_available
@property
def results_preview_available(self) -> "int":
""" Gets the results_preview_available of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to 0.
"""
return self._attrs.get("resultsPreviewAvailable")
@results_preview_available.setter
def results_preview_available(self, results_preview_available: "int"):
"""Sets the results_preview_available of this DeleteSearchJob.
This field does not apply to delete search jobs and is defaulted to 0.
:param results_preview_available: The results_preview_available of this DeleteSearchJob.
:type: int
"""
self._attrs["resultsPreviewAvailable"] = results_preview_available
@property
def sid(self) -> "str":
""" Gets the sid of this DeleteSearchJob.
The ID assigned to the delete search job.
"""
return self._attrs.get("sid")
@sid.setter
def sid(self, sid: "str"):
"""Sets the sid of this DeleteSearchJob.
The ID assigned to the delete search job.
:param sid: The sid of this DeleteSearchJob.
:type: str
"""
self._attrs["sid"] = sid
@property
def status(self) -> "SearchStatus":
""" Gets the status of this DeleteSearchJob.
"""
return SearchStatus.from_value(self._attrs.get("status"))
@status.setter
def status(self, status: "SearchStatus"):
"""Sets the status of this DeleteSearchJob.
:param status: The status of this DeleteSearchJob.
:type: SearchStatus
"""
if isinstance(status, Enum):
self._attrs["status"] = status.value
else:
self._attrs["status"] = status # If you supply a string, we presume you know the service will take it.
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
class FederatedConnection(SSCModel):
    """A federated connection to a remote instance.

    All state lives in the internal ``_attrs`` dict; each attribute below is
    a read/write property over an entry of that dict.
    """

    @staticmethod
    def _from_dict(model: dict) -> "FederatedConnection":
        """Wrap a raw attribute dict in a FederatedConnection (no copy)."""
        obj = FederatedConnection.__new__(FederatedConnection)
        obj._attrs = model
        return obj

    def __init__(self, created: "str" = None, createdby: "str" = None, hostnameip: "str" = None, modified: "str" = None, modifiedby: "str" = None, name: "str" = None, port: "float" = None, serviceaccountuser: "str" = None, **extra):
        """FederatedConnection

        Only arguments that are not None are stored; any extra keyword
        arguments are kept verbatim.
        """
        self._attrs = dict()
        for key, value in (
            ("created", created),
            ("createdby", createdby),
            ("hostnameip", hostnameip),
            ("modified", modified),
            ("modifiedby", modifiedby),
            ("name", name),
            ("port", port),
            ("serviceaccountuser", serviceaccountuser),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    # Factory building a read/write property over self._attrs[key]; used only
    # while the class body executes and deleted immediately afterwards.
    def _attr_prop(key, doc):
        def _get(self):
            return self._attrs.get(key)

        def _set(self, value):
            self._attrs[key] = value

        return property(_get, _set, doc=doc)

    created = _attr_prop("created", "The timestamp when the federated connection was created.")
    createdby = _attr_prop("createdby", "The user who created the federated connection.")
    hostnameip = _attr_prop("hostnameip", "The remote hostname to connect to.")
    modified = _attr_prop("modified", "The timestamp when the federated connection was modified.")
    modifiedby = _attr_prop("modifiedby", "The user who last modified the federated connection.")
    name = _attr_prop("name", "The name of the federated connection.")
    port = _attr_prop("port", "The remote port number.")
    serviceaccountuser = _attr_prop("serviceaccountuser", "The username on the service account.")
    del _attr_prop

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None values."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class FederatedConnectionInput(SSCModel):
    """Input payload for creating or updating a federated connection.

    All state lives in the internal ``_attrs`` dict; each attribute below is
    a read/write property over an entry of that dict.
    """

    @staticmethod
    def _from_dict(model: dict) -> "FederatedConnectionInput":
        """Wrap a raw attribute dict in a FederatedConnectionInput (no copy)."""
        obj = FederatedConnectionInput.__new__(FederatedConnectionInput)
        obj._attrs = model
        return obj

    def __init__(self, hostnameip: "str" = None, name: "str" = None, port: "float" = None, serviceaccountpassword: "str" = None, serviceaccountuser: "str" = None, **extra):
        """FederatedConnectionInput

        Only arguments that are not None are stored; any extra keyword
        arguments are kept verbatim.
        """
        self._attrs = dict()
        for key, value in (
            ("hostnameip", hostnameip),
            ("name", name),
            ("port", port),
            ("serviceaccountpassword", serviceaccountpassword),
            ("serviceaccountuser", serviceaccountuser),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    # Factory building a read/write property over self._attrs[key]; used only
    # while the class body executes and deleted immediately afterwards.
    def _attr_prop(key, doc):
        def _get(self):
            return self._attrs.get(key)

        def _set(self, value):
            self._attrs[key] = value

        return property(_get, _set, doc=doc)

    hostnameip = _attr_prop("hostnameip", "The remote hostname to connect to.")
    name = _attr_prop("name", "The name of the federated connection.")
    port = _attr_prop("port", "The remote port number.")
    serviceaccountpassword = _attr_prop("serviceaccountpassword", "The password of the service account.")
    serviceaccountuser = _attr_prop("serviceaccountuser", "The username on the service account.")
    del _attr_prop

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None values."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class FederatedDataset(Dataset):
    """A dataset of kind "federated", proxying a dataset on a remote instance.

    All state lives in the internal ``_attrs`` dict. Required attributes
    (created, createdby, id, modified, modifiedby, name, owner, resourcename)
    reject None in their setters; the rest accept any value.
    """

    @staticmethod
    def _from_dict(model: dict) -> "FederatedDataset":
        """Wrap a raw attribute dict in a FederatedDataset (no copy)."""
        obj = FederatedDataset.__new__(FederatedDataset)
        obj._attrs = model
        return obj

    def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, description: "str" = None, federated_connection: "str" = None, federated_dataset: "str" = None, federated_dataset_kind: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, **extra):
        """FederatedDataset

        Only non-None arguments are stored; ``kind`` is always forced to
        "federated". Extra keyword arguments are kept verbatim.
        """
        self._attrs = dict()
        for key, value in (
            ("created", created),
            ("createdby", createdby),
            ("id", id),
            ("modified", modified),
            ("modifiedby", modifiedby),
            ("name", name),
            ("owner", owner),
            ("resourcename", resourcename),
            ("appclientidcreatedby", appclientidcreatedby),
            ("appclientidmodifiedby", appclientidmodifiedby),
            ("description", description),
            ("federatedConnection", federated_connection),
            ("federatedDataset", federated_dataset),
            ("federatedDatasetKind", federated_dataset_kind),
        ):
            if value is not None:
                self._attrs[key] = value
        # Insertion position preserved from the generated code: "kind" goes in
        # after federatedDatasetKind and before namespace/summary/title.
        self._attrs["kind"] = "federated"
        for key, value in (
            ("namespace", namespace),
            ("summary", summary),
            ("title", title),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    # Property factories, used only while the class body executes and deleted
    # immediately afterwards. Required properties raise ValueError on None,
    # with the exact message the generated code produced.
    def _required_prop(key, doc):
        def _get(self):
            return self._attrs.get(key)

        def _set(self, value):
            if value is None:
                raise ValueError("Invalid value for `%s`, must not be `None`" % key)
            self._attrs[key] = value

        return property(_get, _set, doc=doc)

    def _optional_prop(key, doc):
        def _get(self):
            return self._attrs.get(key)

        def _set(self, value):
            self._attrs[key] = value

        return property(_get, _set, doc=doc)

    created = _required_prop("created", "The date and time the object was created.")
    createdby = _required_prop("createdby", "The name of the user who created the object. This value is obtained from the bearer token and may not be changed.")
    id = _required_prop("id", "A unique dataset ID.")
    modified = _required_prop("modified", "The date and time the object was modified.")
    modifiedby = _required_prop("modifiedby", "The name of the user who most recently modified the object.")
    name = _required_prop("name", "The dataset name. Dataset names must be unique within each module.")
    owner = _required_prop("owner", "The name of the object's owner.")
    resourcename = _required_prop("resourcename", "The dataset name qualified by the module name.")
    appclientidcreatedby = _optional_prop("appclientidcreatedby", "AppClientId of the creator app of the dataset.")
    appclientidmodifiedby = _optional_prop("appclientidmodifiedby", "AppClientId of the modifier app of the dataset.")
    description = _optional_prop("description", "Detailed description of the dataset.")
    federated_connection = _optional_prop("federatedConnection", "Connection information to connect to the remote federated connection.")
    federated_dataset = _optional_prop("federatedDataset", "Dataset information in the remote instance.")
    federated_dataset_kind = _optional_prop("federatedDatasetKind", "Dataset kind information in the remote instance.")
    namespace = _optional_prop("namespace", "The name of the namespace that contains the dataset.")
    summary = _optional_prop("summary", "Summary of the dataset's purpose.")
    title = _optional_prop("title", "The title of the dataset. Does not have to be unique.")
    del _required_prop, _optional_prop

    @property
    def kind(self) -> str:
        """The dataset kind; always "federated" for this class (read-only)."""
        return "federated"

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None values."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
# Register the decoder so Dataset._from_dict can dispatch payloads whose
# "kind" field is "federated" to FederatedDataset.
Dataset.from_dict_handlers["federated"] = FederatedDataset._from_dict
class FederatedDatasetKind(str, Enum):
    """The admissible `kind` values for a federated dataset (only one)."""

    FEDERATED = "federated"

    @staticmethod
    def from_value(value: str):
        """Map a raw string to the matching member; any other value yields None."""
        return FederatedDatasetKind.FEDERATED if value == "federated" else None
class FederatedDatasetPATCH(DatasetPATCH):
    """Patch body for updating a federated dataset; every field is optional.

    All state lives in the internal ``_attrs`` dict; each attribute below is
    a read/write property over an entry of that dict.
    """

    @staticmethod
    def _from_dict(model: dict) -> "FederatedDatasetPATCH":
        """Wrap a raw attribute dict in a FederatedDatasetPATCH (no copy)."""
        obj = FederatedDatasetPATCH.__new__(FederatedDatasetPATCH)
        obj._attrs = model
        return obj

    def __init__(self, federated_connection: "str" = None, federated_dataset: "str" = None, federated_dataset_kind: "str" = None, kind: "FederatedDatasetKind" = None, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """FederatedDatasetPATCH

        Only non-None arguments are stored (``kind`` is stored as passed, not
        unwrapped); extra keyword arguments are kept verbatim.
        """
        self._attrs = dict()
        for key, value in (
            ("federatedConnection", federated_connection),
            ("federatedDataset", federated_dataset),
            ("federatedDatasetKind", federated_dataset_kind),
            ("kind", kind),
            ("module", module),
            ("name", name),
            ("owner", owner),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    # Factory building a read/write property over self._attrs[key]; used only
    # while the class body executes and deleted immediately afterwards.
    def _attr_prop(key, doc):
        def _get(self):
            return self._attrs.get(key)

        def _set(self, value):
            self._attrs[key] = value

        return property(_get, _set, doc=doc)

    federated_connection = _attr_prop("federatedConnection", "Connection information to connect to the remote federated connection.")
    federated_dataset = _attr_prop("federatedDataset", "Dataset information in the remote instance.")
    federated_dataset_kind = _attr_prop("federatedDatasetKind", "Dataset kind information in the remote instance.")
    module = _attr_prop("module", "The name of the module to reassign the dataset into.")
    name = _attr_prop("name", "The dataset name. Dataset names must be unique within each module.")
    owner = _attr_prop("owner", "The name of the dataset owner. This value is obtained from the bearer token.")
    del _attr_prop

    @property
    def kind(self) -> "FederatedDatasetKind":
        """The dataset kind, decoded to a FederatedDatasetKind member."""
        return FederatedDatasetKind.from_value(self._attrs.get("kind"))

    @kind.setter
    def kind(self, kind: "FederatedDatasetKind"):
        """Set the kind; Enum members are unwrapped to their string value, a
        raw string is stored as-is."""
        self._attrs["kind"] = kind.value if isinstance(kind, Enum) else kind

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None values."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class SingleFieldSummary(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "SingleFieldSummary":
instance = SingleFieldSummary.__new__(SingleFieldSummary)
instance._attrs = model
return instance
def __init__(self, count: "int" = None, distinct_count: "int" = None, is_exact: "bool" = None, max: "str" = None, mean: "float" = None, min: "str" = None, modes: "List[SingleValueMode]" = None, numeric_count: "int" = None, relevant: "bool" = None, stddev: "float" = None, **extra):
"""SingleFieldSummary"""
self._attrs = dict()
if count is not None:
self._attrs["count"] = count
if distinct_count is not None:
self._attrs["distinctCount"] = distinct_count
if is_exact is not None:
self._attrs["isExact"] = is_exact
if max is not None:
self._attrs["max"] = max
if mean is not None:
self._attrs["mean"] = mean
if min is not None:
self._attrs["min"] = min
if modes is not None:
self._attrs["modes"] = modes
if numeric_count is not None:
self._attrs["numericCount"] = numeric_count
if relevant is not None:
self._attrs["relevant"] = relevant
if stddev is not None:
self._attrs["stddev"] = stddev
for k, v in extra.items():
self._attrs[k] = v
@property
def count(self) -> "int":
""" Gets the count of this SingleFieldSummary.
The total number of events that contain the field.
"""
return self._attrs.get("count")
@count.setter
def count(self, count: "int"):
"""Sets the count of this SingleFieldSummary.
The total number of events that contain the field.
:param count: The count of this SingleFieldSummary.
:type: int
"""
self._attrs["count"] = count
@property
def distinct_count(self) -> "int":
""" Gets the distinct_count of this SingleFieldSummary.
The total number of unique values in the field.
"""
return self._attrs.get("distinctCount")
@distinct_count.setter
def distinct_count(self, distinct_count: "int"):
"""Sets the distinct_count of this SingleFieldSummary.
The total number of unique values in the field.
:param distinct_count: The distinct_count of this SingleFieldSummary.
:type: int
"""
self._attrs["distinctCount"] = distinct_count
@property
def is_exact(self) -> "bool":
""" Gets the is_exact of this SingleFieldSummary.
Specifies if the 'distinctCount' is accurate. The 'isExact' property is FALSE when the 'distinctCount' exceeds the maximum count and an exact count is not available.
"""
return self._attrs.get("isExact")
@is_exact.setter
def is_exact(self, is_exact: "bool"):
    """Set whether 'distinctCount' is accurate."""
    self._attrs["isExact"] = is_exact
@property
def max(self) -> "str":
""" Gets the max of this SingleFieldSummary.
The maximum numeric values in the field.
"""
return self._attrs.get("max")
@max.setter
def max(self, max: "str"):
    """Set the maximum of the numeric values in the field."""
    self._attrs["max"] = max
@property
def mean(self) -> "float":
""" Gets the mean of this SingleFieldSummary.
The mean (average) for the numeric values in the field.
"""
return self._attrs.get("mean")
@mean.setter
def mean(self, mean: "float"):
    """Set the mean (average) of the numeric values in the field."""
    self._attrs["mean"] = mean
@property
def min(self) -> "str":
""" Gets the min of this SingleFieldSummary.
The minimum numeric values in the field.
"""
return self._attrs.get("min")
@min.setter
def min(self, min: "str"):
    """Set the minimum of the numeric values in the field."""
    self._attrs["min"] = min
@property
def modes(self) -> "List[SingleValueMode]":
""" Gets the modes of this SingleFieldSummary.
An array of the values in the field.
"""
return [SingleValueMode._from_dict(i) for i in self._attrs.get("modes")]
@modes.setter
def modes(self, modes: "List[SingleValueMode]"):
    """Set the array of values in the field."""
    self._attrs["modes"] = modes
@property
def numeric_count(self) -> "int":
""" Gets the numeric_count of this SingleFieldSummary.
The count of the numeric values in the field.
"""
return self._attrs.get("numericCount")
@numeric_count.setter
def numeric_count(self, numeric_count: "int"):
    """Set the count of the numeric values in the field."""
    self._attrs["numericCount"] = numeric_count
@property
def relevant(self) -> "bool":
""" Gets the relevant of this SingleFieldSummary.
Specifies if the field was added or changed by the search.
"""
return self._attrs.get("relevant")
@relevant.setter
def relevant(self, relevant: "bool"):
    """Set whether the field was added or changed by the search."""
    self._attrs["relevant"] = relevant
@property
def stddev(self) -> "float":
""" Gets the stddev of this SingleFieldSummary.
The standard deviation for the numeric values in the field.
"""
return self._attrs.get("stddev")
@stddev.setter
def stddev(self, stddev: "float"):
    """Set the standard deviation of the numeric values in the field."""
    self._attrs["stddev"] = stddev
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
class SingleValueMode(SSCModel):
    """A single field value together with its occurrence count."""

    @staticmethod
    def _from_dict(model: dict) -> "SingleValueMode":
        """Wrap an attribute dict in a SingleValueMode without re-validating it."""
        instance = SingleValueMode.__new__(SingleValueMode)
        instance._attrs = model
        return instance

    def __init__(self, count: "int" = None, is_exact: "bool" = None, value: "str" = None, **extra):
        """Create a SingleValueMode; None-valued arguments are omitted."""
        self._attrs = dict()
        for key, val in (("count", count), ("isExact", is_exact), ("value", value)):
            if val is not None:
                self._attrs[key] = val
        self._attrs.update(extra)

    @property
    def count(self) -> "int":
        """The number of occurrences of the value in a field."""
        return self._attrs.get("count", None)

    @count.setter
    def count(self, count: "int"):
        """Set the number of occurrences of the value in a field."""
        self._attrs["count"] = count

    @property
    def is_exact(self) -> "bool":
        """Whether the count is accurate; False once the maximum count is exceeded."""
        return self._attrs.get("isExact", None)

    @is_exact.setter
    def is_exact(self, is_exact: "bool"):
        """Set whether the count is accurate."""
        self._attrs["isExact"] = is_exact

    @property
    def value(self) -> "str":
        """The value in the field."""
        return self._attrs.get("value", None)

    @value.setter
    def value(self, value: "str"):
        """Set the value in the field."""
        self._attrs["value"] = value

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None-valued entries."""
        return {key: val for (key, val) in self._attrs.items() if val is not None}
class FieldsSummary(SSCModel):
    """Summary statistics for all fields over a time range."""

    @staticmethod
    def _from_dict(model: dict) -> "FieldsSummary":
        """Wrap an attribute dict in a FieldsSummary without re-validating it."""
        instance = FieldsSummary.__new__(FieldsSummary)
        instance._attrs = model
        return instance

    def __init__(self, duration: "float" = None, earliest_time: "str" = None, event_count: "int" = None, fields: "Dict[str, SingleFieldSummary]" = None, latest_time: "str" = None, **extra):
        """Create a FieldsSummary; None-valued arguments are omitted."""
        self._attrs = dict()
        for key, val in (
            ("duration", duration),
            ("earliestTime", earliest_time),
            ("eventCount", event_count),
            ("fields", fields),
            ("latestTime", latest_time),
        ):
            if val is not None:
                self._attrs[key] = val
        self._attrs.update(extra)

    @property
    def duration(self) -> "float":
        """Seconds a time bucket spans from the earliest to the latest time."""
        return self._attrs.get("duration", None)

    @duration.setter
    def duration(self, duration: "float"):
        """Set the bucket duration in seconds."""
        self._attrs["duration"] = duration

    @property
    def earliest_time(self) -> "str":
        """Earliest timestamp (UTC) of the events to process, if specified."""
        return self._attrs.get("earliestTime", None)

    @earliest_time.setter
    def earliest_time(self, earliest_time: "str"):
        """Set the earliest timestamp (UTC) of the events to process."""
        self._attrs["earliestTime"] = earliest_time

    @property
    def event_count(self) -> "int":
        """Total events for all fields in the requested time range."""
        return self._attrs.get("eventCount", None)

    @event_count.setter
    def event_count(self, event_count: "int"):
        """Set the total event count for the time range."""
        self._attrs["eventCount"] = event_count

    @property
    def fields(self) -> "Dict[str, SingleFieldSummary]":
        """Map of field name to its summary for the time range specified."""
        return self._attrs.get("fields", None)

    @fields.setter
    def fields(self, fields: "Dict[str, SingleFieldSummary]"):
        """Set the map of field summaries."""
        self._attrs["fields"] = fields

    @property
    def latest_time(self) -> "str":
        """Latest timestamp (UTC) of the events to process, if specified."""
        return self._attrs.get("latestTime", None)

    @latest_time.setter
    def latest_time(self, latest_time: "str"):
        """Set the latest timestamp (UTC) of the events to process."""
        self._attrs["latestTime"] = latest_time

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None-valued entries."""
        return {key: val for (key, val) in self._attrs.items() if val is not None}
class IndexDataset(Dataset):
    """Dataset of kind "index": a Splunk event index with usage metadata."""

    @staticmethod
    def _from_dict(model: dict) -> "IndexDataset":
        """Wrap an attribute dict in an IndexDataset without re-validating it."""
        instance = IndexDataset.__new__(IndexDataset)
        instance._attrs = model
        return instance

    def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, description: "str" = None, disabled: "bool" = None, earliest_event_time: "str" = None, earliest_ingest_time: "str" = None, frozen_time_period_in_secs: "int" = None, latest_event_time: "str" = None, latest_ingest_time: "str" = None, latest_metadata_update_time: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, total_event_count: "int" = None, total_size: "int" = None, **extra):
        """Create an IndexDataset.

        None-valued arguments are omitted; 'kind' is always forced to
        "index". Extra keyword arguments are merged in verbatim.
        """
        self._attrs = dict()
        # Attributes stored ahead of the fixed "kind" marker, preserving
        # the original key-insertion order.
        for key, val in (
            ("created", created),
            ("createdby", createdby),
            ("id", id),
            ("modified", modified),
            ("modifiedby", modifiedby),
            ("name", name),
            ("owner", owner),
            ("resourcename", resourcename),
            ("appclientidcreatedby", appclientidcreatedby),
            ("appclientidmodifiedby", appclientidmodifiedby),
            ("description", description),
            ("disabled", disabled),
            ("earliestEventTime", earliest_event_time),
            ("earliestIngestTime", earliest_ingest_time),
            ("frozenTimePeriodInSecs", frozen_time_period_in_secs),
        ):
            if val is not None:
                self._attrs[key] = val
        # The kind is fixed for this dataset type.
        self._attrs["kind"] = "index"
        for key, val in (
            ("latestEventTime", latest_event_time),
            ("latestIngestTime", latest_ingest_time),
            ("latestMetadataUpdateTime", latest_metadata_update_time),
            ("namespace", namespace),
            ("summary", summary),
            ("title", title),
            ("totalEventCount", total_event_count),
            ("totalSize", total_size),
        ):
            if val is not None:
                self._attrs[key] = val
        self._attrs.update(extra)

    @property
    def created(self) -> "str":
        """The date and time the object was created."""
        return self._attrs.get("created", None)

    @created.setter
    def created(self, created: "str"):
        """Set created; required, must not be None."""
        if created is None:
            raise ValueError("Invalid value for `created`, must not be `None`")
        self._attrs["created"] = created

    @property
    def createdby(self) -> "str":
        """The user who created the object; taken from the bearer token."""
        return self._attrs.get("createdby", None)

    @createdby.setter
    def createdby(self, createdby: "str"):
        """Set createdby; required, must not be None."""
        if createdby is None:
            raise ValueError("Invalid value for `createdby`, must not be `None`")
        self._attrs["createdby"] = createdby

    @property
    def id(self) -> "str":
        """A unique dataset ID."""
        return self._attrs.get("id", None)

    @id.setter
    def id(self, id: "str"):
        """Set the id; required, must not be None."""
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._attrs["id"] = id

    @property
    def modified(self) -> "str":
        """The date and time the object was modified."""
        return self._attrs.get("modified", None)

    @modified.setter
    def modified(self, modified: "str"):
        """Set modified; required, must not be None."""
        if modified is None:
            raise ValueError("Invalid value for `modified`, must not be `None`")
        self._attrs["modified"] = modified

    @property
    def modifiedby(self) -> "str":
        """The user who most recently modified the object."""
        return self._attrs.get("modifiedby", None)

    @modifiedby.setter
    def modifiedby(self, modifiedby: "str"):
        """Set modifiedby; required, must not be None."""
        if modifiedby is None:
            raise ValueError("Invalid value for `modifiedby`, must not be `None`")
        self._attrs["modifiedby"] = modifiedby

    @property
    def name(self) -> "str":
        """The dataset name; unique within each module."""
        return self._attrs.get("name", None)

    @name.setter
    def name(self, name: "str"):
        """Set the name; required, must not be None."""
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The name of the object's owner."""
        return self._attrs.get("owner", None)

    @owner.setter
    def owner(self, owner: "str"):
        """Set the owner; required, must not be None."""
        if owner is None:
            raise ValueError("Invalid value for `owner`, must not be `None`")
        self._attrs["owner"] = owner

    @property
    def resourcename(self) -> "str":
        """The dataset name qualified by the module name."""
        return self._attrs.get("resourcename", None)

    @resourcename.setter
    def resourcename(self, resourcename: "str"):
        """Set the resourcename; required, must not be None."""
        if resourcename is None:
            raise ValueError("Invalid value for `resourcename`, must not be `None`")
        self._attrs["resourcename"] = resourcename

    @property
    def appclientidcreatedby(self) -> "str":
        """AppClientId of the app that created the dataset."""
        return self._attrs.get("appclientidcreatedby", None)

    @appclientidcreatedby.setter
    def appclientidcreatedby(self, appclientidcreatedby: "str"):
        """Set the AppClientId of the app that created the dataset."""
        self._attrs["appclientidcreatedby"] = appclientidcreatedby

    @property
    def appclientidmodifiedby(self) -> "str":
        """AppClientId of the app that modified the dataset."""
        return self._attrs.get("appclientidmodifiedby", None)

    @appclientidmodifiedby.setter
    def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
        """Set the AppClientId of the app that modified the dataset."""
        self._attrs["appclientidmodifiedby"] = appclientidmodifiedby

    @property
    def description(self) -> "str":
        """Detailed description of the dataset."""
        return self._attrs.get("description", None)

    @description.setter
    def description(self, description: "str"):
        """Set the detailed description of the dataset."""
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """Whether the Splunk index is disabled."""
        return self._attrs.get("disabled", None)

    @disabled.setter
    def disabled(self, disabled: "bool"):
        """Set whether the Splunk index is disabled."""
        self._attrs["disabled"] = disabled

    @property
    def earliest_event_time(self) -> "str":
        """Timestamp (UNIX seconds) of the earliest event."""
        return self._attrs.get("earliestEventTime", None)

    @earliest_event_time.setter
    def earliest_event_time(self, earliest_event_time: "str"):
        """Set the timestamp (UNIX seconds) of the earliest event."""
        self._attrs["earliestEventTime"] = earliest_event_time

    @property
    def earliest_ingest_time(self) -> "str":
        """The earliest index time for any of the events in this index."""
        return self._attrs.get("earliestIngestTime", None)

    @earliest_ingest_time.setter
    def earliest_ingest_time(self, earliest_ingest_time: "str"):
        """Set the earliest index time for any of the events in this index."""
        self._attrs["earliestIngestTime"] = earliest_ingest_time

    @property
    def frozen_time_period_in_secs(self) -> "int":
        """The frozenTimePeriodInSecs to use for the index."""
        return self._attrs.get("frozenTimePeriodInSecs", None)

    @frozen_time_period_in_secs.setter
    def frozen_time_period_in_secs(self, frozen_time_period_in_secs: "int"):
        """Set the frozenTimePeriodInSecs to use for the index."""
        self._attrs["frozenTimePeriodInSecs"] = frozen_time_period_in_secs

    @property
    def kind(self) -> str:
        """Always the literal dataset kind "index"."""
        return "index"

    @property
    def latest_event_time(self) -> "str":
        """Timestamp (UNIX seconds) of the latest event."""
        return self._attrs.get("latestEventTime", None)

    @latest_event_time.setter
    def latest_event_time(self, latest_event_time: "str"):
        """Set the timestamp (UNIX seconds) of the latest event."""
        self._attrs["latestEventTime"] = latest_event_time

    @property
    def latest_ingest_time(self) -> "str":
        """The latest index time for any of the events in this index."""
        return self._attrs.get("latestIngestTime", None)

    @latest_ingest_time.setter
    def latest_ingest_time(self, latest_ingest_time: "str"):
        """Set the latest index time for any of the events in this index."""
        self._attrs["latestIngestTime"] = latest_ingest_time

    @property
    def latest_metadata_update_time(self) -> "str":
        """The latest time that the index metadata was refreshed."""
        return self._attrs.get("latestMetadataUpdateTime", None)

    @latest_metadata_update_time.setter
    def latest_metadata_update_time(self, latest_metadata_update_time: "str"):
        """Set the latest time that the index metadata was refreshed."""
        self._attrs["latestMetadataUpdateTime"] = latest_metadata_update_time

    @property
    def namespace(self) -> "str":
        """The name of the namespace that contains the dataset."""
        return self._attrs.get("namespace", None)

    @namespace.setter
    def namespace(self, namespace: "str"):
        """Set the namespace that contains the dataset."""
        self._attrs["namespace"] = namespace

    @property
    def summary(self) -> "str":
        """Summary of the dataset's purpose."""
        return self._attrs.get("summary", None)

    @summary.setter
    def summary(self, summary: "str"):
        """Set the summary of the dataset's purpose."""
        self._attrs["summary"] = summary

    @property
    def title(self) -> "str":
        """The title of the dataset; not required to be unique."""
        return self._attrs.get("title", None)

    @title.setter
    def title(self, title: "str"):
        """Set the title of the dataset."""
        self._attrs["title"] = title

    @property
    def total_event_count(self) -> "int":
        """The number of events in the index."""
        return self._attrs.get("totalEventCount", None)

    @total_event_count.setter
    def total_event_count(self, total_event_count: "int"):
        """Set the number of events in the index."""
        self._attrs["totalEventCount"] = total_event_count

    @property
    def total_size(self) -> "int":
        """Raw size, in bytes, of the uncompressed data in the indexers."""
        return self._attrs.get("totalSize", None)

    @total_size.setter
    def total_size(self, total_size: "int"):
        """Set the raw size, in bytes, of the uncompressed data."""
        self._attrs["totalSize"] = total_size

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None-valued entries."""
        return {key: val for (key, val) in self._attrs.items() if val is not None}
# Register IndexDataset so Dataset dispatches payloads of kind "index" to it.
Dataset.from_dict_handlers["index"] = IndexDataset._from_dict
class IndexDatasetKind(str, Enum):
    """Enumeration of the dataset kinds valid for an index dataset."""

    INDEX = "index"

    @staticmethod
    def from_value(value: str):
        """Map a raw kind string onto the matching member; None when unknown."""
        return IndexDatasetKind.INDEX if value == "index" else None
class IndexDatasetPATCH(DatasetPATCH):
    """Partial-update (PATCH) payload for an index dataset."""

    @staticmethod
    def _from_dict(model: dict) -> "IndexDatasetPATCH":
        """Wrap an attribute dict in an IndexDatasetPATCH without re-validating it."""
        instance = IndexDatasetPATCH.__new__(IndexDatasetPATCH)
        instance._attrs = model
        return instance

    def __init__(self, disabled: "bool" = None, frozen_time_period_in_secs: "int" = None, kind: "IndexDatasetKind" = None, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """Create an IndexDatasetPATCH; None-valued arguments are omitted."""
        self._attrs = dict()
        for key, val in (
            ("disabled", disabled),
            ("frozenTimePeriodInSecs", frozen_time_period_in_secs),
            ("kind", kind),
            ("module", module),
            ("name", name),
            ("owner", owner),
        ):
            if val is not None:
                self._attrs[key] = val
        self._attrs.update(extra)

    @property
    def disabled(self) -> "bool":
        """Whether the Splunk index is disabled."""
        return self._attrs.get("disabled", None)

    @disabled.setter
    def disabled(self, disabled: "bool"):
        """Set whether the Splunk index is disabled."""
        self._attrs["disabled"] = disabled

    @property
    def frozen_time_period_in_secs(self) -> "int":
        """The frozenTimePeriodInSecs to use for the index."""
        return self._attrs.get("frozenTimePeriodInSecs", None)

    @frozen_time_period_in_secs.setter
    def frozen_time_period_in_secs(self, frozen_time_period_in_secs: "int"):
        """Set the frozenTimePeriodInSecs to use for the index."""
        self._attrs["frozenTimePeriodInSecs"] = frozen_time_period_in_secs

    @property
    def kind(self) -> "IndexDatasetKind":
        """The dataset kind, decoded to IndexDatasetKind (None when unknown)."""
        return IndexDatasetKind.from_value(self._attrs.get("kind"))

    @kind.setter
    def kind(self, kind: "IndexDatasetKind"):
        """Set the dataset kind; raw strings are stored as given."""
        self._attrs["kind"] = kind.value if isinstance(kind, Enum) else kind

    @property
    def module(self) -> "str":
        """The name of the module to reassign the dataset into."""
        return self._attrs.get("module", None)

    @module.setter
    def module(self, module: "str"):
        """Set the module to reassign the dataset into."""
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """The dataset name; unique within each module."""
        return self._attrs.get("name", None)

    @name.setter
    def name(self, name: "str"):
        """Set the dataset name."""
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The dataset owner; taken from the bearer token."""
        return self._attrs.get("owner", None)

    @owner.setter
    def owner(self, owner: "str"):
        """Set the dataset owner."""
        self._attrs["owner"] = owner

    def to_dict(self):
        """Return the attributes as a plain dict, dropping None-valued entries."""
        return {key: val for (key, val) in self._attrs.items() if val is not None}
class KVCollectionDataset(Dataset):
@staticmethod
def _from_dict(model: dict) -> "KVCollectionDataset":
instance = KVCollectionDataset.__new__(KVCollectionDataset)
instance._attrs = model
return instance
def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, description: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, **extra):
"""KVCollectionDataset"""
self._attrs = dict()
if created is not None:
self._attrs["created"] = created
if createdby is not None:
self._attrs["createdby"] = createdby
if id is not None:
self._attrs["id"] = id
if modified is not None:
self._attrs["modified"] = modified
if modifiedby is not None:
self._attrs["modifiedby"] = modifiedby
if name is not None:
self._attrs["name"] = name
if owner is not None:
self._attrs["owner"] = owner
if resourcename is not None:
self._attrs["resourcename"] = resourcename
if appclientidcreatedby is not None:
self._attrs["appclientidcreatedby"] = appclientidcreatedby
if appclientidmodifiedby is not None:
self._attrs["appclientidmodifiedby"] = appclientidmodifiedby
if description is not None:
self._attrs["description"] = description
self._attrs["kind"] = "kvcollection"
if namespace is not None:
self._attrs["namespace"] = namespace
if summary is not None:
self._attrs["summary"] = summary
if title is not None:
self._attrs["title"] = title
for k, v in extra.items():
self._attrs[k] = v
@property
def created(self) -> "str":
""" Gets the created of this KVCollectionDataset.
The date and time object was created.
"""
return self._attrs.get("created")
@created.setter
def created(self, created: "str"):
"""Sets the created of this KVCollectionDataset.
The date and time object was created.
:param created: The created of this KVCollectionDataset.
:type: str
"""
if created is None:
raise ValueError("Invalid value for `created`, must not be `None`")
self._attrs["created"] = created
@property
def createdby(self) -> "str":
""" Gets the createdby of this KVCollectionDataset.
The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
"""
return self._attrs.get("createdby")
@createdby.setter
def createdby(self, createdby: "str"):
"""Sets the createdby of this KVCollectionDataset.
The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
:param createdby: The createdby of this KVCollectionDataset.
:type: str
"""
if createdby is None:
raise ValueError("Invalid value for `createdby`, must not be `None`")
self._attrs["createdby"] = createdby
@property
def id(self) -> "str":
""" Gets the id of this KVCollectionDataset.
A unique dataset ID.
"""
return self._attrs.get("id")
@id.setter
def id(self, id: "str"):
"""Sets the id of this KVCollectionDataset.
A unique dataset ID.
:param id: The id of this KVCollectionDataset.
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._attrs["id"] = id
@property
def modified(self) -> "str":
""" Gets the modified of this KVCollectionDataset.
The date and time object was modified.
"""
return self._attrs.get("modified")
@modified.setter
def modified(self, modified: "str"):
"""Sets the modified of this KVCollectionDataset.
The date and time object was modified.
:param modified: The modified of this KVCollectionDataset.
:type: str
"""
if modified is None:
raise ValueError("Invalid value for `modified`, must not be `None`")
self._attrs["modified"] = modified
@property
def modifiedby(self) -> "str":
""" Gets the modifiedby of this KVCollectionDataset.
The name of the user who most recently modified the object.
"""
return self._attrs.get("modifiedby")
@modifiedby.setter
def modifiedby(self, modifiedby: "str"):
"""Sets the modifiedby of this KVCollectionDataset.
The name of the user who most recently modified the object.
:param modifiedby: The modifiedby of this KVCollectionDataset.
:type: str
"""
if modifiedby is None:
raise ValueError("Invalid value for `modifiedby`, must not be `None`")
self._attrs["modifiedby"] = modifiedby
@property
def name(self) -> "str":
""" Gets the name of this KVCollectionDataset.
The dataset name. Dataset names must be unique within each module.
"""
return self._attrs.get("name")
@name.setter
def name(self, name: "str"):
"""Sets the name of this KVCollectionDataset.
The dataset name. Dataset names must be unique within each module.
:param name: The name of this KVCollectionDataset.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._attrs["name"] = name
@property
def owner(self) -> "str":
""" Gets the owner of this KVCollectionDataset.
The name of the object's owner.
"""
return self._attrs.get("owner")
@owner.setter
def owner(self, owner: "str"):
"""Sets the owner of this KVCollectionDataset.
The name of the object's owner.
:param owner: The owner of this KVCollectionDataset.
:type: str
"""
if owner is None:
raise ValueError("Invalid value for `owner`, must not be `None`")
self._attrs["owner"] = owner
@property
def resourcename(self) -> "str":
""" Gets the resourcename of this KVCollectionDataset.
The dataset name qualified by the module name.
"""
return self._attrs.get("resourcename")
@resourcename.setter
def resourcename(self, resourcename: "str"):
"""Sets the resourcename of this KVCollectionDataset.
The dataset name qualified by the module name.
:param resourcename: The resourcename of this KVCollectionDataset.
:type: str
"""
if resourcename is None:
raise ValueError("Invalid value for `resourcename`, must not be `None`")
self._attrs["resourcename"] = resourcename
@property
def appclientidcreatedby(self) -> "str":
""" Gets the appclientidcreatedby of this KVCollectionDataset.
AppClinetId of the creator app of the dataset.
"""
return self._attrs.get("appclientidcreatedby")
@appclientidcreatedby.setter
def appclientidcreatedby(self, appclientidcreatedby: "str"):
"""Sets the appclientidcreatedby of this KVCollectionDataset.
AppClinetId of the creator app of the dataset.
:param appclientidcreatedby: The appclientidcreatedby of this KVCollectionDataset.
:type: str
"""
self._attrs["appclientidcreatedby"] = appclientidcreatedby
@property
def appclientidmodifiedby(self) -> "str":
""" Gets the appclientidmodifiedby of this KVCollectionDataset.
AppClinetId of the modifier app of the dataset.
"""
return self._attrs.get("appclientidmodifiedby")
@appclientidmodifiedby.setter
def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
"""Sets the appclientidmodifiedby of this KVCollectionDataset.
AppClinetId of the modifier app of the dataset.
:param appclientidmodifiedby: The appclientidmodifiedby of this KVCollectionDataset.
:type: str
"""
self._attrs["appclientidmodifiedby"] = appclientidmodifiedby
@property
def description(self) -> "str":
""" Gets the description of this KVCollectionDataset.
Detailed description of the dataset.
"""
return self._attrs.get("description")
@description.setter
def description(self, description: "str"):
"""Sets the description of this KVCollectionDataset.
Detailed description of the dataset.
:param description: The description of this KVCollectionDataset.
:type: str
"""
self._attrs["description"] = description
    @property
    def kind(self) -> str:
        """The dataset kind discriminator; always "kvcollection" for this class."""
        return "kvcollection"
@property
def namespace(self) -> "str":
""" Gets the namespace of this KVCollectionDataset.
The name of the namespace that contains the dataset.
"""
return self._attrs.get("namespace")
@namespace.setter
def namespace(self, namespace: "str"):
"""Sets the namespace of this KVCollectionDataset.
The name of the namespace that contains the dataset.
:param namespace: The namespace of this KVCollectionDataset.
:type: str
"""
self._attrs["namespace"] = namespace
@property
def summary(self) -> "str":
""" Gets the summary of this KVCollectionDataset.
Summary of the dataset's purpose.
"""
return self._attrs.get("summary")
@summary.setter
def summary(self, summary: "str"):
"""Sets the summary of this KVCollectionDataset.
Summary of the dataset's purpose.
:param summary: The summary of this KVCollectionDataset.
:type: str
"""
self._attrs["summary"] = summary
@property
def title(self) -> "str":
""" Gets the title of this KVCollectionDataset.
The title of the dataset. Does not have to be unique.
"""
return self._attrs.get("title")
@title.setter
def title(self, title: "str"):
"""Sets the title of this KVCollectionDataset.
The title of the dataset. Does not have to be unique.
:param title: The title of this KVCollectionDataset.
:type: str
"""
self._attrs["title"] = title
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
# Register the "kvcollection" kind so Dataset._from_dict dispatches such payloads to this class.
Dataset.from_dict_handlers["kvcollection"] = KVCollectionDataset._from_dict
class KVCollectionDatasetKind(str, Enum):
    """The set of `kind` values valid for a KV collection dataset."""

    KVCOLLECTION = "kvcollection"

    @staticmethod
    def from_value(value: str):
        """Return the enum member whose value equals `value`, or None if no member matches."""
        for member in KVCollectionDatasetKind:
            if member.value == value:
                return member
        return None
class KVCollectionDatasetPATCH(DatasetPATCH):
    """Patch payload for updating a KV collection dataset."""

    @staticmethod
    def _from_dict(model: dict) -> "KVCollectionDatasetPATCH":
        """Wrap a raw attribute dict in a KVCollectionDatasetPATCH without running __init__."""
        obj = KVCollectionDatasetPATCH.__new__(KVCollectionDatasetPATCH)
        obj._attrs = model
        return obj

    def __init__(self, kind: "KVCollectionDatasetKind" = None, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """KVCollectionDatasetPATCH

        :param kind: The dataset kind.
        :param module: The name of module to reassign dataset into.
        :param name: The dataset name. Dataset names must be unique within each module.
        :param owner: The name of the dataset owner.
        """
        self._attrs = dict()
        # Only non-None values are recorded; extras pass straight through.
        for key, value in (("kind", kind), ("module", module), ("name", name), ("owner", owner)):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def kind(self) -> "KVCollectionDatasetKind":
        """The dataset kind of this patch, as an enum member (None if unknown/absent)."""
        return KVCollectionDatasetKind.from_value(self._attrs.get("kind"))

    @kind.setter
    def kind(self, kind: "KVCollectionDatasetKind"):
        """Set the dataset kind; enum members are stored by their string value.

        :param kind: The kind of this KVCollectionDatasetPATCH.
        :type: KVCollectionDatasetKind
        """
        # If you supply a string, we presume you know the service will take it.
        self._attrs["kind"] = kind.value if isinstance(kind, Enum) else kind

    @property
    def module(self) -> "str":
        """The name of module to reassign dataset into."""
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        """Set the module to reassign the dataset into.

        :param module: The module of this KVCollectionDatasetPATCH.
        :type: str
        """
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """The dataset name. Dataset names must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the dataset name.

        :param name: The name of this KVCollectionDatasetPATCH.
        :type: str
        """
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The name of the dataset owner. This value is obtained from the bearer token."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        """Set the dataset owner.

        :param owner: The owner of this KVCollectionDatasetPATCH.
        :type: str
        """
        self._attrs["owner"] = owner

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class ListDatasets(SSCModel):
    """Response model holding a list of datasets."""

    @staticmethod
    def _from_dict(model: dict) -> "ListDatasets":
        """Wrap a raw attribute dict in a ListDatasets without running __init__."""
        instance = ListDatasets.__new__(ListDatasets)
        instance._attrs = model
        return instance

    def __init__(self, results: "List[Dataset]" = None, **extra):
        """ListDatasets

        :param results: List of all datasets.
        """
        self._attrs = dict()
        if results is not None:
            self._attrs["results"] = results
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def results(self) -> "List[Dataset]":
        """List of all datasets, or None when the response carried no `results` field.

        The None guard avoids the TypeError the generated getter raised when
        iterating a missing key.
        """
        results = self._attrs.get("results")
        if results is None:
            return None
        return [Dataset._from_dict(i) for i in results]

    @results.setter
    def results(self, results: "List[Dataset]"):
        """Sets the results of this ListDatasets.

        :param results: The results of this ListDatasets.
        :type: List[Dataset]
        """
        self._attrs["results"] = results

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class Module(SSCModel):
    """A catalog module record: a named definition plus creation metadata."""

    @staticmethod
    def _from_dict(model: dict) -> "Module":
        """Wrap a raw attribute dict in a Module without running __init__."""
        obj = Module.__new__(Module)
        obj._attrs = model
        return obj

    def __init__(self, definition: "str", name: "str", created_at: "str" = None, created_by: "str" = None, namespace: "str" = None, **extra):
        """Module

        :param definition: The definition of the module.
        :param name: The name of the module.
        :param created_at: The timestamp when the module was created.
        :param created_by: The user who created the module.
        :param namespace: The namespace of the module.
        """
        self._attrs = dict()
        # Only non-None values are recorded; extras pass straight through.
        for key, value in (
            ("definition", definition),
            ("name", name),
            ("createdAt", created_at),
            ("createdBy", created_by),
            ("namespace", namespace),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def definition(self) -> "str":
        """The definition of the module."""
        return self._attrs.get("definition")

    @definition.setter
    def definition(self, definition: "str"):
        """Set the module definition; a value is required.

        :param definition: The definition of this Module.
        :type: str
        """
        if definition is None:
            raise ValueError("Invalid value for `definition`, must not be `None`")
        self._attrs["definition"] = definition

    @property
    def name(self) -> "str":
        """The name of the module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the module name; a value is required.

        :param name: The name of this Module.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def created_at(self) -> "str":
        """The timestamp when the module was created."""
        return self._attrs.get("createdAt")

    @created_at.setter
    def created_at(self, created_at: "str"):
        """Set the module creation timestamp.

        :param created_at: The created_at of this Module.
        :type: str
        """
        self._attrs["createdAt"] = created_at

    @property
    def created_by(self) -> "str":
        """The user who created the module."""
        return self._attrs.get("createdBy")

    @created_by.setter
    def created_by(self, created_by: "str"):
        """Set the user who created the module.

        :param created_by: The created_by of this Module.
        :type: str
        """
        self._attrs["createdBy"] = created_by

    @property
    def namespace(self) -> "str":
        """The namespace of the module."""
        return self._attrs.get("namespace")

    @namespace.setter
    def namespace(self, namespace: "str"):
        """Set the namespace of the module.

        :param namespace: The namespace of this Module.
        :type: str
        """
        self._attrs["namespace"] = namespace

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class ListModules(SSCModel):
    """Response model holding a list of catalog modules."""

    @staticmethod
    def _from_dict(model: dict) -> "ListModules":
        """Wrap a raw attribute dict in a ListModules without running __init__."""
        instance = ListModules.__new__(ListModules)
        instance._attrs = model
        return instance

    def __init__(self, results: "List[Module]" = None, **extra):
        """ListModules

        :param results: List of all modules.
        """
        self._attrs = dict()
        if results is not None:
            self._attrs["results"] = results
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def results(self) -> "List[Module]":
        """List of all modules, or None when the response carried no `results` field.

        The None guard avoids the TypeError the generated getter raised when
        iterating a missing key.
        """
        results = self._attrs.get("results")
        if results is None:
            return None
        return [Module._from_dict(i) for i in results]

    @results.setter
    def results(self, results: "List[Module]"):
        """Sets the results of this ListModules.

        :param results: The results of this ListModules.
        :type: List[Module]
        """
        self._attrs["results"] = results

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class ListPreviewResultsResponseFields(SSCModel):
    """Metadata describing a single field entry in a preview/search results payload."""

    @staticmethod
    def _from_dict(model: dict) -> "ListPreviewResultsResponseFields":
        """Wrap a raw attribute dict without running __init__."""
        obj = ListPreviewResultsResponseFields.__new__(ListPreviewResultsResponseFields)
        obj._attrs = model
        return obj

    def __init__(self, name: "str", data_source: "str" = None, groupby_rank: "str" = None, split_field: "str" = None, split_value: "str" = None, splitby_special: "str" = None, type_special: "str" = None, **extra):
        """ListPreviewResultsResponseFields

        :param name: The field name (required).
        :param data_source: The dataSource value for the field.
        :param groupby_rank: The groupbyRank value for the field.
        :param split_field: The splitField value for the field.
        :param split_value: The splitValue value for the field.
        :param splitby_special: The splitbySpecial value for the field.
        :param type_special: The typeSpecial value for the field.
        """
        self._attrs = dict()
        # Only non-None values are recorded under their camelCase wire keys.
        for key, value in (
            ("name", name),
            ("dataSource", data_source),
            ("groupbyRank", groupby_rank),
            ("splitField", split_field),
            ("splitValue", split_value),
            ("splitbySpecial", splitby_special),
            ("typeSpecial", type_special),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def name(self) -> "str":
        """The field name."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the field name; a value is required.

        :param name: The name of this ListPreviewResultsResponseFields.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def data_source(self) -> "str":
        """The dataSource value for the field."""
        return self._attrs.get("dataSource")

    @data_source.setter
    def data_source(self, data_source: "str"):
        """Set the dataSource value.

        :param data_source: The data_source of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["dataSource"] = data_source

    @property
    def groupby_rank(self) -> "str":
        """The groupbyRank value for the field."""
        return self._attrs.get("groupbyRank")

    @groupby_rank.setter
    def groupby_rank(self, groupby_rank: "str"):
        """Set the groupbyRank value.

        :param groupby_rank: The groupby_rank of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["groupbyRank"] = groupby_rank

    @property
    def split_field(self) -> "str":
        """The splitField value for the field."""
        return self._attrs.get("splitField")

    @split_field.setter
    def split_field(self, split_field: "str"):
        """Set the splitField value.

        :param split_field: The split_field of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["splitField"] = split_field

    @property
    def split_value(self) -> "str":
        """The splitValue value for the field."""
        return self._attrs.get("splitValue")

    @split_value.setter
    def split_value(self, split_value: "str"):
        """Set the splitValue value.

        :param split_value: The split_value of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["splitValue"] = split_value

    @property
    def splitby_special(self) -> "str":
        """The splitbySpecial value for the field."""
        return self._attrs.get("splitbySpecial")

    @splitby_special.setter
    def splitby_special(self, splitby_special: "str"):
        """Set the splitbySpecial value.

        :param splitby_special: The splitby_special of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["splitbySpecial"] = splitby_special

    @property
    def type_special(self) -> "str":
        """The typeSpecial value for the field."""
        return self._attrs.get("typeSpecial")

    @type_special.setter
    def type_special(self, type_special: "str"):
        """Set the typeSpecial value.

        :param type_special: The type_special of this ListPreviewResultsResponseFields.
        :type: str
        """
        self._attrs["typeSpecial"] = type_special

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class ListPreviewResultsResponse(SSCModel):
    """Response model for preview results of a still-running search job."""

    @staticmethod
    def _from_dict(model: dict) -> "ListPreviewResultsResponse":
        """Wrap a raw attribute dict in a ListPreviewResultsResponse without running __init__."""
        instance = ListPreviewResultsResponse.__new__(ListPreviewResultsResponse)
        instance._attrs = model
        return instance

    def __init__(self, is_preview_stable: "bool", results: "List[object]", fields: "List[ListPreviewResultsResponseFields]" = None, messages: "List[Message]" = None, next_link: "str" = None, wait: "str" = None, **extra):
        """ListPreviewResultsResponse

        :param is_preview_stable: Whether the preview results are stable (required).
        :param results: The preview result rows (required).
        :param fields: Field metadata for the results.
        :param messages: Messages attached to the response.
        :param next_link: The nextLink value of the response.
        :param wait: The wait value of the response.
        """
        self._attrs = dict()
        if is_preview_stable is not None:
            self._attrs["isPreviewStable"] = is_preview_stable
        if results is not None:
            self._attrs["results"] = results
        if fields is not None:
            self._attrs["fields"] = fields
        if messages is not None:
            self._attrs["messages"] = messages
        if next_link is not None:
            self._attrs["nextLink"] = next_link
        if wait is not None:
            self._attrs["wait"] = wait
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def is_preview_stable(self) -> "bool":
        """Whether the preview results are stable."""
        return self._attrs.get("isPreviewStable")

    @is_preview_stable.setter
    def is_preview_stable(self, is_preview_stable: "bool"):
        """Sets the is_preview_stable of this ListPreviewResultsResponse.

        :param is_preview_stable: The is_preview_stable of this ListPreviewResultsResponse.
        :type: bool
        """
        if is_preview_stable is None:
            raise ValueError("Invalid value for `is_preview_stable`, must not be `None`")
        self._attrs["isPreviewStable"] = is_preview_stable

    @property
    def results(self) -> "List[object]":
        """The preview result rows."""
        return self._attrs.get("results")

    @results.setter
    def results(self, results: "List[object]"):
        """Sets the results of this ListPreviewResultsResponse.

        :param results: The results of this ListPreviewResultsResponse.
        :type: List[object]
        """
        if results is None:
            raise ValueError("Invalid value for `results`, must not be `None`")
        self._attrs["results"] = results

    @property
    def fields(self) -> "List[ListPreviewResultsResponseFields]":
        """Field metadata, or None when the response carried no `fields` key.

        The None guard avoids the TypeError the generated getter raised when
        iterating a missing optional key.
        """
        fields = self._attrs.get("fields")
        if fields is None:
            return None
        return [ListPreviewResultsResponseFields._from_dict(i) for i in fields]

    @fields.setter
    def fields(self, fields: "List[ListPreviewResultsResponseFields]"):
        """Sets the fields of this ListPreviewResultsResponse.

        :param fields: The fields of this ListPreviewResultsResponse.
        :type: List[ListPreviewResultsResponseFields]
        """
        self._attrs["fields"] = fields

    @property
    def messages(self) -> "List[Message]":
        """Messages attached to the response, or None when absent.

        Same None guard as `fields`: the key is optional in the payload.
        """
        messages = self._attrs.get("messages")
        if messages is None:
            return None
        return [Message._from_dict(i) for i in messages]

    @messages.setter
    def messages(self, messages: "List[Message]"):
        """Sets the messages of this ListPreviewResultsResponse.

        :param messages: The messages of this ListPreviewResultsResponse.
        :type: List[Message]
        """
        self._attrs["messages"] = messages

    @property
    def next_link(self) -> "str":
        """The nextLink value of the response."""
        return self._attrs.get("nextLink")

    @next_link.setter
    def next_link(self, next_link: "str"):
        """Sets the next_link of this ListPreviewResultsResponse.

        :param next_link: The next_link of this ListPreviewResultsResponse.
        :type: str
        """
        self._attrs["nextLink"] = next_link

    @property
    def wait(self) -> "str":
        """The wait value of the response."""
        return self._attrs.get("wait")

    @wait.setter
    def wait(self, wait: "str"):
        """Sets the wait of this ListPreviewResultsResponse.

        :param wait: The wait of this ListPreviewResultsResponse.
        :type: str
        """
        self._attrs["wait"] = wait

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class ListSearchResultsResponse(SSCModel):
    """Response model for the final results of a search job."""

    @staticmethod
    def _from_dict(model: dict) -> "ListSearchResultsResponse":
        """Wrap a raw attribute dict in a ListSearchResultsResponse without running __init__."""
        instance = ListSearchResultsResponse.__new__(ListSearchResultsResponse)
        instance._attrs = model
        return instance

    def __init__(self, results: "List[object]", fields: "List[ListPreviewResultsResponseFields]" = None, messages: "List[Message]" = None, next_link: "str" = None, wait: "str" = None, **extra):
        """ListSearchResultsResponse

        :param results: The search result rows (required).
        :param fields: Field metadata for the results.
        :param messages: Messages attached to the response.
        :param next_link: The nextLink value of the response.
        :param wait: The wait value of the response.
        """
        self._attrs = dict()
        if results is not None:
            self._attrs["results"] = results
        if fields is not None:
            self._attrs["fields"] = fields
        if messages is not None:
            self._attrs["messages"] = messages
        if next_link is not None:
            self._attrs["nextLink"] = next_link
        if wait is not None:
            self._attrs["wait"] = wait
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def results(self) -> "List[object]":
        """The search result rows."""
        return self._attrs.get("results")

    @results.setter
    def results(self, results: "List[object]"):
        """Sets the results of this ListSearchResultsResponse.

        :param results: The results of this ListSearchResultsResponse.
        :type: List[object]
        """
        if results is None:
            raise ValueError("Invalid value for `results`, must not be `None`")
        self._attrs["results"] = results

    @property
    def fields(self) -> "List[ListPreviewResultsResponseFields]":
        """Field metadata, or None when the response carried no `fields` key.

        The None guard avoids the TypeError the generated getter raised when
        iterating a missing optional key.
        """
        fields = self._attrs.get("fields")
        if fields is None:
            return None
        return [ListPreviewResultsResponseFields._from_dict(i) for i in fields]

    @fields.setter
    def fields(self, fields: "List[ListPreviewResultsResponseFields]"):
        """Sets the fields of this ListSearchResultsResponse.

        :param fields: The fields of this ListSearchResultsResponse.
        :type: List[ListPreviewResultsResponseFields]
        """
        self._attrs["fields"] = fields

    @property
    def messages(self) -> "List[Message]":
        """Messages attached to the response, or None when absent.

        Same None guard as `fields`: the key is optional in the payload.
        """
        messages = self._attrs.get("messages")
        if messages is None:
            return None
        return [Message._from_dict(i) for i in messages]

    @messages.setter
    def messages(self, messages: "List[Message]"):
        """Sets the messages of this ListSearchResultsResponse.

        :param messages: The messages of this ListSearchResultsResponse.
        :type: List[Message]
        """
        self._attrs["messages"] = messages

    @property
    def next_link(self) -> "str":
        """The nextLink value of the response."""
        return self._attrs.get("nextLink")

    @next_link.setter
    def next_link(self, next_link: "str"):
        """Sets the next_link of this ListSearchResultsResponse.

        :param next_link: The next_link of this ListSearchResultsResponse.
        :type: str
        """
        self._attrs["nextLink"] = next_link

    @property
    def wait(self) -> "str":
        """The wait value of the response."""
        return self._attrs.get("wait")

    @wait.setter
    def wait(self, wait: "str"):
        """Sets the wait of this ListSearchResultsResponse.

        :param wait: The wait of this ListSearchResultsResponse.
        :type: str
        """
        self._attrs["wait"] = wait

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class LookupDatasetExternalKind(str, Enum):
    """The set of external store kinds that may back a lookup dataset."""

    KVCOLLECTION = "kvcollection"

    @staticmethod
    def from_value(value: str):
        """Return the enum member whose value equals `value`, or None if no member matches."""
        for member in LookupDatasetExternalKind:
            if member.value == value:
                return member
        return None
class LookupDataset(Dataset):
    """A lookup dataset, optionally backed by an external store such as a KV collection."""

    @staticmethod
    def _from_dict(model: dict) -> "LookupDataset":
        """Wrap a raw attribute dict in a LookupDataset without running __init__."""
        obj = LookupDataset.__new__(LookupDataset)
        obj._attrs = model
        return obj

    def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, case_sensitive_match: "bool" = True, description: "str" = None, external_kind: "LookupDatasetExternalKind" = None, external_name: "str" = None, filter: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, **extra):
        """LookupDataset

        Required dataset metadata plus lookup-specific settings. The `kind`
        attribute is always forced to "lookup".
        """
        self._attrs = dict()
        # Insertion order mirrors the generated original (kind is set between
        # filter and namespace) so any serialized output keeps the same key order.
        for key, value in (
            ("created", created),
            ("createdby", createdby),
            ("id", id),
            ("modified", modified),
            ("modifiedby", modifiedby),
            ("name", name),
            ("owner", owner),
            ("resourcename", resourcename),
            ("appclientidcreatedby", appclientidcreatedby),
            ("appclientidmodifiedby", appclientidmodifiedby),
            ("caseSensitiveMatch", case_sensitive_match),
            ("description", description),
            ("externalKind", external_kind),
            ("externalName", external_name),
            ("filter", filter),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs["kind"] = "lookup"
        for key, value in (("namespace", namespace), ("summary", summary), ("title", title)):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def created(self) -> "str":
        """The date and time the object was created."""
        return self._attrs.get("created")

    @created.setter
    def created(self, created: "str"):
        """Set the creation timestamp; a value is required.

        :param created: The created of this LookupDataset.
        :type: str
        """
        if created is None:
            raise ValueError("Invalid value for `created`, must not be `None`")
        self._attrs["created"] = created

    @property
    def createdby(self) -> "str":
        """The name of the user who created the object (taken from the bearer token)."""
        return self._attrs.get("createdby")

    @createdby.setter
    def createdby(self, createdby: "str"):
        """Set the creating user; a value is required.

        :param createdby: The createdby of this LookupDataset.
        :type: str
        """
        if createdby is None:
            raise ValueError("Invalid value for `createdby`, must not be `None`")
        self._attrs["createdby"] = createdby

    @property
    def id(self) -> "str":
        """A unique dataset ID."""
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        """Set the unique dataset ID; a value is required.

        :param id: The id of this LookupDataset.
        :type: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._attrs["id"] = id

    @property
    def modified(self) -> "str":
        """The date and time the object was modified."""
        return self._attrs.get("modified")

    @modified.setter
    def modified(self, modified: "str"):
        """Set the modification timestamp; a value is required.

        :param modified: The modified of this LookupDataset.
        :type: str
        """
        if modified is None:
            raise ValueError("Invalid value for `modified`, must not be `None`")
        self._attrs["modified"] = modified

    @property
    def modifiedby(self) -> "str":
        """The name of the user who most recently modified the object."""
        return self._attrs.get("modifiedby")

    @modifiedby.setter
    def modifiedby(self, modifiedby: "str"):
        """Set the last-modifying user; a value is required.

        :param modifiedby: The modifiedby of this LookupDataset.
        :type: str
        """
        if modifiedby is None:
            raise ValueError("Invalid value for `modifiedby`, must not be `None`")
        self._attrs["modifiedby"] = modifiedby

    @property
    def name(self) -> "str":
        """The dataset name. Dataset names must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the dataset name; a value is required.

        :param name: The name of this LookupDataset.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The name of the object's owner."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        """Set the object's owner; a value is required.

        :param owner: The owner of this LookupDataset.
        :type: str
        """
        if owner is None:
            raise ValueError("Invalid value for `owner`, must not be `None`")
        self._attrs["owner"] = owner

    @property
    def resourcename(self) -> "str":
        """The dataset name qualified by the module name."""
        return self._attrs.get("resourcename")

    @resourcename.setter
    def resourcename(self, resourcename: "str"):
        """Set the module-qualified dataset name; a value is required.

        :param resourcename: The resourcename of this LookupDataset.
        :type: str
        """
        if resourcename is None:
            raise ValueError("Invalid value for `resourcename`, must not be `None`")
        self._attrs["resourcename"] = resourcename

    @property
    def appclientidcreatedby(self) -> "str":
        """AppClientId of the app that created the dataset."""
        return self._attrs.get("appclientidcreatedby")

    @appclientidcreatedby.setter
    def appclientidcreatedby(self, appclientidcreatedby: "str"):
        """Set the AppClientId of the creator app.

        :param appclientidcreatedby: The appclientidcreatedby of this LookupDataset.
        :type: str
        """
        self._attrs["appclientidcreatedby"] = appclientidcreatedby

    @property
    def appclientidmodifiedby(self) -> "str":
        """AppClientId of the app that last modified the dataset."""
        return self._attrs.get("appclientidmodifiedby")

    @appclientidmodifiedby.setter
    def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
        """Set the AppClientId of the modifier app.

        :param appclientidmodifiedby: The appclientidmodifiedby of this LookupDataset.
        :type: str
        """
        self._attrs["appclientidmodifiedby"] = appclientidmodifiedby

    @property
    def case_sensitive_match(self) -> "bool":
        """Whether matching against the lookup is case-sensitive."""
        return self._attrs.get("caseSensitiveMatch")

    @case_sensitive_match.setter
    def case_sensitive_match(self, case_sensitive_match: "bool"):
        """Set whether matching against the lookup is case-sensitive.

        :param case_sensitive_match: The case_sensitive_match of this LookupDataset.
        :type: bool
        """
        self._attrs["caseSensitiveMatch"] = case_sensitive_match

    @property
    def description(self) -> "str":
        """Detailed description of the dataset."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        """Set the detailed description of the dataset.

        :param description: The description of this LookupDataset.
        :type: str
        """
        self._attrs["description"] = description

    @property
    def external_kind(self) -> "LookupDatasetExternalKind":
        """The kind of external store backing this lookup, as an enum member (None if unknown)."""
        return LookupDatasetExternalKind.from_value(self._attrs.get("externalKind"))

    @external_kind.setter
    def external_kind(self, external_kind: "LookupDatasetExternalKind"):
        """Set the external store kind; enum members are stored by their string value.

        :param external_kind: The external_kind of this LookupDataset.
        :type: LookupDatasetExternalKind
        """
        # If you supply a string, we presume you know the service will take it.
        self._attrs["externalKind"] = external_kind.value if isinstance(external_kind, Enum) else external_kind

    @property
    def external_name(self) -> "str":
        """The name of the external lookup."""
        return self._attrs.get("externalName")

    @external_name.setter
    def external_name(self, external_name: "str"):
        """Set the name of the external lookup.

        :param external_name: The external_name of this LookupDataset.
        :type: str
        """
        self._attrs["externalName"] = external_name

    @property
    def filter(self) -> "str":
        """A query that filters results out of the lookup before those results are returned."""
        return self._attrs.get("filter")

    @filter.setter
    def filter(self, filter: "str"):
        """Set the pre-return filter query for the lookup.

        :param filter: The filter of this LookupDataset.
        :type: str
        """
        self._attrs["filter"] = filter

    @property
    def kind(self) -> str:
        """The dataset kind discriminator; always "lookup" for this class."""
        return "lookup"

    @property
    def namespace(self) -> "str":
        """The name of the namespace that contains the dataset."""
        return self._attrs.get("namespace")

    @namespace.setter
    def namespace(self, namespace: "str"):
        """Set the namespace that contains the dataset.

        :param namespace: The namespace of this LookupDataset.
        :type: str
        """
        self._attrs["namespace"] = namespace

    @property
    def summary(self) -> "str":
        """Summary of the dataset's purpose."""
        return self._attrs.get("summary")

    @summary.setter
    def summary(self, summary: "str"):
        """Set the summary of the dataset's purpose.

        :param summary: The summary of this LookupDataset.
        :type: str
        """
        self._attrs["summary"] = summary

    @property
    def title(self) -> "str":
        """The title of the dataset. Does not have to be unique."""
        return self._attrs.get("title")

    @title.setter
    def title(self, title: "str"):
        """Set the dataset title (need not be unique).

        :param title: The title of this LookupDataset.
        :type: str
        """
        self._attrs["title"] = title

    def to_dict(self):
        """Return all attributes with non-None values as a plain dict."""
        return {key: value for key, value in self._attrs.items() if value is not None}
# Register the lookup deserializer in the base Dataset dispatch table so that
# Dataset deserialization routes records with kind == "lookup" to LookupDataset.
Dataset.from_dict_handlers["lookup"] = LookupDataset._from_dict
class LookupDatasetKind(str, Enum):
    """Enumeration of the single valid `kind` value for lookup datasets."""

    LOOKUP = "lookup"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto the enum member; unknown values yield None."""
        return LookupDatasetKind.LOOKUP if value == "lookup" else None
class LookupDatasetPATCH(DatasetPATCH):
    """Partial-update (PATCH) payload for a lookup dataset.

    All fields are optional; only the attributes actually supplied are kept
    and later serialized by :meth:`to_dict`.
    """

    @staticmethod
    def _from_dict(model: dict) -> "LookupDatasetPATCH":
        """Build an instance directly from a raw attribute dict (no validation)."""
        instance = LookupDatasetPATCH.__new__(LookupDatasetPATCH)
        instance._attrs = model
        return instance

    def __init__(self, case_sensitive_match: "bool" = True, external_kind: "LookupDatasetExternalKind" = None, external_name: "str" = None, filter: "str" = None, kind: "LookupDatasetKind" = None, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """LookupDatasetPATCH

        Named arguments that are None are dropped; extra keyword arguments are
        stored verbatim (including None values), matching the wire attribute names.
        """
        self._attrs = dict()
        for key, value in (
            ("caseSensitiveMatch", case_sensitive_match),
            ("externalKind", external_kind),
            ("externalName", external_name),
            ("filter", filter),
            ("kind", kind),
            ("module", module),
            ("name", name),
            ("owner", owner),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def case_sensitive_match(self) -> "bool":
        """Whether lookup matching is case sensitive."""
        return self._attrs.get("caseSensitiveMatch")

    @case_sensitive_match.setter
    def case_sensitive_match(self, case_sensitive_match: "bool"):
        self._attrs["caseSensitiveMatch"] = case_sensitive_match

    @property
    def external_kind(self) -> "LookupDatasetExternalKind":
        """Kind of the external lookup, decoded into its enum member."""
        return LookupDatasetExternalKind.from_value(self._attrs.get("externalKind"))

    @external_kind.setter
    def external_kind(self, external_kind: "LookupDatasetExternalKind"):
        # Enum members are unwrapped; raw strings are trusted as-is.
        if isinstance(external_kind, Enum):
            external_kind = external_kind.value
        self._attrs["externalKind"] = external_kind

    @property
    def external_name(self) -> "str":
        """Name of the external lookup."""
        return self._attrs.get("externalName")

    @external_name.setter
    def external_name(self, external_name: "str"):
        self._attrs["externalName"] = external_name

    @property
    def filter(self) -> "str":
        """Query that filters results out of the lookup before they are returned."""
        return self._attrs.get("filter")

    @filter.setter
    def filter(self, filter: "str"):
        self._attrs["filter"] = filter

    @property
    def kind(self) -> "LookupDatasetKind":
        """The dataset kind, decoded into its enum member."""
        return LookupDatasetKind.from_value(self._attrs.get("kind"))

    @kind.setter
    def kind(self, kind: "LookupDatasetKind"):
        # Enum members are unwrapped; raw strings are trusted as-is.
        if isinstance(kind, Enum):
            kind = kind.value
        self._attrs["kind"] = kind

    @property
    def module(self) -> "str":
        """Name of the module to reassign the dataset into."""
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """The dataset name; must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The dataset owner, as obtained from the bearer token."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        self._attrs["owner"] = owner

    def to_dict(self):
        """Serialize the model as a plain dict, omitting attributes set to None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class MetricDataset(Dataset):
    """A metric index dataset.

    Thin dict-backed model: attributes live in ``self._attrs`` under their
    wire (camelCase) names. The fields created/createdby/id/modified/
    modifiedby/name/owner/resourcename are mandatory and reject None.
    """

    @staticmethod
    def _from_dict(model: dict) -> "MetricDataset":
        """Build an instance directly from a raw attribute dict (no validation)."""
        instance = MetricDataset.__new__(MetricDataset)
        instance._attrs = model
        return instance

    def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, description: "str" = None, disabled: "bool" = None, earliest_event_time: "str" = None, earliest_ingest_time: "str" = None, frozen_time_period_in_secs: "int" = None, latest_event_time: "str" = None, latest_ingest_time: "str" = None, latest_metadata_update_time: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, total_event_count: "int" = None, total_size: "int" = None, **extra):
        """MetricDataset

        Arguments that are None are dropped; extra keyword arguments are
        stored verbatim. Insertion order of attributes mirrors the generated
        layout (the fixed "kind" discriminator sits between the two groups).
        """
        self._attrs = dict()
        for key, value in (
            ("created", created),
            ("createdby", createdby),
            ("id", id),
            ("modified", modified),
            ("modifiedby", modifiedby),
            ("name", name),
            ("owner", owner),
            ("resourcename", resourcename),
            ("appclientidcreatedby", appclientidcreatedby),
            ("appclientidmodifiedby", appclientidmodifiedby),
            ("description", description),
            ("disabled", disabled),
            ("earliestEventTime", earliest_event_time),
            ("earliestIngestTime", earliest_ingest_time),
            ("frozenTimePeriodInSecs", frozen_time_period_in_secs),
        ):
            if value is not None:
                self._attrs[key] = value
        # Fixed discriminator: always present regardless of arguments.
        self._attrs["kind"] = "metric"
        for key, value in (
            ("latestEventTime", latest_event_time),
            ("latestIngestTime", latest_ingest_time),
            ("latestMetadataUpdateTime", latest_metadata_update_time),
            ("namespace", namespace),
            ("summary", summary),
            ("title", title),
            ("totalEventCount", total_event_count),
            ("totalSize", total_size),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    def _set_required(self, key, value, message):
        """Assign a mandatory attribute, raising ValueError(*message*) on None."""
        if value is None:
            raise ValueError(message)
        self._attrs[key] = value

    @property
    def created(self) -> "str":
        """The date and time the object was created."""
        return self._attrs.get("created")

    @created.setter
    def created(self, created: "str"):
        self._set_required("created", created, "Invalid value for `created`, must not be `None`")

    @property
    def createdby(self) -> "str":
        """User who created the object; taken from the bearer token, immutable."""
        return self._attrs.get("createdby")

    @createdby.setter
    def createdby(self, createdby: "str"):
        self._set_required("createdby", createdby, "Invalid value for `createdby`, must not be `None`")

    @property
    def id(self) -> "str":
        """A unique dataset ID."""
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        self._set_required("id", id, "Invalid value for `id`, must not be `None`")

    @property
    def modified(self) -> "str":
        """The date and time the object was last modified."""
        return self._attrs.get("modified")

    @modified.setter
    def modified(self, modified: "str"):
        self._set_required("modified", modified, "Invalid value for `modified`, must not be `None`")

    @property
    def modifiedby(self) -> "str":
        """User who most recently modified the object."""
        return self._attrs.get("modifiedby")

    @modifiedby.setter
    def modifiedby(self, modifiedby: "str"):
        self._set_required("modifiedby", modifiedby, "Invalid value for `modifiedby`, must not be `None`")

    @property
    def name(self) -> "str":
        """The dataset name; must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        self._set_required("name", name, "Invalid value for `name`, must not be `None`")

    @property
    def owner(self) -> "str":
        """The name of the object's owner."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        self._set_required("owner", owner, "Invalid value for `owner`, must not be `None`")

    @property
    def resourcename(self) -> "str":
        """The dataset name qualified by the module name."""
        return self._attrs.get("resourcename")

    @resourcename.setter
    def resourcename(self, resourcename: "str"):
        self._set_required("resourcename", resourcename, "Invalid value for `resourcename`, must not be `None`")

    @property
    def appclientidcreatedby(self) -> "str":
        """AppClientId of the app that created the dataset."""
        return self._attrs.get("appclientidcreatedby")

    @appclientidcreatedby.setter
    def appclientidcreatedby(self, appclientidcreatedby: "str"):
        self._attrs["appclientidcreatedby"] = appclientidcreatedby

    @property
    def appclientidmodifiedby(self) -> "str":
        """AppClientId of the app that last modified the dataset."""
        return self._attrs.get("appclientidmodifiedby")

    @appclientidmodifiedby.setter
    def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
        self._attrs["appclientidmodifiedby"] = appclientidmodifiedby

    @property
    def description(self) -> "str":
        """Detailed description of the dataset."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """Whether the Splunk index is disabled."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        self._attrs["disabled"] = disabled

    @property
    def earliest_event_time(self) -> "str":
        """UNIX timestamp, in seconds, of the earliest measure."""
        return self._attrs.get("earliestEventTime")

    @earliest_event_time.setter
    def earliest_event_time(self, earliest_event_time: "str"):
        self._attrs["earliestEventTime"] = earliest_event_time

    @property
    def earliest_ingest_time(self) -> "str":
        """Earliest index time across the measures in this index."""
        return self._attrs.get("earliestIngestTime")

    @earliest_ingest_time.setter
    def earliest_ingest_time(self, earliest_ingest_time: "str"):
        self._attrs["earliestIngestTime"] = earliest_ingest_time

    @property
    def frozen_time_period_in_secs(self) -> "int":
        """The frozenTimePeriodInSecs to use for the index."""
        return self._attrs.get("frozenTimePeriodInSecs")

    @frozen_time_period_in_secs.setter
    def frozen_time_period_in_secs(self, frozen_time_period_in_secs: "int"):
        self._attrs["frozenTimePeriodInSecs"] = frozen_time_period_in_secs

    @property
    def kind(self) -> str:
        """Dataset kind discriminator; always the literal "metric" for this class."""
        return "metric"

    @property
    def latest_event_time(self) -> "str":
        """UNIX timestamp, in seconds, of the latest measure."""
        return self._attrs.get("latestEventTime")

    @latest_event_time.setter
    def latest_event_time(self, latest_event_time: "str"):
        self._attrs["latestEventTime"] = latest_event_time

    @property
    def latest_ingest_time(self) -> "str":
        """Latest index time across the measures in this index."""
        return self._attrs.get("latestIngestTime")

    @latest_ingest_time.setter
    def latest_ingest_time(self, latest_ingest_time: "str"):
        self._attrs["latestIngestTime"] = latest_ingest_time

    @property
    def latest_metadata_update_time(self) -> "str":
        """The latest time the metric index metadata was refreshed."""
        return self._attrs.get("latestMetadataUpdateTime")

    @latest_metadata_update_time.setter
    def latest_metadata_update_time(self, latest_metadata_update_time: "str"):
        self._attrs["latestMetadataUpdateTime"] = latest_metadata_update_time

    @property
    def namespace(self) -> "str":
        """Name of the namespace that contains the dataset."""
        return self._attrs.get("namespace")

    @namespace.setter
    def namespace(self, namespace: "str"):
        self._attrs["namespace"] = namespace

    @property
    def summary(self) -> "str":
        """Short summary of the dataset's purpose."""
        return self._attrs.get("summary")

    @summary.setter
    def summary(self, summary: "str"):
        self._attrs["summary"] = summary

    @property
    def title(self) -> "str":
        """Display title of the dataset; not required to be unique."""
        return self._attrs.get("title")

    @title.setter
    def title(self, title: "str"):
        self._attrs["title"] = title

    @property
    def total_event_count(self) -> "int":
        """The number of measures in the metric index."""
        return self._attrs.get("totalEventCount")

    @total_event_count.setter
    def total_event_count(self, total_event_count: "int"):
        self._attrs["totalEventCount"] = total_event_count

    @property
    def total_size(self) -> "int":
        """Total size; for metrics indexes this is reported as 0."""
        return self._attrs.get("totalSize")

    @total_size.setter
    def total_size(self, total_size: "int"):
        self._attrs["totalSize"] = total_size

    def to_dict(self):
        """Serialize the model as a plain dict, omitting attributes set to None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
# Register the metric deserializer in the base Dataset dispatch table so that
# Dataset deserialization routes records with kind == "metric" to MetricDataset.
Dataset.from_dict_handlers["metric"] = MetricDataset._from_dict
class MetricDatasetKind(str, Enum):
    """Enumeration of the single valid `kind` value for metric datasets."""

    METRIC = "metric"

    @staticmethod
    def from_value(value: str):
        """Map a raw string onto the enum member; unknown values yield None."""
        return MetricDatasetKind.METRIC if value == "metric" else None
class MetricDatasetPATCH(DatasetPATCH):
    """Partial-update (PATCH) payload for a metric dataset.

    All fields are optional; only the attributes actually supplied are kept
    and later serialized by :meth:`to_dict`.
    """

    @staticmethod
    def _from_dict(model: dict) -> "MetricDatasetPATCH":
        """Build an instance directly from a raw attribute dict (no validation)."""
        instance = MetricDatasetPATCH.__new__(MetricDatasetPATCH)
        instance._attrs = model
        return instance

    def __init__(self, disabled: "bool" = None, frozen_time_period_in_secs: "int" = None, kind: "MetricDatasetKind" = None, module: "str" = None, name: "str" = None, owner: "str" = None, **extra):
        """MetricDatasetPATCH

        Named arguments that are None are dropped; extra keyword arguments are
        stored verbatim, matching the wire attribute names.
        """
        self._attrs = dict()
        for key, value in (
            ("disabled", disabled),
            ("frozenTimePeriodInSecs", frozen_time_period_in_secs),
            ("kind", kind),
            ("module", module),
            ("name", name),
            ("owner", owner),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def disabled(self) -> "bool":
        """Whether the Splunk index is disabled."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        self._attrs["disabled"] = disabled

    @property
    def frozen_time_period_in_secs(self) -> "int":
        """The frozenTimePeriodInSecs to use for the index."""
        return self._attrs.get("frozenTimePeriodInSecs")

    @frozen_time_period_in_secs.setter
    def frozen_time_period_in_secs(self, frozen_time_period_in_secs: "int"):
        self._attrs["frozenTimePeriodInSecs"] = frozen_time_period_in_secs

    @property
    def kind(self) -> "MetricDatasetKind":
        """The dataset kind, decoded into its enum member."""
        return MetricDatasetKind.from_value(self._attrs.get("kind"))

    @kind.setter
    def kind(self, kind: "MetricDatasetKind"):
        # Enum members are unwrapped; raw strings are trusted as-is.
        if isinstance(kind, Enum):
            kind = kind.value
        self._attrs["kind"] = kind

    @property
    def module(self) -> "str":
        """Name of the module to reassign the dataset into."""
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """The dataset name; must be unique within each module."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        self._attrs["name"] = name

    @property
    def owner(self) -> "str":
        """The dataset owner, as obtained from the bearer token."""
        return self._attrs.get("owner")

    @owner.setter
    def owner(self, owner: "str"):
        self._attrs["owner"] = owner

    def to_dict(self):
        """Serialize the model as a plain dict, omitting attributes set to None."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class SearchJob(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "SearchJob":
instance = SearchJob.__new__(SearchJob)
instance._attrs = model
return instance
def __init__(self, query: "str", allow_side_effects: "bool" = False, collect_event_summary: "bool" = False, collect_field_summary: "bool" = False, collect_time_buckets: "bool" = False, completion_time: "str" = None, dispatch_time: "str" = None, enable_preview: "bool" = False, extract_all_fields: "bool" = False, extract_fields: "str" = '', max_time: "int" = 3600, messages: "List[Message]" = None, module: "str" = '', name: "str" = None, parent: "str" = None, percent_complete: "int" = 0, preview_available: "str" = 'false', query_parameters: "QueryParameters" = None, required_freshness: "int" = 0, resolved_earliest: "str" = None, resolved_latest: "str" = None, results_available: "int" = 0, results_preview_available: "int" = 0, sid: "str" = None, status: "SearchStatus" = None, **extra):
"""SearchJob"""
self._attrs = dict()
if query is not None:
self._attrs["query"] = query
if allow_side_effects is not None:
self._attrs["allowSideEffects"] = allow_side_effects
if collect_event_summary is not None:
self._attrs["collectEventSummary"] = collect_event_summary
if collect_field_summary is not None:
self._attrs["collectFieldSummary"] = collect_field_summary
if collect_time_buckets is not None:
self._attrs["collectTimeBuckets"] = collect_time_buckets
if completion_time is not None:
self._attrs["completionTime"] = completion_time
if dispatch_time is not None:
self._attrs["dispatchTime"] = dispatch_time
if enable_preview is not None:
self._attrs["enablePreview"] = enable_preview
if extract_all_fields is not None:
self._attrs["extractAllFields"] = extract_all_fields
if extract_fields is not None:
self._attrs["extractFields"] = extract_fields
if max_time is not None:
self._attrs["maxTime"] = max_time
if messages is not None:
self._attrs["messages"] = messages
if module is not None:
self._attrs["module"] = module
if name is not None:
self._attrs["name"] = name
if parent is not None:
self._attrs["parent"] = parent
if percent_complete is not None:
self._attrs["percentComplete"] = percent_complete
if preview_available is not None:
self._attrs["previewAvailable"] = preview_available
if query_parameters is not None:
self._attrs["queryParameters"] = query_parameters.to_dict()
if required_freshness is not None:
self._attrs["requiredFreshness"] = required_freshness
if resolved_earliest is not None:
self._attrs["resolvedEarliest"] = resolved_earliest
if resolved_latest is not None:
self._attrs["resolvedLatest"] = resolved_latest
if results_available is not None:
self._attrs["resultsAvailable"] = results_available
if results_preview_available is not None:
self._attrs["resultsPreviewAvailable"] = results_preview_available
if sid is not None:
self._attrs["sid"] = sid
if status is not None:
self._attrs["status"] = status
for k, v in extra.items():
self._attrs[k] = v
@property
def query(self) -> "str":
""" Gets the query of this SearchJob.
The SPL search string.
"""
return self._attrs.get("query")
@query.setter
def query(self, query: "str"):
"""Sets the query of this SearchJob.
The SPL search string.
:param query: The query of this SearchJob.
:type: str
"""
if query is None:
raise ValueError("Invalid value for `query`, must not be `None`")
self._attrs["query"] = query
@property
def allow_side_effects(self) -> "bool":
""" Gets the allow_side_effects of this SearchJob.
Specifies whether a search that contains commands with side effects (with possible security risks) is allowed to run.
"""
return self._attrs.get("allowSideEffects")
@allow_side_effects.setter
def allow_side_effects(self, allow_side_effects: "bool"):
"""Sets the allow_side_effects of this SearchJob.
Specifies whether a search that contains commands with side effects (with possible security risks) is allowed to run.
:param allow_side_effects: The allow_side_effects of this SearchJob.
:type: bool
"""
self._attrs["allowSideEffects"] = allow_side_effects
@property
def collect_event_summary(self) -> "bool":
""" Gets the collect_event_summary of this SearchJob.
Specifies whether a search is allowed to collect events summary information during the run time.
"""
return self._attrs.get("collectEventSummary")
@collect_event_summary.setter
def collect_event_summary(self, collect_event_summary: "bool"):
"""Sets the collect_event_summary of this SearchJob.
Specifies whether a search is allowed to collect events summary information during the run time.
:param collect_event_summary: The collect_event_summary of this SearchJob.
:type: bool
"""
self._attrs["collectEventSummary"] = collect_event_summary
@property
def collect_field_summary(self) -> "bool":
""" Gets the collect_field_summary of this SearchJob.
Specifies whether a search is allowed to collect fields summary information during the run time.
"""
return self._attrs.get("collectFieldSummary")
    # --- Generated accessors over the raw `_attrs` dict. Each property maps a
    # --- snake_case Python name to the camelCase wire key used by the service.
    @collect_field_summary.setter
    def collect_field_summary(self, collect_field_summary: "bool"):
        """Sets the collect_field_summary of this SearchJob.
        Specifies whether a search is allowed to collect fields summary information during the run time.
        :param collect_field_summary: The collect_field_summary of this SearchJob.
        :type: bool
        """
        self._attrs["collectFieldSummary"] = collect_field_summary

    @property
    def collect_time_buckets(self) -> "bool":
        """ Gets the collect_time_buckets of this SearchJob.
        Specifies whether a search is allowed to collect timeline buckets summary information during the run time.
        """
        return self._attrs.get("collectTimeBuckets")

    @collect_time_buckets.setter
    def collect_time_buckets(self, collect_time_buckets: "bool"):
        """Sets the collect_time_buckets of this SearchJob.
        Specifies whether a search is allowed to collect timeline buckets summary information during the run time.
        :param collect_time_buckets: The collect_time_buckets of this SearchJob.
        :type: bool
        """
        self._attrs["collectTimeBuckets"] = collect_time_buckets

    @property
    def completion_time(self) -> "str":
        """ Gets the completion_time of this SearchJob.
        The time, in GMT, that the search job is finished. Empty if the search job has not completed.
        """
        # Returns None (not "") when the service has not reported the field at all.
        return self._attrs.get("completionTime")

    @completion_time.setter
    def completion_time(self, completion_time: "str"):
        """Sets the completion_time of this SearchJob.
        The time, in GMT, that the search job is finished. Empty if the search job has not completed.
        :param completion_time: The completion_time of this SearchJob.
        :type: str
        """
        self._attrs["completionTime"] = completion_time

    @property
    def dispatch_time(self) -> "str":
        """ Gets the dispatch_time of this SearchJob.
        The time, in GMT, that the search job is dispatched.
        """
        return self._attrs.get("dispatchTime")

    @dispatch_time.setter
    def dispatch_time(self, dispatch_time: "str"):
        """Sets the dispatch_time of this SearchJob.
        The time, in GMT, that the search job is dispatched.
        :param dispatch_time: The dispatch_time of this SearchJob.
        :type: str
        """
        self._attrs["dispatchTime"] = dispatch_time

    @property
    def enable_preview(self) -> "bool":
        """ Gets the enable_preview of this SearchJob.
        Specifies whether a search is allowed to collect preview results during the run time.
        """
        return self._attrs.get("enablePreview")

    @enable_preview.setter
    def enable_preview(self, enable_preview: "bool"):
        """Sets the enable_preview of this SearchJob.
        Specifies whether a search is allowed to collect preview results during the run time.
        :param enable_preview: The enable_preview of this SearchJob.
        :type: bool
        """
        self._attrs["enablePreview"] = enable_preview

    @property
    def extract_all_fields(self) -> "bool":
        """ Gets the extract_all_fields of this SearchJob.
        Specifies whether the Search service should extract all of the available fields in the data, including fields not mentioned in the SPL for the search job. Set to 'false' for better search performance. The 'extractAllFields' parameter is deprecated as of version v3alpha1. Although this parameter continues to function, it might be removed in a future version. Use the 'extractFields' parameter instead.
        """
        # Deprecated in favor of extract_fields (see docstring above).
        return self._attrs.get("extractAllFields")

    @extract_all_fields.setter
    def extract_all_fields(self, extract_all_fields: "bool"):
        """Sets the extract_all_fields of this SearchJob.
        Specifies whether the Search service should extract all of the available fields in the data, including fields not mentioned in the SPL for the search job. Set to 'false' for better search performance. The 'extractAllFields' parameter is deprecated as of version v3alpha1. Although this parameter continues to function, it might be removed in a future version. Use the 'extractFields' parameter instead.
        :param extract_all_fields: The extract_all_fields of this SearchJob.
        :type: bool
        """
        self._attrs["extractAllFields"] = extract_all_fields

    @property
    def extract_fields(self) -> "str":
        """ Gets the extract_fields of this SearchJob.
        Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' will extract all fields, 'indexed' will extract only indexed fields, and 'none' will extract only the default fields. This parameter overwrites the value of the 'extractAllFields' parameter. Set to 'none' for better search performance.
        """
        return self._attrs.get("extractFields")

    @extract_fields.setter
    def extract_fields(self, extract_fields: "str"):
        """Sets the extract_fields of this SearchJob.
        Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' will extract all fields, 'indexed' will extract only indexed fields, and 'none' will extract only the default fields. This parameter overwrites the value of the 'extractAllFields' parameter. Set to 'none' for better search performance.
        :param extract_fields: The extract_fields of this SearchJob.
        :type: str
        """
        self._attrs["extractFields"] = extract_fields

    @property
    def max_time(self) -> "int":
        """ Gets the max_time of this SearchJob.
        The number of seconds to run the search before finalizing the search. The default value is 3600 seconds (1 hour). The maximum value is 3600 seconds (1 hour).
        """
        return self._attrs.get("maxTime")

    @max_time.setter
    def max_time(self, max_time: "int"):
        """Sets the max_time of this SearchJob.
        The number of seconds to run the search before finalizing the search. The default value is 3600 seconds (1 hour). The maximum value is 3600 seconds (1 hour).
        :param max_time: The max_time of this SearchJob.
        :type: int
        """
        self._attrs["maxTime"] = max_time
@property
def messages(self) -> "List[Message]":
""" Gets the messages of this SearchJob.
"""
return [Message._from_dict(i) for i in self._attrs.get("messages")]
    # --- Generated accessors over the raw `_attrs` dict (continued). ---
    @messages.setter
    def messages(self, messages: "List[Message]"):
        """Sets the messages of this SearchJob.
        :param messages: The messages of this SearchJob.
        :type: List[Message]
        """
        # NOTE(review): stored as-is; unlike query_parameters, the value is not
        # converted with to_dict() here — confirm callers pass serializable items.
        self._attrs["messages"] = messages

    @property
    def module(self) -> "str":
        """ Gets the module of this SearchJob.
        The module to run the search in. The default module is used if a module is not specified.
        """
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        """Sets the module of this SearchJob.
        The module to run the search in. The default module is used if a module is not specified.
        :param module: The module of this SearchJob.
        :type: str
        """
        self._attrs["module"] = module

    @property
    def name(self) -> "str":
        """ Gets the name of this SearchJob.
        The name of the created search job.
        """
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Sets the name of this SearchJob.
        The name of the created search job.
        :param name: The name of this SearchJob.
        :type: str
        """
        self._attrs["name"] = name

    @property
    def parent(self) -> "str":
        """ Gets the parent of this SearchJob.
        The 'rsid' of an associated recurring-search, if this search job is dispatched by a recurring-search.
        """
        return self._attrs.get("parent")

    @parent.setter
    def parent(self, parent: "str"):
        """Sets the parent of this SearchJob.
        The 'rsid' of an associated recurring-search, if this search job is dispatched by a recurring-search.
        :param parent: The parent of this SearchJob.
        :type: str
        """
        self._attrs["parent"] = parent

    @property
    def percent_complete(self) -> "int":
        """ Gets the percent_complete of this SearchJob.
        An estimate of the percent of time remaining before the job completes.
        """
        return self._attrs.get("percentComplete")

    @percent_complete.setter
    def percent_complete(self, percent_complete: "int"):
        """Sets the percent_complete of this SearchJob.
        An estimate of the percent of time remaining before the job completes.
        :param percent_complete: The percent_complete of this SearchJob.
        :type: int
        """
        self._attrs["percentComplete"] = percent_complete

    @property
    def preview_available(self) -> "str":
        """ Gets the preview_available of this SearchJob.
        Specifies if preview results are available for the search job. The valid status values are 'unknown', 'true', and 'false'.
        """
        # NOTE: a tri-state string ('unknown'/'true'/'false'), not a Python bool.
        return self._attrs.get("previewAvailable")

    @preview_available.setter
    def preview_available(self, preview_available: "str"):
        """Sets the preview_available of this SearchJob.
        Specifies if preview results are available for the search job. The valid status values are 'unknown', 'true', and 'false'.
        :param preview_available: The preview_available of this SearchJob.
        :type: str
        """
        self._attrs["previewAvailable"] = preview_available
@property
def query_parameters(self) -> "QueryParameters":
""" Gets the query_parameters of this SearchJob.
Represents parameters on the search job such as 'earliest' and 'latest'.
"""
return QueryParameters._from_dict(self._attrs["queryParameters"])
    # --- Generated accessors over the raw `_attrs` dict (continued). ---
    @query_parameters.setter
    def query_parameters(self, query_parameters: "QueryParameters"):
        """Sets the query_parameters of this SearchJob.
        Represents parameters on the search job such as 'earliest' and 'latest'.
        :param query_parameters: The query_parameters of this SearchJob.
        :type: QueryParameters
        """
        # Serialized eagerly so _attrs stays a plain dict of wire-format values.
        self._attrs["queryParameters"] = query_parameters.to_dict()

    @property
    def required_freshness(self) -> "int":
        """ Gets the required_freshness of this SearchJob.
        Specifies a maximum time interval, in seconds, between identical existing searches. The 'requiredFreshness' parameter is used to determine if an existing search with the same query and the same time boundaries can be reused, instead of running the same search again. Freshness is applied to the resolvedEarliest and resolvedLatest parameters. If an existing search has the same exact criteria as this search and the resolvedEarliest and resolvedLatest values are within the freshness interval, the existing search metadata is returned instead of initiating a new search job. By default, the requiredFreshness parameter is set to 0 which means that the platform does not attempt to use an existing search.
        """
        return self._attrs.get("requiredFreshness")

    @required_freshness.setter
    def required_freshness(self, required_freshness: "int"):
        """Sets the required_freshness of this SearchJob.
        Specifies a maximum time interval, in seconds, between identical existing searches. The 'requiredFreshness' parameter is used to determine if an existing search with the same query and the same time boundaries can be reused, instead of running the same search again. Freshness is applied to the resolvedEarliest and resolvedLatest parameters. If an existing search has the same exact criteria as this search and the resolvedEarliest and resolvedLatest values are within the freshness interval, the existing search metadata is returned instead of initiating a new search job. By default, the requiredFreshness parameter is set to 0 which means that the platform does not attempt to use an existing search.
        :param required_freshness: The required_freshness of this SearchJob.
        :type: int
        """
        self._attrs["requiredFreshness"] = required_freshness

    @property
    def resolved_earliest(self) -> "str":
        """ Gets the resolved_earliest of this SearchJob.
        The earliest time speciifed as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
        """
        return self._attrs.get("resolvedEarliest")

    @resolved_earliest.setter
    def resolved_earliest(self, resolved_earliest: "str"):
        """Sets the resolved_earliest of this SearchJob.
        The earliest time speciifed as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
        :param resolved_earliest: The resolved_earliest of this SearchJob.
        :type: str
        """
        self._attrs["resolvedEarliest"] = resolved_earliest

    @property
    def resolved_latest(self) -> "str":
        """ Gets the resolved_latest of this SearchJob.
        The latest time specified as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
        """
        return self._attrs.get("resolvedLatest")

    @resolved_latest.setter
    def resolved_latest(self, resolved_latest: "str"):
        """Sets the resolved_latest of this SearchJob.
        The latest time specified as an absolute value in GMT. The time is computed based on the values you specify for the 'timezone' and 'earliest' queryParameters.
        :param resolved_latest: The resolved_latest of this SearchJob.
        :type: str
        """
        self._attrs["resolvedLatest"] = resolved_latest

    @property
    def results_available(self) -> "int":
        """ Gets the results_available of this SearchJob.
        The number of results produced so far for the search job.
        """
        return self._attrs.get("resultsAvailable")

    @results_available.setter
    def results_available(self, results_available: "int"):
        """Sets the results_available of this SearchJob.
        The number of results produced so far for the search job.
        :param results_available: The results_available of this SearchJob.
        :type: int
        """
        self._attrs["resultsAvailable"] = results_available

    @property
    def results_preview_available(self) -> "int":
        """ Gets the results_preview_available of this SearchJob.
        The number of the preview search results for the job with the specified search ID (sid).
        """
        return self._attrs.get("resultsPreviewAvailable")

    @results_preview_available.setter
    def results_preview_available(self, results_preview_available: "int"):
        """Sets the results_preview_available of this SearchJob.
        The number of the preview search results for the job with the specified search ID (sid).
        :param results_preview_available: The results_preview_available of this SearchJob.
        :type: int
        """
        self._attrs["resultsPreviewAvailable"] = results_preview_available

    @property
    def sid(self) -> "str":
        """ Gets the sid of this SearchJob.
        The ID assigned to the search job.
        """
        return self._attrs.get("sid")

    @sid.setter
    def sid(self, sid: "str"):
        """Sets the sid of this SearchJob.
        The ID assigned to the search job.
        :param sid: The sid of this SearchJob.
        :type: str
        """
        self._attrs["sid"] = sid

    @property
    def status(self) -> "SearchStatus":
        """ Gets the status of this SearchJob.
        """
        # Deserializes the raw string into the SearchStatus enum.
        return SearchStatus.from_value(self._attrs.get("status"))

    @status.setter
    def status(self, status: "SearchStatus"):
        """Sets the status of this SearchJob.
        :param status: The status of this SearchJob.
        :type: SearchStatus
        """
        # Accepts either the enum or a raw string (for forward compatibility
        # with status values the client does not know about yet).
        if isinstance(status, Enum):
            self._attrs["status"] = status.value
        else:
            self._attrs["status"] = status  # If you supply a string, we presume you know the service will take it.

    def to_dict(self):
        # Serialize for the wire: omit attributes that were never set (None).
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class SingleStatementQueryParameters(SSCModel):
    """Query parameters for one search statement (time range, preview and
    summary collection flags, field extraction mode, runtime limit, sid reuse).

    Values are stored in a raw ``_attrs`` dict keyed by the camelCase names
    used on the wire; the snake_case properties below wrap that dict.
    """
    @staticmethod
    def _from_dict(model: dict) -> "SingleStatementQueryParameters":
        """Wrap an existing attribute dict without re-validating defaults."""
        instance = SingleStatementQueryParameters.__new__(SingleStatementQueryParameters)
        instance._attrs = model
        return instance

    def __init__(self, allow_side_effects: "bool" = False, collect_field_summary: "bool" = False, collect_time_buckets: "bool" = False, earliest: "str" = '-24h@h', enable_preview: "bool" = False, extract_fields: "str" = 'indexed', latest: "str" = 'now', max_time: "int" = 3600, relative_time_anchor: "datetime" = None, sid: "str" = '', timezone: "object" = None, **extra):
        """SingleStatementQueryParameters"""
        # Only non-None values are recorded, so to_dict() omits unset fields.
        self._attrs = dict()
        if allow_side_effects is not None:
            self._attrs["allowSideEffects"] = allow_side_effects
        if collect_field_summary is not None:
            self._attrs["collectFieldSummary"] = collect_field_summary
        if collect_time_buckets is not None:
            self._attrs["collectTimeBuckets"] = collect_time_buckets
        if earliest is not None:
            self._attrs["earliest"] = earliest
        if enable_preview is not None:
            self._attrs["enablePreview"] = enable_preview
        if extract_fields is not None:
            self._attrs["extractFields"] = extract_fields
        if latest is not None:
            self._attrs["latest"] = latest
        if max_time is not None:
            self._attrs["maxTime"] = max_time
        if relative_time_anchor is not None:
            self._attrs["relativeTimeAnchor"] = relative_time_anchor
        if sid is not None:
            self._attrs["sid"] = sid
        if timezone is not None:
            self._attrs["timezone"] = timezone
        # Pass-through for fields not modeled by this client version.
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def allow_side_effects(self) -> "bool":
        """ Gets the allow_side_effects of this SingleStatementQueryParameters.
        Specifies whether a search that contains commands with side effects (with possible security risks) is allowed to run. The search contains commands, thru or into, that have side effects.
        """
        return self._attrs.get("allowSideEffects")

    @allow_side_effects.setter
    def allow_side_effects(self, allow_side_effects: "bool"):
        """Sets the allow_side_effects of this SingleStatementQueryParameters.
        Specifies whether a search that contains commands with side effects (with possible security risks) is allowed to run. The search contains commands, thru or into, that have side effects.
        :param allow_side_effects: The allow_side_effects of this SingleStatementQueryParameters.
        :type: bool
        """
        self._attrs["allowSideEffects"] = allow_side_effects

    @property
    def collect_field_summary(self) -> "bool":
        """ Gets the collect_field_summary of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the Fields summary during the run time.
        """
        return self._attrs.get("collectFieldSummary")

    @collect_field_summary.setter
    def collect_field_summary(self, collect_field_summary: "bool"):
        """Sets the collect_field_summary of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the Fields summary during the run time.
        :param collect_field_summary: The collect_field_summary of this SingleStatementQueryParameters.
        :type: bool
        """
        self._attrs["collectFieldSummary"] = collect_field_summary

    @property
    def collect_time_buckets(self) -> "bool":
        """ Gets the collect_time_buckets of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the Timeline Buckets summary during the run time.
        """
        return self._attrs.get("collectTimeBuckets")

    @collect_time_buckets.setter
    def collect_time_buckets(self, collect_time_buckets: "bool"):
        """Sets the collect_time_buckets of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the Timeline Buckets summary during the run time.
        :param collect_time_buckets: The collect_time_buckets of this SingleStatementQueryParameters.
        :type: bool
        """
        self._attrs["collectTimeBuckets"] = collect_time_buckets

    @property
    def earliest(self) -> "str":
        """ Gets the earliest of this SingleStatementQueryParameters.
        The earliest time, in absolute or relative format, to retrieve events. When specifying an absolute time specify either UNIX time, or UTC in seconds using the ISO-8601 (%FT%T.%Q) format. For example 2020-01-25T13:15:30Z. GMT is the default timezone. You must specify GMT when you specify UTC. Any offset specified is ignored.
        """
        return self._attrs.get("earliest")

    @earliest.setter
    def earliest(self, earliest: "str"):
        """Sets the earliest of this SingleStatementQueryParameters.
        The earliest time, in absolute or relative format, to retrieve events. When specifying an absolute time specify either UNIX time, or UTC in seconds using the ISO-8601 (%FT%T.%Q) format. For example 2020-01-25T13:15:30Z. GMT is the default timezone. You must specify GMT when you specify UTC. Any offset specified is ignored.
        :param earliest: The earliest of this SingleStatementQueryParameters.
        :type: str
        """
        self._attrs["earliest"] = earliest

    @property
    def enable_preview(self) -> "bool":
        """ Gets the enable_preview of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the preview results during the run time.
        """
        return self._attrs.get("enablePreview")

    @enable_preview.setter
    def enable_preview(self, enable_preview: "bool"):
        """Sets the enable_preview of this SingleStatementQueryParameters.
        Specifies whether a search is allowed to collect the preview results during the run time.
        :param enable_preview: The enable_preview of this SingleStatementQueryParameters.
        :type: bool
        """
        self._attrs["enablePreview"] = enable_preview

    @property
    def extract_fields(self) -> "str":
        """ Gets the extract_fields of this SingleStatementQueryParameters.
        Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' extracts all fields, 'indexed' extracts only indexed fields, and 'none' extracts only the default fields.
        """
        return self._attrs.get("extractFields")

    @extract_fields.setter
    def extract_fields(self, extract_fields: "str"):
        """Sets the extract_fields of this SingleStatementQueryParameters.
        Specifies how the Search service should extract fields. Valid values include 'all', 'none', or 'indexed'. 'all' extracts all fields, 'indexed' extracts only indexed fields, and 'none' extracts only the default fields.
        :param extract_fields: The extract_fields of this SingleStatementQueryParameters.
        :type: str
        """
        self._attrs["extractFields"] = extract_fields

    @property
    def latest(self) -> "str":
        """ Gets the latest of this SingleStatementQueryParameters.
        The latest time, in absolute or relative format, to retrieve events. When specifying an absolute time specify either UNIX time, or UTC in seconds using the ISO-8601 (%FT%T.%Q) format. For example 2020-01-25T13:15:30Z. GMT is the default timezone. You must specify GMT when you specify UTC. Any offset specified is ignored.
        """
        return self._attrs.get("latest")

    @latest.setter
    def latest(self, latest: "str"):
        """Sets the latest of this SingleStatementQueryParameters.
        The latest time, in absolute or relative format, to retrieve events. When specifying an absolute time specify either UNIX time, or UTC in seconds using the ISO-8601 (%FT%T.%Q) format. For example 2020-01-25T13:15:30Z. GMT is the default timezone. You must specify GMT when you specify UTC. Any offset specified is ignored.
        :param latest: The latest of this SingleStatementQueryParameters.
        :type: str
        """
        self._attrs["latest"] = latest

    @property
    def max_time(self) -> "int":
        """ Gets the max_time of this SingleStatementQueryParameters.
        The number of seconds to run the search before finalizing the search. The maximum value is 3600 seconds (1 hour).
        """
        return self._attrs.get("maxTime")

    @max_time.setter
    def max_time(self, max_time: "int"):
        """Sets the max_time of this SingleStatementQueryParameters.
        The number of seconds to run the search before finalizing the search. The maximum value is 3600 seconds (1 hour).
        :param max_time: The max_time of this SingleStatementQueryParameters.
        :type: int
        """
        self._attrs["maxTime"] = max_time

    @property
    def relative_time_anchor(self) -> "datetime":
        """ Gets the relative_time_anchor of this SingleStatementQueryParameters.
        Relative values for the 'earliest' and 'latest' parameters snap to the unit that you specify. For example, if 'earliest' is set to -d@d, the unit is day. If the 'relativeTimeAnchor' is is set to '2020-10-05T13:15:30Z' then 'resolvedEarliest' is snapped to '2020-10-05T00:00:00Z', which is the day. Hours, minutes, and seconds are dropped. If no 'relativeTimeAnchor' is specified, the default value is set to the time the search job was created.
        """
        return self._attrs.get("relativeTimeAnchor")

    @relative_time_anchor.setter
    def relative_time_anchor(self, relative_time_anchor: "datetime"):
        """Sets the relative_time_anchor of this SingleStatementQueryParameters.
        Relative values for the 'earliest' and 'latest' parameters snap to the unit that you specify. For example, if 'earliest' is set to -d@d, the unit is day. If the 'relativeTimeAnchor' is is set to '2020-10-05T13:15:30Z' then 'resolvedEarliest' is snapped to '2020-10-05T00:00:00Z', which is the day. Hours, minutes, and seconds are dropped. If no 'relativeTimeAnchor' is specified, the default value is set to the time the search job was created.
        :param relative_time_anchor: The relative_time_anchor of this SingleStatementQueryParameters.
        :type: datetime
        """
        self._attrs["relativeTimeAnchor"] = relative_time_anchor

    @property
    def sid(self) -> "str":
        """ Gets the sid of this SingleStatementQueryParameters.
        Reuse the results from the previous search ID (sid) for the statement. For customized default queryParameters, the sid is ignored.
        """
        return self._attrs.get("sid")

    @sid.setter
    def sid(self, sid: "str"):
        """Sets the sid of this SingleStatementQueryParameters.
        Reuse the results from the previous search ID (sid) for the statement. For customized default queryParameters, the sid is ignored.
        :param sid: The sid of this SingleStatementQueryParameters.
        :type: str
        """
        self._attrs["sid"] = sid

    @property
    def timezone(self) -> "object":
        """ Gets the timezone of this SingleStatementQueryParameters.
        The timezone that relative time specifiers are based off of. Timezone only applies to relative time literals for 'earliest' and 'latest'. If UNIX time or UTC format is used for 'earliest' and 'latest', this field is ignored. For the list of supported timezone formats, see https://docs.splunk.com/Documentation/Splunk/latest/Data/Applytimezoneoffsetstotimestamps#zoneinfo_.28TZ.29_database type: string default: \"GMT\"
        """
        return self._attrs.get("timezone")

    @timezone.setter
    def timezone(self, timezone: "object"):
        """Sets the timezone of this SingleStatementQueryParameters.
        The timezone that relative time specifiers are based off of. Timezone only applies to relative time literals for 'earliest' and 'latest'. If UNIX time or UTC format is used for 'earliest' and 'latest', this field is ignored. For the list of supported timezone formats, see https://docs.splunk.com/Documentation/Splunk/latest/Data/Applytimezoneoffsetstotimestamps#zoneinfo_.28TZ.29_database type: string default: \"GMT\"
        :param timezone: The timezone of this SingleStatementQueryParameters.
        :type: object
        """
        self._attrs["timezone"] = timezone

    def to_dict(self):
        # Serialize for the wire: omit attributes that were never set (None).
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class SearchModule(SSCModel):
    """Request model for a multi-statement search module.

    Carries the module text, the namespace to run it in, per-statement query
    parameters, and any work-in-progress (not yet registered) modules it
    references. Attributes live in a raw ``_attrs`` dict keyed by the
    camelCase names used on the wire.
    """

    @staticmethod
    def _from_dict(model: dict) -> "SearchModule":
        """Wrap an existing attribute dict in a SearchModule without copying."""
        wrapped = SearchModule.__new__(SearchModule)
        wrapped._attrs = model
        return wrapped

    def __init__(self, module: "str" = None, namespace: "str" = '', query_parameters: "Dict[str, SingleStatementQueryParameters]" = None, wip_modules: "Dict[str, Module]" = None, **extra):
        """SearchModule"""
        attrs = dict()
        # Record only the fields the caller actually supplied (non-None),
        # so unset fields are omitted by to_dict().
        for wire_key, value in (
            ("module", module),
            ("namespace", namespace),
            ("queryParameters", query_parameters),
            ("wipModules", wip_modules),
        ):
            if value is not None:
                attrs[wire_key] = value
        # Pass-through for fields not modeled by this client version.
        attrs.update(extra)
        self._attrs = attrs

    @property
    def module(self) -> "str":
        """The multi-statement module text.

        Statements are separated by semicolons and may depend on one another.
        """
        return self._attrs.get("module")

    @module.setter
    def module(self, module: "str"):
        """Set the multi-statement module text (semicolon-separated)."""
        self._attrs["module"] = module

    @property
    def namespace(self) -> "str":
        """The namespace to run the search in; the default namespace applies
        when none is specified."""
        return self._attrs.get("namespace")

    @namespace.setter
    def namespace(self, namespace: "str"):
        """Set the namespace to run the search in."""
        self._attrs["namespace"] = namespace

    @property
    def query_parameters(self) -> "Dict[str, SingleStatementQueryParameters]":
        """Per-statement query parameters (e.g. 'earliest'/'latest').

        A "defaults" entry overrides the system defaults; every export
        statement needs an entry here, which may be empty when there is no
        override.
        """
        return self._attrs.get("queryParameters")

    @query_parameters.setter
    def query_parameters(self, query_parameters: "Dict[str, SingleStatementQueryParameters]"):
        """Set the per-statement query parameters mapping."""
        self._attrs["queryParameters"] = query_parameters

    @property
    def wip_modules(self) -> "Dict[str, Module]":
        """Work-in-progress modules referenced by the search statements but
        not yet registered."""
        return self._attrs.get("wipModules")

    @wip_modules.setter
    def wip_modules(self, wip_modules: "Dict[str, Module]"):
        """Set the work-in-progress modules mapping."""
        self._attrs["wipModules"] = wip_modules

    def to_dict(self):
        """Return the attribute dict with all None-valued entries dropped."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class SingleTimeBucket(SSCModel):
    """One timeline bucket in a search's time-buckets summary: event counts,
    the bucket's duration, and its earliest-event timestamp in both UNIX and
    strftime form. Attributes live in a raw ``_attrs`` dict keyed by the
    camelCase names used on the wire.
    """
    @staticmethod
    def _from_dict(model: dict) -> "SingleTimeBucket":
        """Wrap an existing attribute dict without re-validating defaults."""
        instance = SingleTimeBucket.__new__(SingleTimeBucket)
        instance._attrs = model
        return instance

    def __init__(self, available_count: "int" = None, duration: "float" = None, earliest_time: "float" = None, earliest_time_strf_time: "str" = None, is_finalized: "bool" = None, total_count: "int" = None, **extra):
        """SingleTimeBucket"""
        # Only non-None values are recorded, so to_dict() omits unset fields.
        self._attrs = dict()
        if available_count is not None:
            self._attrs["availableCount"] = available_count
        if duration is not None:
            self._attrs["duration"] = duration
        if earliest_time is not None:
            self._attrs["earliestTime"] = earliest_time
        if earliest_time_strf_time is not None:
            self._attrs["earliestTimeStrfTime"] = earliest_time_strf_time
        if is_finalized is not None:
            self._attrs["isFinalized"] = is_finalized
        if total_count is not None:
            self._attrs["totalCount"] = total_count
        # Pass-through for fields not modeled by this client version.
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def available_count(self) -> "int":
        """ Gets the available_count of this SingleTimeBucket.
        Count of available events. Not all events in a bucket are retrievable. Typically this count is capped at 10000.
        """
        return self._attrs.get("availableCount")

    @available_count.setter
    def available_count(self, available_count: "int"):
        """Sets the available_count of this SingleTimeBucket.
        Count of available events. Not all events in a bucket are retrievable. Typically this count is capped at 10000.
        :param available_count: The available_count of this SingleTimeBucket.
        :type: int
        """
        self._attrs["availableCount"] = available_count

    @property
    def duration(self) -> "float":
        """ Gets the duration of this SingleTimeBucket.
        """
        return self._attrs.get("duration")

    @duration.setter
    def duration(self, duration: "float"):
        """Sets the duration of this SingleTimeBucket.
        :param duration: The duration of this SingleTimeBucket.
        :type: float
        """
        self._attrs["duration"] = duration

    @property
    def earliest_time(self) -> "float":
        """ Gets the earliest_time of this SingleTimeBucket.
        The timestamp of the earliest event in the current bucket, in UNIX format. This is the same time as 'earliestTimeStrfTime' in UNIX format.
        """
        return self._attrs.get("earliestTime")

    @earliest_time.setter
    def earliest_time(self, earliest_time: "float"):
        """Sets the earliest_time of this SingleTimeBucket.
        The timestamp of the earliest event in the current bucket, in UNIX format. This is the same time as 'earliestTimeStrfTime' in UNIX format.
        :param earliest_time: The earliest_time of this SingleTimeBucket.
        :type: float
        """
        self._attrs["earliestTime"] = earliest_time

    @property
    def earliest_time_strf_time(self) -> "str":
        """ Gets the earliest_time_strf_time of this SingleTimeBucket.
        The timestamp of the earliest event in the current bucket, in UTC format with seconds. For example 2019-01-25T13:15:30Z, which follows the ISO-8601 (%FT%T.%Q) format.
        """
        return self._attrs.get("earliestTimeStrfTime")

    @earliest_time_strf_time.setter
    def earliest_time_strf_time(self, earliest_time_strf_time: "str"):
        """Sets the earliest_time_strf_time of this SingleTimeBucket.
        The timestamp of the earliest event in the current bucket, in UTC format with seconds. For example 2019-01-25T13:15:30Z, which follows the ISO-8601 (%FT%T.%Q) format.
        :param earliest_time_strf_time: The earliest_time_strf_time of this SingleTimeBucket.
        :type: str
        """
        self._attrs["earliestTimeStrfTime"] = earliest_time_strf_time

    @property
    def is_finalized(self) -> "bool":
        """ Gets the is_finalized of this SingleTimeBucket.
        Specifies if all of the events in the current bucket have been finalized.
        """
        return self._attrs.get("isFinalized")

    @is_finalized.setter
    def is_finalized(self, is_finalized: "bool"):
        """Sets the is_finalized of this SingleTimeBucket.
        Specifies if all of the events in the current bucket have been finalized.
        :param is_finalized: The is_finalized of this SingleTimeBucket.
        :type: bool
        """
        self._attrs["isFinalized"] = is_finalized

    @property
    def total_count(self) -> "int":
        """ Gets the total_count of this SingleTimeBucket.
        The total count of the events in the current bucket.
        """
        return self._attrs.get("totalCount")

    @total_count.setter
    def total_count(self, total_count: "int"):
        """Sets the total_count of this SingleTimeBucket.
        The total count of the events in the current bucket.
        :param total_count: The total_count of this SingleTimeBucket.
        :type: int
        """
        self._attrs["totalCount"] = total_count

    def to_dict(self):
        # Serialize for the wire: omit attributes that were never set (None).
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class TimeBucketsSummary(SSCModel):
    """Summary of search-job events grouped into time buckets."""

    @staticmethod
    def _from_dict(model: dict) -> "TimeBucketsSummary":
        """Wrap an existing attribute dict without copying or validating it."""
        instance = TimeBucketsSummary.__new__(TimeBucketsSummary)
        instance._attrs = model
        return instance

    def __init__(self, is_time_cursored: "bool" = None, buckets: "List[SingleTimeBucket]" = None, cursor_time: "float" = None, event_count: "int" = None, **extra):
        """TimeBucketsSummary"""
        self._attrs = dict()
        if is_time_cursored is not None:
            self._attrs["IsTimeCursored"] = is_time_cursored
        if buckets is not None:
            self._attrs["buckets"] = buckets
        if cursor_time is not None:
            self._attrs["cursorTime"] = cursor_time
        if event_count is not None:
            self._attrs["eventCount"] = event_count
        # Extra (unmodeled) attributes are stored verbatim.
        for k, v in extra.items():
            self._attrs[k] = v

    @property
    def is_time_cursored(self) -> "bool":
        """ Gets the is_time_cursored of this TimeBucketsSummary.
        Specifies if the events are returned in time order.
        """
        return self._attrs.get("IsTimeCursored")

    @is_time_cursored.setter
    def is_time_cursored(self, is_time_cursored: "bool"):
        """Sets the is_time_cursored of this TimeBucketsSummary.
        Specifies if the events are returned in time order.
        :param is_time_cursored: The is_time_cursored of this TimeBucketsSummary.
        :type: bool
        """
        self._attrs["IsTimeCursored"] = is_time_cursored

    @property
    def buckets(self) -> "List[SingleTimeBucket]":
        """ Gets the buckets of this TimeBucketsSummary.
        """
        # BUG FIX: previously iterated self._attrs.get("buckets")
        # unconditionally, raising TypeError when the attribute was never
        # set; now mirrors the other getters and returns None in that case.
        buckets = self._attrs.get("buckets")
        if buckets is None:
            return None
        return [SingleTimeBucket._from_dict(i) for i in buckets]

    @buckets.setter
    def buckets(self, buckets: "List[SingleTimeBucket]"):
        """Sets the buckets of this TimeBucketsSummary.
        :param buckets: The buckets of this TimeBucketsSummary.
        :type: List[SingleTimeBucket]
        """
        self._attrs["buckets"] = buckets

    @property
    def cursor_time(self) -> "float":
        """ Gets the cursor_time of this TimeBucketsSummary.
        Identifies where the cursor is, in processing the events. The 'cursorTime' is a timestamp specified in UNIX time.
        """
        return self._attrs.get("cursorTime")

    @cursor_time.setter
    def cursor_time(self, cursor_time: "float"):
        """Sets the cursor_time of this TimeBucketsSummary.
        Identifies where the cursor is, in processing the events. The 'cursorTime' is a timestamp specified in UNIX time.
        :param cursor_time: The cursor_time of this TimeBucketsSummary.
        :type: float
        """
        self._attrs["cursorTime"] = cursor_time

    @property
    def event_count(self) -> "int":
        """ Gets the event_count of this TimeBucketsSummary.
        The number of events processed at the 'cursorTime'.
        """
        return self._attrs.get("eventCount")

    @event_count.setter
    def event_count(self, event_count: "int"):
        """Sets the event_count of this TimeBucketsSummary.
        The number of events processed at the 'cursorTime'.
        :param event_count: The event_count of this TimeBucketsSummary.
        :type: int
        """
        self._attrs["eventCount"] = event_count

    def to_dict(self):
        """Return only the attributes that are actually set."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class StatusEnum(str, Enum):
    """Status values accepted when PATCHing a search job."""

    CANCELED = "canceled"
    FINALIZED = "finalized"

    @staticmethod
    def from_value(value: str):
        """Return the member whose value equals *value*, or None if unknown."""
        for member in StatusEnum:
            if member.value == value:
                return member
class UpdateJob(SSCModel):
    """PATCH payload used to cancel or finalize an existing search job."""

    @staticmethod
    def _from_dict(model: dict) -> "UpdateJob":
        """Wrap an existing attribute dict without re-validating it."""
        instance = UpdateJob.__new__(UpdateJob)
        instance._attrs = model
        return instance

    def __init__(self, status: "str", **extra):
        """UpdateJob"""
        attrs = dict()
        if status is not None:
            attrs["status"] = status
        attrs.update(extra)
        self._attrs = attrs

    @property
    def status(self) -> "StatusEnum":
        """ Gets the status of this UpdateJob.
        The status to PATCH to an existing search job. The only status values you can PATCH are 'canceled' and 'finalized'. You can PATCH the 'canceled' status only to a search job that is running.
        """
        return StatusEnum.from_value(self._attrs.get("status"))

    @status.setter
    def status(self, status: "str"):
        """Sets the status of this UpdateJob.
        The status to PATCH to an existing search job. The only status values you can PATCH are 'canceled' and 'finalized'. You can PATCH the 'canceled' status only to a search job that is running.
        :param status: The status of this UpdateJob.
        :type: str
        """
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")
        # Enum members are stored by value; strings are trusted as-is.
        self._attrs["status"] = status.value if isinstance(status, Enum) else status

    def to_dict(self):
        """Return only the attributes that are actually set."""
        return {key: val for (key, val) in self._attrs.items() if val is not None}
| StarcoderdataPython |
9668416 | <filename>src/commercetools/testing/predicates.py<gh_stars>0
import ast
import logging
import operator
import re
import typing
import marshmallow
logger = logging.getLogger(__name__)
token_pat = re.compile(
r"""
(
(?:\d+\.\d+) | # Floats
(?:\d+) | # Integers
"(?:\\.|[^"\\])*" | # Double quoted strings
'(?:\\.|[^'\\])*' | # Single quoted strings
(?:true|false) # Booleans
)
|
(
(?:[a-zA-Z_][a-zA-Z0-9_-]+) # keywords
)
|
(
(?:[<>!=]+) | # symbols
(?:[^\s]) # symbols
)
""",
re.VERBOSE,
)
class Tokenizer:
    """Splits a predicate string into Symbol instances for the Pratt parser.

    Multi-word keywords (e.g. "not in", "contains all") registered via
    register_token_combination() are buffered word by word and emitted as a
    single combined token once complete.
    """

    def __init__(self, parser):
        self.parser = parser
        # Registered multi-word keyword sequences, e.g. [["not", "in"]].
        self.combined_tokens: typing.List[typing.List[str]] = []
        # Generator created by tokenize(); consumed via __next__().
        self._iterator = None

    def register_token_combination(self, tokens: typing.List[str]):
        """Register a multi-word keyword as the list of its component words."""
        self.combined_tokens.append(tokens)

    def tokenize(self, program):
        """Prime the tokenizer with a new input string."""
        self._iterator = self._get_tokens(program)

    def __next__(self):
        return next(self._iterator)

    def _get_tokens(self, program):
        """Yield a Symbol for each lexical token found in *program*."""
        # Buffer of keyword words that may form a registered combination.
        buf: typing.List[str] = []
        for match in token_pat.finditer(program):
            # Exactly one of the three regex groups is non-None per match.
            value, identifier, symbol = match.groups()
            if identifier:
                for combination in self.combined_tokens:
                    i = len(buf)
                    if combination[i] == identifier:
                        # Word extends a (possibly partial) combination.
                        buf.append(identifier)
                        if buf == combination:
                            # Combination complete: emit as one joined token.
                            identifier = " ".join(buf)
                            buf.clear()
                        break
                else:
                    # No registered combination continues with this word:
                    # flush the buffered words as plain name tokens.
                    for item in buf:
                        yield self.get_symbol(item, NameToken)
                    buf.clear()
                if buf:
                    # Still accumulating a partial combination.
                    continue
                yield self.get_symbol(identifier, NameToken)
            else:
                if buf:
                    # A non-keyword token interrupts a partial combination.
                    for item in buf:
                        yield self.get_symbol(item, NameToken)
                    buf.clear()
                if value:
                    yield LiteralToken(self.parser, value)
                elif symbol:
                    yield self.get_symbol(symbol)
        # End-of-input sentinel.  NOTE(review): this yields the registered
        # class itself rather than an instance; downstream code only reads
        # its identifier/lbp class attributes, so that appears intentional.
        yield self.parser.symbol_table["(end)"]

    def get_symbol(self, symbol, fallback=None):
        """Instantiate the Symbol subclass registered for *symbol*.

        Falls back to a NameToken when *fallback* is truthy, otherwise
        raises ValueError for unknown symbols.
        """
        s = self.parser.symbol_table.get(symbol)
        if s:
            return s(self.parser, symbol)
        elif fallback:
            return NameToken(self.parser, symbol)
        else:
            raise ValueError("No symbol found for %r", symbol)
class Symbol:
    """Base class for all Pratt-parser tokens.

    Concrete symbol classes are generated dynamically by Parser.define(),
    which sets the ``identifier`` (token text) and ``lbp`` (left binding
    power) class attributes.
    """

    identifier: typing.Optional[str] = None
    lbp: typing.Optional[int] = None

    def __init__(self, parser, value):
        self.value = value
        self.parser = parser
        # Operands, filled in by nud()/led() of subclasses.
        self.first = None
        self.second = None

    def nud(self):
        """Null denotation: called when the symbol starts an expression."""
        raise SyntaxError(
            "Syntax error (%r, token=%r)." % (self.identifier, self.__class__.__name__)
        )

    def led(self, left):
        """Left denotation: called when the symbol follows an expression."""
        # BUG FIX: the message formatted `self.id`, which only exists on
        # classes patched through Parser.define()'s decorator wrapper; plain
        # instances raised AttributeError instead of the intended
        # SyntaxError.  Use the `identifier` class attribute instead.
        raise SyntaxError("Unknown operator (%r)." % self.identifier)

    def __repr__(self):
        return "Symbol(identifier=%r, value=%r)" % (self.identifier, self.value)
class Parser:
    """Pratt (top-down operator-precedence) parser over Symbol tokens."""

    def __init__(self):
        # Maps token text (e.g. "and", "(") to its generated Symbol subclass.
        self.symbol_table: typing.Dict[str, typing.Type[Symbol]] = {}
        # One-token lookahead slot used by peek().
        self._peek = None
        self.tokenizer = Tokenizer(self)

    def parse(self, program):
        """Prime the tokenizer and load the first token into self.token.

        Note: this does NOT build a tree; call expression() afterwards.
        """
        self.tokenizer.tokenize(program)
        self.advance()
        assert self.token is not None

    def next(self):
        return next(self.tokenizer)

    def expression(self, rbp=0):
        """Parse an expression whose operators bind tighter than *rbp*."""
        t = self.token
        self.advance()
        # The consumed token starts the expression (its "null denotation").
        left = t.nud()
        # While the upcoming operator binds tighter than rbp, let it absorb
        # `left` as its left operand ("left denotation").
        while rbp < self.token.lbp:
            t = self.token
            self.advance()
            left = t.led(left)
        return left

    def advance(self, identifier=None):
        """Move to the next token, optionally asserting the current one."""
        if identifier and self.token.identifier != identifier:
            raise SyntaxError(
                "Expected %r, received %r" % (identifier, self.token.identifier)
            )
        if self._peek:
            # Consume the lookahead first if present.
            self.token = self._peek
            self._peek = None
        else:
            self.token = self.next()

    def peek(self):
        """Return the next token without consuming it."""
        if self._peek:
            return self._peek
        self._peek = self.next()
        return self._peek

    def define(self, sid, bp=0, symbol_class=Symbol):
        """Register symbol *sid* with binding power *bp*.

        A fresh subclass of *symbol_class* is created with the identifier and
        binding power baked in.  The returned wrapper allows use as a
        decorator to substitute a custom class for *sid*.
        """
        symbol_table = self.symbol_table
        sym = symbol_table[sid] = type(
            symbol_class.__name__, (symbol_class,), {"identifier": sid, "lbp": bp}
        )

        def wrapper(val):
            val.id = sid
            val.lbp = sym.lbp
            symbol_table[sid] = val
            return val

        # Multi-word symbols (e.g. "not in") must be re-combined by the
        # tokenizer before table lookup.
        if " " in sid:
            self.tokenizer.register_token_combination(sid.split())
        return wrapper
class Infix(Symbol):
    """Binary (infix) operator symbol.

    ast() turns "and"/"or" into ast.BoolOp nodes and every other operator
    into a call ``filter_field(obj, <attribute path>, <op text>, <rhs>)``.
    """

    rightAssoc = False
    # NOTE(review): _operator_map appears unused -- ast() passes the raw
    # operator text (self.value) to filter_field instead of these nodes.
    # Confirm before removing.
    _operator_map = {"in": ast.In(), ">": ast.Gt(), "<": ast.Lt(), "=": ast.Eq()}
    _logical_map = {"and": ast.And(), "or": ast.Or()}

    def led(self, left):
        self.first = left
        # Right-associative operators bind with one less power on the right.
        rbp = self.lbp - int(self.rightAssoc)
        self.second = self.parser.expression(rbp)
        return self

    def __repr__(self):
        return "<'%s'>(%s, %s)" % (self.value, self.first, self.second)

    def ast(self, context=None):
        """Build the Python AST node for this (sub)expression."""
        lhs = self.first.ast(context)
        if self.second:
            rhs = self.second.ast(context)
            if self.value in self._logical_map:
                # Logical connective: fold both sides into one BoolOp.
                return ast.BoolOp(op=self._logical_map[self.value], values=[lhs, rhs])
            else:
                # Comparison: the context stack carries enclosing attribute
                # names (pushed by LParen.ast), so the full path is
                # stack + this operator's left-hand attribute name.
                path = list(context.stack)
                path.append(self.first.value)
                return ast.Call(
                    func=ast.Name(id="filter_field", ctx=ast.Load()),
                    args=[
                        ast.Name(id="obj", ctx=ast.Load()),
                        ast.List(elts=[ast.Str(s=i) for i in path], ctx=ast.Load()),
                        ast.Str(s=self.value),
                        rhs,
                    ],
                    keywords=[],
                )
        # NOTE(review): fallback when no right-hand side was parsed; emits a
        # bare Name("not") load which looks suspicious -- verify intent.
        return ast.Name(id="not", ctx=ast.Load())
class InfixR(Infix):
    """Right-associative infix operator; also usable in prefix position
    (registered for "not")."""

    rightAssoc = True

    def nud(self):
        # Prefix usage: bind the following operand with maximum power.
        self.first = self.parser.expression(2000)
        return self

    def ast(self, context=None):
        # Prefix "not": negate the operand expression.
        return ast.UnaryOp(op=ast.Not(), operand=self.first.ast(context))
class BinOp(Symbol):
    """NOTE(review): appears unused/dead.  Its __init__ does not match the
    Symbol(parser, value) signature and never calls super().__init__(), so
    instantiating it through the parser machinery would fail.  Candidate
    for removal."""

    def __init__(self, value):
        self.lhs = None
        self.rhs = None

    def __repr__(self):
        out = [self.lhs, self.identifier, self.rhs]
        out = map(str, filter(None, out))
        return "BinOp(" + " ".join(out) + ")"
class Prefix(Symbol):
    """Prefix operator symbol: binds the following operand via nud()."""

    def led(self):
        # NOTE(review): Parser.expression() invokes led(left) with one
        # argument, which this signature cannot accept (TypeError).  Prefix
        # symbols are only reached through nud(), so this path looks dead --
        # confirm before changing the signature.
        return self

    def nud(self):
        """Bind the following operand with maximum binding power."""
        self.first = self.parser.expression(2000)
        return self

    def ast(self, context=None):
        """Delegate AST construction to the operand."""
        # BUG FIX: removed an unreachable `return ast.UnaryOp(...)` statement
        # that followed this return in the original code (dead code).
        return self.first.ast(context)

    def __repr__(self):
        return "<'%s'>(%s)" % (self.value, repr(self.first))
class LParen(Symbol):
    """Parenthesis symbol: grouping, tuple literals and nested-attribute
    scoping (``name(...)``)."""

    def led(self, left):
        # Call-like usage: name( expr ... ) -- *left* is the attribute name;
        # the parenthesised expressions are collected into self.second.
        self.first = left
        self.second = []
        while self.parser.token.identifier != ")":
            self.second.append(self.parser.expression())
        self.parser.advance(")")
        return self

    def nud(self):
        # Grouping or tuple literal: ( expr [, expr ...] )
        self.first = []
        self.second = []
        comma = False
        if self.parser.token.identifier != ")":
            while 1:
                if self.parser.token.identifier == ")":
                    break
                self.first.append(self.parser.expression())
                if self.parser.token.identifier != ",":
                    break
                comma = True
                self.parser.advance(",")
        self.parser.advance(")")
        if not self.first or comma:
            return self  # tuple
        else:
            # Plain grouping: unwrap and return the single inner expression.
            return self.first[0]

    def ast(self, context=None):
        if not self.second:
            # Tuple literal of the collected elements.
            return ast.Tuple(
                elts=[item.ast(context) for item in self.first], ctx=ast.Load()
            )
        # Nested attribute scope, e.g. address(city = "x"): push the outer
        # attribute name so inner comparisons build the full path.
        context.stack.append(self.first.value)
        node = self.second[0].ast(context)
        context.stack.pop()
        return node

    def __repr__(self):
        out = [self.first, self.second]
        out = map(repr, filter(None, out))
        return "LParen(" + " ".join(out) + ")"
class LiteralToken(Symbol):
    """Literal value token: integer, float, boolean or quoted string."""

    identifier = "(literal)"
    lbp = 0

    def nud(self):
        return self

    def ast(self, context=None):
        """Convert the raw token text into the matching Python AST node."""
        if self.value.isdigit():
            return ast.Num(n=int(self.value))
        try:
            # BUG FIX: the tokenizer produces float tokens (\d+\.\d+), but
            # they previously fell through to the string branch, where the
            # quote-stripping slice mangled them (e.g. "1.5" -> ".").
            return ast.Num(n=float(self.value))
        except ValueError:
            pass
        if self.value in ["true", "false"]:
            return ast.NameConstant(self.parse_bool(self.value))
        # Quoted string: strip the surrounding quote characters.
        return ast.Str(s=self.value[1:-1])

    def parse_bool(self, value) -> bool:
        return value == "true"
class LogicalToken(Symbol):
    """Logical token placeholder.

    NOTE(review): never registered or instantiated in this module --
    possibly dead code.
    """

    identifier = "(logical)"
    lbp = 0

    def nud(self):
        return self
class NameToken(Symbol):
    """Identifier token (attribute/field name)."""

    identifier: str = "(name)"
    lbp: int = 0

    def nud(self):
        return self

    def ast(self, context=None):
        # A bare name becomes an ast.Name load.
        return ast.Name(id=self.value, ctx=ast.Load())
class Constant(Symbol):
    """Keyword constant token (registered for "defined")."""

    identifier = "(constant)"
    lbp = 0

    def nud(self):
        return self

    def ast(self, context=None):
        # Always renders as the Python constant None.
        return ast.Name(id="None", ctx=ast.Load())
class FunctionCall(Symbol):
    """Function-style symbol (registered for "circle").

    nud() parses the parenthesised argument tuple; ast() prepends the
    function name to the tuple's elements.
    """

    lbp = 0

    def nud(self):
        self.first = self.parser.expression(2000)
        return self

    def ast(self, context):
        node = self.first.ast()
        # Prepend the function name (e.g. "circle") to the argument tuple.
        node.elts.insert(0, ast.Str(s=self.value))
        return node
# Module-level parser configured with the predicate grammar.
# Binding powers: comparisons 60, "not in" 70, unary "not" 90, and/or 30,
# opening parenthesis 90; closers/separators ((")", ",", "(end)") bind 0.
parser = Parser()
parser.define("<>", 60, Infix)
parser.define("in", 60, Infix)
parser.define("not in", 70, Infix)
parser.define("is", 60, Infix)
parser.define("is not", 60, Infix)
parser.define("defined", 60, Constant)
parser.define("within", 60, Infix)
parser.define("contains any", 60, Infix)
parser.define("contains all", 60, Infix)
parser.define("not", 90, InfixR)
parser.define("=", 60, Infix)
parser.define("and", 30, Infix)
parser.define("or", 30, Infix)
parser.define("(", 90, LParen)
parser.define("circle", 60, FunctionCall)
parser.define(")")
parser.define(">", 60, Infix)
parser.define("<", 60, Infix)
parser.define(",")
parser.define("(end)")
class Context:
    """Carries the stack of enclosing attribute names while the parsed
    expression tree is converted into a Python AST."""

    def __init__(self):
        self.stack: typing.List[str] = []

    def __repr__(self):
        return "Context(stack=%r)" % self.stack
class PredicateFilter:
    """Compiles a commercetools predicate string into a reusable matcher.

    The predicate is parsed into a Python AST (see Parser/Symbol above),
    compiled once in __init__, and evaluated per object in match().
    """

    # Maps predicate operator text to a Python comparison function.
    # NOTE(review): "=<" looks like a typo for "<=" (the grammar above only
    # registers "<" and ">") -- confirm against the predicate spec before
    # changing.
    operators = {
        "<": operator.lt,
        "=<": operator.le,
        ">": operator.gt,
        ">=": operator.ge,
        "=": operator.eq,
        "!=": operator.ne,
        "is not": operator.is_not,
    }

    def __init__(self, predicate, schema):
        self.predicate = predicate
        self.schema = schema
        ast_node = self.parse(predicate)
        self._code = compile(ast_node, "internal.py", mode="eval")
        # BUG FIX: the AST dump was passed as a lazy %-style argument without
        # a placeholder in the format string, so it was never rendered.
        logger.info("Compiled python code: %s", ast.dump(ast_node))

    def match(self, obj):
        """Return True when *obj* (a dict) satisfies the predicate."""
        try:
            return eval(self._code, {}, {"obj": obj, "filter_field": self.filter_field})
        except TypeError as exc:
            # Comparison against an incompatible or missing value: no match.
            return False

    def _get_schema_fields(self, schema):
        """Map serialized field names (data_key aware) to marshmallow fields."""
        result = {}
        for name, field in schema._declared_fields.items():
            key = field.data_key or name
            result[key] = field
        return result

    def filter_field(
        self,
        obj: typing.Dict[typing.Any, typing.Any],
        path: typing.List[str],
        operator_value: str,
        value: typing.Any,
        schema=None,
    ):
        """Evaluate one comparison: does obj's value at *path* satisfy it?

        Walks *path* through the marshmallow schema, deserializing both the
        object's value and the literal so that they compare with matching
        types.  A list value matches when any of its elements matches.
        """
        if schema is None:
            schema = self.schema
        schema_field = None
        for i, key in enumerate(path):
            fields = self._get_schema_fields(schema)
            schema_field = self.case_insensitive_get(fields, key, None)
            # Query field doesn't exist
            if schema_field is None:
                raise ValueError("No field %s on schema %s" % (key, schema))
            if isinstance(schema_field, marshmallow.fields.Nested):
                schema = schema_field.schema
            # Get value
            if isinstance(obj, dict):
                obj = self.case_insensitive_get(obj, key, {})
                if isinstance(schema_field, marshmallow.fields.Dict):
                    # Remaining path segments index into the raw dict below.
                    path = path[i + 1 :]
                    break
            elif isinstance(obj, list):
                # Any element of the list may satisfy the comparison.
                return any(
                    self.filter_field(
                        child_doc, path[i:], operator_value, value, schema=schema
                    )
                    for child_doc in obj
                )
        if isinstance(schema_field, marshmallow.fields.Dict):
            obj = schema_field._deserialize(obj, None, None)
            assert len(path) == 1
            obj = self.case_insensitive_get(obj, path[0], None)
        else:
            if obj is not None:
                obj = schema_field._deserialize(obj, None, None)
            if value is not None:
                value = schema_field._deserialize(value, None, None)
        # Case insensitive comparison for strings
        if isinstance(obj, str):
            obj = obj.lower()
        if isinstance(value, str):
            value = value.lower()
        op = self.operators[operator_value]
        return op(obj, value)

    def case_insensitive_get(self, mapping, key, default=None):
        """dict.get() with case-insensitive key matching.

        (Fixed the misspelled ``sef`` parameter and renamed ``dict`` so it
        no longer shadows the builtin; both were positional, so callers are
        unaffected.)
        """
        for k, v in mapping.items():
            if k.lower() == key.lower():
                return v
        return default

    def parse(self, program):
        """Parse *program* and return a compilable ast.Expression."""
        # parser.parse() only primes the tokenizer (it returns None);
        # expression() builds the actual tree.
        parser.parse(program)
        expr = parser.expression()
        context = Context()
        node = ast.Expression(body=expr.ast(context))
        # compile() requires location info on every node.
        for child in ast.walk(node):
            child.lineno = 0
            child.col_offset = 0
        return node
| StarcoderdataPython |
394223 | <reponame>defnngj/movie-website
"""
author: bugmaster
data: 2021-09-25
function: 爬取 豆瓣 top250 电影信息
"""
import os
import sqlite3
from requests_html import HTMLSession
# Shared HTML session reused for both page fetches and image downloads.
session = HTMLSession()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Downloaded poster images are stored under <project>/static/images.
IMG_DIR = os.path.join(BASE_DIR, "static", "images")
def save_db(name, img_url, grade, number):
    """
    Persist one movie record into the local SQLite database (dev.db).

    :param name: movie title
    :param img_url: image file name as stored on disk
    :param grade: rating score
    :param number: number of ratings
    """
    # BUG FIX: the original interpolated values into the SQL string with an
    # f-string, which is vulnerable to SQL injection and crashed on titles
    # containing quotes.  Use a parameterized query; also drop the debug
    # print(conn) and guarantee the connection is closed.
    conn = sqlite3.connect('dev.db')
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute(
                "INSERT INTO movie (name, img, grade, number) VALUES (?, ?, ?, ?)",
                (name, img_url, grade, number),
            )
    finally:
        conn.close()
def download_img(img_path, img_name):
    """
    Download an image and save it under the local static/images directory.

    :param img_path: remote URL of the image
    :param img_name: file name to save under IMG_DIR
    :return: None
    """
    try:
        req_pic = session.get(img_path)
        save_pic = os.path.join(IMG_DIR, img_name)
        # Write the raw response bytes to disk.
        with open(save_pic, "wb") as fp:
            fp.write(req_pic.content)
    except Exception as msg:
        # Best-effort: report the failure and keep scraping.
        print("下载中出现异常:%s" % str(msg))
def get_top_250():
    """
    Scrape the Douban Top 250 movie chart (10 pages of 25 entries each):
    for every movie, download its poster and persist the record via save_db.
    """
    for page in range(0, 250, 25):  # 10 pages
        print(f'https://movie.douban.com/top250?start={page}&filter=')
        r = session.get(f'https://movie.douban.com/top250?start={page}&filter=')
        # CSS selectors against the listing page:
        imgs = r.html.find('a > img')  # poster <img> tags (alt = title)
        stars = r.html.find('.rating_num')  # rating score per movie
        evaluates = r.html.find('.star > span:nth-child(4)')  # vote-count spans
        for i in range(25):
            name = imgs[i].attrs["alt"]
            img_path = imgs[i].attrs["src"]
            grade = stars[i].text
            # Strip the trailing 3-character suffix (e.g. "人评价") to get
            # the numeric vote count.
            number = int(evaluates[i].text[0:-3])
            # Reuse the remote file name as the local image name.
            img_name = img_path.split("/")[-1]
            download_img(img_path, img_name)
            save_db(name, img_name, grade, number)
if __name__ == '__main__':
get_top_250()
| StarcoderdataPython |
11359287 | <reponame>Guilherme-Lanna/Python
from datetime import date

# Conscription check: compare the user's birth year against the current
# year and report whether enlistment is overdue, due now, or upcoming.
print('Me de algumas informações para eu saber sobre seu alistamento militar!')
current_year = date.today().year
birth_year = int(input('Qual o seu ano de nascimento? '))
age = current_year - birth_year
if age < 18:
    print(f'Você deve se alistar daqui {18 - age} anos')
elif age == 18:
    print('Está na hora de você realizar seu alistamento')
else:
    # age is an int, so "greater than 18" is the only remaining case.
    print(f'Você deveria ter se alistado a {age - 18} anos atras')
51848 | <filename>app/db/models.py
import datetime
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
db = SQLAlchemy()
class Users(db.Model):
    """Registered user account; passwords are stored as salted hashes."""

    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String, unique=True, nullable=False)
    # Salted password hash (never the plaintext password).
    password = db.Column(db.String, nullable=False)
    email = db.Column(db.String, unique=True, nullable=False)
    is_admin = db.Column(db.Boolean, nullable=False, default=False)
    verify_code = db.Column(db.String, unique=True, nullable=True)  # for future use
    api_key = db.Column(db.String, unique=True)
    urls = db.relationship("urls")
    register_time = db.Column(
        db.DateTime, default=datetime.datetime.now, nullable=False
    )

    def __init__(self, username, password, email, is_admin=False) -> None:
        self.username = username
        # BUG FIX: this line was corrupted ("<PASSWORD>password_hash(...)",
        # a syntax error); restore the werkzeug salted-hash call.
        self.password = generate_password_hash(password)
        self.email = email
        self.is_admin = is_admin

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password, password)
class urls(db.Model):
    """Shortened-URL record owned by a user."""

    __tablename__ = "urls"

    id = db.Column(db.Integer, primary_key=True)
    # Owning user.
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
    # Presumably the original (long) URL -- confirm against the shortener views.
    old = db.Column(db.String, nullable=False)
    # Presumably the shortened identifier; must be unique.
    new = db.Column(db.String, nullable=False, unique=True)
    # Usage counter.
    use = db.Column(db.Integer, nullable=False, default=0)
    create_by = db.Column(db.String, nullable=False, default="web")  # web or api
    created_time = db.Column(db.DateTime, default=datetime.datetime.now, nullable=False)

    def __init__(self, user_id, old, new) -> None:
        self.user_id = user_id
        self.old = old
        self.new = new
| StarcoderdataPython |
9610633 | #!d:/python27/python -u
"""
Qt-for-Python (PySide2) GUI drawing framework
"""
import sys
import math
from PySide2 import QtGui, QtCore, QtWidgets
__author__ = '<NAME>'
__copyright__ = '2018'
__credits__ = []
__license__ = "MIT"
__version__ = "0.3"
__email__ = '<EMAIL>'
__status__ = 'Prototype'
class GraphicsItem(QtWidgets.QGraphicsItem):
    """
    Selectable, movable ellipse item with hover feedback.

    From the QT docs:
    To write your own graphics item, you first create a subclass
    of QGraphicsItem, and then start by implementing its two pure
    virtual public functions: boundingRect(), which returns an estimate
    of the area painted by the item, and paint(),
    which implements the actual painting.
    """

    def __init__(self, rect, pen, brush, tooltip='No tip here', parent=None):
        # call constructor of QGraphicsItem
        super(GraphicsItem, self).__init__()
        # Item starts immovable; movability is switched on after the first
        # selection (see paint()).
        self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, False)
        self.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, True)
        self.setFlag(QtWidgets.QGraphicsItem.ItemIsFocusable, True)
        self.setAcceptHoverEvents(True)
        self.pen = pen
        # Pen width pads the bounding rect below.
        pw = self.pen.widthF()
        # NOTE(review): this default blue brush is immediately overwritten by
        # the caller-supplied brush on the next line -- dead assignment.
        self.brush = QtGui.QBrush(QtCore.Qt.blue)
        self.brush = brush
        self.setToolTip(tooltip)
        self.parent = parent
        # rect: the ellipse geometry (x, y, w, h); focusrect: grown by the
        # pen width so the painted outline stays inside the bounding box.
        self.rect = QtCore.QRectF(rect[0], rect[1], rect[2], rect[3])
        self.focusrect = QtCore.QRectF(rect[0]-pw/2, rect[1]-pw/2,
                                       rect[2]+pw, rect[3]+pw)

    def mousePressEvent(self, event):
        self.setCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
        # set item as topmost in stack
        self.setZValue(self.parent.items()[0].zValue() + 1)
        self.setSelected(True)
        # propagate event
        QtWidgets.QGraphicsItem.mousePressEvent(self, event)

    def mouseReleaseEvent(self, event):
        self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
        # propagate event
        QtWidgets.QGraphicsItem.mouseReleaseEvent(self, event)

    def mouseMoveEvent(self, event):
        # propagate event
        QtWidgets.QGraphicsItem.mouseMoveEvent(self, event)

    def boundingRect(self):
        # bounding box rect shall be set to the bounds of the item. Due to the
        # line thickness this rect is bigger than the rect of the ellipse or rect, etc.
        return self.focusrect

    def paint(self, painter, option, widget):
        """Draw the ellipse; when selected, also draw the focus rectangle
        and make the item movable."""
        painter.setBrush(self.brush)
        painter.setPen(self.pen)
        painter.drawEllipse(self.rect)
        if self.isSelected():
            self.drawFocusRect(painter)
            self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, True)

    def drawFocusRect(self, painter):
        """Draw a dotted black rectangle marking the selected item."""
        self.focusbrush = QtGui.QBrush()
        self.focuspen = QtGui.QPen(QtCore.Qt.DotLine)
        self.focuspen.setColor(QtCore.Qt.black)
        self.focuspen.setWidthF(1.5)
        painter.setBrush(self.focusbrush)
        painter.setPen(self.focuspen)
        painter.drawRect(self.focusrect)

    def hoverEnterEvent(self, event):
        # Hover feedback: open-hand cursor and dotted outline.
        self.setCursor(QtGui.QCursor(QtCore.Qt.OpenHandCursor))
        self.pen.setStyle(QtCore.Qt.DotLine)
        # propagate event
        QtWidgets.QGraphicsItem.hoverEnterEvent(self, event)

    def hoverLeaveEvent(self, event):
        self.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        self.pen.setStyle(QtCore.Qt.SolidLine)
        # propagate event
        QtWidgets.QGraphicsItem.hoverLeaveEvent(self, event)
class GraphicsScene (QtWidgets.QGraphicsScene):
    """Scene holding the GraphicsItem instances; 400x400 logical rect
    centered on the origin."""

    def __init__ (self, parent=None):
        # call constructor of QGraphicsScene
        super(GraphicsScene, self).__init__(parent)
        self.parent = parent
        self.setSceneRect(-200, -200, 400, 400)

    def mousePressEvent(self, event):
        # Clicking empty scene space clears the current selection.
        self.clearSelection()
        # propagate event
        super(GraphicsScene, self).mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        # propagate event
        super(GraphicsScene, self).mouseReleaseEvent(event)

    def mouseMoveEvent(self, event):
        super(GraphicsScene, self).mouseMoveEvent(event)

    def addGraphicsItem(self, rect, pw, pc, bc, tooltip):
        """Create a GraphicsItem and add it to the scene.

        :param rect: (x, y, width, height) of the ellipse
        :param pw: pen (outline) width
        :param pc: pen color as an (r, g, b) tuple
        :param bc: brush (fill) color as an (r, g, b) tuple
        :param tooltip: tooltip text for the item
        """
        pen = QtGui.QPen(QtCore.Qt.SolidLine)
        pen.setColor(QtGui.QColor(pc[0], pc[1], pc[2], 255))
        pen.setWidth(pw)
        brush = QtGui.QBrush(QtGui.QColor(bc[0], bc[1], bc[2], 255))
        self.item = GraphicsItem(rect, pen, brush, tooltip, self)
        # NOTE(review): self.parent is the MainWindow and its .scene is this
        # object, so self.addItem(self.item) would be equivalent -- confirm.
        self.parent.scene.addItem(self.item)
class GraphicsView (QtWidgets.QGraphicsView):
    """View with antialiased rendering, keyboard zoom (+/-, Home to reset)
    and mouse-wheel zoom anchored under the cursor."""

    def __init__(self, parent=None):
        # call constructor of QGraphicsView
        super(GraphicsView, self).__init__(parent)
        # set QGraphicsView attributes
        self.setRenderHints(QtGui.QPainter.Antialiasing |
                            QtGui.QPainter.HighQualityAntialiasing)
        self.setViewportUpdateMode(QtWidgets.QGraphicsView.FullViewportUpdate)
        self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorViewCenter)
        self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        # save home position
        # NOTE(review): matrix()/setMatrix() are legacy QMatrix-based APIs;
        # transform()/setTransform() are the modern equivalents -- verify
        # availability under the targeted PySide2 version.
        self.home = self.matrix()
        # set background style: vertical white-to-blue gradient
        self.viewstyle = \
            'background-color:QLinearGradient( \
            x1: 0.0, y1: 0.0, x2: 0.0, y2: 1.0, \
            stop: 0.3 white, \
            stop: 1.0 blue); \
            '
        self.setStyleSheet(self.viewstyle)

    def keyPressEvent(self, event):
        """Esc quits, +/- zooms, Home restores the initial transform."""
        key = event.key()
        if key == QtCore.Qt.Key_Escape:
            # NOTE(review): QtGui.qApp is a legacy alias; under PySide2 the
            # instance is QtWidgets.QApplication.instance() -- verify this
            # code path at runtime.
            sys.exit(QtGui.qApp.quit())
        elif key == QtCore.Qt.Key_Plus:
            self.scaleView(1.2)
        elif key == QtCore.Qt.Key_Minus:
            self.scaleView(1 / 1.2)
        elif key == QtCore.Qt.Key_Home:
            self.setMatrix(self.home)
        else:
            # propagate event
            super(GraphicsView, self).keyPressEvent(event)

    def wheelEvent(self, event):
        # NOTE(review): QWheelEvent.delta() is deprecated in Qt5 in favour of
        # angleDelta() -- confirm this still works under PySide2.
        self.scaleView(math.pow(2.0, -event.delta() / 500.0))
        # propagate event
        super(GraphicsView, self).wheelEvent(event)

    def scaleView(self, factor):
        """Zoom by *factor*, clamped so the total scale stays in 0.05..50."""
        f = self.matrix().scale(factor, factor). \
            mapRect(QtCore.QRectF(0, 0, 1, 1)).width()
        if f < 0.05 or f > 50:
            return
        self.scale(factor, factor)
class CentralWidget(QtWidgets.QWidget):
    """Central widget: a horizontal splitter holding a toolbox (left, 1/5
    of the width) and the graphics view (right, 4/5)."""

    def __init__(self, parent=None):
        # call constructor of QWidget
        super(CentralWidget, self).__init__(parent)
        self.parent = parent
        # create toolbox widget for left side of splitter
        self.toolBox = QtWidgets.QToolBox()
        # NOTE(review): QToolBox.addItem returns an int index, so these
        # attributes hold indices, not widgets -- confirm that is intended.
        self.item1 = self.toolBox.addItem(QtWidgets.QWidget(), "Item 1")
        self.item2 = self.toolBox.addItem(QtWidgets.QWidget(), "Item 2")
        self.item3 = self.toolBox.addItem(QtWidgets.QWidget(), "Item 3")
        self.item4 = self.toolBox.addItem(QtWidgets.QWidget(), "Item 4")
        self.toolBox.setItemToolTip(0, 'Mal sehn ... aus Item 1')
        icon = QtGui.QIcon('icons/document-open.png')
        self.toolBox.setItemIcon(2, icon)
        self.toolBox.setCurrentIndex(3)
        # split main window horizontally into two panes
        self.splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        self.splitter.addWidget(self.toolBox)
        self.splitter.addWidget(self.parent.view)
        self.splitter.setStretchFactor(0, 1)  # 0 ... left pane, 1 ... fraction of split
        self.splitter.setStretchFactor(1, 4)  # 1 ... right pane, 4 ... fraction of split
        # put splitter in a layout box
        hbox = QtWidgets.QHBoxLayout(self)
        hbox.addWidget(self.splitter)
        self.setLayout(hbox)
        # Application-wide widget style.
        QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))
class MainWindow(QtWidgets.QMainWindow):
    """Application main window: builds scene, view, menus and status bar."""

    def __init__(self, parent=None):
        # call constructor of QMainWindow
        super(MainWindow, self).__init__(parent)
        self.view = GraphicsView(self)
        self.scene = GraphicsScene(self)
        # set the scene
        self.view.setScene(self.scene)
        # add items to the scene
        self.scene.addGraphicsItem((0, 0, 250, 250), 8.0, (255, 0, 0), (0, 0, 255), 'My first item')
        self.scene.addGraphicsItem((-250, -250, 300, 200), 4.0, (0, 0, 0), (255, 0, 100), 'My 2nd item')
        self.scene.addGraphicsItem((200, -200, 200, 200), 10.0, (0, 0, 255), (0, 255, 100), 'My 3rd item')
        # set central widget for the application
        self.setCentralWidget(CentralWidget(self))
        # setup user interface and menus
        self.initUI()

    def initUI(self):
        """Configure geometry, status bar, file-dialog filters and menus."""
        # window size, position and title
        self.setGeometry(600, 100, 1200, 900)
        self.setWindowTitle('Qt for Python (PySide2) Graphics View Framework')
        self.show()
        # create a status bar
        self.statusbar = self.statusBar()
        self.statusbar.setCursor(QtGui.QCursor(QtCore.Qt.ForbiddenCursor))
        self.statusbar.showMessage('Ready.')
        self.statusbarstyle = 'background-color:rgb(200,200,200); color:black'
        self.statusbar.setStyleSheet(self.statusbarstyle)
        # define extension to be filtered in file dialogs
        self.filefilter = \
            'Airfoil mesh files (*.txt *.msh);;Airfoil contour files (*.dat)'
        # create a menu bar
        menubar = self.menuBar()
        # populate menus
        fileMenu = menubar.addMenu('&File')
        icon = QtGui.QIcon('icons/document-open.png')
        actionOpen = QtWidgets.QAction(icon, '&Open', self, shortcut='CTRL+o',
                statusTip='Open file ...', triggered=self.onOpen)
        f_open = fileMenu.addAction(actionOpen)
        icon = QtGui.QIcon('icons/document-save.png')
        actionSave = QtWidgets.QAction(icon, '&Save', self, shortcut='CTRL+s',
                statusTip='Save file ...', triggered=self.onSave)
        f_save = fileMenu.addAction(actionSave)
        icon = QtGui.QIcon('icons/system-log-out.png')
        actionExit = QtWidgets.QAction(icon, '&Exit', self, shortcut='CTRL+x',
                statusTip='Exit application', triggered=self.onExit)
        exit = fileMenu.addAction(actionExit)
        toolMenu = menubar.addMenu('&Tools')
        # Placeholder sub-menus (no actions attached yet).
        prevMenu = toolMenu.addMenu('Preferences')
        calcMenu = toolMenu.addMenu('Calculator')
        helpMenu = menubar.addMenu('&Help')
        icon = QtGui.QIcon('icons/info.png')
        # NOTE(review): QtGui.qApp is a legacy alias; under PySide2 prefer
        # QtWidgets.QApplication.instance() -- verify these actions work.
        self.aboutQtAct = QtWidgets.QAction("About &Qt", self,
                statusTip="Show the Qt library's About box",
                triggered=QtGui.qApp.aboutQt)
        qtabout = helpMenu.addAction(self.aboutQtAct)
        actionAbout = QtWidgets.QAction(icon, '&About', self, shortcut='',
                statusTip='Information about the software and its licensing.',
                triggered=self.onAbout)
        about = helpMenu.addAction(actionAbout)

    def onOpen(self):
        """Show an Open dialog and read the selected file into self.data."""
        (fname, thefilter) = QtWidgets.QFileDialog.getOpenFileName(self,
                'Open file', '.', filter=self.filefilter)
        if not fname: return
        with open(fname, 'r') as f:
            self.data = f.read()

    def onSave(self):
        """Show a Save dialog and write a fixed test string to the file."""
        (fname, thefilter) = QtWidgets.QFileDialog.getSaveFileName(self,
                'Save file', '.', filter=self.filefilter)
        if not fname: return
        with open(fname, 'w') as f:
            f.write('This test worked for me ...')

    def onExit(self):
        """Quit the application."""
        sys.exit(QtGui.qApp.quit())

    def onAbout(self):
        """Show the About dialog."""
        QtWidgets.QMessageBox.about(self, "About Graphics View Framework",
            "Qt-for-Python (PySide2) Graphics View Framework is used as "
            "a starting point for GUIs with a scene containing items"
            "and a specific, modifiable view on them (zoom, pan, etc.).<br><br>"
            "License: " + __license__ + " <br><br>"
            "Copyright (C) 2018 <NAME>.")
def main():
    """Create the QApplication, show the main window and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    # app.setWindowIcon(QtGui.QIcon('icons/some_icon.png'))
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
247385 | # Copyright 2015-2021 <NAME>
#
# This file is part of phonemizer: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Phonemizer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phonemizer. If not, see <http://www.gnu.org/licenses/>.
"""Parse a Scheme expression as a nested list
The main function of this module is lispy.parse, other ones should be
considered private. This module is a dependency of the festival
backend.
From http://www.norvig.com/lispy.html
"""
def parse(program):
    """Read a Scheme expression from a string

    Return a nested list

    Raises an IndexError if the expression is not valid scheme
    (unbalanced parenthesis).

    >>> parse('(+ 2 (* 5 2))')
    ['+', '2', ['*', '5', '2']]
    """
    tokens = _tokenize(program)
    return _read_from_tokens(tokens)
def _tokenize(chars):
"Convert a string of characters into a list of tokens."
return chars.replace('(', ' ( ').replace(')', ' ) ').split()
def _read_from_tokens(tokens):
"Read an expression from a sequence of tokens"
if len(tokens) == 0: # pragma: nocover
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if token == '(':
L = []
while tokens[0] != ')':
L.append(_read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
if token == ')': # pragma: nocover
raise SyntaxError('unexpected )')
return token
| StarcoderdataPython |
9614982 | """Information about Python operators"""
from typing_extensions import Final
# Map from binary operator id to related method name (in Python 3).
op_methods: Final = {
    '+': '__add__',
    '-': '__sub__',
    '*': '__mul__',
    '/': '__truediv__',
    '%': '__mod__',
    'divmod': '__divmod__',
    '//': '__floordiv__',
    '**': '__pow__',
    '@': '__matmul__',
    '&': '__and__',
    '|': '__or__',
    '^': '__xor__',
    '<<': '__lshift__',
    '>>': '__rshift__',
    '==': '__eq__',
    '!=': '__ne__',
    '<': '__lt__',
    '>=': '__ge__',
    '>': '__gt__',
    '<=': '__le__',
    'in': '__contains__',
}

# Reverse map: dunder method name -> operator symbol.
op_methods_to_symbols: Final = {v: k for (k, v) in op_methods.items()}
# The Python 2 era division method shares the '/' symbol.
op_methods_to_symbols['__div__'] = '/'

# Fallback rich-comparison support via __cmp__.
comparison_fallback_method: Final = "__cmp__"
ops_falling_back_to_cmp: Final = {"__ne__", "__eq__", "__lt__", "__le__", "__gt__", "__ge__"}

# Operators that have an augmented-assignment (in-place) variant.
ops_with_inplace_method: Final = {
    "+",
    "-",
    "*",
    "/",
    "%",
    "//",
    "**",
    "@",
    "&",
    "|",
    "^",
    "<<",
    ">>",
}

# Derive '__iadd__' etc. from the binary method names.
# (Set comprehension instead of set(<generator>) -- ruff C401.)
inplace_operator_methods: Final = {"__i" + op_methods[op][2:] for op in ops_with_inplace_method}
# Map each binary dunder method to its reflected ("r") counterpart;
# comparisons map to the comparison that holds with swapped operands.
reverse_op_methods: Final = {
    '__add__': '__radd__',
    '__sub__': '__rsub__',
    '__mul__': '__rmul__',
    '__truediv__': '__rtruediv__',
    '__mod__': '__rmod__',
    '__divmod__': '__rdivmod__',
    '__floordiv__': '__rfloordiv__',
    '__pow__': '__rpow__',
    '__matmul__': '__rmatmul__',
    '__and__': '__rand__',
    '__or__': '__ror__',
    '__xor__': '__rxor__',
    '__lshift__': '__rlshift__',
    '__rshift__': '__rrshift__',
    '__eq__': '__eq__',
    '__ne__': '__ne__',
    '__lt__': '__gt__',
    '__ge__': '__le__',
    '__gt__': '__lt__',
    '__le__': '__ge__',
}

reverse_op_method_names: Final = set(reverse_op_methods.values())

# Suppose we have some class A. When we do A() + A(), Python will only check
# the output of A().__add__(A()) and skip calling the __radd__ method entirely.
# This shortcut is used only for the following methods:
op_methods_that_shortcut: Final = {
    '__add__',
    '__sub__',
    '__mul__',
    '__div__',
    '__truediv__',
    '__mod__',
    '__divmod__',
    '__floordiv__',
    '__pow__',
    '__matmul__',
    '__and__',
    '__or__',
    '__xor__',
    '__lshift__',
    '__rshift__',
}

# Inverse of reverse_op_methods: reflected method -> normal method.
# (Dict comprehension instead of dict(<generator>) -- ruff C402.)
normal_from_reverse_op: Final = {m: n for n, m in reverse_op_methods.items()}
reverse_op_method_set: Final = set(reverse_op_methods.values())
# Map from unary operator symbol to its dunder method name.
unary_op_methods: Final = {
    '-': '__neg__',
    '+': '__pos__',
    '~': '__invert__',
}
| StarcoderdataPython |
3373663 | <reponame>mononobi/charma-server<filename>src/charma/scraper/services.py
# -*- coding: utf-8 -*-
"""
scraper services module.
"""
from pyrin.application.services import get_component
from charma.scraper import ScraperPackage
def get(url, **options):
    """
    fetches the given url and returns the raw `Response` object.

    :param str url: url to be fetched.

    :keyword bool add_user_agent: add user agent into request headers.
                                  defaults to True if not provided.

    :keyword bool allow_redirects: allow redirects.
                                   defaults to True if not provided.

    :keyword dict headers: headers to be sent with request.

    :rtype: requests.Response
    """

    # Delegate to the scraper component registered for this package.
    component = get_component(ScraperPackage.COMPONENT_NAME)
    return component.get(url, **options)
def get_soup(url, **options):
    """
    fetches the given url and returns a parsed `BeautifulSoup` object.

    :param str url: url to be fetched.

    :keyword bool add_user_agent: add user agent into request headers.
                                  defaults to True if not provided.

    :keyword bool allow_redirects: allow redirects.
                                   defaults to True if not provided.

    :keyword dict headers: headers to be sent with request.

    :rtype: bs4.BeautifulSoup
    """

    # Delegate to the scraper component registered for this package.
    component = get_component(ScraperPackage.COMPONENT_NAME)
    return component.get_soup(url, **options)
| StarcoderdataPython |
4829973 | import alerts
def handler(event, context):
    """Lambda entry point for the Lex alerting intent.

    Reads the requested alert level from the intent slots, triggers the
    configured alerting procedures, and returns a fulfilled close response.
    """
    alert_type = event['currentIntent']['slots']['AlertLevel']
    print(alert_type)

    # Confirmation text shown to the user once alerting has been kicked off.
    confirmation = (
        "Thanks, I have started the configured "
        "alerting procedures for a {0} Alert."
    ).format(alert_type)

    response = {
        "dialogAction": {
            "type": "Close",
            "fulfillmentState": "Fulfilled",
            "message": {
                "contentType": "PlainText",
                "content": confirmation,
            },
        }
    }

    alerts.send_alerts(alert_type)
    return response
| StarcoderdataPython |
8046184 | from core.advbase import *
def module():
    # Factory hook used by the simulator to obtain this file's adventurer class.
    return Xander
class Xander(Adv):
    """Adventurer configuration for Xander: gear slots, skill rotation (acl),
    co-abilities and skill shares."""
    conf = {}
    # Wyrmprint and dragon slots.
    conf['slots.a'] = ['The_Shining_Overlord', 'His_Clever_Brother']
    conf['slots.d'] = 'Gaibhne_and_Creidhne'
    # Action priority list: prefer skills when skill 1 is enhanced,
    # otherwise weave dragon/force-strikes between skills.
    conf['acl'] = """
        if c_s(1, enhanced)
            `s3
            `s4
            `s2
            `s1
        else
            `dragon(c3-s-end), fsc
            `s3, fsc or s
            `s2, fsc or s
            `s1, fsc or s
            `fs, x=2
        end
    """
    conf['coabs'] = ['Blade', 'Yurius', 'Hunter_Sarisse']
    conf['share'] = ['Gala_Elisanne', 'Hunter_Sarisse']
    # NOTE(review): the methods below are a disabled, older version of the kit
    # (born_ruler phases / frostbite killer); kept for reference only.
    # def fs_proc(self, e):
    #     if self.born_ruler_2.get():
    #         with KillerModifier('fs_killer', 'hit', 0.30, ['frostbite']):
    #             self.dmg_make('fs', 6.66)
    #     else:
    #         self.dmg_make('fs', 3.26)
    #     self.conf['s1'].dmg = 8.32
    #     self.born_ruler.off()
    #     self.born_ruler_1.off()
    #     self.born_ruler_2.off()
    # def prerun(self):
    #     self.fs_alt = FSAltBuff(group='a', hidden=True)
    #     self.born_ruler = Selfbuff('born_ruler', 0.05, -1, 'att', 'buff')
    #     self.born_ruler_1 = Selfbuff('born_ruler_1', 1, -1, 'xunder', 'buff')
    #     self.born_ruler_2 = Selfbuff('born_ruler_2', 1, -1, 'xunder', 'buff')
    # def s1_proc(self, e):
    #     boost = 0.05*self.buffcount
    #     self.afflics.frostbite(e.name,120,0.41*(1+boost))
    #     try:
    #         if self.born_ruler_2.get():
    #             # phase 3
    #             self.dmg_make(f'o_{e.name}_boost',self.conf[e.name].dmg*boost)
    #             self.conf[e.name].dmg = 8.32
    #             self.fs_alt.off()
    #             self.born_ruler.off()
    #             self.born_ruler_1.off()
    #             self.born_ruler_2.off()
    #         elif self.born_ruler_1.get():
    #             # phase 2
    #             self.dmg_make(f'o_{e.name}_boost',self.conf[e.name].dmg*boost)
    #             self.conf[e.name].dmg = 8.40
    #             self.born_ruler_2.on()
    #         else:
    #             self.fs_alt.on()
    #             self.born_ruler.on()
    #             self.born_ruler_1.on()
    #             # phase 1
    #             self.dmg_make(f'o_{e.name}_boost',self.conf[e.name].dmg*boost)
    #             self.conf[e.name].dmg = 8.37
    #     except:
    #         self.dmg_make(f'o_{e.name}_boost',self.conf[e.name].dmg*boost)
    #     log('debug', 'xander_s1_boost', f'x{self.buffcount} = {self.conf[e.name].dmg*(1+boost):.2%}')
    # def s2_proc(self, e):
    #     boost = 0.05*self.buffcount
    #     self.dmg_make(f'o_{e.name}_boost',self.conf[e.name].dmg*boost)
    #     log('debug', 'xander_s2_boost', f'x{self.buffcount} = {self.conf[e.name].dmg*(1+boost):.2%}')
if __name__ == '__main__':
    # Run this adventurer's simulation directly from the command line.
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| StarcoderdataPython |
class Solution:
    def binaryGap(self, N: int) -> int:
        """Return the longest distance between two consecutive set bits of N.

        The distance is measured in bit positions between a pair of 1-bits
        with no other 1-bit in between; returns 0 when N has fewer than two
        set bits.
        """
        # bin() already returns a str; the original str(bin(N)) was redundant.
        # Positions of the set bits, most-significant first.
        ones = [i for i, bit in enumerate(bin(N)[2:]) if bit == "1"]
        # Largest gap between adjacent set bits; default=0 covers < 2 set bits.
        return max((b - a for a, b in zip(ones, ones[1:])), default=0)
3398952 | <filename>ope-backend/src/core/use_cases/order_use_cases/create_order_use_case.py<gh_stars>1-10
from src.core.validations import create_order_validation as validate
from datetime import datetime
class CreateOrder:
    """Use case that validates and persists a new order."""

    def __init__(self, order_repository):
        # Repository abstraction used to persist the order.
        self.order_repository = order_repository

    def create_order(self,
                     consumed_in: int,
                     table: int,
                     payment_method: int,
                     obs: str):
        """Validate the order fields and create the order via the repository.

        Returns the repository response on success, or a 400 payload listing
        the invalid inputs.
        """
        # New orders always start open, unconfirmed and timestamped now.
        order_fields = dict(
            done=False,
            initial_date=datetime.now(),
            end_date=None,
            consumed_in=consumed_in,
            table=table,
            payment_method=payment_method,
            obs=obs,
            confirmed=False,
        )

        errors = validate(**order_fields)
        if errors:
            return {"data": None, "status": 400, "errors": errors}

        return self.order_repository.create_order(**order_fields)
| StarcoderdataPython |
1875390 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequences_lib."""
import copy
# internal imports
import tensorflow as tf
from magenta.common import testing_lib as common_testing_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2
class SequencesLibTest(tf.test.TestCase):
  """Tests for sequences_lib.QuantizedSequence equality, quantization and
  bar/step bookkeeping."""

  def setUp(self):
    # Shared fixture: a 4/4, 60 qpm NoteSequence and the expected quantized
    # sequence that each test fills in with its own tracks/chords.
    self.steps_per_quarter = 4
    self.note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
    self.expected_quantized_sequence = sequences_lib.QuantizedSequence()
    self.expected_quantized_sequence.qpm = 60.0
    self.expected_quantized_sequence.steps_per_quarter = self.steps_per_quarter

  def testEq(self):
    """Equality ignores note/chord insertion order within a track."""
    left_hand = sequences_lib.QuantizedSequence()
    left_hand.qpm = 123.0
    left_hand.steps_per_quarter = 7
    left_hand.time_signature = sequences_lib.TimeSignature(7, 8)
    testing_lib.add_quantized_track(
        left_hand, 0,
        [(12, 100, 0, 40), (11, 100, 1, 2)])
    testing_lib.add_quantized_track(
        left_hand, 2,
        [(55, 100, 4, 6), (14, 120, 4, 10)])
    testing_lib.add_quantized_track(
        left_hand, 3,
        [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)])
    testing_lib.add_quantized_chords(
        left_hand, [('Cmaj7', 1), ('G9', 2)])
    right_hand = sequences_lib.QuantizedSequence()
    right_hand.qpm = 123.0
    right_hand.steps_per_quarter = 7
    right_hand.time_signature = sequences_lib.TimeSignature(7, 8)
    testing_lib.add_quantized_track(
        right_hand, 0,
        [(11, 100, 1, 2), (12, 100, 0, 40)])
    testing_lib.add_quantized_track(
        right_hand, 2,
        [(14, 120, 4, 10), (55, 100, 4, 6)])
    testing_lib.add_quantized_track(
        right_hand, 3,
        [(0, 101, 17, 21), (2, 50, 20, 21), (1, 10, 0, 6)])
    testing_lib.add_quantized_chords(
        right_hand, [('G9', 2), ('Cmaj7', 1)])
    self.assertEqual(left_hand, right_hand)

  def testNotEq(self):
    """Sequences differing in note pitch and chord figure compare unequal."""
    # NOTE(review): this test sets `bpm`/`steps_per_beat` where testEq uses
    # `qpm`/`steps_per_quarter`; the inequality it asserts also follows from
    # the differing note (15 vs 14) and chord (C7 vs Cmaj7) — confirm which
    # attribute names QuantizedSequence actually compares.
    left_hand = sequences_lib.QuantizedSequence()
    left_hand.bpm = 123.0
    left_hand.steps_per_beat = 7
    left_hand.time_signature = sequences_lib.TimeSignature(7, 8)
    testing_lib.add_quantized_track(
        left_hand, 0,
        [(12, 100, 0, 40), (11, 100, 1, 2)])
    testing_lib.add_quantized_track(
        left_hand, 2,
        [(55, 100, 4, 6), (15, 120, 4, 10)])
    testing_lib.add_quantized_track(
        left_hand, 3,
        [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)])
    testing_lib.add_quantized_chords(
        left_hand, [('Cmaj7', 1), ('G9', 2)])
    right_hand = sequences_lib.QuantizedSequence()
    right_hand.bpm = 123.0
    right_hand.steps_per_beat = 7
    right_hand.time_signature = sequences_lib.TimeSignature(7, 8)
    testing_lib.add_quantized_track(
        right_hand, 0,
        [(11, 100, 1, 2), (12, 100, 0, 40)])
    testing_lib.add_quantized_track(
        right_hand, 2,
        [(14, 120, 4, 10), (55, 100, 4, 6)])
    testing_lib.add_quantized_track(
        right_hand, 3,
        [(0, 101, 17, 21), (2, 50, 20, 21), (1, 10, 0, 6)])
    testing_lib.add_quantized_chords(
        right_hand, [('G9', 2), ('C7', 1)])
    self.assertNotEqual(left_hand, right_hand)

  def testFromNoteSequence(self):
    """Notes and chords quantize to the expected step indices."""
    testing_lib.add_track(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    testing_lib.add_chords(
        self.note_sequence,
        [('B7', 0.22), ('Em9', 4.0)])
    testing_lib.add_quantized_track(
        self.expected_quantized_sequence, 0,
        [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
         (55, 120, 16, 17), (52, 99, 19, 20)])
    testing_lib.add_quantized_chords(
        self.expected_quantized_sequence,
        [('B7', 1), ('Em9', 16)])
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(self.expected_quantized_sequence, quantized)

  def testFromNoteSequence_TimeSignatureChange(self):
    """Zero/one/duplicate time signatures are accepted; a change raises."""
    testing_lib.add_track(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.time_signatures[:]
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    # Single time signature.
    self.note_sequence.time_signatures.add(numerator=4, denominator=4)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    # Multiple time signatures with no change.
    self.note_sequence.time_signatures.add(numerator=4, denominator=4)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    # Time signature change.
    self.note_sequence.time_signatures.add(numerator=2, denominator=4)
    with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
      quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

  def testRounding(self):
    """Note times near step boundaries round to the nearest step."""
    testing_lib.add_track(
        self.note_sequence, 1,
        [(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55), (40, 100, 0.50, 0.75),
         (41, 100, 0.689, 1.18), (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)])
    testing_lib.add_quantized_track(
        self.expected_quantized_sequence, 1,
        [(12, 100, 0, 1), (11, 100, 1, 2), (40, 100, 2, 3),
         (41, 100, 3, 5), (44, 100, 5, 7), (55, 100, 16, 17)])
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(self.expected_quantized_sequence, quantized)

  def testMultiTrack(self):
    """Each instrument quantizes into its own track."""
    testing_lib.add_track(
        self.note_sequence, 0,
        [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])
    testing_lib.add_track(
        self.note_sequence, 3,
        [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])
    testing_lib.add_track(
        self.note_sequence, 7,
        [(12, 100, 1.0, 5.0), (19, 100, 2.0, 4.0), (24, 100, 3.0, 3.5)])
    testing_lib.add_quantized_track(
        self.expected_quantized_sequence, 0,
        [(12, 100, 4, 16), (19, 100, 4, 12)])
    testing_lib.add_quantized_track(
        self.expected_quantized_sequence, 3,
        [(12, 100, 4, 16), (19, 100, 8, 20)])
    testing_lib.add_quantized_track(
        self.expected_quantized_sequence, 7,
        [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(self.expected_quantized_sequence, quantized)

  def testStepsPerBar(self):
    """steps_per_bar follows the time signature (4/4 -> 16, 6/8 -> 12)."""
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(16, quantized.steps_per_bar())
    self.note_sequence.time_signatures[0].numerator = 6
    self.note_sequence.time_signatures[0].denominator = 8
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(12.0, quantized.steps_per_bar())

  def testDeepcopy(self):
    """A deep copy is independent of later mutations of the original."""
    quantized = sequences_lib.QuantizedSequence()
    testing_lib.add_track(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    quantized_copy = copy.deepcopy(quantized)
    self.assertEqual(quantized, quantized_copy)
    testing_lib.add_quantized_track(
        quantized, 1,
        [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])
    self.assertNotEqual(quantized, quantized_copy)
if __name__ == '__main__':
  # Run the TensorFlow test runner when executed as a script.
  tf.test.main()
| StarcoderdataPython |
5089468 | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ifx_db
import config
from testfunctions import IfxDbTestFunctions
class IfxDbTestCase(unittest.TestCase):
    """Python 2 regression test: insert then delete a row and print the
    column/affected-row counts (compared against the expected-output
    comment blocks below the class)."""

    def test_015_InsertDeleteRowCount_01(self):
        # Delegates to the shared harness, which captures stdout from
        # run_test_015 and compares it to the platform-specific expectations.
        obj = IfxDbTestFunctions()
        obj.assert_expect(self.run_test_015)

    def run_test_015(self):
        conn = ifx_db.connect(config.ConnStr, config.user, config.password)
        if conn:
            # Insert one row, then report field count and affected-row count.
            result = ifx_db.exec_immediate(conn,"insert into t_string values(123,1.222333,'one to one')")
            if result:
                cols = ifx_db.num_fields(result)
                # NOTE: Removed '\n' from the following and a few more prints here (refer to ruby test_015.rb)
                print "col:", cols
                rows = ifx_db.num_rows(result)
                print "affected row:", rows
            else:
                print ifx_db.stmt_errormsg()
            # Delete the same row and report the counts again.
            result = ifx_db.exec_immediate(conn,"delete from t_string where a=123")
            if result:
                cols = ifx_db.num_fields(result)
                print "col:", cols
                rows = ifx_db.num_rows(result)
                print "affected row:", rows
            else:
                print ifx_db.stmt_errormsg()
            ifx_db.close(conn)
        else:
            print "no connection:", ifx_db.conn_errormsg()
#__END__
#__LUW_EXPECTED__
#col: 0
#affected row: 1
#col: 0
#affected row: 1
#__ZOS_EXPECTED__
#col: 0
#affected row: 1
#col: 0
#affected row: 1
#__SYSTEMI_EXPECTED__
#col: 0
#affected row: 1
#col: 0
#affected row: 1
#__IDS_EXPECTED__
#col: 0
#affected row: 1
#col: 0
#affected row: 1
| StarcoderdataPython |
5039658 | <gh_stars>0
# Exercise: read the name and weight of several people, storing everything in
# a list. At the end, show: A) how many people were registered; B) a list of
# the heaviest people; C) a list of the lightest people.
dados = list()
grupo = list()
pesada = list()
leve = list()
cont = maior = menor = 0
while True:
    # Collect one person as [name, weight] and append a copy to the roster.
    dados.append(str(input('Nome: ')))
    dados.append(int(input('Peso: ')))
    grupo.append(dados[:])
    if cont == 0:
        # First person initializes both extremes.
        maior = dados[1]
        menor = dados[1]
    else:
        if dados[1] >= maior:
            maior = dados[1]
        if dados[1] <= menor:
            menor = dados[1]
    dados.clear()
    cont += 1
    # NOTE(review): an empty answer here raises IndexError on [0] — consider
    # validating the input before indexing.
    resp = str(input('Você quer adicionar mais ? [S/N]')).upper().strip()[0]
    if resp == 'N':
        break
# Collect every person matching the extreme weights (ties included).
for n, p in grupo:
    if menor == p:
        leve.append(n)
    if maior == p:
        pesada.append(n)
print('-='*30)
print(f'{cont} pessoas foram cadastradas.')
print(f'O maior peso foi de {maior:.1f}Kg. Peso de {pesada} ')
print(f'O menor peso foi de {menor:.1f}Kg. Peso de {leve} ')
| StarcoderdataPython |
120636 | <reponame>TakoiHirokazu/kaggle_commonLit_readability_prize
# ========================================
# library
# ========================================
from scipy.optimize import minimize
import os
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import logging
import sys
from contextlib import contextmanager
import time
# ==================
# Constant
# ==================
# Experiment tag and input/output paths; the output directory is created
# on first run.
ex = "_ensemble"
TRAIN_PATH = "../data/train.csv"
FOLD_PATH = "../data/fe001_train_folds.csv"
if not os.path.exists(f"../output/ex/ex{ex}"):
    os.makedirs(f"../output/ex/ex{ex}")
LOGGER_PATH = f"../output/ex/ex{ex}/ex{ex}.txt"
# ===============
# Functions
# ===============
def setup_logger(out_file=None, stderr=True, stderr_level=logging.INFO, file_level=logging.DEBUG):
    """(Re)configure the module-level LOGGER with stderr and/or file handlers
    and return it."""
    LOGGER.handlers = []
    LOGGER.setLevel(min(stderr_level, file_level))

    def attach(handler, level):
        # Every handler shares the module-wide format string.
        handler.setFormatter(FORMATTER)
        handler.setLevel(level)
        LOGGER.addHandler(handler)

    if stderr:
        attach(logging.StreamHandler(sys.stderr), stderr_level)
    if out_file is not None:
        attach(logging.FileHandler(out_file), file_level)
    LOGGER.info("logger set up")
    return LOGGER
@contextmanager
def timer(name):
    # Log the wall-clock duration of the wrapped block under the given name.
    t0 = time.time()
    yield
    LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s')
LOGGER = logging.getLogger()
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
setup_logger(out_file=LOGGER_PATH)
# ================================
# Main
# ================================
# Training targets and fold assignments used for the OOF blend.
train = pd.read_csv(TRAIN_PATH)
y = train["target"]
fold_df = pd.read_csv(FOLD_PATH)
fold_array = fold_df["kfold"].values
# ================================
# exp
# ================================
# Out-of-fold predictions from each experiment; ex15 is a simple average of
# its SVR and ridge heads.
ex15_svr = np.load("../output/ex/ex015/ex015_svr.npy")
ex15_ridge = np.load("../output/ex/ex015/ex015_ridge.npy")
ex15 = (ex15_svr + ex15_ridge) / 2
ex72 = np.load("../output/ex/ex072/ex072_oof.npy")
ex107 = np.load("../output/ex/ex107/ex107_oof.npy")
ex182 = np.load("../output/ex/ex182/ex182_oof.npy")
ex190 = np.load("../output/ex/ex190/ex190_oof.npy")
ex194 = np.load("../output/ex/ex194/ex194_oof.npy")
ex216 = np.load("../output/ex/ex216/ex216_oof.npy")
ex237 = np.load("../output/ex/ex237/ex237_oof.npy")
ex272 = np.load("../output/ex/ex272/ex272_oof.npy")
ex292 = np.load("../output/ex/ex292/ex292_oof.npy")
ex384 = np.load("../output/ex/ex384/ex384_oof.npy")
ex407 = np.load("../output/ex/ex407/ex407_oof.npy")
ex429 = np.load("../output/ex/ex429/ex429_oof.npy")
ex434 = np.load("../output/ex/ex434/ex434_oof.npy")
ex448 = np.load("../output/ex/ex448/ex448_oof.npy")
ex450 = np.load("../output/ex/ex450/ex450_oof.npy")
# Patch: fold 2 of ex450 is replaced by ex448's predictions.
ex450[fold_array == 2] = ex448[fold_array == 2]
ex465 = np.load("../output/ex/ex465/ex465_oof.npy")
ex497 = np.load("../output/ex/ex497/ex497_oof.npy")
ex507 = np.load("../output/ex/ex507/ex507_oof.npy")
def f(x):
    """Objective for the weight search: RMSE of the weighted blend of the
    experiment OOF predictions, where x holds the 15 blend weights."""
    components = [
        (ex15 + ex237) / 2,
        ex72 * 0.8 + ex384 * 0.2,
        ex107,
        (ex190 + ex272) / 2,
        ex182,
        ex194,
        ex292,
        ex216,
        ex407,
        ex429,
        ex450,
        ex465,
        ex434,
        ex497,
        ex507,
    ]
    # Weighted sum accumulated in the same left-to-right order as before.
    blended = sum(w * pred for w, pred in zip(x, components))
    return np.sqrt(mean_squared_error(y, blended))
with timer("ensemble"):
    # Start from uniform weights and optimize the blend with Nelder-Mead.
    weight_init = [1 / 15 for _ in range(15)]
    result = minimize(f, weight_init, method="Nelder-Mead")
    LOGGER.info(f'ensemble_weight:{result.x}')
| StarcoderdataPython |
1880226 | <reponame>cekicbaris/inm703
from model import *
# -------------------
# Extinction Design
# -------------------
# NOTE(review): "exitinction" is a misspelling of "extinction" carried through
# the variable names below; kept as-is since this is documentation only.
# Define the model
exitinction = Model_Rescorla_Wagner(experiment_name="Extinction", lambda_US=1, beta_US=0.5)
# Define the predictors
A = Predictor(name='A', alpha = 0.2)
# Define the experiment groups: 10 reinforced trials then 10 non-reinforced.
exitinction_group = Group(name="Experiment Group")
exitinction_group.add_phase_for_group(phase_name='Conditioning', predictors=[A], outcome=True, number_of_trial=10)
exitinction_group.add_phase_for_group(phase_name='Extinction', predictors=[A], outcome=False, number_of_trial=10)
exitinction.add_group(exitinction_group)
# Run the model
exitinction.model_run()
exitinction.display_results(save_to_file=True)
5035874 | ############
# BitTorrent server launch
############
import logging,sys,pdb,time,traceback,os
from thread import get_ident
from operator import mod
#need this prior to BT imports
import gettext
gettext.install('bittorrent', 'locale')
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent import configfile
from BitTorrent import BTFailure
from BitTorrent.bencode import bdecode
from twisted.spread import pb
from twisted.internet import reactor
from twisted.internet import threads
class DataLock:
    """Platform-specific recursive mutex wrapper with lock()/unlock()."""
    def __init__(self):
        if sys.platform == 'win32':
            from qt import QMutex
            # True -> recursive mutex in Qt.
            self.mutex = QMutex(True)
        elif sys.platform == 'darwin':
            from Foundation import NSRecursiveLock
            self.mutex = NSRecursiveLock.alloc().init()
        # NOTE(review): on any other platform self.mutex is never set, so
        # lock()/unlock() raise AttributeError — confirm only win32/darwin
        # are supported.
    def lock(self): self.mutex.lock()
    def unlock(self): self.mutex.unlock()
def downloadDir():
    # Resolve (and create if needed) the per-user TorrentServer download
    # directory; falls back to '.' (win32) or $HOME on failure.
    ddir=None
    if sys.platform=='win32':
        ddir = os.environ.get('HOMEPATH')
        if ddir and len(ddir)>0:
            ddir = '%s%sMy Documents%sTorrentServer' % (ddir,os.sep,os.sep)
            if not os.path.isdir(ddir): os.makedirs(ddir)
        else: ddir='.'
    else:
        ddir = os.environ.get('HOME')
        try:
            # Raises TypeError if HOME is unset (ddir is None), handled below.
            ddir += '/Documents/TorrentServer'
            if not os.path.isdir(ddir): os.makedirs(ddir)
        except: ddir = os.environ.get('HOME')
    return ddir
class TorrentServer(pb.Root):
    """Twisted PB root that runs BitTorrent's LaunchMany and exposes
    execution-status updates and progress queries to a client pinger.

    Shared state (status/torrents/torrentDir) is guarded by torrentLock since
    the torrent loop runs in a worker thread while PB calls arrive on the
    reactor thread."""
    torrentPort = 11989
    log = None
    # Process-wide singleton instance.
    tServer = None
    isProcess = True
    def __init__(self, td):
        TorrentServer.tServer = self
        self.launcher = None
        self.torrentDir = td
        self.torrentLock = DataLock()
        # '' | 'paused' | 'active' | 'restart' | 'quit'
        self.status = ''
        # name -> ['progress', pct] or control entries like ['cancel'].
        self.torrents = {}
        self.stoppedTorrents = {}
        self.moduloCount = 0
    def listen(self):
        # Serve the PB root on torrentPort and enter the reactor loop.
        from twisted.internet import reactor
        TorrentServer.log.debug('init(%s): listening '%get_ident())
        reactor.listenTCP( TorrentServer.torrentPort, pb.PBServerFactory(self))
        reactor.run()
    def remote_UpdateExecutionStatus(self, execStatus):
        # PB entry point: apply status/torrents commands; a torrent_dir change
        # triggers a restart of the torrent loop.
        self.torrentLock.lock()
        try:
            TorrentServer.log.debug('remote_UpdateExecutionStatus(%s): %s' %\
                (get_ident(),execStatus))
            try:
                td = self.torrentDir
                ttdd = execStatus.get('torrent_dir',td)
                self.status = execStatus.get('status','')
                self.torrents = execStatus.get('torrents',{})
                if td and ttdd!=td:
                    self.torrentDir = ttdd
                    self.status = 'restart'
                elif not td: self.torrentDir = ttdd
            except:
                traceback.print_exc()
        finally: self.torrentLock.unlock()
    def remote_ReportProgressStatus(self):
        # PB entry point: snapshot of per-torrent progress and server status.
        #TorrentServer.log.debug('remote_ReportProgressStatus: ')
        self.torrentLock.lock()
        try: return {'torrents':self.torrents, 'status': self.status}
        finally: self.torrentLock.unlock()
    def initTorrent(self):
        # Build the btlaunchmany configuration and start the torrent loop if
        # a torrent directory is known.
        self.torrentLock.lock()
        self.status=''
        uiname = 'btlaunchmany'
        defaults = get_defaults(uiname)
        try:
            config, args = configfile.parse_configuration_and_args(defaults, uiname, [], 0, 1)
            #config, args = configfile.parse_configuration_and_args(defaults, uiname, sys.argv[1:], 0, 1)
            config['torrent_dir'] = self.torrentDir
            config['parse_dir_interval'] = 20 #make the dir scan happen @20 seconds, not default of 60
            self.config = config
        except BTFailure, e:
            traceback.print_exc()
            TorrentServer.log.error(_("%s\nrun with no args for parameter explanations")) % str(e)
        self.torrentLock.unlock()
        if self.torrentDir: self.runTorrents()
    def runTorrents(self):
        # Blocking torrent loop; on 'quit' stops the reactor (process mode),
        # otherwise re-initializes (handles 'restart') and recurses.
        TorrentServer.log.debug('runTorrents(%s): LaunchMany... %s'%\
            (get_ident(), self.torrentDir))
        self.launcher = LaunchMany(self.config, self, 'btlaunchmany')
        self.launcher.run()
        TorrentServer.log.debug('runTorrents(%s): DONE with torrents...'%get_ident())
        if self.status=='quit':
            if TorrentServer.isProcess:
                reactor.stop()
                #sys.exit()
        else:
            if self.status=='restart':
                # NOTE(review): bare name `log` is not defined in this module
                # (the logger lives on TorrentServer.log) — this line would
                # raise NameError if reached; confirm.
                log.debug('torrentServer(): Will restart %s '%self.torrentDir)
            self.initTorrent()
            if self.torrentDir: self.runTorrents()
    def display(self, data):
        # LaunchMany feedback callback: returns True to stop the loop.
        # Busy-waits (1s polls) while paused, releasing the lock in between.
        self.torrentLock.lock()
        try:
            if self.status == 'quit': return True
            if self.status=='restart': return True
            while self.status=='paused':
                #TorrentServer.log.debug( 'display(%s): is paused' % (get_ident()))
                self.torrentLock.unlock()
                time.sleep(1.0)
                self.torrentLock.lock()
            self.moduloCount += 1
            modulo = mod(self.moduloCount, 3)
            for xx in data:
                ( name, status, progress, peers, seeds, seedsmsg, dist,
                  uprate, dnrate, upamt, dnamt, size, t, msg ) = xx
                # NOTE(review): `is not` compares identity, not equality —
                # presumably `!=` was intended here.
                if status is not 'downloading':
                    pass #TorrentServer.log.debug( 'display(%s): %s: %s (%s)' % (get_ident(),status, name, progress))
                stopstatus = self.torrents.get(name)
                if stopstatus and (stopstatus[0]=='cancel' or stopstatus[0]=='stop'):
                    # Client asked to cancel/stop: delete the .torrent file.
                    try: os.remove(name)
                    except: traceback.print_exc()
                    del self.torrents[name]
                else:
                    self.torrents[name] = ['progress',progress]
            del data
            return False
        finally: self.torrentLock.unlock()
    def message(self, str):
        # LaunchMany feedback: informational message.
        TorrentServer.log.debug('FeedbackReporter.message(): %s'%str)
    def exception(self, str):
        # LaunchMany feedback: error text.
        TorrentServer.log.warn('FeedbackReporter: exception=%s'%str)
    def didEndTorrentThread(self,foo=None):
        TorrentServer.log.debug('didEndTorrentThread(): %s'%foo)
    def didEndTorrentThreadErr(self,foo=None):
        TorrentServer.log.debug('didEndTorrentThreadErr(): %s'%foo)
def main(args):
    # Configure logging (verbose in debug builds), start the torrent loop in
    # a worker thread, then serve PB requests on the reactor thread.
    if __debug__:
        level = logging.DEBUG
    else:
        level = logging.WARN
    logging.basicConfig(level=level, format='%(asctime)s %(levelname)s %(message)s')
    handler = logging.StreamHandler()
    TorrentServer.log = logging.getLogger('TorrentServer')
    TorrentServer.log.addHandler(handler)
    TorrentServer.log.propagate = 0
    TorrentServer.log.debug('torrentServer.main(): will load config')
    TorrentServer.tServer = TorrentServer(downloadDir())
    dfr = threads.deferToThread(TorrentServer.tServer.initTorrent)
    dfr.addCallback(TorrentServer.tServer.didEndTorrentThread)
    dfr.addErrback(TorrentServer.tServer.didEndTorrentThreadErr)
    TorrentServer.tServer.listen()
if __name__=="__main__":
    main(sys.argv)
############
# Twisted-based client
############
import pdb,os,time,traceback
from twisted.internet import threads
from twisted.spread import pb
from twisted.internet import reactor
from torrentServer import *
class Torrenting(object):
    """Client-side coordinator: launches the torrent server (as a separate
    process on win32 release builds, otherwise in a thread) and tracks
    per-torrent progress through a TorrentPinger."""
    def start(doLaunch=True):
        theTorrenter = Torrenting(doLaunch)
    start = staticmethod(start)
    def __init__(self, doLaunch=True):
        self.filenames = {}
        self.didQuit = self.isPaused = False
        # torrent filename -> ['progress', pct] or control entries.
        self.torrents = {}
        self.pinger = None
        if doLaunch: self.launch()
    def willQuit(self,event=None):
        # Signal shutdown to both the local server object and the pinger.
        self.didQuit = True
        if TorrentServer.tServer:
            TorrentServer.tServer.didQuit = True
        self.pinger.quitTorrents()
    def _threadLaunch(self):
        # In-process mode: run the server's init in a worker thread.
        TorrentServer.tServer = TorrentServer(downloadDir())
        TorrentServer.log = log
        TorrentServer.tServer.initTorrent()
    def didEndTorrentThread(self,foo=None):
        pass
    def didEndTorrentThreadErr(self,foo=None):
        pass
    def launch(self):
        # Prefer the external torrentServer.exe on win32 release builds;
        # otherwise fall back to running the server in-process.
        launched=False
        if not __debug__:
            if sys.platform=='win32':
                TorrentServer.isProcess = True
                dir,fn = os.path.split(sys.argv[0])
                path = dir+os.sep+'torrentServer.exe'
                try:
                    os.startfile(path)
                    launched=True
                except: traceback.print_exc()
        if not launched:
            TorrentServer.isProcess = False
            dfr = threads.deferToThread(self._threadLaunch)
            dfr.addCallback(self.didEndTorrentThread)
            dfr.addErrback(self.didEndTorrentThreadErr)
            launched=True
        if launched:
            # Pick the pinger flavor that matches how the server runs.
            if TorrentServer.isProcess:
                self.pinger = TorrentPingerRemote(self)
            else: self.pinger = TorrentPingerLocal(self)
    def gotProgress(self, progress):
        # Pinger callback: dispatch per-torrent progress/failure updates.
        torrents = progress.get('torrents',{})
        for fn,status in torrents.iteritems():
            if not self.torrents.has_key(fn): continue
            if status[0]=='progress':
                progressf = float(status[1])
                if progressf==100.0: self.didDownloadTorrent(fn)
                else: print 'gotProgress: %s, %.0f' % (fn, progressf)
            elif status[0]=='failed':
                self.didNotDownloadTorrent(fn)
    def addTorrent(self,filename):
        # Register a torrent and kick off the progress-polling loop.
        self.torrents[filename] = ['progress',0.0]
        self.pinger.queryProgress()
    def didCancelDownload(self,filename):
        self.torrents[filename] = ['cancel']
        try: del self.torrents[filename]
        except: pass
        self.didNotDownloadTorrent(filename)
    def didDownloadTorrent(self,filename=None):
        try: del self.filenames[filename]
        except: pass
    def didNotDownloadTorrent(self,filename=None):
        try: del self.filenames[filename]
        except: pass
    def didCancelBuild(self):
        # Remove every tracked .torrent file and stop the polling loop.
        for tt in self.torrents.keys():
            try: os.remove(tt)
            except: traceback.print_exc()
        self.torrents = {}
        self.pinger.cancelTorrents()
    def togglePause(self):
        self.isPaused = not self.isPaused
        self.pinger.pauseTorrents(self.isPaused)
##########################
class TorrentPinger:
    """Base class for the progress-polling client.  Builds the command
    dictionaries; subclasses deliver them to the torrent server (locally or
    over Twisted PB) and override the did* callbacks."""
    def __init__( self, delegate=None ):
        self.delegate = delegate
        # Pending reactor.callLater handle for the next progress poll.
        self.progressPing = None
    def _didFail( self, foo ):
        # Connection failure: cancel the polling loop and retry start() in 1s.
        # NOTE(review): `start` is only defined on TorrentPingerRemote —
        # presumably this base method is only reached in remote mode.
        try:
            if self.progressPing:
                self.progressPing.cancel()
                self.progressPing = None
            reactor.callLater(1,self.start)
        except: traceback.print_exc()
    def didPause(self,foo=None):
        pass
    def didQuit(self,foo=None):
        pass
    def didUpdateTorrents(self,foo=None):
        pass
    def updateTorrentsExecutionStatus(self, torrents):
        # Build the command dict; subclasses deliver it.
        cmds = {'torrents': torrents}
        return cmds
    def quitTorrents(self):
        cmds = {'status':'quit'}
        return cmds
    def pauseTorrents(self, yn):
        if yn: cmds = {'status':'paused'}
        else: cmds = {'status':'active'}
        return cmds
    def queryProgress( self ):
        pass
    def _progressStatus( self, progress):
        # Forward the snapshot to the delegate and schedule the next poll.
        if self.delegate: self.delegate.gotProgress(progress)
        self.progressPing = reactor.callLater(1, self.queryProgress )
    def cancelTorrents(self):
        # Stop the polling loop (prints a traceback if no poll is pending).
        try:
            self.progressPing.cancel()
            self.progressPing = None
        except: traceback.print_exc()
class TorrentPingerLocal(TorrentPinger):
    """Pinger that talks directly to the in-process TorrentServer singleton."""
    def __init__( self, delegate=None ):
        TorrentPinger.__init__(self, delegate)
    def _didFail(self,foo=None):
        # In-process calls cannot lose a connection; nothing to retry.
        pass
    def didPause(self,foo=None):
        pass
    def didQuit(self,foo=None):
        pass
    def didUpdateTorrents(self,foo=None):
        pass
    def updateTorrentsExecutionStatus(self, torrents):
        # Build the per-torrent command dict via the base class.
        cmds = TorrentPinger.updateTorrentsExecutionStatus(self,torrents)
        # Bug fix: the command dict was previously built and then dropped,
        # making this method a no-op; deliver it like quitTorrents() does.
        TorrentServer.tServer.remote_UpdateExecutionStatus(cmds)
        self.didUpdateTorrents()
    def quitTorrents(self):
        # Deliver the quit command synchronously to the in-process server.
        TorrentServer.tServer.remote_UpdateExecutionStatus(TorrentPinger.quitTorrents(self))
    def pauseTorrents(self, yn):
        # Defer the pause/resume command onto the reactor thread.
        dfr = reactor.callLater(0,TorrentServer.tServer.remote_UpdateExecutionStatus,
                TorrentPinger.pauseTorrents(self,yn))
    def queryProgress( self ):
        # Synchronous progress fetch; _progressStatus reschedules the poll.
        status = TorrentServer.tServer.remote_ReportProgressStatus()
        self._progressStatus(status)
class TorrentPingerRemote(TorrentPinger):
    """Pinger that talks to the torrent server over a Twisted PB connection
    (used when the server runs as a separate process)."""
    def __init__( self, delegate=None ):
        TorrentPinger.__init__(self, delegate)
        self.torrentServerRoot = None
        # Connect shortly after construction, once the reactor is running.
        reactor.callLater(1,self.start)
    def start( self ):
        # Open a PB connection to the local server and fetch its root object.
        try:
            client = pb.PBClientFactory()
            reactor.connectTCP('127.0.0.1', TorrentServer.torrentPort, client, 20)
            dfr = client.getRootObject()
            dfr.addCallbacks(self._gotRemote, self._didFail)
        except: traceback.print_exc()
    def _gotRemote( self, remote ):
        # Keep the remote root; reconnect via _didFail if the link drops.
        try:
            self.torrentServerRoot = remote
            remote.notifyOnDisconnect(self._didFail)
        except: traceback.print_exc()
    def updateTorrentsExecutionStatus(self, torrents):
        # Bug fix: the base-class helper was called unbound without `self`
        # (TorrentPinger.updateTorrentsExecutionStatus(torrents)), which
        # raises TypeError; pass `self` explicitly.
        cmds = TorrentPinger.updateTorrentsExecutionStatus(self, torrents)
        dfr = self.torrentServerRoot.callRemote('UpdateExecutionStatus', cmds)
        dfr.addCallbacks(self.didUpdateTorrents, self._didFail)
    def quitTorrents(self):
        # Bug fix: was TorrentPinger.quitTorrents() with no instance.
        cmds = TorrentPinger.quitTorrents(self)
        dfr = self.torrentServerRoot.callRemote('UpdateExecutionStatus', cmds)
        dfr.addCallbacks(self.didQuit, self._didFail)
    def pauseTorrents(self, yn):
        # Bug fix: was TorrentPinger.pauseTorrents(yn), binding yn as `self`.
        cmds = TorrentPinger.pauseTorrents(self, yn)
        dfr = self.torrentServerRoot.callRemote('UpdateExecutionStatus', cmds)
        dfr.addCallbacks(self.didPause, self._didFail)
    def queryProgress( self ):
        # Ask the remote server for the current progress snapshot.
        dfr = self.torrentServerRoot.callRemote('ReportProgressStatus')
        dfr.addCallbacks(self._progressStatus, self._didFail)
if __name__=="__main__":
    # Standalone smoke test: connect to a running server and poll once.
    png = TorrentPingerRemote()
    reactor.callLater(3,png.queryProgress)
    reactor.run()
| StarcoderdataPython |
1951587 | <reponame>Goodjooy/AzurLane-GirlFrontLine-PaintingRestore
class DefferError(Exception):
    """Project-specific error; see call sites for exact semantics.

    Derives from ``Exception`` (not ``BaseException``) so generic
    ``except Exception`` handlers can catch it, per Python convention.
    """
    def __init__(self, arg):
        # Forward to the base class so str(e) and e.args work as expected.
        super().__init__(arg)
        self.arg = arg
class AzurLaneWork(Exception):
    """Error raised by the Azur Lane processing path; see call sites.

    Derives from ``Exception`` (not ``BaseException``) so generic
    ``except Exception`` handlers can catch it, per Python convention.
    """
    def __init__(self, arg):
        # Forward to the base class so str(e) and e.args work as expected.
        super().__init__(arg)
        self.arg = arg
class GirlFrontLaneWork(Exception):
    """Error raised by the Girls' Frontline processing path; see call sites.

    Derives from ``Exception`` (not ``BaseException``) so generic
    ``except Exception`` handlers can catch it, per Python convention.
    """
    def __init__(self, arg):
        # Forward to the base class so str(e) and e.args work as expected.
        super().__init__(arg)
        self.arg = arg
| StarcoderdataPython |
11253419 | <filename>zeograph/dmeasure.py
"""This module provides functions to calculate graph distances.
"""
__author__ = "<NAME>"
__version__ = "1.0"
__email__ = "dskoda [at] mit [dot] edu"
__date__ = "Oct 7, 2019"
import numpy as np
import networkx as nx
# Lower clamp on probabilities for numerically stable logarithms.
EPS = 1e-10
# Default weights; not referenced in this file — presumably used by callers
# combining the distance measures (TODO confirm).
WEIGHTS_DEFAULT = [0.45, 0.45, 0.10]
def distance_distribution(G):
    """Computes the distribution of node distances of a graph. Uses the
    Floyd-Warshall algorithm for the distances and formats it in
    the D-measure article standard.

    Args:
        G (nx.Graph): graph with N nodes

    Returns:
        nodes_distrib (np.array): (N, N) matrix containing the normalized
            distribution of distances of the graph. For each node i, the
            distribution is (p_1, p_2, ..., p_j, ..., p_N), where p_j is
            the proportion of nodes in the graph at distance j of the node i.
            Nodes with distance N are disconnected from the graph.
    """
    N = G.number_of_nodes()
    # NOTE(review): networkx's `weight` parameter names an edge attribute;
    # passing the literal 1 presumably falls back to unit weights (hop
    # counts) — confirm against the networkx version in use.
    dist_matrix = np.asarray(nx.floyd_warshall_numpy(G, weight=1))
    # Disconnected pairs (distance inf) are binned at distance N.
    dist_matrix[dist_matrix == np.inf] = N
    # Histogram the distances per source node into bins 0..N.
    nodes_distrib = np.zeros((N, N + 1))
    for row in range(len(dist_matrix)):
        for length in dist_matrix[row]:
            nodes_distrib[row][int(length)] += 1
    # Normalize by the N-1 other nodes.
    nodes_distrib /= (N - 1)
    # Drop the distance-0 (self) column.
    return nodes_distrib[:, 1:]
def distrib2diameter(mu):
    """Return the graph diameter implied by a mean distance distribution.

    Args:
        mu (np.array): mean distribution of distances; the last bin holds
            the weight of disconnected pairs and is therefore ignored.

    Returns:
        diameter (int): number of finite distances with positive probability.
    """
    finite_bins = mu[:-1]
    return int(np.count_nonzero(finite_bins > 0))
def entropy(p, eps=EPS):
    """Shannon entropy (natural log) of a distribution.

    Args:
        p (np.array): normalized distribution.
        eps (float): stability floor; probabilities below it are clamped
            inside the log so zero entries contribute 0 * log(eps) = 0.

    Returns:
        entropy (float)
    """
    clamped = np.maximum(p, eps)
    return -np.sum(p * np.log(clamped))
def NND(distributions_matrix, eps=EPS):
    """Network node dispersion, per Eq. 1 of the D-measure paper.

    Args:
        distributions_matrix (np.array): (N, N) per-node distance
            distributions, one row per node.
        eps (float): numerical-stability floor, forwarded to entropy().
            (It was previously accepted but silently ignored; it is now
            actually used -- identical results at the default value.)

    Returns:
        NND (float): normalized network node dispersion.
        mu_j (np.array): mean distribution of distances over all nodes.
    """
    N = distributions_matrix.shape[0]
    mu_j = np.mean(distributions_matrix, axis=0)
    # Jensen-Shannon-style divergence between the mean distribution and the
    # per-node rows; clipped at 0 to absorb tiny negative round-off.
    J = np.max([0, entropy(mu_j, eps) - entropy(distributions_matrix, eps) / N])
    # Normalize by log(diameter + 1), never less than log(2).
    norm = np.log(np.max([2, distrib2diameter(mu_j) + 1]))
    return J / norm, mu_j
def alpha_centrality(G):
    """Alpha-centrality profile of a graph.

    Solves c = (I - alpha * A^T)^{-1} d with alpha = 1/N and d the vector
    of degree centralities, then returns the sorted values scaled by N^2,
    with one extra complementary entry appended so the vector sums to 1.

    Args:
        G (nx.Graph)

    Returns:
        centrality (np.array)
    """
    n = G.number_of_nodes()
    # dict.values() iterates in the same order as .items(), so this matches
    # the original element order.
    deg_vector = np.asmatrix(list(nx.degree_centrality(G).values())).T
    alpha = 1 / n
    adjacency = nx.adjacency_matrix(G).todense()
    raw = np.dot(np.linalg.inv(np.eye(n) - alpha * adjacency.T), deg_vector)
    profile = np.sort(np.asarray(raw).squeeze()) / (n**2)
    return np.append(profile, 1 - np.sum(profile))
def zero_pad(mu_1, mu_2, end=True):
    """Zero-pad the shorter of two vectors so both have the same length.

    Generally used to align the mean distance distributions of two graphs.

    Args:
        mu_1 (np.array)
        mu_2 (np.array)
        end (bool): if True, zeros are inserted *before* the final element
            so the last entry (the infinite-distance bin) stays last; if
            False, the vector is padded on the left instead.

    Returns:
        (mu_1, mu_2): the pair, now of equal length.
    """
    def _pad_like(short, template):
        # Zero vector with the template's length/dtype, then copy `short` in.
        padded = np.zeros_like(template)
        if end:
            padded[:len(short) - 1] = short[:-1]
            padded[-1] = short[-1]
        else:
            padded[-len(short):] = short
        return padded

    if len(mu_1) > len(mu_2):
        mu_2 = _pad_like(mu_2, mu_1)
    elif len(mu_2) > len(mu_1):
        mu_1 = _pad_like(mu_1, mu_2)
    return mu_1, mu_2
def dmeasure(G1, G2, w=WEIGHTS_DEFAULT):
    """Calculates the D-measure between two graphs.

    The measure combines three terms: (1) the Jensen-Shannon divergence of
    the graphs' mean distance distributions, (2) the difference of their
    network node dispersions, and (3) the alpha-centrality divergence of
    the graphs and of their complements.

    Args:
        G1 (nx.Graph)
        G2 (nx.Graph)
        w (list of floats): weights w1, w2 and w3 from equation 2 of the
            original paper.

    Returns:
        D (float): D-measure between G1 and G2.
    """
    assert len(w) == 3, 'three weights have to be specified. Check argument `w`.'
    w1, w2, w3 = w

    # First term: JS divergence between the mean distance distributions,
    # normalized by log(2) so it lies in [0, 1].
    Pij_1 = distance_distribution(G1)
    Pij_2 = distance_distribution(G2)
    nnd_1, mu_1 = NND(Pij_1)
    nnd_2, mu_2 = NND(Pij_2)
    # Distributions may have different lengths (different N); align them
    # keeping the infinite-distance bin last.
    mu_1, mu_2 = zero_pad(mu_1, mu_2, end=True)
    mu_mean = (mu_1 + mu_2) / 2
    # np.maximum(..., 0) guards against tiny negative round-off under sqrt.
    first = np.sqrt(
        np.maximum(
            (entropy(mu_mean) - (entropy(mu_1) + entropy(mu_2)) / 2), 0)
        / np.log(2)
    )
    # Second term: difference of the (square-rooted) node dispersions.
    second = np.abs(np.sqrt(nnd_1) - np.sqrt(nnd_2))
    # Third term: alpha-centrality divergence of the graphs...
    alphaG_1 = alpha_centrality(G1)
    alphaG_2 = alpha_centrality(G2)
    # Centrality profiles are padded on the left (end=False) so the sorted
    # high-centrality tail stays aligned.
    alphaG_1, alphaG_2 = zero_pad(alphaG_1, alphaG_2, end=False)
    alphaG_mean = (alphaG_1 + alphaG_2) / 2
    third_1 = np.sqrt(
        np.maximum(
            (entropy(alphaG_mean) - (entropy(alphaG_1) + entropy(alphaG_2)) / 2), 0)
        / np.log(2)
    )
    # ...plus the same divergence computed on the graph complements.
    alphaGcomp_1 = alpha_centrality(nx.complement(G1))
    alphaGcomp_2 = alpha_centrality(nx.complement(G2))
    alphaGcomp_1, alphaGcomp_2 = zero_pad(alphaGcomp_1,
                                          alphaGcomp_2, end=False)
    alphaGcomp_mean = (alphaGcomp_1 + alphaGcomp_2) / 2
    third_2 = np.sqrt(
        np.maximum((entropy(alphaGcomp_mean) - (entropy(alphaGcomp_1) + entropy(alphaGcomp_2)) / 2), 0)
        / np.log(2)
    )
    third = third_1 + third_2

    # Weighted combination (the third weight is split over the two halves).
    return w1 * first + w2 * second + w3 / 2 * third
| StarcoderdataPython |
3376683 | <filename>code/ARAX/test/test_ARAX_resultify.py
#!/usr/bin/env python3
# Usage: python3 ARAX_resultify_testcases.py
# python3 ARAX_resultify_testcases.py test_issue692
import os
import sys
import pytest
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../ARAXQuery")
from response import Response
from typing import List, Union, Dict, Tuple
import ARAX_resultify
from ARAX_resultify import ARAXResultify
from ARAX_query import ARAXQuery
# is there a better way to import swagger_server? Following SO posting 16981921
PACKAGE_PARENT = '../../UI/OpenAPI/python-flask-server'
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), PACKAGE_PARENT)))
from swagger_server.models.edge import Edge
from swagger_server.models.node import Node
from swagger_server.models.q_edge import QEdge
from swagger_server.models.q_node import QNode
from swagger_server.models.query_graph import QueryGraph
from swagger_server.models.knowledge_graph import KnowledgeGraph
from swagger_server.models.result import Result
from swagger_server.models.message import Message
def _slim_kg(kg: KnowledgeGraph) -> KnowledgeGraph:
    """Return a copy of the KG keeping only the fields the tests care about."""
    trimmed_nodes = []
    for node in kg.nodes:
        trimmed_nodes.append(Node(id=node.id,
                                  type=node.type,
                                  name=node.name,
                                  qnode_ids=node.qnode_ids))
    trimmed_edges = []
    for edge in kg.edges:
        trimmed_edges.append(Edge(id=edge.id,
                                  source_id=edge.source_id,
                                  target_id=edge.target_id,
                                  type=edge.type,
                                  qedge_ids=edge.qedge_ids))
    return KnowledgeGraph(nodes=trimmed_nodes, edges=trimmed_edges)
def _create_node(node_id: str, node_type: List[str], qnode_ids: List[str], node_name: str = None) -> Node:
    """Build a KG Node and attach its qnode_ids bookkeeping attribute."""
    new_node = Node(id=node_id, type=node_type, name=node_name)
    # qnode_ids is not (yet?) a real attribute of the swagger Node class, so
    # it has to be attached after construction rather than via the initializer.
    new_node.qnode_ids = qnode_ids
    return new_node
def _create_edge(edge_id: str, source_id: str, target_id: str, qedge_ids: List[str], edge_type: str = None) -> Edge:
    """Build a KG Edge and attach its qedge_ids bookkeeping attribute."""
    new_edge = Edge(id=edge_id, source_id=source_id, target_id=target_id, type=edge_type)
    # qedge_ids is not (yet?) a real attribute of the swagger Edge class, so
    # it has to be attached after construction rather than via the initializer.
    new_edge.qedge_ids = qedge_ids
    return new_edge
def _print_results_for_debug(results: List[Result]):
    """Dump each result's essence and its node/edge bindings to stdout."""
    print()
    for result in results:
        print(result.essence)
        # One indented "qg_id: kg_id" line per binding.
        for node_binding in result.node_bindings:
            print(f"  {node_binding.qg_id}: {node_binding.kg_id}")
        for edge_binding in result.edge_bindings:
            print(f"  {edge_binding.qg_id}: {edge_binding.kg_id}")
def _get_result_nodes_by_qg_id(result: Result, kg_nodes_map: Dict[str, Node], qg: QueryGraph) -> Dict[str, Dict[str, Node]]:
    """Group a result's bound KG nodes by the qnode ID they fulfill."""
    nodes_by_qnode: Dict[str, Dict[str, Node]] = {}
    for qnode in qg.nodes:
        bound_nodes = {}
        for node_binding in result.node_bindings:
            if node_binding.qg_id == qnode.id:
                bound_nodes[node_binding.kg_id] = kg_nodes_map[node_binding.kg_id]
        nodes_by_qnode[qnode.id] = bound_nodes
    return nodes_by_qnode
def _get_result_edges_by_qg_id(result: Result, kg_edges_map: Dict[str, Edge], qg: QueryGraph) -> Dict[str, Dict[str, Edge]]:
    """Group a result's bound KG edges by the qedge ID they fulfill."""
    edges_by_qedge: Dict[str, Dict[str, Edge]] = {}
    for qedge in qg.edges:
        bound_edges = {}
        for edge_binding in result.edge_bindings:
            if edge_binding.qg_id == qedge.id:
                bound_edges[edge_binding.kg_id] = kg_edges_map[edge_binding.kg_id]
        edges_by_qedge[qedge.id] = bound_edges
    return edges_by_qedge
def _do_arax_query(actions_list: List[str], debug=False) -> Tuple[Response, Message]:
    """Run a full ARAX query from a list of DSL action strings.

    Returns the (response, message) pair.  When the query fails -- or when
    ``debug`` is requested -- the results and the debug-level log are
    printed for easier test diagnosis.
    """
    query = {"previous_message_processing_plan": {"processing_actions": actions_list}}
    araxq = ARAXQuery()
    response = araxq.query(query)
    message = araxq.message
    if response.status != 'OK' or debug:
        _print_results_for_debug(message.results)
        print(response.show(level=response.DEBUG))
    return response, message
def _run_resultify_directly(query_graph: QueryGraph,
                            knowledge_graph: KnowledgeGraph,
                            ignore_edge_direction=True,
                            debug=False) -> Tuple[Response, Message]:
    """Invoke the resultify action directly on a hand-built QG/KG pair,
    bypassing the rest of the ARAX pipeline (no expand/overlay steps).

    Returns the (response, message) pair; prints results and the debug log
    on failure or when ``debug`` is set.
    """
    response = Response()
    # Imported here because actions_parser lives in the ARAXQuery directory
    # added to sys.path at module load time.
    from actions_parser import ActionsParser
    actions_parser = ActionsParser()
    actions_list = [f"resultify(ignore_edge_direction={ignore_edge_direction})"]
    result = actions_parser.parse(actions_list)
    response.merge(result)
    actions = result.data['actions']
    assert result.status == 'OK'
    resultifier = ARAXResultify()
    message = Message(query_graph=query_graph,
                      knowledge_graph=knowledge_graph,
                      results=[])
    parameters = actions[0]['parameters']
    # Run resultify in debug mode so errors are raised rather than swallowed.
    parameters['debug'] = 'true'
    result = resultifier.apply(message, parameters)
    response.merge(result)
    if response.status != 'OK' or debug:
        _print_results_for_debug(message.results)
        print(response.show(level=response.DEBUG))
    return response, message
def _convert_shorthand_to_qg(shorthand_qnodes: Dict[str, str], shorthand_qedges: Dict[str, str]) -> QueryGraph:
    """Build a QueryGraph from shorthand dicts.

    ``shorthand_qnodes`` maps qnode_id -> is_set flag (any non-empty string,
    e.g. "is_set", marks the qnode as a set); ``shorthand_qedges`` maps
    qedge_id -> "source--target".
    """
    qnodes = []
    for qnode_id, is_set_flag in shorthand_qnodes.items():
        qnodes.append(QNode(id=qnode_id, is_set=bool(is_set_flag)))
    qedges = []
    for qedge_id, qnode_pair in shorthand_qedges.items():
        endpoints = qnode_pair.split("--")
        qedges.append(QEdge(id=qedge_id, source_id=endpoints[0], target_id=endpoints[1]))
    return QueryGraph(nodes=qnodes, edges=qedges)
def _convert_shorthand_to_kg(shorthand_nodes: Dict[str, List[str]], shorthand_edges: Dict[str, List[str]]) -> KnowledgeGraph:
    """Build a KnowledgeGraph from shorthand node/edge listings.

    ``shorthand_nodes`` maps qnode_id -> list of KG node IDs and
    ``shorthand_edges`` maps qedge_id -> list of "source--target" keys.
    A node/edge listed under several q-IDs accumulates them all.

    Bug fix: edges used to be *stored* under the key "qedge_id:edge_key"
    but *looked up* under "edge_key", so the qedge_ids accumulation was
    dead code and a shared edge produced duplicate Edge objects.  Store
    and look up with the same key, mirroring the node branch above.
    """
    nodes_dict = dict()
    for qnode_id, nodes_list in shorthand_nodes.items():
        for node_id in nodes_list:
            node = nodes_dict.get(node_id, Node(id=node_id, qnode_ids=[]))
            node.qnode_ids.append(qnode_id)
            nodes_dict[node_id] = node
    edges_dict = dict()
    for qedge_id, edges_list in shorthand_edges.items():
        for edge_key in edges_list:
            source_node_id = edge_key.split("--")[0]
            target_node_id = edge_key.split("--")[1]
            edge = edges_dict.get(edge_key, Edge(id=edge_key, source_id=source_node_id, target_id=target_node_id, qedge_ids=[]))
            edge.qedge_ids.append(qedge_id)
            edges_dict[edge_key] = edge
    return KnowledgeGraph(nodes=list(nodes_dict.values()), edges=list(edges_dict.values()))
def test01():
    """Two-hop QG (protein -> disease -> phenotype) with n02 marked is_set:
    the three phenotypes collapse into one set, so the two proteins each
    yield one result (2 total)."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'HP:56789',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:67890',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:34567',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'UniProtKB:12345',
                     'target_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'UniProtKB:23456',
                     'target_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:56789',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke04',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:67890',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:34567',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': False},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'phenotypic_feature',
                     'is_set': True})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n01',
                     'target_id': 'DOID:12345'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n02'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
def test02():
    """Same fixture as test01, but n01.is_set is None -- it must be treated
    the same as False, still producing one result per protein (2 total)."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'HP:56789',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:67890',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:34567',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'UniProtKB:12345',
                     'target_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'UniProtKB:23456',
                     'target_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:56789',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke04',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:67890',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:34567',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': None},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'phenotypic_feature',
                     'is_set': True})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n01',
                     'target_id': 'DOID:12345'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n02'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
def test03():
    """Same fixture as test02, but edge ke01 points disease -> protein
    (against the QG arrow); with ignore_edge_direction=True the two
    results must still be found."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'HP:56789',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:67890',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:34567',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'UniProtKB:23456',
                     'target_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:56789',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke04',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:67890',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'HP:34567',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': None},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'phenotypic_feature',
                     'is_set': True})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n01',
                     'target_id': 'DOID:12345'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n02'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph,
                                                            ignore_edge_direction=True)
    assert len(results_list) == 2
def test04():
    """QG chemical -> protein(set) <- disease: with n01 as a set, each of
    the two chemicals groups all its proteins into one result (2 total).
    UniProtKB:56789 has no edges and must not appear in any result."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'UniProtKB:56789',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'ChEMBL.COMPOUND:12345',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']},
                    {'id': 'ChEMBL.COMPOUND:23456',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke04',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke06',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': True},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'chemical_substance',
                     'is_set': False})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n02',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n01'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph,
                                                            ignore_edge_direction=True)
    assert len(results_list) == 2
def test05():
    """Same fixture as test04, but exercised through ARAXResultify.apply()
    with string parameters instead of calling the internal helper."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'UniProtKB:56789',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'ChEMBL.COMPOUND:12345',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']},
                    {'id': 'ChEMBL.COMPOUND:23456',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke04',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke06',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': True},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'chemical_substance',
                     'is_set': False},
                    )

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n02',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n01'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    message = Message(query_graph=query_graph,
                      knowledge_graph=knowledge_graph,
                      results=[])
    resultifier = ARAXResultify()
    # Parameters arrive as DSL strings, so booleans are spelled 'true'.
    input_parameters = {'ignore_edge_direction': 'true'}
    resultifier.apply(message, input_parameters)
    assert resultifier.response.status == 'OK'
    assert len(resultifier.message.results) == 2
def test07():
    """Same fixture as test04/test05, but routed through the ActionsParser
    path (parse the DSL string, then apply the parsed parameters)."""
    kg_node_info = ({'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['DOID:12345']},
                    {'id': 'UniProtKB:56789',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'ChEMBL.COMPOUND:12345',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']},
                    {'id': 'ChEMBL.COMPOUND:23456',
                     'type': 'chemical_substance',
                     'qnode_ids': ['n02']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'source_id': 'ChEMBL.COMPOUND:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke04',
                     'source_id': 'ChEMBL.COMPOUND:23456',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke05',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke06',
                     'source_id': 'DOID:12345',
                     'target_id': 'UniProtKB:23456',
                     'qedge_ids': ['qe02']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': True},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'chemical_substance',
                     'is_set': False})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n02',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n01'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    response = Response()
    from actions_parser import ActionsParser
    actions_parser = ActionsParser()
    actions_list = ['resultify(ignore_edge_direction=true)']
    result = actions_parser.parse(actions_list)
    response.merge(result)
    actions = result.data['actions']
    assert result.status == 'OK'
    resultifier = ARAXResultify()
    message = Message(query_graph=query_graph,
                      knowledge_graph=knowledge_graph,
                      results=[])
    parameters = actions[0]['parameters']
    parameters['debug'] = 'true'
    result = resultifier.apply(message, parameters)
    response.merge(result)
    assert len(message.results) == 2
    assert result.status == 'OK'
def test08():
    """One-hop QG with neither qnode a set: expect one result per n01 node."""
    qg = _convert_shorthand_to_qg({"n00": "", "n01": ""},
                                  {"e00": "n00--n01"})
    kg = _convert_shorthand_to_kg({"n00": ["DOID:731"],
                                   "n01": ["HP:01", "HP:02", "HP:03", "HP:04"]},
                                  {"e00": ["DOID:731--HP:01", "DOID:731--HP:02", "DOID:731--HP:03", "DOID:731--HP:04"]})
    response, message = _run_resultify_directly(qg, kg)
    assert response.status == 'OK'
    n01_node_ids = set()
    for node in message.knowledge_graph.nodes:
        if "n01" in node.qnode_ids:
            n01_node_ids.add(node.id)
    assert len(message.results) == len(n01_node_ids)
@pytest.mark.slow
def test09():
    """End-to-end: expand DOID:731 to its phenotypes, resultify, then cap
    the result count at 100 via filter_results."""
    actions = [
        "add_qnode(name=DOID:731, id=n00, type=disease, is_set=false)",
        "add_qnode(type=phenotypic_feature, is_set=false, id=n01)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "expand(edge_id=e00)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "filter_results(action=limit_number_of_results, max_results=100)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    assert len(message.results) == 100
def test10():
    """describe_me() must advertise the operation and its one parameter."""
    description = ARAXResultify().describe_me()
    assert 'brief_description' in description[0]
    assert 'ignore_edge_direction' in description[0]
@pytest.mark.slow
def test_example1():
    """End-to-end one-hop query (CHEMBL112 -> proteins): one result per
    protein in the KG, each with a non-None essence."""
    actions = [
        "add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)",
        "add_qnode(id=qg1, type=protein)",
        "add_qedge(source_id=qg1, target_id=qg0, id=qe0)",
        "expand(edge_id=qe0)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    assert len(message.results) == len({node.id for node in message.knowledge_graph.nodes if "qg1" in node.qnode_ids})
    assert message.results[0].essence is not None
def test_bfs():
    """_bfs_dists must return hop counts from the start node over the
    undirected adjacency map produced by _make_adj_maps."""
    qg_node_info = ({'id': 'n01',
                     'type': 'protein',
                     'is_set': None},
                    {'id': 'DOID:12345',
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'phenotypic_feature',
                     'is_set': True})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n01',
                     'target_id': 'DOID:12345'},
                    {'edge_id': 'qe02',
                     'source_id': 'DOID:12345',
                     'target_id': 'n02'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    qg = QueryGraph(qg_nodes, qg_edges)

    adj_map = ARAX_resultify._make_adj_maps(qg, directed=False, droploops=True)['both']
    bfs_dists = ARAX_resultify._bfs_dists(adj_map, 'n01')
    assert bfs_dists == {'n01': 0, 'DOID:12345': 1, 'n02': 2}
    bfs_dists = ARAX_resultify._bfs_dists(adj_map, 'DOID:12345')
    assert bfs_dists == {'n01': 1, 'DOID:12345': 0, 'n02': 1}
def test_bfs_in_essence_code():
    """Linear 4-node QG (disease -> protein -> gene -> phenotype): two
    protein paths give two results, and the essence-picking BFS must
    assign a non-None essence to each."""
    kg_node_info = ({'id': 'DOID:12345',
                     'type': 'disease',
                     'qnode_ids': ['n00']},
                    {'id': 'UniProtKB:12345',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'UniProtKB:23456',
                     'type': 'protein',
                     'qnode_ids': ['n01']},
                    {'id': 'FOO:12345',
                     'type': 'gene',
                     'qnode_ids': ['n02']},
                    {'id': 'HP:56789',
                     'type': 'phenotypic_feature',
                     'qnode_ids': ['n03']})

    kg_edge_info = ({'edge_id': 'ke01',
                     'target_id': 'UniProtKB:12345',
                     'source_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke02',
                     'target_id': 'UniProtKB:23456',
                     'source_id': 'DOID:12345',
                     'qedge_ids': ['qe01']},
                    {'edge_id': 'ke03',
                     'source_id': 'UniProtKB:12345',
                     'target_id': 'FOO:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke04',
                     'source_id': 'UniProtKB:23456',
                     'target_id': 'FOO:12345',
                     'qedge_ids': ['qe02']},
                    {'edge_id': 'ke05',
                     'source_id': 'FOO:12345',
                     'target_id': 'HP:56789',
                     'qedge_ids': ['qe03']})

    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]

    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]

    knowledge_graph = KnowledgeGraph(kg_nodes, kg_edges)

    qg_node_info = ({'id': 'n00',  # DOID:12345
                     'type': 'disease',
                     'is_set': False},
                    {'id': 'n01',
                     'type': 'protein',
                     'is_set': False},
                    {'id': 'n02',
                     'type': 'gene',
                     'is_set': False},
                    {'id': 'n03',  # HP:56789
                     'type': 'phenotypic_feature',
                     'is_set': False})

    qg_edge_info = ({'edge_id': 'qe01',
                     'source_id': 'n00',
                     'target_id': 'n01'},
                    {'edge_id': 'qe02',
                     'source_id': 'n01',
                     'target_id': 'n02'},
                    {'edge_id': 'qe03',
                     'source_id': 'n02',
                     'target_id': 'n03'})

    qg_nodes = [QNode(id=node_info['id'],
                      type=ARAX_resultify.BIOLINK_ENTITY_TYPE_OBJECTS[node_info['type']],
                      is_set=node_info['is_set']) for node_info in qg_node_info]

    qg_edges = [QEdge(id=edge_info['edge_id'],
                      source_id=edge_info['source_id'],
                      target_id=edge_info['target_id']) for edge_info in qg_edge_info]

    query_graph = QueryGraph(qg_nodes, qg_edges)

    results_list = ARAX_resultify._get_results_for_kg_by_qg(knowledge_graph,
                                                            query_graph)
    assert len(results_list) == 2
    assert results_list[0].essence is not None
@pytest.mark.slow
def test_issue680():
    """Full pipeline with overlay/filter steps and virtual edges (J1, P1):
    every result must be internally consistent -- intermediate nodes
    connected on both sides by real edges, and all bound edges joining
    nodes that are themselves bound in the same result."""
    actions = [
        "add_qnode(curie=DOID:14330, id=n00, type=disease)",
        "add_qnode(type=protein, is_set=true, id=n01)",
        "add_qnode(type=chemical_substance, id=n02)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "add_qedge(source_id=n01, target_id=n02, id=e01, type=physically_interacts_with)",
        "expand(edge_id=[e00,e01], kp=ARAX/KG1)",
        "overlay(action=compute_jaccard, start_node_id=n00, intermediate_node_id=n01, end_node_id=n02, virtual_relation_label=J1)",
        "filter_kg(action=remove_edges_by_attribute, edge_attribute=jaccard_index, direction=below, threshold=.2, remove_connected_nodes=t, qnode_id=n02)",
        "filter_kg(action=remove_edges_by_property, edge_property=provided_by, property_value=Pharos)",
        "overlay(action=predict_drug_treats_disease, source_qnode_id=n02, target_qnode_id=n00, virtual_relation_label=P1)",
        "resultify(ignore_edge_direction=true, debug=true)",
        "return(message=true, store=false)",
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    assert message.results[0].essence is not None
    kg_edges_map = {edge.id: edge for edge in message.knowledge_graph.edges}
    kg_nodes_map = {node.id: node for node in message.knowledge_graph.nodes}
    for result in message.results:
        result_nodes_by_qg_id = _get_result_nodes_by_qg_id(result, kg_nodes_map, message.query_graph)
        result_edges_by_qg_id = _get_result_edges_by_qg_id(result, kg_edges_map, message.query_graph)
        # Make sure all intermediate nodes are connected to at least one (real, not virtual) edge on BOTH sides
        for n01_node_id in result_nodes_by_qg_id['n01']:
            assert any(edge for edge in result_edges_by_qg_id['e00'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
            assert any(edge for edge in result_edges_by_qg_id['e01'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
        # Make sure all edges' nodes actually exist in this result (includes virtual and real edges)
        for qedge_id, edges_map in result_edges_by_qg_id.items():
            qedge = next(qedge for qedge in message.query_graph.edges if qedge.id == qedge_id)
            for edge_id, edge in edges_map.items():
                assert (edge.source_id in result_nodes_by_qg_id[qedge.source_id] and edge.target_id in
                        result_nodes_by_qg_id[qedge.target_id]) or \
                       (edge.target_id in result_nodes_by_qg_id[qedge.source_id] and edge.source_id in
                        result_nodes_by_qg_id[qedge.target_id])
# Tests that an error is thrown when an invalid parameter is passed to resultify
actions = [
'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
'expand()',
'resultify(ignore_edge_direction=true, INVALID_PARAMETER_NAME=true)',
"return(message=true, store=false)"
]
response, message = _do_arax_query(actions)
assert 'INVALID_PARAMETER_NAME' in response.show()
def test_issue686b():
    """resultify() must succeed when called with no parameters at all."""
    dsl = [
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify()',
        "return(message=true, store=false)"
    ]
    arax_response, _ = _do_arax_query(dsl)
    assert arax_response.status == 'OK'
def test_issue686c():
    """A bogus value for ignore_edge_direction must fail and echo the value."""
    dsl = [
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify(ignore_edge_direction=foo)',
        "return(message=true, store=false)"
    ]
    arax_response, _ = _do_arax_query(dsl)
    assert arax_response.status != 'OK'
    assert 'foo' in arax_response.show()
def test_issue687():
    """Verify that ignore_edge_direction is optional and one result is produced per KG node."""
    # Tests that ignore_edge_direction need not be specified
    actions = [
        'add_qnode(id=qg0, curie=CHEMBL.COMPOUND:CHEMBL112)',
        'expand()',
        'resultify(debug=true)',
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    # Single-qnode query: each KG node becomes its own result
    assert len(message.results) == len(message.knowledge_graph.nodes)
def test_issue727():
    """Verify that resultify matches KG edges even when their direction opposes the QG edge."""
    # Check resultify ignores edge direction appropriately
    shorthand_qnodes = {"n00": "",
                        "n01": ""}
    shorthand_qedges = {"e00": "n00--n01"}
    query_graph = _convert_shorthand_to_qg(shorthand_qnodes, shorthand_qedges)
    shorthand_kg_nodes = {"n00": ["DOID:111"],
                          "n01": ["PR:01", "PR:02"]}
    shorthand_kg_edges = {"e00": ["PR:01--DOID:111", "PR:02--DOID:111"]}  # Edges are reverse direction of QG
    knowledge_graph = _convert_shorthand_to_kg(shorthand_kg_nodes, shorthand_kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    # One result per n01 protein, despite the reversed edge direction
    assert len(message.results) == 2
def test_issue731():
    """Verify that no results are produced when part of the query graph is unfulfilled."""
    # Return no results if QG is unfulfilled
    shorthand_qnodes = {"n0": "",
                        "n1": "is_set",
                        "n2": ""}
    shorthand_qedges = {"e0": "n0--n1",
                        "e1": "n1--n2"}
    query_graph = _convert_shorthand_to_qg(shorthand_qnodes, shorthand_qedges)
    # n0/e0 are deliberately left empty, so no complete path exists
    shorthand_kg_nodes = {"n0": [],
                          "n1": ["UniProtKB:123", "UniProtKB:124"],
                          "n2": ["DOID:122"]}
    shorthand_kg_edges = {"e0": [],
                          "e1": ["UniProtKB:123--DOID:122", "UniProtKB:124--DOID:122"]}
    knowledge_graph = _convert_shorthand_to_kg(shorthand_kg_nodes, shorthand_kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    assert len(message.results) == 0
@pytest.mark.slow
def test_issue731b():
    """End-to-end check that every result of a two-hop query binds an e1 edge."""
    actions = [
        "add_qnode(name=MONDO:0005737, id=n0, type=disease)",
        "add_qnode(type=protein, id=n1)",
        "add_qnode(type=disease, id=n2)",
        "add_qedge(source_id=n0, target_id=n1, id=e0)",
        "add_qedge(source_id=n1, target_id=n2, id=e1)",
        "expand(edge_id=[e0,e1], kp=ARAX/KG2)",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    for result in message.results:
        # Each result must include at least one binding for the second hop
        found_e01 = any(edge_binding.qg_id == 'e1' for edge_binding in result.edge_bindings)
        assert found_e01
def test_issue731c():
    """Verify _get_results_for_kg_by_qg drops partial results that bind only one edge.

    The KG below fulfills e0 and (partially) e1; DOID:11077 is a dead-end n2
    node with no incoming edge, so no result may consist of a single edge.
    """
    qg = QueryGraph(nodes=[QNode(curie='MONDO:0005737',
                                 id='n0',
                                 type='disease'),
                           QNode(id='n1',
                                 type='protein'),
                           QNode(id='n2',
                                 type='disease')],
                    edges=[QEdge(source_id='n0',
                                 target_id='n1',
                                 id='e0'),
                           QEdge(source_id='n1',
                                 target_id='n2',
                                 id='e1')])
    kg_node_info = ({'id': 'MONDO:0005737',
                     'type': 'disease',
                     'qnode_ids': ['n0']},
                    {'id': 'UniProtKB:Q14943',
                     'type': 'protein',
                     'qnode_ids': ['n1']},
                    {'id': 'DOID:12297',
                     'type': 'disease',
                     'qnode_ids': ['n2']},
                    {'id': 'DOID:11077',
                     'type': 'disease',
                     'qnode_ids': ['n2']})
    kg_edge_info = ({'edge_id': 'UniProtKB:Q14943--MONDO:0005737',
                     'target_id': 'MONDO:0005737',
                     'source_id': 'UniProtKB:Q14943',
                     'qedge_ids': ['e0']},
                    {'edge_id': 'DOID:12297--UniProtKB:Q14943',
                     'target_id': 'UniProtKB:Q14943',
                     'source_id': 'DOID:12297',
                     'qedge_ids': ['e1']})
    kg_nodes = [_create_node(node_id=node_info['id'],
                             node_type=[node_info['type']],
                             qnode_ids=node_info['qnode_ids']) for node_info in kg_node_info]
    kg_edges = [_create_edge(edge_id=edge_info['edge_id'],
                             source_id=edge_info['source_id'],
                             target_id=edge_info['target_id'],
                             qedge_ids=edge_info['qedge_ids']) for edge_info in kg_edge_info]
    kg = KnowledgeGraph(nodes=kg_nodes, edges=kg_edges)
    results = ARAX_resultify._get_results_for_kg_by_qg(kg, qg)
    # No result may bind exactly one edge (that would be an incomplete path)
    indexes_results_with_single_edge = [index for index, result in enumerate(results) if len(result.edge_bindings) == 1]
    assert len(indexes_results_with_single_edge) == 0
def test_issue740():
    """Verify that a self-edge (node connected to itself) yields its own result."""
    # Tests that self-edges are handled properly
    shorthand_qnodes = {"n00": "",
                        "n01": ""}
    shorthand_qedges = {"e00": "n00--n01"}
    query_graph = _convert_shorthand_to_qg(shorthand_qnodes, shorthand_qedges)
    shorthand_kg_nodes = {"n00": ["CUI:C0004572"],  # Babesia
                          "n01": ["HP:01", "HP:02", "CUI:C0004572"]}
    shorthand_kg_edges = {"e00": ["CUI:C0004572--HP:01", "CUI:C0004572--HP:02", "CUI:C0004572--CUI:C0004572"]}
    knowledge_graph = _convert_shorthand_to_kg(shorthand_kg_nodes, shorthand_kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    # Two regular edges plus the self-edge => three results
    assert len(message.results) == 3
def test_issue692():
    """Verify that an empty KG and empty QG produce an empty results list."""
    kg = KnowledgeGraph(nodes=[],
                        edges=[])
    qg = QueryGraph(nodes=[],
                    edges=[])
    results_list = ARAX_resultify._get_results_for_kg_by_qg(kg, qg)
    assert len(results_list) == 0
def test_issue692b():
    """Verify that applying resultify to an empty message warns about the empty KG."""
    message = Message(query_graph=QueryGraph(nodes=[], edges=[]),
                      knowledge_graph=KnowledgeGraph(nodes=[], edges=[]))
    resultifier = ARAXResultify()
    response = resultifier.apply(message, {})
    assert 'WARNING: no results returned; empty knowledge graph' in response.messages_list()[0]
def test_issue720_1():
    """Verify result count when the same KG node fulfills multiple qnode ids in one result."""
    # Test when same node fulfills different qnode_ids within same result
    actions = [
        "add_qnode(curie=DOID:14330, id=n00)",
        "add_qnode(type=protein, curie=[UniProtKB:Q02878, UniProtKB:Q9BXM7], is_set=true, id=n01)",
        "add_qnode(type=disease, id=n02)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "add_qedge(source_id=n01, target_id=n02, id=e01)",
        "expand()",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    # With n01 as a set, there is exactly one result per distinct n02 node
    n02_nodes_in_kg = [node for node in message.knowledge_graph.nodes if "n02" in node.qnode_ids]
    assert len(message.results) == len(n02_nodes_in_kg)
    assert response.status == 'OK'
def test_issue720_2():
    """Same as test_issue720_1 but via KG2 with synonyms disabled."""
    # Test when same node fulfills different qnode_ids within same result
    actions = [
        "add_qnode(curie=CUI:C0158779, type=anatomical_entity, id=n00)",
        "add_qnode(curie=CUI:C0578454, type=phenotypic_feature, id=n01)",
        "add_qnode(type=anatomical_entity, id=n02)",
        "add_qedge(source_id=n00, target_id=n01, id=e00)",
        "add_qedge(source_id=n01, target_id=n02, id=e01)",
        "expand(use_synonyms=false, kp=ARAX/KG2)",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    n02_nodes_in_kg = [node for node in message.knowledge_graph.nodes if "n02" in node.qnode_ids]
    assert len(message.results) == len(n02_nodes_in_kg)
    assert response.status == 'OK'
def test_issue720_3():
    """Verify that one KG node (SNCA) can fulfill different qnode ids in different results."""
    # Tests when same node fulfills different qnode_ids in different results
    actions = [
        "add_qnode(id=n00, curie=DOID:14330)",  # parkinson's
        "add_qnode(id=n01, type=protein)",
        "add_qnode(id=n02, type=chemical_substance, curie=CHEMBL.COMPOUND:CHEMBL452076)",  # cilnidipine
        "add_qnode(id=n03, type=protein)",
        "add_qedge(id=e00, source_id=n00, target_id=n01)",
        "add_qedge(id=e01, source_id=n01, target_id=n02)",
        "add_qedge(id=e02, source_id=n02, target_id=n03)",
        "expand(use_synonyms=false)",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    snca_id = "UniProtKB:P37840"
    found_result_where_syna_is_n01_and_not_n03 = False
    found_result_where_syna_is_n03_and_not_n01 = False
    for result in message.results:
        # Where does SNCA appear in this particular result?
        syna_as_n01 = any(node_binding for node_binding in result.node_bindings if node_binding.kg_id == snca_id and node_binding.qg_id == 'n01')
        syna_as_n03 = any(node_binding for node_binding in result.node_bindings if node_binding.kg_id == snca_id and node_binding.qg_id == 'n03')
        if syna_as_n01 and not syna_as_n03:
            found_result_where_syna_is_n01_and_not_n03 = True
        elif syna_as_n03 and not syna_as_n01:
            found_result_where_syna_is_n03_and_not_n01 = True
    # SNCA must show up exclusively as n01 in some result and exclusively as n03 in another
    assert found_result_where_syna_is_n01_and_not_n03 and found_result_where_syna_is_n03_and_not_n01
def test_issue833_extraneous_intermediate_nodes():
    """Verify that dead-end intermediate nodes are pruned from results.

    MONDO:222 is a dead-end (no e02 edge), so only the single path through
    MONDO:111 should survive, and every intermediate node must be connected
    on both sides.
    """
    # Test for extraneous intermediate nodes
    shorthand_qnodes = {"n00": "",
                        "n01": "is_set",
                        "n02": "is_set",
                        "n03": ""}
    shorthand_qedges = {"e00": "n00--n01",
                        "e01": "n01--n02",
                        "e02": "n02--n03"}
    query_graph = _convert_shorthand_to_qg(shorthand_qnodes, shorthand_qedges)
    shorthand_kg_nodes = {"n00": ["DOID:1056"],
                          "n01": ["UniProtKB:111", "UniProtKB:222"],
                          "n02": ["MONDO:111", "MONDO:222"],  # Last one is dead-end
                          "n03": ["CHEBI:111"]}
    shorthand_kg_edges = {"e00": ["DOID:1056--UniProtKB:111", "DOID:1056--UniProtKB:222"],
                          "e01": ["UniProtKB:111--MONDO:111", "UniProtKB:222--MONDO:222"],
                          "e02": ["MONDO:111--CHEBI:111"]}
    knowledge_graph = _convert_shorthand_to_kg(shorthand_kg_nodes, shorthand_kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    kg_nodes_map = {node.id: node for node in message.knowledge_graph.nodes}
    kg_edges_map = {edge.id: edge for edge in message.knowledge_graph.edges}
    assert len(message.results) == 1
    for result in message.results:
        result_nodes_by_qg_id = _get_result_nodes_by_qg_id(result, kg_nodes_map, message.query_graph)
        result_edges_by_qg_id = _get_result_edges_by_qg_id(result, kg_edges_map, message.query_graph)
        # Make sure all intermediate nodes are connected to at least one (real, not virtual) edge on BOTH sides
        for n01_node_id in result_nodes_by_qg_id['n01']:
            assert any(edge for edge in result_edges_by_qg_id['e00'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
            assert any(edge for edge in result_edges_by_qg_id['e01'].values() if
                       edge.source_id == n01_node_id or edge.target_id == n01_node_id)
        # Make sure all edges' nodes actually exist in this result (includes virtual and real edges)
        for qedge_id, edges_map in result_edges_by_qg_id.items():
            qedge = next(qedge for qedge in message.query_graph.edges if qedge.id == qedge_id)
            for edge_id, edge in edges_map.items():
                assert (edge.source_id in result_nodes_by_qg_id[qedge.source_id] and edge.target_id in
                        result_nodes_by_qg_id[qedge.target_id]) or \
                       (edge.target_id in result_nodes_by_qg_id[qedge.source_id] and edge.source_id in
                        result_nodes_by_qg_id[qedge.target_id])
def test_single_node():
    """Verify that a single-qnode query yields one result per KG node."""
    actions = [
        "add_qnode(name=ibuprofen, id=n00)",
        "expand(node_id=n00)",
        "resultify(debug=true)",
        "return(message=true, store=false)"
    ]
    response, message = _do_arax_query(actions)
    assert response.status == 'OK'
    n00_nodes_in_kg = [node for node in message.knowledge_graph.nodes if "n00" in node.qnode_ids]
    assert len(message.results) == len(n00_nodes_in_kg)
def test_parallel_edges_between_nodes():
    """Verify handling of two parallel qedges (e01 and parallel01) between n01 and n02.

    Every n01 node in a result must be touched by an edge fulfilling EACH of
    the parallel qedges.
    """
    qg_nodes = {"n00": "",
                "n01": "is_set",
                "n02": ""}
    qg_edges = {"e00": "n00--n01",
                "e01": "n01--n02",
                "parallel01": "n01--n02"}
    query_graph = _convert_shorthand_to_qg(qg_nodes, qg_edges)
    kg_nodes = {"n00": ["DOID:11830"],
                "n01": ["UniProtKB:P39060", "UniProtKB:P20849"],
                "n02": ["CHEBI:85164", "CHEBI:29057"]}
    kg_edges = {"e00": ["DOID:11830--UniProtKB:P39060", "DOID:11830--UniProtKB:P20849"],
                "e01": ["UniProtKB:P39060--CHEBI:85164", "UniProtKB:P20849--CHEBI:29057"],
                "parallel01": ["UniProtKB:P39060--CHEBI:85164", "UniProtKB:P20849--CHEBI:29057", "UniProtKB:P39060--CHEBI:29057"]}
    knowledge_graph = _convert_shorthand_to_kg(kg_nodes, kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    kg_nodes_map = {node.id: node for node in message.knowledge_graph.nodes}
    kg_edges_map = {edge.id: edge for edge in message.knowledge_graph.edges}
    n02_nodes = {node_id for node_id, node in kg_nodes_map.items() if "n02" in node.qnode_ids}
    assert len(message.results) == len(n02_nodes)
    # Make sure every n01 node is connected to both an e01 edge and a parallel01 edge in each result
    for result in message.results:
        result_nodes_by_qg_id = _get_result_nodes_by_qg_id(result, kg_nodes_map, message.query_graph)
        result_edges_by_qg_id = _get_result_edges_by_qg_id(result, kg_edges_map, message.query_graph)
        node_ids_used_by_e01_edges = {edge.source_id for edge in result_edges_by_qg_id['e01'].values()}.union({edge.target_id for edge in result_edges_by_qg_id['e01'].values()})
        node_ids_used_by_parallel01_edges = {edge.source_id for edge in result_edges_by_qg_id['parallel01'].values()}.union({edge.target_id for edge in result_edges_by_qg_id['parallel01'].values()})
        for node_id in result_nodes_by_qg_id['n01']:
            assert node_id in node_ids_used_by_e01_edges
            assert node_id in node_ids_used_by_parallel01_edges
def test_issue912_clean_up_kg():
    """Verify that the returned KG is cleaned of unused nodes and orphan edges."""
    # Tests that the returned knowledge graph contains only nodes used in the results
    qg_nodes = {"n00": "",
                "n01": "is_set",
                "n02": ""}
    qg_edges = {"e00": "n00--n01",
                "e01": "n01--n02"}
    query_graph = _convert_shorthand_to_qg(qg_nodes, qg_edges)
    # NotConnected/DeadEnd nodes must be dropped from the final KG
    kg_nodes = {"n00": ["DOID:11", "DOID:NotConnected"],
                "n01": ["PR:110", "PR:111", "PR:DeadEnd"],
                "n02": ["CHEBI:11", "CHEBI:NotConnected"]}
    kg_edges = {"e00": ["DOID:11--PR:110", "DOID:11--PR:111", "DOID:11--PR:DeadEnd"],
                "e01": ["PR:110--CHEBI:11", "PR:111--CHEBI:11"]}
    knowledge_graph = _convert_shorthand_to_kg(kg_nodes, kg_edges)
    response, message = _run_resultify_directly(query_graph, knowledge_graph)
    assert response.status == 'OK'
    assert len(message.results) == 1
    returned_kg_node_ids = {node.id for node in message.knowledge_graph.nodes}
    assert returned_kg_node_ids == {"DOID:11", "PR:110", "PR:111", "CHEBI:11"}
    # Every surviving edge must connect two surviving nodes
    orphan_edges = {edge.id for edge in message.knowledge_graph.edges if not {edge.source_id, edge.target_id}.issubset(returned_kg_node_ids)}
    assert not orphan_edges
# Allow running this test module directly, without invoking the pytest CLI.
if __name__ == '__main__':
    pytest.main(['-v', 'test_ARAX_resultify.py'])
| StarcoderdataPython |
1720622 | from functools import reduce
from pandas import DataFrame
from pm4py.objects.log.log import Trace, EventLog
from src.labeling.common import add_label_column
# Not used in this module's visible code — presumably a hook for callers; TODO confirm.
ATTRIBUTE_CLASSIFIER = None
# Column-name prefix for the per-position event-name columns ("prefix_1", "prefix_2", ...).
PREFIX_ = 'prefix_'
def complex_features(log: EventLog, prefix_length, padding, labeling_type, feature_list: list = None) -> DataFrame:
    """Encode each trace of the log as one row of the "complex" encoding.

    Traces shorter than the prefix are skipped unless zero-padding is enabled.
    """
    columns, additional_columns = _columns_complex(log, prefix_length, feature_list)
    rows = [
        _trace_to_row(trace, prefix_length, additional_columns, padding, columns, labeling_type)
        for trace in log
        if padding or len(trace) > prefix_length - 1
    ]
    return DataFrame(columns=columns, data=rows)
def _get_global_trace_attributes(log: EventLog):
    # Union (not intersection) of the attribute keys across all traces,
    # minus the bookkeeping attributes, returned in sorted order.
    excluded = {"concept:name", "time:timestamp", "label"}
    all_keys = reduce(set.union, [set(trace._get_attributes().keys()) for trace in log])
    return sorted(key for key in all_keys if key not in excluded)
def _get_global_event_attributes(log):
"""Get log event attributes that are not name or time
"""
# retrieves all events in the log and returns their intersection
attributes = list(reduce(set.union, [set(event._dict.keys()) for trace in log for event in trace]))
event_attributes = [attr for attr in attributes if attr not in ["concept:name", "time:timestamp"]]
return sorted(event_attributes)
def _compute_additional_columns(log) -> dict:
    """Collect the trace-level and event-level attribute names of the log."""
    return {
        'trace_attributes': _get_global_trace_attributes(log),
        'event_attributes': _get_global_event_attributes(log),
    }
def _columns_complex(log, prefix_length: int, feature_list: list = None) -> tuple:
    """Assemble the column names of the complex encoding.

    Layout: trace_id, the trace attributes, then for each prefix position the
    event-name column followed by one column per event attribute, and finally
    the label. If feature_list is given it must match the computed columns.
    """
    additional_columns = _compute_additional_columns(log)
    columns = ['trace_id']
    columns.extend(additional_columns['trace_attributes'])
    for index in range(1, prefix_length + 1):
        columns.append(PREFIX_ + str(index))
        columns.extend(attribute + "_" + str(index) for attribute in additional_columns['event_attributes'])
    columns.append('label')
    if feature_list is not None:
        assert (list(feature_list) == columns)
    return columns, additional_columns
def _data_complex(trace: Trace, prefix_length: int, additional_columns: dict) -> list:
    """Creates list in form [1, value1, value2, 2, ...]

    Appends values in additional_columns
    """
    # Trace-level attributes first (missing ones default to 0) ...
    row = [trace.attributes.get(attribute, 0) for attribute in additional_columns['trace_attributes']]
    # ... then, for each event up to the prefix, the event name followed by
    # its additional attributes (missing ones default to the string '0').
    for position, event in enumerate(trace):
        if position == prefix_length:
            break
        row.append(event["concept:name"])
        row.extend(event.get(attribute, '0') for attribute in additional_columns['event_attributes'])
    return row
def _trace_to_row(trace: Trace, prefix_length: int, additional_columns, padding, columns: list, labeling_type) -> list:
    """Encode one trace as a row matching `columns`: id, data, optional zero padding, label."""
    row = [trace.attributes["concept:name"]]
    row.extend(_data_complex(trace, prefix_length, additional_columns))
    if padding:
        # Zero-fill up to (but excluding) the final label column.
        row.extend(0 for _ in range(len(row), len(columns) - 1))
    row.append(add_label_column(trace, labeling_type, prefix_length))
    return row
| StarcoderdataPython |
284808 | <filename>imp/dice.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import string
import random
# Simple recursive descent parser for dice rolls, e.g. '3d6+1d8+4'.
#
# roll := die {('+' | '-') die} ('+' | '-') modifier
# die := number 'd' number
# modifier := number
class StringBuf(object):
    """Minimal character stream over a string, with one-character pushback."""

    def __init__(self, s):
        self.s = s
        self.pos = 0

    def peek(self):
        """Return the current character without consuming it."""
        return self.s[self.pos]

    def getc(self):
        """Consume and return the current character."""
        ch = self.s[self.pos]
        self.pos += 1
        return ch

    def ungetc(self):
        """Push back the most recently consumed character."""
        self.pos -= 1

    def tell(self):
        """Return the current read position."""
        return self.pos
class Symbol(object):
    """A lexical token produced by next_symbol.

    Attributes:
        type_: one of NUMBER, D, PLUS, MINUS.
        pos: input position where the token starts.
        value: parsed integer for NUMBER tokens, otherwise None.
    """
    NUMBER = 0
    D = 1
    PLUS = 2
    MINUS = 3

    def __init__(self, type_, pos, value=None):
        # Bug fix: the original `def` line lacked its colon and body, which
        # is a syntax error. `value` now defaults to None (backward
        # compatible: all three positional arguments still work).
        self.type_ = type_
        self.pos = pos
        self.value = value
def next_symbol(s):
    """Scan and return the next token from StringBuf ``s`` as a Symbol.

    Skips leading whitespace, then recognizes:
      - a run of digits -> Symbol.NUMBER (value is the parsed int)
      - the letter 'd'  -> Symbol.D (die indicator)
      - '+' / '-'       -> Symbol.PLUS / Symbol.MINUS
    Raises ValueError on any other character. Reading past the end of the
    buffer raises IndexError (propagated from StringBuf.getc), as in the
    original sketch.
    """
    # Bug fix: the original body was incomplete (empty `pass` branches, a
    # dangling `sym = (Symbol.NUMBER,` tuple that was a syntax error, and a
    # placeholder `return ()`); this completes the scanner it sketched.
    c = s.getc()
    while c in string.whitespace:
        c = s.getc()
    # Position of the first character of the token (getc already advanced).
    start = s.tell() - 1
    if c in string.digits:
        # Start of a number: accumulate the remaining digits.
        literal = c
        while s.tell() < len(s.s):
            c = s.getc()
            if c not in string.digits:
                s.ungetc()
                break
            literal += c
        return Symbol(Symbol.NUMBER, start, int(literal))
    elif c == 'd':
        # Die indicator.
        return Symbol(Symbol.D, start, None)
    elif c == '+':
        # Plus sign.
        return Symbol(Symbol.PLUS, start, None)
    elif c == '-':
        # Minus sign.
        return Symbol(Symbol.MINUS, start, None)
    else:
        # Unrecognized input. Bug fix: the original concatenated the int
        # position onto the message, which itself raised TypeError.
        raise ValueError('Syntax error at position ' + str(s.tell()))
| StarcoderdataPython |
8170541 | # -*- coding: utf-8 -*-
import torch
from torch import nn
from support_DynamicNet import getActivationList, getPoolingList, convOutputShape
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class DynamicCNN(nn.Module):
    """Configurable CNN + feed-forward network built from a parameter dictionary.

    The ``parameters`` dict drives the whole construction: ``layers_cnn`` /
    ``layers_ff`` set the depth of the two parts, and per-layer lists
    (kernels, filters, strides, paddings, pooling, normalization, dropout,
    bias, groups, activations, neurons) configure each layer.  Optional keys
    fall back to defaults; some required keys raise ``Exception`` when
    missing.  ``parameters["h"]`` / ``parameters["w"]`` give the input
    height/width used to track tensor shapes during construction.
    """

    def __init__(self, parameters, print_var = False, tracking_input_dimension = False):
        """Build the network described by ``parameters``.

        print_var: if True, print each recovered parameter while building.
        tracking_input_dimension: if True, print the tensor shape after
            every convolution/pooling layer.
        """
        super().__init__()
        self.print_var = print_var
        self.tracking_input_dimension = tracking_input_dimension
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Parameters recovery and check
        # Set device for the training/execution
        if("device" in parameters.keys()): self.device = parameters["device"]
        else: self.device = device = torch.device("cpu")
        # Set the number of layers for convolutional part
        if("layers_cnn" in parameters.keys()):
            layers_cnn = int(parameters["layers_cnn"]) #0
            if(print_var): print("Layer CNN: {}".format(layers_cnn))
        else:
            layers_cnn = 0;
            if(print_var): print("Layer CNN: {}".format(layers_cnn))
            # raise Exception("No \"layers_cnn\" key inside the paramters dictionary")
        # Set the number of layers for linear part
        if("layers_ff" in parameters.keys()):
            layers_ff = int(parameters["layers_ff"]) #1
            if(print_var): print("Layer Linear: {}".format(layers_ff))
        else:
            layers_ff = 0
            if(print_var): print("Layer Linear: {}".format(layers_ff))
            # raise Exception("No \"layers_ff\" key inside the paramters dictionary")
        if(layers_cnn == 0 and layers_ff == 0): raise Exception("Both \"layers_cnn\" and \"layers_ff\" are set to 0. You must have at least one layer.")
        self.layers_cnn, self.layers_ff = layers_cnn, layers_ff
        # Set activation functions for each layer
        act = getActivationList()
        if("activation_list" in parameters.keys()):
            activation_list = parameters["activation_list"]
            # Check activation list length (N.B the +1 is added because there is the flatten layer between the cnn and the feed-forward part)
            if(len(activation_list) != layers_cnn + layers_ff + 1): raise Exception("wrong number of elements in activation_list")
            # Create the activation list of the two part of the network
            activation_list_cnn = activation_list[0:layers_cnn]
            activation_list_ff = activation_list[(layers_cnn + 1):]
            activation_flatten = activation_list[layers_cnn]
            if(print_var): print("Activation CNN: {}\nActivation Linear: {}\nActivation Flatten: {}".format(activation_list_cnn, activation_list_ff, activation_flatten))
        else:
            raise Exception("No \"activation_list\" key inside the paramters dictionary")
        if(layers_cnn != 0):
            # Set kernel list
            if("kernel_list" in parameters.keys() and layers_cnn != 0):
                kernel_list = convertTupleElementToInt(parameters["kernel_list"])
                # Check kernel list length
                if(len(kernel_list) != layers_cnn): raise Exception("Wrong number of elements in kernel_list")
                if(print_var): print("Kernels: {}".format(kernel_list))
            else:
                # NOTE(review): when "kernel_list" is absent, kernel_list is
                # referenced here before assignment -> NameError. TODO confirm
                # whether the commented-out Exception was the intended path.
                if(print_var): print("Kernels: {}".format(kernel_list))
                # raise Exception("No \"kernel_list\" key inside the paramters dictionary")
            # Set filter list
            if("filters_list" in parameters.keys() and layers_cnn != 0):
                filters_list = convertTupleElementToInt(parameters["filters_list"])
                # Check filter list length
                if(len(filters_list) != layers_cnn): raise Exception("Wrong number of elements in filters_list")
                if(print_var): print("Filters/Channels: {}".format(filters_list))
            else:
                raise Exception("No \"filters_list\" key inside the paramters dictionary")
            # Set stride list
            if("stride_list" in parameters.keys() and layers_cnn != 0):
                stride_list = convertTupleElementToInt(parameters["stride_list"])
                # Check stride list length
                if(len(stride_list) != layers_cnn): raise Exception("Wrong number of elements in stride_list")
                if(print_var): print("Stride List: {}".format(stride_list))
            else:
                # If no stride provided create a vector to set every stride to defualt value of conv2D
                stride_list = np.ones(layers_cnn).astype(int)
                if(print_var): print("Stride List: {}".format(stride_list))
            # Set padding list
            if("padding_list" in parameters.keys() and layers_cnn != 0):
                padding_list = convertTupleElementToInt(parameters["padding_list"])
                # Check padding list length
                if(len(padding_list) != layers_cnn): raise Exception("Wrong number of elements in padding_list")
                if(print_var): print("Padding List: {}".format(padding_list))
            else:
                # If no padding provided create a vector to set every pad to defualt value of conv2D
                padding_list = np.zeros(layers_cnn).astype(int)
                if(print_var): print("Padding List: {}".format(padding_list))
            # Set pooling list
            if("pooling_list" in parameters.keys() and layers_cnn != 0):
                pooling_list = parameters["pooling_list"]
                # Check pooling length
                if(len(pooling_list) != layers_cnn): raise Exception("Wrong number of elements in pooling_list")
                if(print_var): print("Pooling List: {}".format(pooling_list))
            else:
                # If no pooling provided create a vector of negative number so no pool layer will be added
                pooling_list = np.ones(layers_cnn).astype(int) * -1
                if(print_var): print("Pooling List: {}".format(pooling_list))
            # Set groups list
            if("groups_list" in parameters.keys() and layers_cnn != 0):
                groups_list = parameters["groups_list"]
                # Check group length
                if(len(groups_list) != layers_cnn): raise Exception("Wrong number of elements in group_list")
                if(print_var): print("Groups List: {}".format(groups_list))
            else:
                # If no groups provided create a vector of ones number so hte group will be set to its default value of 1
                groups_list = np.ones(layers_cnn).astype(int)
                if(print_var): print("Groups List: {}".format(groups_list))
            # Set Batch Normalization list
            if("CNN_normalization_list" in parameters.keys() and layers_cnn != 0):
                CNN_normalization_list = parameters["CNN_normalization_list"]
                # Check batch_normalization_list list length
                if(len(CNN_normalization_list) != layers_cnn): raise Exception("Wrong number of elements in CNN_normalization_list")
                if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
            else:
                # If no Batch was provided create a vector of negative number so no Batch layer will be added
                CNN_normalization_list = np.ones(layers_cnn).astype(int) * -1
                # The comparison with 100 is always False: normalization disabled by default.
                CNN_normalization_list = CNN_normalization_list > 100
                if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
        # Set dropout list
        if("dropout_list" in parameters.keys()):
            dropout_list = parameters["dropout_list"]
            # Check dropout list length
            if(len(dropout_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in dropout_list")
            dropout_list_cnn = dropout_list[0:layers_cnn]
            dropout_list_ff = dropout_list[(layers_cnn + 1):]
            dropout_flatten = dropout_list[layers_cnn]
            if(print_var): print("Dropout List: {}".format(dropout_list))
        else:
            # If no dropout was provided create a vector of negative number so no dropout layer will be added
            dropout_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
            dropout_list_cnn = dropout_list[0:layers_cnn]
            dropout_list_ff = dropout_list[(layers_cnn + 1):]
            dropout_flatten = dropout_list[layers_cnn]
            if(print_var): print("Dropout List: {}".format(dropout_list))
        # Set bias list
        if("bias_list" in parameters.keys()):
            bias_list = parameters["bias_list"]
            # Check bias list length
            if(len(bias_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in bias_list")
            bias_list_cnn = bias_list[0:layers_cnn]
            bias_list_ff = bias_list[(layers_cnn + 1):]
            bias_list_flatten = bias_list[layers_cnn]
            if(print_var): print("Bias List: {}".format(bias_list))
        else:
            # If no bias was provided create a vector of negative number so no bias will be added
            bias_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
            # The comparison with 1000 is always True: bias enabled by default.
            bias_list = bias_list < 1000
            bias_list_cnn = bias_list[0:layers_cnn]
            bias_list_ff = bias_list[(layers_cnn + 1):]
            bias_list_flatten = bias_list[layers_cnn]
            if(print_var): print("Bias List: {}".format(bias_list))
        # Set neuron list
        if("neurons_list" in parameters.keys()):
            neurons_list = parameters["neurons_list"]
            # Check activation list length
            if(len(neurons_list) != layers_ff): raise Exception("Wrong number of elements in neurons_list")
            # More than one linear layer: turn [a, b, c] into [(a, b), (b, c)]
            if(layers_ff != 1): neurons_list = convertArrayInTupleList(neurons_list)
            if(print_var): print("Neurons List: {}".format(neurons_list))
        else:
            # raise Exception("No \"Neurons_list\" key inside the paramters dictionary")
            neurons_list = []
            if(print_var): print("Neurons List: {}".format(neurons_list))
        # Add a empty line
        if(print_var): print()
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # CNN Construction
        # Temporary variable used to track the change in dimensions of the input
        if(layers_cnn != 0):
            tmp_input = torch.ones((1, filters_list[0][0], parameters["h"], parameters["w"]))
            if(tracking_input_dimension):
                print("# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ")
                print(tmp_input.shape, "\n")
            # Temporay list to store the layer
            tmp_list = []
            # Construction cycle
            for kernel, n_filter, stride, padding, pool, activation, normalization, p_dropout, groups, bias in zip(kernel_list, filters_list, stride_list, padding_list, pooling_list, activation_list_cnn, CNN_normalization_list, dropout_list_cnn, groups_list, bias_list_cnn):
                # Create the convolutional layer and add to the list
                if(groups == 1): tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, bias = bias)
                else: tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, groups = groups, bias = bias)
                tmp_list.append(tmp_cnn_layer)
                # Keep track of the outupt dimension
                tmp_input = tmp_cnn_layer(tmp_input)
                # Print the input dimensions at this step (if tracking_input_dimension is True)
                if(tracking_input_dimension):
                    print(tmp_cnn_layer)
                    print(tmp_input.shape, "\n")
                # (OPTIONAL) add batch normalization
                if(normalization): tmp_list.append(nn.BatchNorm2d(num_features = int(n_filter[1])))
                # (OPTIONAL) Add the activation
                if(activation != -1): tmp_list.append(act[activation])
                # (OPTIONAL) Add max pooling
                if(pool != -1):
                    # Retrieve the pooling list (with a cast to int for the kernel)
                    pool_kernel = (int(pool[1][0]), int(pool[1][1]))
                    pool_layer_list = getPoolingList(kernel = pool_kernel)
                    # Create the pool layer and add to the list.
                    tmp_pooling_layer = pool_layer_list[pool[0]]
                    tmp_list.append(tmp_pooling_layer)
                    # Keep track of the output dimension
                    tmp_input = tmp_pooling_layer(tmp_input)
                    # Print the input dimensions at this step (if tracking_input_dimension is True)
                    if(tracking_input_dimension):
                        print(tmp_pooling_layer)
                        print(tmp_input.shape)
                # (OPTIONAL) Dropout
                if(p_dropout > 0 and p_dropout < 1): tmp_list.append(torch.nn.Dropout(p = p_dropout))
            # Creation of the sequential object to store all the layer
            self.cnn = nn.Sequential(*tmp_list)
            # Plot a separator
            if(tracking_input_dimension): print("# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n")
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Flatten layer
        # NOTE(review): tmp_input is only assigned when layers_cnn != 0, so this
        # line raises NameError for a purely feed-forward configuration — TODO confirm.
        self.flatten_neurons = tmp_input.shape[1] * tmp_input.shape[2] * tmp_input.shape[3]
        if(layers_ff == 0):
            if(activation_flatten != -1): self.flatten_layer = act[activation_flatten]
            else: self.flatten_layer = nn.Identity()
            if(print_var): print("Flatten layer: {}\n".format(self.flatten_neurons))
        else:
            if(layers_ff == 1): tmp_flatten_layer = nn.Linear(self.flatten_neurons, neurons_list[0], bias = bias_list_flatten)
            else: tmp_flatten_layer = nn.Linear(self.flatten_neurons, neurons_list[0][0], bias = bias_list_flatten)
            tmp_list = []
            tmp_list.append(tmp_flatten_layer)
            if(activation_flatten != -1): tmp_list.append(act[activation_flatten])
            if(dropout_flatten > 0 and dropout_flatten < 1): tmp_list.append(torch.nn.Dropout(p = dropout_flatten))
            self.flatten_layer = nn.Sequential(*tmp_list)
            if(print_var):
                if(layers_ff == 1): print("Flatten layer: {}\n".format([self.flatten_neurons, neurons_list[0]]))
                else: print("Flatten layer: {}\n".format([self.flatten_neurons, neurons_list[0][0]]))
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Feed-Forward (Linear) construction
        if(layers_ff > 1):
            # Temporay list to store the layer
            tmp_list = []
            # Construction cycle
            for neurons, activation, p_dropout, bias in zip(neurons_list, activation_list_ff, dropout_list_ff, bias_list_ff):
                # NOTE(review): this branch is unreachable — the enclosing block requires layers_ff > 1.
                if(layers_ff == 1 and layers_cnn == 0): # Case for a single layer feed-forward network (perceptron style)
                    tmp_linear_layer = nn.Linear(parameters["h"] * parameters["w"], neurons, bias = bias)
                else:
                    tmp_linear_layer = nn.Linear(neurons[0], neurons[1], bias = bias)
                tmp_list.append(tmp_linear_layer)
                # (OPTIONAL) Add the activation
                if(activation != -1): tmp_list.append(act[activation])
                # (OPTIONAL) Dropout
                if(p_dropout > 0 and p_dropout < 1): tmp_list.append(torch.nn.Dropout(p = p_dropout))
            # Creation of the sequential object to store all the layer
            self.ff = nn.Sequential(*tmp_list)
        else: self.ff = []

    def forward(self, x):
        """Run the input through the CNN part (if any), flatten, then the FF part (if any)."""
        if(self.layers_cnn != 0):
            # Convolutional section
            x = self.cnn(x)
        # Flatten layer
        x = x.view([x.size(0), -1])
        x = self.flatten_layer(x)
        # Feed-forward (linear) section
        if(len(self.ff) > 0): x = self.ff(x)
        return x

    def printNetwork(self, separator = False):
        """Print every layer with its depth index, mirroring getMiddleResults numbering.

        separator: if True, print a separator line between layers.
        """
        depth = 0
        # Iterate through the module of the network
        for name, module in self.named_modules():
            # Iterate through the sequential block
            # Since in the iteration the sequential blocks and the modules inside the sequential block appear twice I only take the sequenial block
            if(type(module) == torch.nn.modules.container.Sequential):
                for layer in module:
                    # Print layer
                    print("DEPTH:", depth, "\t- ", layer)
                    # Incrase depth
                    depth += 1
                    if(separator): print("\n- - - - - - - - - - - - - - - - - - - - - - - - - - - \n")
                if(name == 'cnn'):
                    # Add reshape "layer"
                    print("DEPTH:", depth, "\t- ", "x.view([x.size(0), -1])")
                    if(separator): print("\n- - - - - - - - - - - - - - - - - - - - - - - - - - - \n")
                    depth += 1

    def getMiddleResults(self, x, input_depth, ignore_dropout = True):
        """Forward ``x`` through the network and return the activation at ``input_depth``.

        Depth indices match printNetwork. If input_depth exceeds the network
        depth the final output is returned.
        NOTE(review): ignore_dropout is accepted but never used — TODO confirm.
        """
        actual_depth = 0
        # Iterate through the module of the network
        for name, module in self.named_modules():
            # Iterate through the sequential block
            # Since in the iteration the sequential blocks and the modules inside the sequential block appear twice I only take the sequenial block
            if(type(module) == torch.nn.modules.container.Sequential):
                for layer in module:
                    # Evaluate the value of the input at this level
                    x = layer(x)
                    # If I reach the desire level I stop
                    if(actual_depth == input_depth): return x
                    # Increase depth level
                    actual_depth += 1
                # Reshape after the CNN block
                if(name == 'cnn'):
                    x = x.view([x.size(0), -1])
                    if(actual_depth == input_depth): return x
                    actual_depth += 1
        # If this istruction is reached it means that the input flow inside all the network.
        return x
#%%
def convertArrayInTupleList(array):
    """Pair each element of *array* with its successor.

    Parameters
    ----------
    array : numpy array/list

    Returns
    -------
    tuple_list. List of tuple
        Given the input array = [a, b, c, d ...] the result is
        [(a, b), (b, c), (c, d) ...].
    """
    # Zipping the sequence with itself shifted by one yields all
    # consecutive pairs in order.
    return list(zip(array, array[1:]))
def convertTupleElementToInt(tuple_list):
    """Cast both elements of every two-element tuple in *tuple_list* to int.

    N.B. Each tuple must contain exactly two elements.
    """
    return [(int(first), int(second)) for first, second in tuple_list]
| StarcoderdataPython |
5052610 | import lief
import sys
import os
import json
def main():
    """Parse the PE file named on the command line and act on any embedded
    application-security policy.

    The policy lives in a ``.appsec`` section whose payload starts with the
    magic string ``.appseclimits_`` followed by a JSON configuration blob.

    Usage: script.py <pe-file>
    """
    if len(sys.argv) != 2:
        return
    file_name = sys.argv[1]
    if os.path.isdir(file_name):
        return
    pe_binary = lief.parse(file_name)
    magic_string = ".appseclimits_"
    magic_len = len(magic_string)  # was a hard-coded 14; derive it instead
    for section in pe_binary.sections:
        # Only sections that can hold at least the magic plus a payload.
        if section.name != ".appsec" or section.size <= magic_len:
            continue
        # Section content is a sequence of byte values; decode the prefix.
        magic = ''.join(chr(x) for x in section.content[:magic_len])
        if magic != magic_string:
            continue
        json_config_data = ''.join(chr(x) for x in section.content[magic_len:])
        json_content = json.loads(json_config_data)
        # Expecting a JSON boolean here; `is False` avoids matching 0/"".
        if json_content["remote_process_access"] is False:
            print("Deny remote process access: WriteProcessMemory, etc")
        # ...
# Standard script entry point: run only when executed directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5056769 | <reponame>jfaccioni/dynafit
"""test_plotter.py - unit tests for plotter.py."""
import unittest
from typing import Sequence
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PolyCollection
from src.plotter import Plotter
from src.utils import array_in_sequence
class TestPlotterModule(unittest.TestCase):
    """Tests the plotter.py module."""
    # Keyword arguments used to build the Plotter under test.  Each array
    # holds small, distinct values so that the assertions below can
    # unambiguously identify which array was passed to which plot call.
    plotter_kwargs = {
        'xs': np.array([1, 2, 3]),
        'ys': np.array([4, 5, 6]),
        'scatter_xs': np.array([7, 8, 9]),
        'scatter_ys': np.array([10, 11, 12]),
        'show_violin': True,
        'violin_xs': np.array([13, 14, 15]),
        'violin_ys': [np.array([16, 17, 18]), np.array([19, 20, 21]), np.array([22, 23, 24])],
        'violin_q1': np.array([25, 26, 27]),
        'violin_medians': np.array([28, 29, 30]),
        'violin_q3': np.array([31, 32, 33]),
        'cumulative_ys': np.array([34, 35, 36]),
        'endpoint_ys': np.array([37, 38, 39]),
        'show_ci': True,
        'upper_ys': np.array([40, 41, 42]),
        'lower_ys': np.array([43, 44, 45]),
        'cumulative_upper_ys': np.array([46, 47, 48]),
        'cumulative_lower_ys': np.array([49, 50, 51]),
        'endpoint_upper_ys': np.array([52, 53, 54]),
        'endpoint_lower_ys': np.array([55, 56, 57]),
        'hist_xs': np.array([58, 59, 60]),
        'hist_intervals': np.array([61, 62, 63]),
    }

    def setUp(self) -> None:
        """Sets up each unit test by refreshing the Plotter instance, the MagicMock instance representing an Axes
        instance, the Figure instance and the Axes instance."""
        self.plotter = Plotter(**self.plotter_kwargs)
        self.mock_ax = MagicMock()
        self.fig, self.ax = plt.subplots()

    def tearDown(self) -> None:
        """Tears down each unit test by deleting the Figure and Axes instances."""
        self.ax.clear()
        plt.close(self.fig)
        del self.ax
        del self.fig

    def assertArrayIn(self, array: np.ndarray, sequence: Sequence) -> None:
        """Asserts whether a numpy array is inside a regular Python sequence."""
        self.assertTrue(array_in_sequence(array, sequence))

    def disable_violins(self) -> None:
        """Changes Plotter instance attributes to reflect a DynaFit analysis without violin plots enabled."""
        self.plotter.show_violin = False
        self.plotter.violin_ys = None
        self.plotter.violin_colors = None

    def disable_ci(self) -> None:
        """Changes Plotter instance attributes to reflect a DynaFit analysis without confidence interval enabled."""
        self.plotter.show_ci = False
        self.plotter.upper_ys = None
        self.plotter.lower_ys = None
        self.plotter.cumulative_upper_ys = None
        self.plotter.cumulative_lower_ys = None
        self.plotter.endpoint_upper_ys = None
        self.plotter.endpoint_lower_ys = None

    # --- CVP (colony variance plot) axis tests -------------------------

    @patch('test_plotter.Plotter.plot_supporting_lines')
    @patch('test_plotter.Plotter.plot_supporting_lines_ci')
    @patch('test_plotter.Plotter.plot_mean_line_ci')
    @patch('test_plotter.Plotter.plot_mean_line')
    @patch('test_plotter.Plotter.plot_bootstrap_violin_statistics')
    @patch('test_plotter.Plotter.plot_bootstrap_scatter')
    @patch('test_plotter.Plotter.format_cvp')
    @patch('test_plotter.Plotter.plot_bootstrap_violins')
    @patch('test_plotter.Plotter.format_violins')
    def test_plot_cvp_ax_calls_all_cvp_related_plot_functions(self, mock_format_violins, mock_plot_bootstrap_violins,
                                                              *cvp_functions) -> None:
        self.plotter.plot_cvp_ax(ax=self.mock_ax)
        mock_format_violins.assert_called_with(violins=mock_plot_bootstrap_violins.return_value)
        for cvp_function in cvp_functions:
            cvp_function.assert_called_with(ax=self.mock_ax)

    @patch('test_plotter.Plotter.plot_bootstrap_violins')
    @patch('test_plotter.Plotter.plot_supporting_lines_ci')
    @patch('test_plotter.Plotter.plot_mean_line_ci')
    def test_plot_cvp_ax_plots_everything_if_boolean_flags_are_set_to_true(self, mock_plot_mean_line_ci,
                                                                           mock_plot_supporting_lines_ci,
                                                                           mock_plot_bootstrap_violins) -> None:
        self.plotter.plot_cvp_ax(ax=self.mock_ax)
        for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci, mock_plot_bootstrap_violins):
            with self.subTest(mock_plot_function=mock_plot_function):
                mock_plot_function.assert_called_with(ax=self.mock_ax)

    @patch('test_plotter.Plotter.plot_bootstrap_violins')
    @patch('test_plotter.Plotter.plot_supporting_lines_ci')
    @patch('test_plotter.Plotter.plot_mean_line_ci')
    def test_plot_cvp_ax_does_not_plot_violins_if_flag_is_set_to_false(self, mock_plot_mean_line_ci,
                                                                       mock_plot_supporting_lines_ci,
                                                                       mock_plot_bootstrap_violins) -> None:
        self.disable_violins()
        self.plotter.plot_cvp_ax(ax=self.mock_ax)
        for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci):
            with self.subTest(mock_plot_function=mock_plot_function):
                mock_plot_function.assert_called_with(ax=self.mock_ax)
        mock_plot_bootstrap_violins.assert_not_called()

    @patch('test_plotter.Plotter.plot_bootstrap_violins')
    @patch('test_plotter.Plotter.plot_supporting_lines_ci')
    @patch('test_plotter.Plotter.plot_mean_line_ci')
    def test_plot_cvp_ax_does_not_add_ci_if_flag_is_set_to_false(self, mock_plot_mean_line_ci,
                                                                 mock_plot_supporting_lines_ci,
                                                                 mock_plot_bootstrap_violins) -> None:
        self.disable_ci()
        self.plotter.plot_cvp_ax(ax=self.mock_ax)
        mock_plot_bootstrap_violins.assert_called_with(ax=self.mock_ax)
        for mock_plot_function in (mock_plot_mean_line_ci, mock_plot_supporting_lines_ci):
            with self.subTest(mock_plot_function=mock_plot_function):
                mock_plot_function.assert_not_called()

    def test_plot_supporting_lines_plots_h0_and_h1_as_line_plots(self) -> None:
        self.plotter.plot_supporting_lines(ax=self.mock_ax)
        self.assertEqual(self.mock_ax.plot.call_count, 2)

    def test_plot_h0_plots_a_red_horizontal_line(self) -> None:
        with patch('test_plotter.Plotter.plot_h1'):  # do not call ax.plot inside Plotter.plot_h1 for this test
            self.plotter.plot_supporting_lines(ax=self.mock_ax)
        actual_args, actual_kwargs = self.mock_ax.plot.call_args
        self.assertEqual(*actual_args[-1])  # horizontal line: start and end Y coordinates are equal for h0
        self.assertIn(self.plotter.h0_color, actual_kwargs.values())

    def test_plot_h1_plots_a_blue_diagonal_line(self) -> None:
        with patch('test_plotter.Plotter.plot_h0'):  # do not call ax.plot inside Plotter.plot_h0 for this test
            self.plotter.plot_supporting_lines(ax=self.mock_ax)
        actual_args, actual_kwargs = self.mock_ax.plot.call_args
        self.assertGreater(*actual_args[-1])  # diagonal line: end Y coordinate is below start Y coordinate
        self.assertIn(self.plotter.h1_color, actual_kwargs.values())

    def test_plot_supporting_lines_plots_vertical_y_axis_as_a_vertical_line(self) -> None:
        self.plotter.plot_supporting_lines(ax=self.mock_ax)
        self.mock_ax.axvline.assert_called_once()

    def test_plot_mean_line_plots_a_green_line_of_sample_xy_values(self) -> None:
        self.plotter.plot_mean_line(ax=self.mock_ax)
        self.mock_ax.plot.assert_called_once()
        actual_args, actual_kwargs = self.mock_ax.plot.call_args
        self.assertIn(self.plotter.data_color, actual_kwargs.values())
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.ys, actual_args)

    def test_plot_bootstrap_scatter_plots_scatter_xs_and_ys(self) -> None:
        self.plotter.plot_bootstrap_scatter(ax=self.mock_ax)
        self.mock_ax.scatter.assert_called_once()
        actual_args, _ = self.mock_ax.scatter.call_args
        self.assertArrayIn(self.plotter.scatter_xs, actual_args)
        self.assertArrayIn(self.plotter.scatter_ys, actual_args)

    def test_plot_bootstrap_scatter_uses_scatter_edgecolor_and_facecolor_attributes(self) -> None:
        self.plotter.plot_bootstrap_scatter(ax=self.mock_ax)
        self.mock_ax.scatter.assert_called_once()
        _, actual_kwargs = self.mock_ax.scatter.call_args
        self.assertIn(self.plotter.scatter_edgecolor, actual_kwargs.values())
        self.assertIn(self.plotter.scatter_facecolor, actual_kwargs.values())

    def test_plot_bootstrap_violins_plots_violins(self) -> None:
        self.plotter.plot_bootstrap_violins(ax=self.mock_ax)
        self.mock_ax.violinplot.assert_called_once()
        actual_args, actual_kwargs = self.mock_ax.violinplot.call_args
        self.assertArrayIn(self.plotter.violin_xs, actual_kwargs.values())
        for expected_violin_array, actual_violin_array in zip(self.plotter.violin_ys, actual_kwargs.get('dataset')):
            with self.subTest(expected_violin_array=expected_violin_array, actual_violin_array=actual_violin_array):
                np.testing.assert_allclose(expected_violin_array, actual_violin_array)

    def test_plot_bootstrap_violins_returns_violins_as_a_list_of_polycollection_objects(self) -> None:
        return_value = self.plotter.plot_bootstrap_violins(ax=self.mock_ax)
        for expected_violin in return_value:
            self.assertIsInstance(expected_violin, PolyCollection)

    def test_format_violins_sets_violin_attributes_with_proper_values(self) -> None:
        mock_violin = MagicMock()
        self.plotter.format_violins(violins=[mock_violin])
        mock_violin.set_facecolor.assert_called_with(self.plotter.violin_facecolor)
        mock_violin.set_edgecolor.assert_called_with(self.plotter.violin_edgecolor)

    def test_plot_supporting_lines_ci_plots_h0_ci_and_h1_ci_as_filled_areas(self) -> None:
        self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
        self.assertEqual(self.mock_ax.fill_between.call_count, 2)

    def test_plot_h0_ci_fills_a_red_horizontal_area(self) -> None:
        with patch('test_plotter.Plotter.plot_h1_ci'):  # avoids ax.fill_between call inside Plotter.plot_h1_ci
            self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
        actual_args, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertEqual(*actual_args[-2])  # horizontal upper CI: start and end Y coordinates are equal for h0
        self.assertEqual(*actual_args[-1])  # horizontal lower CI: start and end Y coordinates are equal for h0
        self.assertIn(self.plotter.h0_color, actual_kwargs.values())

    def test_plot_h0_ci_fills_a_blue_diagonal_area(self) -> None:
        with patch('test_plotter.Plotter.plot_h0_ci'):  # avoids ax.fill_between call inside Plotter.plot_h0_ci
            self.plotter.plot_supporting_lines_ci(ax=self.mock_ax)
        actual_args, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertGreater(*actual_args[-2])  # diagonal upper CI: end Y coordinate is below start Y coordinate
        self.assertGreater(*actual_args[-1])  # diagonal lower CI: end Y coordinate is below start Y coordinate
        self.assertIn(self.plotter.h1_color, actual_kwargs.values())

    def test_plot_mean_line_ci_fills_an_area_of_xs_and_ys_values(self) -> None:
        self.plotter.plot_mean_line_ci(ax=self.mock_ax)
        self.mock_ax.fill_between.assert_called_once()
        actual_args, _ = self.mock_ax.fill_between.call_args
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.upper_ys, actual_args)
        self.assertArrayIn(self.plotter.lower_ys, actual_args)

    def test_plot_mean_line_ci_fills_an_area_with_correct_color(self) -> None:
        self.plotter.plot_mean_line_ci(ax=self.mock_ax)
        _, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertIn(self.plotter.data_color, actual_kwargs.values())

    def test_format_cvp_adds_xy_labels(self) -> None:
        self.assertFalse(self.ax.get_xlabel())
        self.assertFalse(self.ax.get_ylabel())
        self.plotter.format_cvp(ax=self.ax)
        self.assertTrue(self.ax.get_xlabel())
        self.assertTrue(self.ax.get_ylabel())

    # --- Hypothesis-distance axis tests --------------------------------

    @patch('test_plotter.Plotter.plot_hypothesis_lines')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
    @patch('test_plotter.Plotter.format_hypothesis_plot')
    @patch('test_plotter.Plotter.invert_hypothesis_plot_y_axis')
    @patch('test_plotter.Plotter.set_hypothesis_plot_limits')
    def test_plot_hypothesis_ax_calls_all_hypothesis_related_plot_functions(self, mock_set_hypothesis_plot_limits,
                                                                            *hypothesis_functions) -> None:
        xlims = (0, 5)
        self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=xlims)
        mock_set_hypothesis_plot_limits.assert_called_with(ax=self.mock_ax, xlims=xlims)
        for hypothesis_function in hypothesis_functions:
            hypothesis_function.assert_called_with(ax=self.mock_ax)

    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
    def test_plot_hypothesis_ax_returns_values_from_appropriate_functions(self, mock_cumulative_distance,
                                                                          mock_cumulative_ci, mock_endpoint_distance,
                                                                          mock_endpoint_ci) -> None:
        return_value = self.plotter.plot_hypothesis_ax(ax=self.ax, xlims=(0, 5))
        (cumulative_line, cumulative_ci), (endpoint_line, endpoint_ci) = return_value
        self.assertEqual(cumulative_line, mock_cumulative_distance.return_value)
        self.assertEqual(cumulative_ci, mock_cumulative_ci.return_value)
        self.assertEqual(endpoint_line, mock_endpoint_distance.return_value)
        self.assertEqual(endpoint_ci, mock_endpoint_ci.return_value)

    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_distance')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_distance')
    def test_plot_hypothesis_ax_returns_none_values_if_boolean_flags_are_set_to_false(self, mock_cumulative_distance,
                                                                                      mock_endpoint_distance) -> None:
        self.disable_ci()
        return_value = self.plotter.plot_hypothesis_ax(ax=self.ax, xlims=(0, 5))
        (cumulative_line, cumulative_ci), (endpoint_line, endpoint_ci) = return_value
        self.assertEqual(cumulative_line, mock_cumulative_distance.return_value)
        self.assertIsNone(cumulative_ci)
        self.assertEqual(endpoint_line, mock_endpoint_distance.return_value)
        self.assertIsNone(endpoint_ci)

    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
    def test_plot_hypothesis_ax_plots_everything_if_boolean_flags_are_set_to_true(self, plot_cumulative_hypothesis_ci,
                                                                                  plot_endpoint_hypothesis_ci) -> None:
        self.mock_ax.get_ylim.return_value = (0, 1)  # Mocking standard Axes limits
        self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=(0, 5))
        for mock_plot_function in (plot_cumulative_hypothesis_ci, plot_endpoint_hypothesis_ci):
            with self.subTest(mock_plot_function=mock_plot_function):
                mock_plot_function.assert_called_with(ax=self.mock_ax)

    @patch('test_plotter.Plotter.plot_endpoint_hypothesis_ci')
    @patch('test_plotter.Plotter.plot_cumulative_hypothesis_ci')
    def test_plot_hypothesis_ax_does_not_plot_ci_if_boolean_flags_are_set_to_false(self, plot_cumulative_hypothesis_ci,
                                                                                   plot_endpoint_hypothesis_ci) -> None:
        self.disable_ci()
        self.mock_ax.get_ylim.return_value = (0, 1)  # Mocking standard Axes limits
        self.plotter.plot_hypothesis_ax(ax=self.mock_ax, xlims=(0, 5))
        for mock_plot_function in (plot_cumulative_hypothesis_ci, plot_endpoint_hypothesis_ci):
            with self.subTest(mock_plot_function=mock_plot_function):
                mock_plot_function.assert_not_called()

    def test_plot_hypothesis_lines_plots_red_h0_at_y0(self) -> None:
        self.plotter.plot_hypothesis_lines(ax=self.mock_ax)
        (h0_args, h0_kwargs), _ = self.mock_ax.axhline.call_args_list
        self.assertIn(0, h0_args)
        self.assertIn(self.plotter.h0_color, h0_kwargs.values())

    def test_plot_hypothesis_lines_plots_red_h1_at_y1(self) -> None:
        self.plotter.plot_hypothesis_lines(ax=self.mock_ax)
        _, (h1_args, h1_kwargs) = self.mock_ax.axhline.call_args_list
        self.assertIn(1, h1_args)
        self.assertIn(self.plotter.h1_color, h1_kwargs.values())

    def test_plot_cumulative_hypothesis_distance_plots_line_of_cumulative_distance_values(self) -> None:
        self.plotter.plot_cumulative_hypothesis_distance(ax=self.mock_ax)
        self.mock_ax.plot.assert_called_once()
        actual_args, _ = self.mock_ax.plot.call_args
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.cumulative_ys, actual_args)

    def test_plot_cumulative_hypothesis_distance_plots_line_of_correct_color(self) -> None:
        self.plotter.plot_cumulative_hypothesis_distance(ax=self.mock_ax)
        _, actual_kwargs = self.mock_ax.plot.call_args
        self.assertIn(self.plotter.cumul_color, actual_kwargs.values())

    def test_plot_cumulative_hypothesis_returns_a_line2d_instance(self) -> None:
        expected_line2d = self.plotter.plot_cumulative_hypothesis_distance(ax=self.ax)
        self.assertIsInstance(expected_line2d, plt.Line2D)

    def test_plot_endpoint_hypothesis_distance_plots_line_of_endpoint_distance_values(self) -> None:
        self.plotter.plot_endpoint_hypothesis_distance(ax=self.mock_ax)
        self.mock_ax.plot.assert_called_once()
        actual_args, _ = self.mock_ax.plot.call_args
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.endpoint_ys, actual_args)

    def test_plot_endpoint_hypothesis_distance_plots_line_of_correct_color(self) -> None:
        self.plotter.plot_endpoint_hypothesis_distance(ax=self.mock_ax)
        _, actual_kwargs = self.mock_ax.plot.call_args
        self.assertIn(self.plotter.endp_color, actual_kwargs.values())

    def test_plot_endpoint_hypothesis_returns_a_line2d_instance(self) -> None:
        expected_line2d = self.plotter.plot_endpoint_hypothesis_distance(ax=self.ax)
        self.assertIsInstance(expected_line2d, plt.Line2D)

    def test_set_hypothesis_plot_limits_sets_x_limits_to_argument_passed_in(self) -> None:
        xlims = (-50, 50)
        self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=xlims)
        self.assertEqual(self.ax.get_xlim(), xlims)

    def test_set_hypothesis_plot_limits_does_not_adjust_with_y_limits_if_they_are_large_enough(self) -> None:
        ylims = (-50, 50)
        self.ax.set_ylim(ylims)
        self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=(0, 5))
        self.assertEqual(self.ax.get_ylim(), ylims)

    def test_set_hypothesis_plot_limits_adjusts_with_y_limits_if_they_are_not_large_enough(self) -> None:
        ylims = (-0.1, 0.1)
        self.plotter.set_hypothesis_plot_limits(ax=self.ax, xlims=(0, 5))
        self.assertNotEqual(self.ax.get_ylim(), ylims)
        self.assertEqual(self.ax.get_ylim(), (self.plotter.hypothesis_plot_lower_ylim,
                                              self.plotter.hypothesis_plot_upper_ylim))

    def test_plot_cumulative_hypothesis_ci_fills_an_area(self) -> None:
        self.plotter.plot_cumulative_hypothesis_ci(ax=self.mock_ax)
        self.mock_ax.fill_between.assert_called_once()
        actual_args, _ = self.mock_ax.fill_between.call_args
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.cumulative_upper_ys, actual_args)
        self.assertArrayIn(self.plotter.cumulative_lower_ys, actual_args)

    def test_plot_cumulative_hypothesis_ci_uses_correct_color(self) -> None:
        self.plotter.plot_cumulative_hypothesis_ci(ax=self.mock_ax)
        _, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertIn(self.plotter.cumul_color, actual_kwargs.values())

    def test_plot_cumulative_hypothesis_returns_a_polycollection_instance(self) -> None:
        expected_polycollection = self.plotter.plot_cumulative_hypothesis_ci(ax=self.ax)
        self.assertIsInstance(expected_polycollection, PolyCollection)

    def test_plot_endpoint_hypothesis_ci_fills_an_area(self) -> None:
        self.plotter.plot_endpoint_hypothesis_ci(ax=self.mock_ax)
        self.mock_ax.fill_between.assert_called_once()
        actual_args, _ = self.mock_ax.fill_between.call_args
        self.assertArrayIn(self.plotter.xs, actual_args)
        self.assertArrayIn(self.plotter.endpoint_upper_ys, actual_args)
        self.assertArrayIn(self.plotter.endpoint_lower_ys, actual_args)

    def test_plot_endpoint_hypothesis_ci_uses_correct_color(self) -> None:
        self.plotter.plot_endpoint_hypothesis_ci(ax=self.mock_ax)
        _, actual_kwargs = self.mock_ax.fill_between.call_args
        self.assertIn(self.plotter.endp_color, actual_kwargs.values())

    def test_plot_endpoint_hypothesis_returns_a_polycollection_instance(self) -> None:
        expected_polycollection = self.plotter.plot_endpoint_hypothesis_ci(ax=self.ax)
        self.assertIsInstance(expected_polycollection, PolyCollection)

    def test_format_hypothesis_plot_adds_title_labels_ticks_and_set_plot_legends(self) -> None:
        self.assertFalse(self.ax.get_title())
        self.assertFalse(self.ax.get_xlabel())
        self.assertFalse(self.ax.get_ylabel())
        self.ax.legend = MagicMock()
        self.plotter.format_hypothesis_plot(ax=self.ax)
        self.ax.legend.assert_called_once()
        self.assertTrue(self.ax.get_title())
        self.assertTrue(self.ax.get_xlabel())
        self.assertTrue(self.ax.get_ylabel())
        for expected_label, text_object in zip(['H0', 'H1'], self.ax.get_yticklabels()):
            actual_label = text_object.get_text()
            with self.subTest(expected_label=expected_label, actual_label=actual_label):
                self.assertEqual(expected_label, actual_label)

    def test_invert_hypothesis_plot_y_axis_calls_ax_invert_yaxis(self) -> None:
        self.mock_ax.invert_yaxis.assert_not_called()
        self.plotter.invert_hypothesis_plot_y_axis(ax=self.mock_ax)
        self.mock_ax.invert_yaxis.assert_called()

    # --- Histogram axis tests ------------------------------------------

    @patch('test_plotter.Plotter.plot_distributions')
    @patch('test_plotter.Plotter.plot_group_divisions')
    @patch('test_plotter.Plotter.format_histogram')
    def test_plot_histogram_ax_calls_all_histogram_related_plot_functions(self, *histogram_functions) -> None:
        self.plotter.plot_histogram_ax(ax=self.mock_ax)
        for histogram_function in histogram_functions:
            histogram_function.assert_called_with(ax=self.mock_ax)

    @patch('src.plotter.distplot')
    def test_plot_distributions_calls_seaborn_distplot(self, mock_seaborn_distplot) -> None:
        self.plotter.plot_distributions(ax=self.mock_ax)
        actual_args, actual_kwargs = mock_seaborn_distplot.call_args
        self.assertArrayIn(self.plotter.hist_xs, actual_args)
        self.assertArrayIn(self.plotter.hist_intervals, actual_kwargs.values())
        self.assertIn(self.mock_ax, actual_kwargs.values())

    def test_plot_group_divisions_adds_vertical_lines_based_on_breakpoints(self) -> None:
        self.plotter.plot_group_divisions(ax=self.mock_ax)
        actual_args, _ = self.mock_ax.vlines.call_args
        np.testing.assert_allclose(self.plotter.hist_intervals, actual_args[0])

    def test_plot_group_divisions_adds_vertical_lines_of_correct_colors(self) -> None:
        self.plotter.plot_group_divisions(ax=self.mock_ax)
        for interval, (_, actual_kwargs) in zip(self.plotter.hist_intervals, self.mock_ax.vlines.call_args_list):
            with self.subTest(interval=interval, actual_kwargs=actual_kwargs):
                self.assertIn(self.plotter.hist_interval_color, actual_kwargs.values())

    def test_format_histogram_modifies_title_and_xy_labels(self) -> None:
        self.assertFalse(self.ax.get_title())
        self.assertFalse(self.ax.get_xlabel())
        self.assertFalse(self.ax.get_ylabel())
        self.plotter.format_histogram(ax=self.ax)
        self.assertTrue(self.ax.get_title())
        self.assertTrue(self.ax.get_xlabel())
        self.assertTrue(self.ax.get_ylabel())
# Allow running this test module directly with `python test_plotter.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
156858 | <reponame>Granjow/platformio-core<gh_stars>1-10
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
from SCons.Action import Action # pylint: disable=import-error
from SCons.Script import ARGUMENTS # pylint: disable=import-error
from SCons.Script import AlwaysBuild # pylint: disable=import-error
from platformio import compat, fs
def VerboseAction(_, act, actstr):
    """Return *act* unchanged when PIOVERBOSE is set on the command line;
    otherwise wrap it in an SCons Action that prints *actstr* in place of
    the raw command."""
    verbose = int(ARGUMENTS.get("PIOVERBOSE", 0))
    return act if verbose else Action(act, actstr)
def PioClean(env, clean_dir):
    """Delete every build artifact under *clean_dir* and exit the process.

    Prints each removed file (relativized for readability where safe) and
    finally removes the directory tree itself.  Always terminates via
    ``env.Exit(0)`` -- this function never returns.
    """
    def _relpath(path):
        # On Windows, only relativize paths that are on the current drive
        # and do not escape the working directory; otherwise keep the
        # original (absolute) path to avoid producing misleading output.
        if compat.IS_WINDOWS:
            prefix = os.getcwd()[:2].lower()
            if (
                ":" not in prefix
                or not path.lower().startswith(prefix)
                or os.path.relpath(path).startswith("..")
            ):
                return path
        return os.path.relpath(path)

    if not os.path.isdir(clean_dir):
        print("Build environment is clean")
        env.Exit(0)
    clean_rel_path = _relpath(clean_dir)
    for root, _, files in os.walk(clean_dir):
        for f in files:
            dst = os.path.join(root, f)
            os.remove(dst)
            print(
                "Removed %s"
                % (dst if not clean_rel_path.startswith(".") else _relpath(dst))
            )
    print("Done cleaning")
    fs.rmtree(clean_dir)
    env.Exit(0)
def AddTarget(  # pylint: disable=too-many-arguments
    env,
    name,
    dependencies,
    actions,
    title=None,
    description=None,
    group="Generic",
    always_build=True,
):
    """Register a named build target on *env* and return its SCons alias.

    The target's metadata is recorded in the environment's
    ``__PIO_TARGETS`` registry (created on first use); registering the
    same name twice is a programming error.
    """
    if "__PIO_TARGETS" not in env:
        env["__PIO_TARGETS"] = {}
    registry = env["__PIO_TARGETS"]
    assert name not in registry
    registry[name] = {
        "name": name,
        "title": title,
        "description": description,
        "group": group,
    }
    target = env.Alias(name, dependencies, actions)
    if always_build:
        AlwaysBuild(target)
    return target
def AddPlatformTarget(env, *args, **kwargs):
    """Shortcut for registering a target in the "Platform" group."""
    return env.AddTarget(*args, group="Platform", **kwargs)
def AddCustomTarget(env, *args, **kwargs):
    """Shortcut for registering a target in the "Custom" group."""
    return env.AddTarget(*args, group="Custom", **kwargs)
def DumpTargets(env):
    """Return a list of descriptors for every registered target.

    Besides the targets recorded via ``AddTarget``, this always appends the
    built-in ``compiledb`` and ``clean`` targets, and injects a default
    ``upload`` target for embedded platforms that registered no platform
    targets of their own.
    """
    targets = env.get("__PIO_TARGETS") or {}
    # pre-fill default targets if embedded dev-platform
    if env.PioPlatform().is_embedded() and not any(
        t["group"] == "Platform" for t in targets.values()
    ):
        targets["upload"] = dict(name="upload", group="Platform", title="Upload")
    targets["compiledb"] = dict(
        name="compiledb",
        title="Compilation Database",
        description="Generate compilation database `compile_commands.json`",
        group="Advanced",
    )
    targets["clean"] = dict(name="clean", title="Clean", group="Generic")
    return list(targets.values())
def exists(_):
    # SCons tool protocol: report that this tool can always be loaded.
    return True
def generate(env):
    """SCons tool entry point: attach all helper methods to *env*."""
    for method in (
        VerboseAction,
        PioClean,
        AddTarget,
        AddPlatformTarget,
        AddCustomTarget,
        DumpTargets,
    ):
        env.AddMethod(method)
    return env
| StarcoderdataPython |
8108892 | #!/usr/bin/python
'''TuneHub Lyrics Library.
Copyright (C) 2011-2012 <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
__version__ = 2
import re, json, urllib2, urlparse
def escape(text):
    """Strip HTML from a lyrics snippet.

    ``<br>``-style tags become newlines, any remaining tags are removed.
    ``None`` passes through unchanged.
    """
    if text is None:
        return None
    for br_tag in ('<br>', '<br />', '</br>'):
        text = text.replace(br_tag, '\n')
    return re.sub('<[^<]+?>', '', text)
def url(template, artist, title, escaping):
    """Fill a site URL template.

    Replaces ``:artist`` and ``:title`` in *template* with the lowercased
    artist/title, using the site-specific space replacements given in the
    *escaping* dict (keys ``'artist'`` and ``'title'``).
    """
    artist_part = artist.lower().replace(' ', escaping['artist'])
    title_part = title.lower().replace(' ', escaping['title'])
    return template.replace(':artist', artist_part).replace(':title', title_part)
def parse(text, execsnippet):
    # Run a site-specific parsing snippet against the fetched page.  The
    # snippet reads the local `toparse` and must assign its answer to the
    # local `result`; any failure yields None.
    # NOTE(review): Python 2 `exec` statement -- this module is Python 2
    # only.  The snippet comes from the sites config file and is executed
    # verbatim, so only trusted configs should ever be loaded.
    result = None
    toparse = text
    try:
        exec execsnippet
        return result
    except:
        return None
class Lyric():
    # Fetches lyrics for (artist, title) by trying each configured site in
    # order until one of them yields a parsed, HTML-stripped lyric.
    def __init__(self, sitesdic):
        # sitesdic: list of site descriptors, each with 'url', 'escaping'
        # (space-replacement characters) and 'exec' (parsing snippet).
        self.sitesdic = sitesdic
        self.artist = self.title = ''
        self.lyric = None
    def get(self):
        # Try every site until a lyric is found; returns the lyric text,
        # or implicitly None when no site produced one.
        for site in self.sitesdic:
            error = False
            self.url = url(template = site['url'], artist = self.artist, title = self.title, escaping = site['escaping'])
            try:
                p = urllib2.urlopen(self.url)
            except:
                # Network/HTTP failure: skip this site.
                error = True
            if error == False:
                self.page = p.read()
                p.close()
                self.execsnippet = site['exec']
                self.parsed = parse(self.page, self.execsnippet)
                self.lyric = escape(self.parsed)
                print self.lyric
                if self.lyric != None:
                    break
        if self.lyric != None:
            return self.lyric
def init(filename = 'sites.json'):
    """Load the lyrics-site definitions from *filename* and return a
    ready-to-use Lyric instance.

    Also publishes the parsed configuration as the module-level ``sites``
    global (kept for backward compatibility with existing callers).
    """
    global sites
    # `with` guarantees the handle is closed even if reading fails
    # (the original leaked the file object on a read error).
    with open(filename, 'r') as f:
        sites = json.loads(f.read())
    return Lyric(sites)
# Interactive smoke test (Python 2 only): prompt for an artist and title,
# then look the lyric up through the configured sites.
if __name__ == '__main__':
    print 'Tunehub Lyriclib 2 Test'
    artist = raw_input('Artist: ')
    title = raw_input('Title: ')
    api = init()
    api.artist = artist
    api.title = title
    api.get()
    print api.url, api.lyric
4916773 | <reponame>voidstrike/TDPNet
import argparse
import torch
import os
import time
import imageio
import numpy as np
import torchvision.transforms as tfs
import sklearn.cluster as cls
from model.TDPNet import TDPNet
from torch.utils.data import DataLoader
from torch.optim import Adam, lr_scheduler
from torch.autograd import Variable
from collections import defaultdict
from datasets.mv_dataset import MultiViewDataSet, ShapeNet55
from metrics.evaluation_metrics import distChamferCUDA
from utils import visualize_point_clouds, tdp_validate
from pointnet.model import PointNetfeat
# Image preprocessing for ModelNet renderings: crop the 550x550 center,
# resize to the 224x224 network input size, and normalize each channel
# from [0, 1] to [-1, 1].
_transform = tfs.Compose([
    tfs.CenterCrop(550),
    tfs.Resize(224),
    tfs.ToTensor(),
    tfs.Normalize((.5, .5, .5), (.5, .5, .5))
])

# Same pipeline for ShapeNet renderings, which use a 256x256 center crop.
_transform_shape = tfs.Compose([
    tfs.CenterCrop(256),
    tfs.Resize(224),
    tfs.ToTensor(),
    tfs.Normalize((.5, .5, .5), (.5, .5, .5))
])
def main(conf):
    """Train TDPNet: optional 3D self-reconstruction pretraining, optional
    prototype re-clustering, then the 2D-image-to-3D-point-cloud stage.

    Fixes over the original: the function now reads its ``conf`` argument
    consistently (it previously used the module-level ``opt`` for the run
    name and the save/sample intervals), checkpoint directories are created
    with ``os.makedirs`` so missing parents no longer crash, the removed
    ``np.float`` alias is replaced by ``float``, and dead statements
    (``tgt_category = tgt_category``, unused ``num_classes``, trailing
    ``pass``) are gone.
    """
    # Load 3D prototype features, or zeros that must then be re-clustered.
    if conf.prototypes_npy != 'NOF':
        proto_corpus = np.load(conf.prototypes_npy)
        assert proto_corpus.shape[0] == conf.num_prototypes
    else:
        if not conf.reclustering:
            raise RuntimeError('Prototypes are not provided, must re-clustering or train from scratch')
        proto_corpus = np.zeros((conf.num_prototypes, 1024), dtype=float)
    # Checkpoint folders (was conf.model_path + the global opt.name).
    checkpoint_path = os.path.join(conf.model_path, conf.name)
    checkpoint_imgs = os.path.join(checkpoint_path, 'images')
    os.makedirs(checkpoint_imgs, exist_ok=True)  # also creates checkpoint_path
    root, ply_root, tgt_category = conf.root, conf.proot, conf.cat
    if conf.dataset == 'modelnet':
        mv_ds = MultiViewDataSet(root, ply_root, 'train', transform=_transform,
                                 sub_cat=tgt_category, number_of_view=1)
        mv_ds_test = MultiViewDataSet(root, ply_root, 'test', transform=_transform,
                                      sub_cat=tgt_category, number_of_view=1)
    elif conf.dataset == 'shapenet':
        mv_ds = ShapeNet55(root, tgt_category, 'train', transform=_transform_shape)
        mv_ds_test = ShapeNet55(root, tgt_category, 'test', transform=_transform_shape)
    else:
        raise RuntimeError(f'Dataset is suppose to be [modelnet|shapenet], but {conf.dataset} is given')
    ds_loader = DataLoader(mv_ds, batch_size=conf.batch_size, drop_last=True, shuffle=True)
    ds_loader_test = DataLoader(mv_ds_test, batch_size=conf.batch_size)
    print(f'Dataset summary : Categories: {mv_ds.classes} with length {len(mv_ds)}')
    print(f'Num of classes is {len(mv_ds.classes)}')
    # Initialize the model and (for pretraining / clustering) a PointNet encoder.
    model = TDPNet(conf, proto_corpus)
    point_feat_extractor = PointNetfeat()  # Required for re-clustering
    if conf.from_scratch:
        # Train decoder + PointNet encoder jointly on 3D self-reconstruction.
        model.cuda()
        point_feat_extractor.cuda()
        pre_optimizer = Adam(
            list(model.parameters()) + list(point_feat_extractor.parameters()),
            lr=1e-3,
            betas=(.9, .999),
        )
        print('Start Training 3D self-reconstruction-------------------------')
        for i in range(100):
            total_loss = 0.
            print('Start Epoch {}'.format(str(i + 1)))
            for _, pc, _, _ in ds_loader:
                pc = Variable(pc.transpose(2, 1).cuda())  # BatchSize x 3 x 2048
                pc_feat, _, _ = point_feat_extractor(pc)  # PointNet global feature
                pre_optimizer.zero_grad()
                syn_pc = model(pc_feat, False)
                ori_pc = pc.transpose(2, 1).contiguous()
                gen2gr, gr2gen = distChamferCUDA(syn_pc, ori_pc)
                cd_loss = gen2gr.mean(1) + gr2gen.mean(1)
                loss = cd_loss.sum()
                total_loss += loss.detach().item()
                loss.backward()
                pre_optimizer.step()
            print('Epoch {} -- Recon CD {}'.format(str(i + 1), total_loss / float(len(mv_ds))))
        # Save the pretrained pair so this stage can be skipped next time.
        print(f'Saving models at {checkpoint_path}')
        torch.save(point_feat_extractor.state_dict(),
                   os.path.join(checkpoint_path, 'pretrained_point_encoder.pt'))
        torch.save(model.state_dict(),
                   os.path.join(checkpoint_path, 'pretrained_point_decoder.pt'))
    else:
        print('Training from a pretrained encoder-decoder, loading pretrained models')
        model.load_state_dict(torch.load(os.path.join(checkpoint_path, 'pretrained_point_decoder.pt')))
        model.cuda()
    if conf.reclustering:
        print(f'Clustering from scratch, the number of cluster centroids would be {conf.num_prototypes}')
        point_feat_extractor.load_state_dict(torch.load(os.path.join(checkpoint_path, 'pretrained_point_encoder.pt')))
        point_feat_extractor.cuda()
        point_feat_extractor.eval()
        tmp_ds = MultiViewDataSet(root, ply_root, 'train', transform=_transform, sub_cat=tgt_category, number_of_view=1)
        corpus_builder = DataLoader(tmp_ds, batch_size=1)
        feature_list = list()
        for _, pc, _, _ in corpus_builder:
            with torch.no_grad():
                pc = Variable(pc.transpose(2, 1).cuda())
                point_feat, _, _ = point_feat_extractor(pc)
                feature_list.append(point_feat.detach().squeeze().cpu().numpy())
        # K-Means over per-shape PointNet features yields the 3D prototypes.
        feature_list = np.asarray(feature_list)
        operator = cls.KMeans(n_clusters=conf.num_prototypes, random_state=0).fit(feature_list)
        proto_corpus = operator.cluster_centers_
        model.update_prototypes(proto_corpus)
    print('Start Training 2D to 3D -------------------------------------------')
    optimizer = Adam(
        model.parameters(),
        lr=conf.lrate,
        betas=(.9, .999),
    )
    scheduler = lr_scheduler.StepLR(optimizer, step_size=int(conf.nepoch / 3), gamma=.5)
    start_time = time.time()
    for i in range(conf.nepoch):
        total_loss = 0.
        print('Start Epoch {}'.format(str(i + 1)))
        if i == min(50, int(conf.nepoch / 3)):
            print('Activated prototype finetune')
            model.activate_prototype_finetune()
        for idx, (multi_view, pc, _, _) in enumerate(ds_loader):
            mv = np.stack(multi_view, axis=1).squeeze(axis=1)
            mv = torch.from_numpy(mv).float()
            mv = Variable(mv.cuda())
            pc = Variable(pc.cuda())  # BatchSize x 2048 ground-truth points
            optimizer.zero_grad()
            syn_pc = model(mv)
            gen2gr, gr2gen = distChamferCUDA(syn_pc, pc)
            cd_loss = gen2gr.mean(1) + gr2gen.mean(1)
            loss = cd_loss.sum()
            total_loss += loss.detach().item()
            loss.backward()
            optimizer.step()
            if idx % 10 == 0:
                duration = time.time() - start_time
                start_time = time.time()
                print(
                    'Epoch %d Batch [%2d/%2d] Time [%3.2fs] Recon Nat %.10f' %
                    (i + 1, idx + 1, len(ds_loader), duration, loss.item() / float(conf.batch_size)))
        print('Epoch {} -- Recon Nat {}'.format(str(i + 1), total_loss / float(len(mv_ds))))
        # Save model checkpoint (was the global opt.save_interval).
        if conf.save_interval > 0 and i % conf.save_interval == 0:
            torch.save(model.state_dict(), os.path.join(checkpoint_path,
                                                        '{0}_iter_{1}.pt'.format(conf.name, str(i + 1))))
        # Validate on the test split (was the global opt.sample_interval).
        if conf.sample_interval > 0 and i % conf.sample_interval == 0:
            with torch.no_grad():
                tdp_validate(model, ds_loader_test)
            model.train()
        scheduler.step()
if __name__ == '__main__':
    # Command-line entry point: assemble the run configuration and call main().
    # NOTE(review): the help strings for --lr_decay_2 and --lr_decay_3 are
    # duplicated, and the lr_decay_* values appear unused by main() -- confirm.
    parser = argparse.ArgumentParser()
    # Training parameters
    parser.add_argument('--batch_size', type=int, default=32, help='input batch size')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--nepoch', type=int, default=101, help='number of epochs to train for')
    parser.add_argument('--random_seed', action="store_true", help='Fix random seed or not')
    parser.add_argument('--lrate', type=float, default=1e-4, help='learning rate')
    parser.add_argument('--lr_decay_1', type=int, default=120, help='learning rate decay 1')
    parser.add_argument('--lr_decay_2', type=int, default=140, help='learning rate decay 2')
    parser.add_argument('--lr_decay_3', type=int, default=145, help='learning rate decay 2')
    parser.add_argument('--device', type=str, default='cuda', help='Gpu usage')
    parser.add_argument('--dim_template', type=int, default=2, help='Template dimension')
    # Data
    parser.add_argument('--number_points', type=int, default=2048,
                        help='Number of point sampled on the object during training, and generated by atlasnet')
    parser.add_argument('--prototypes_npy', type=str, default='NOF', help='Path of the prototype npy file')
    # Save dirs and reload
    parser.add_argument('--name', type=str, default="0", help='training name')
    parser.add_argument('--dir_name', type=str, default="", help='name of the log folder.')
    # Network
    parser.add_argument('--num_layers', type=int, default=2, help='number of hidden MLP Layer')
    parser.add_argument('--hidden_neurons', type=int, default=512, help='number of neurons in each hidden layer')
    parser.add_argument('--nb_primitives', type=int, default=1, help='number of primitives')
    parser.add_argument('--template_type', type=str, default="SQUARE", choices=["SPHERE", "SQUARE"],
                        help='dim_out_patch')
    parser.add_argument('--bottleneck_size', type=int, default=1536, help='dim_out_patch')
    parser.add_argument('--activation', type=str, default='relu',
                        choices=["relu", "sigmoid", "softplus", "logsigmoid", "softsign", "tanh"], help='dim_out_patch')
    parser.add_argument('--num_prototypes', type=int, default=8, help='Number of prototypes')
    parser.add_argument('--num_slaves', type=int, default=4, help='Number of slave mlps per prototype')
    # Loss
    parser.add_argument('--no_metro', action="store_true", help='Compute metro distance')
    # Additional arguments
    parser.add_argument('--root', type=str, required=True, help='The path of multi-view dataset')
    parser.add_argument('--proot', type=str, required=True, help='The path of corresponding pc dataset')
    parser.add_argument('--cat', type=str, required=True, help='Target category')
    parser.add_argument('--model_path', type=str, default='../checkpoint')
    parser.add_argument('--sample_interval', type=int, default=10, help='The gap between each sampling process')
    parser.add_argument('--save_interval', type=int, default=20, help='The gap between each model saving')
    parser.add_argument('--from_scratch', action="store_true", help='Train the point_feature_extractor from scratch')
    parser.add_argument('--reclustering', action="store_true", help='Flag that controls the re-clustering behavior')
    parser.add_argument('--dataset', type=str, default='modelnet', help='The dataset to use, chose from [modelnet|shapenet]')
    opt = parser.parse_args()
    main(opt)
| StarcoderdataPython |
1930395 | # Copyright (c) OpenMMLab. All rights reserved.
import importlib
import logging
import os
import tempfile
from functools import partial
import mmcv
import pytest
import torch.multiprocessing as mp
import mmdeploy.utils as util
from mmdeploy.utils import target_wrapper
from mmdeploy.utils.constants import Backend, Codebase, Task
from mmdeploy.utils.export_info import dump_info
from mmdeploy.utils.test import get_random_name
# Shared fixtures for the config-utility tests: a known-good model/deploy
# config pair, plus an existing-but-empty file and a nonexistent path used to
# exercise the error branches of load_config.
correct_model_path = 'tests/data/srgan.py'
correct_model_cfg = mmcv.Config.fromfile(correct_model_path)
correct_deploy_path = 'tests/data/super-resolution.py'
correct_deploy_cfg = mmcv.Config.fromfile(correct_deploy_path)
empty_file_path = tempfile.NamedTemporaryFile(suffix='.py').name
empty_path = './a.py'
@pytest.fixture(autouse=True, scope='module')
def create_empty_file():
    """Ensure the empty temp config file exists for the whole test module.

    ``os.mknod`` raises FileExistsError when the path already exists and
    needs elevated privileges on some platforms; touching the file via
    ``open(..., 'a')`` is portable and idempotent.
    """
    with open(empty_file_path, 'a'):
        pass
class TestLoadConfigError:
    """load_config must reject missing arguments, wrong types, and missing files."""
    def test_load_config_none(self):
        with pytest.raises(AssertionError):
            util.load_config()
    def test_load_config_type_error(self):
        with pytest.raises(TypeError):
            util.load_config(1)
    def test_load_config_file_error(self):
        with pytest.raises(FileNotFoundError):
            util.load_config(empty_path)
class TestLoadConfig:
    """load_config accepts file paths and Config objects, alone or paired."""
    @pytest.mark.parametrize('args', [
        [empty_file_path],
        [correct_model_path],
        [correct_model_cfg],
        (correct_model_path, correct_deploy_path),
        (correct_model_path, correct_deploy_cfg),
        (correct_model_cfg, correct_deploy_cfg),
    ])
    def test_load_config(self, args):
        """Each returned config must equal the one loaded directly from its source."""
        configs = util.load_config(*args)
        for v in zip(configs, args):
            if isinstance(v[1], str):
                cfg = mmcv.Config.fromfile(v[1])
            else:
                cfg = v[1]
            assert v[0]._cfg_dict == cfg._cfg_dict
class TestGetCodebaseConfig:
    """get_codebase_config returns {} for an empty cfg and a populated dict otherwise."""
    def test_get_codebase_config_empty(self):
        assert util.get_codebase_config(mmcv.Config(dict())) == {}
    def test_get_codebase_config(self):
        codebase_config = util.get_codebase_config(correct_deploy_path)
        assert isinstance(codebase_config, dict) and len(codebase_config) > 1
class TestGetTaskType:
    """get_task_type asserts on an empty cfg and resolves the Task enum value."""
    def test_get_task_type_none(self):
        with pytest.raises(AssertionError):
            util.get_task_type(mmcv.Config(dict()))
    def test_get_task_type(self):
        assert util.get_task_type(correct_deploy_path) == Task.SUPER_RESOLUTION
class TestGetCodebase:
    """get_codebase asserts on an empty cfg and resolves the Codebase enum value."""
    def test_get_codebase_none(self):
        with pytest.raises(AssertionError):
            util.get_codebase(mmcv.Config(dict()))
    def test_get_codebase(self):
        assert util.get_codebase(correct_deploy_path) == Codebase.MMEDIT
class TestGetBackendConfig:
    """get_backend_config returns {} for an empty cfg and a one-entry dict otherwise."""
    def test_get_backend_config_empty(self):
        assert util.get_backend_config(mmcv.Config(dict())) == {}
    def test_get_backend_config(self):
        backend_config = util.get_backend_config(correct_deploy_path)
        assert isinstance(backend_config, dict) and len(backend_config) == 1
class TestGetBackend:
    """get_backend asserts on an empty cfg and resolves the Backend enum value."""
    def test_get_backend_none(self):
        with pytest.raises(AssertionError):
            util.get_backend(mmcv.Config(dict()))
    def test_get_backend(self):
        assert util.get_backend(correct_deploy_path) == Backend.ONNXRUNTIME
class TestGetOnnxConfig:
    """get_onnx_config returns {} for an empty cfg and the full onnx dict otherwise."""
    def test_get_onnx_config_empty(self):
        assert util.get_onnx_config(mmcv.Config(dict())) == {}
    def test_get_onnx_config(self):
        # Expected value mirrors tests/data/super-resolution.py verbatim.
        onnx_config = dict(
            dynamic_axes={
                'input': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                },
                'output': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                }
            },
            type='onnx',
            export_params=True,
            keep_initializers_as_inputs=False,
            opset_version=11,
            save_file='end2end.onnx',
            input_names=['input'],
            output_names=['output'],
            input_shape=None)
        assert util.get_onnx_config(correct_deploy_path) == onnx_config
class TestIsDynamic:
    """is_dynamic_batch / is_dynamic_shape across dict- and list-style dynamic_axes."""
    # No dynamic_axes at all.
    config_with_onnx_config = mmcv.Config(
        dict(onnx_config=dict(), backend_config=dict(type='default')))
    # dynamic_axes keyed by the default input name.
    config_with_dynamic_axes = mmcv.Config(
        dict(
            onnx_config=dict(
                type='onnx',
                dynamic_axes={'input': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                }}),
            backend_config=dict(type='default')))
    # dynamic_axes keyed by an explicit custom input name.
    config_with_dynamic_axes_and_input_names = mmcv.Config(
        dict(
            onnx_config=dict(
                type='onnx',
                input_names=['image'],
                dynamic_axes={'image': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                }}),
            backend_config=dict(type='default')))
    # dynamic_axes given positionally as a list of axis lists.
    config_with_dynamic_axes_list = mmcv.Config(
        dict(
            onnx_config=dict(
                type='onnx', input_names=['image'], dynamic_axes=[[0, 2, 3]]),
            backend_config=dict(type='default')))
    def test_is_dynamic_batch_none(self):
        assert util.is_dynamic_batch(
            TestIsDynamic.config_with_onnx_config) is False
    def test_is_dynamic_batch_error_name(self):
        assert util.is_dynamic_batch(TestIsDynamic.config_with_dynamic_axes,
                                     'output') is False
    def test_is_dynamic_batch(self):
        assert util.is_dynamic_batch(
            TestIsDynamic.config_with_dynamic_axes) is True
    def test_is_dynamic_batch_axes_list(self):
        assert util.is_dynamic_batch(
            TestIsDynamic.config_with_dynamic_axes_list) is True
    def test_is_dynamic_shape_none(self):
        assert util.is_dynamic_shape(
            TestIsDynamic.config_with_onnx_config) is False
    def test_is_dynamic_shape_error_name(self):
        assert util.is_dynamic_shape(TestIsDynamic.config_with_dynamic_axes,
                                     'output') is False
    def test_is_dynamic_shape(self):
        assert util.is_dynamic_shape(
            TestIsDynamic.config_with_dynamic_axes) is True
    def test_is_dynamic_shape_input_names(self):
        assert util.is_dynamic_shape(
            TestIsDynamic.config_with_dynamic_axes_and_input_names) is True
    def test_is_dynamic_shape_different_names(self):
        # Mismatched input name means the declared axes do not apply.
        config_with_different_names = \
            TestIsDynamic.config_with_dynamic_axes_and_input_names
        util.get_ir_config(
            config_with_different_names).input_names = 'another_name'
        assert util.is_dynamic_shape(config_with_different_names) is False
    def test_is_dynamic_shape_axes_list(self):
        assert util.is_dynamic_shape(
            TestIsDynamic.config_with_dynamic_axes_list) is True
class TestGetInputShape:
    """get_input_shape passes through None / 2-element shapes and rejects others."""
    config_without_input_shape = mmcv.Config(
        dict(onnx_config=dict(input_shape=None)))
    config_with_input_shape = mmcv.Config(
        dict(onnx_config=dict(input_shape=[1, 1])))
    config_with_error_shape = mmcv.Config(
        dict(onnx_config=dict(input_shape=[1, 1, 1])))
    def test_get_input_shape_none(self):
        assert util.get_input_shape(
            TestGetInputShape.config_without_input_shape) is None
    def test_get_input_shape_error(self):
        with pytest.raises(Exception):
            util.get_input_shape(TestGetInputShape.config_with_error_shape)
    def test_get_input_shape(self):
        assert util.get_input_shape(
            TestGetInputShape.config_with_input_shape) == [1, 1]
class TestCfgApplyMark:
    """cfg_apply_marks returns None without a partition_config, else the flag."""
    config_with_mask = mmcv.Config(
        dict(partition_config=dict(apply_marks=True)))
    def test_cfg_apply_marks_none(self):
        assert util.cfg_apply_marks(mmcv.Config(dict())) is None
    def test_cfg_apply_marks(self):
        assert util.cfg_apply_marks(TestCfgApplyMark.config_with_mask) is True
class TestGetPartitionConfig:
    """get_partition_config yields the config only when apply_marks is True."""
    config_with_mask = mmcv.Config(
        dict(partition_config=dict(apply_marks=True)))
    config_without_mask = mmcv.Config(
        dict(partition_config=dict(apply_marks=False)))
    def test_get_partition_config_none(self):
        assert util.get_partition_config(mmcv.Config(dict())) is None
    def test_get_partition_config_without_mask(self):
        assert util.get_partition_config(
            TestGetPartitionConfig.config_without_mask) is None
    def test_get_partition_config(self):
        assert util.get_partition_config(
            TestGetPartitionConfig.config_with_mask) == dict(apply_marks=True)
class TestGetCalib:
    """get_calib_config / get_calib_filename honor the create_calib flag."""
    config_with_calib = mmcv.Config(
        dict(calib_config=dict(create_calib=True, calib_file='calib_data.h5')))
    config_without_calib = mmcv.Config(
        dict(
            calib_config=dict(create_calib=False, calib_file='calib_data.h5')))
    def test_get_calib_config(self):
        assert util.get_calib_config(TestGetCalib.config_with_calib) == dict(
            create_calib=True, calib_file='calib_data.h5')
    def test_get_calib_filename_none(self):
        assert util.get_calib_filename(mmcv.Config(dict())) is None
    def test_get_calib_filename_false(self):
        # create_calib=False suppresses the filename even though one is set.
        assert util.get_calib_filename(
            TestGetCalib.config_without_calib) is None
    def test_get_calib_filename(self):
        assert util.get_calib_filename(
            TestGetCalib.config_with_calib) == 'calib_data.h5'
class TestGetCommonConfig:
    """get_common_config extracts backend_config.common_config verbatim."""
    config_with_common_config = mmcv.Config(
        dict(
            backend_config=dict(
                type='tensorrt', common_config=dict(fp16_mode=False))))
    def test_get_common_config(self):
        assert util.get_common_config(
            TestGetCommonConfig.config_with_common_config) == dict(
                fp16_mode=False)
class TestGetModelInputs:
    """get_model_inputs extracts backend_config.model_inputs verbatim."""
    config_with_model_inputs = mmcv.Config(
        dict(backend_config=dict(model_inputs=[dict(input_shapes=None)])))
    def test_model_inputs(self):
        assert util.get_model_inputs(
            TestGetModelInputs.config_with_model_inputs) == [
                dict(input_shapes=None)
            ]
class TestGetDynamicAxes:
    """get_dynamic_axes normalizes dict- and list-form dynamic_axes to a dict."""
    input_name = get_random_name()
    def test_with_empty_cfg(self):
        deploy_cfg = mmcv.Config()
        with pytest.raises(KeyError):
            util.get_dynamic_axes(deploy_cfg)
    def test_can_get_axes_from_dict(self):
        expected_dynamic_axes = {
            self.input_name: {
                0: 'batch',
                2: 'height',
                3: 'width'
            }
        }
        deploy_cfg = mmcv.Config(
            dict(onnx_config=dict(dynamic_axes=expected_dynamic_axes)))
        dynamic_axes = util.get_dynamic_axes(deploy_cfg)
        assert expected_dynamic_axes == dynamic_axes
    def test_can_not_get_axes_from_list_without_names(self):
        # Positional axes require input names from either the cfg or the caller.
        axes = [[0, 2, 3]]
        deploy_cfg = mmcv.Config(dict(onnx_config=dict(dynamic_axes=axes)))
        with pytest.raises(KeyError):
            util.get_dynamic_axes(deploy_cfg)
    def test_can_get_axes_from_list_with_args(self):
        axes = [[0, 2, 3]]
        expected_dynamic_axes = {self.input_name: axes[0]}
        axes_names = [self.input_name]
        deploy_cfg = mmcv.Config(dict(onnx_config=dict(dynamic_axes=axes)))
        dynamic_axes = util.get_dynamic_axes(deploy_cfg, axes_names)
        assert expected_dynamic_axes == dynamic_axes
    def test_can_get_axes_from_list_with_cfg(self):
        output_name = get_random_name()
        axes = [[0, 2, 3], [0]]
        expected_dynamic_axes = {
            self.input_name: axes[0],
            output_name: axes[1]
        }
        deploy_cfg = mmcv.Config(
            dict(
                onnx_config=dict(
                    input_names=[self.input_name],
                    output_names=[output_name],
                    dynamic_axes=axes)))
        dynamic_axes = util.get_dynamic_axes(deploy_cfg)
        assert expected_dynamic_axes == dynamic_axes
class TestParseDeviceID:
    """parse_device_id: cpu -> -1, cuda[:N] -> N, malformed cuda raises, else None."""
    def test_cpu(self):
        device = 'cpu'
        assert util.parse_device_id(device) == -1
    def test_cuda(self):
        device = 'cuda'
        assert util.parse_device_id(device) == 0
    def test_cuda10(self):
        device = 'cuda:10'
        assert util.parse_device_id(device) == 10
    def test_incorrect_cuda_device(self):
        device = 'cuda_5'
        with pytest.raises(RuntimeError):
            util.parse_device_id(device)
    def test_incorrect_device(self):
        device = 'abcd:1'
        assert util.parse_device_id(device) is None
def test_AdvancedEnum():
    """Task enum round-trips between member and string value via Task.get."""
    keys = [
        Task.TEXT_DETECTION, Task.TEXT_RECOGNITION, Task.SEGMENTATION,
        Task.SUPER_RESOLUTION, Task.CLASSIFICATION, Task.OBJECT_DETECTION
    ]
    vals = [
        'TextDetection', 'TextRecognition', 'Segmentation', 'SuperResolution',
        'Classification', 'ObjectDetection'
    ]
    for k, v in zip(keys, vals):
        assert Task.get(v) == k
        assert k.value == v
def test_export_info():
    """dump_info writes deploy.json, pipeline.json, and detail.json to the dir."""
    with tempfile.TemporaryDirectory() as dir:
        dump_info(correct_deploy_cfg, correct_model_cfg, dir, '')
        deploy_json = os.path.join(dir, 'deploy.json')
        pipeline_json = os.path.join(dir, 'pipeline.json')
        detail_json = os.path.join(dir, 'detail.json')
        assert os.path.exists(pipeline_json)
        assert os.path.exists(detail_json)
        assert os.path.exists(deploy_json)
def test_target_wrapper():
    """target_wrapper runs the target in a subprocess and stores its return value."""
    def target():
        return 0
    log_level = logging.INFO
    # Seed the shared value with -1 so a no-op wrapper would be detected.
    ret_value = mp.Value('d', 0, lock=False)
    ret_value.value = -1
    wrap_func = partial(target_wrapper, target, log_level, ret_value)
    process = mp.Process(target=wrap_func)
    process.start()
    process.join()
    assert ret_value.value == 0
def test_get_root_logger():
    """get_root_logger returns a usable logger (smoke test, no assertions)."""
    from mmdeploy.utils import get_root_logger
    logger = get_root_logger()
    logger.info('This is a test message')
def test_get_library_version():
    """get_library_version: None for unknown packages, __version__ for installed ones."""
    assert util.get_library_version('abcdefg') is None
    try:
        lib = importlib.import_module('setuptools')
    except ImportError:
        # setuptools absent: nothing more to verify in this environment.
        pass
    else:
        assert util.get_library_version('setuptools') == lib.__version__
def test_get_codebase_version():
    """Every reported codebase version matches a direct library lookup."""
    versions = util.get_codebase_version()
    for k, v in versions.items():
        assert v == util.get_library_version(k)
def test_get_backend_version():
    """Every reported backend version matches a direct library lookup."""
    versions = util.get_backend_version()
    for k, v in versions.items():
        assert v == util.get_library_version(k)
| StarcoderdataPython |
5041970 | <filename>cyder/cydhcp/vlan/views.py
from django.shortcuts import get_object_or_404
from cyder.base.views import cy_detail
from cyder.cydhcp.vlan.models import Vlan
def vlan_detail(request, pk):
    """Render the detail page for one VLAN, raising Http404 if it is missing."""
    vlan = get_object_or_404(Vlan, pk=pk)
    related_sections = {
        'Networks': 'network_set',
        'Attributes': 'vlanav_set',
    }
    return cy_detail(request, Vlan, 'vlan/vlan_detail.html',
                     related_sections, pk=pk, obj=vlan)
| StarcoderdataPython |
4977707 | import math
import numpy as np
# Returns whether M is in diagonal shape or not
# input: matrix M
# output: boolean
def is_diagonal(M):
    """Check whether matrix M is diagonal, i.e. zero everywhere off the diagonal."""
    reference = np.zeros(M.shape)
    np.fill_diagonal(reference, np.diagonal(M))
    return np.all(np.equal(reference, M))
# Solves Ax = b if A is in diagonal shape
# input: matrix A, vector b
# output: vector x solving Ax = b
def solve_AIsDiag(A, b):
    """Solve A x = b for a diagonal matrix A.

    input: diagonal matrix A, vector b
    output: vector x with x[i] = b[i] / A[i, i]

    Bug fix: the original stored the float quotient back into an array that
    inherited A's dtype, so integer-typed A silently floored every entry
    (e.g. diag([2, 4]) with b = [1, 2] produced [0, 0]).  Dividing in
    floating point, vectorized, returns the exact quotients.
    """
    diag = np.asarray(np.diag(A), dtype=float)
    return np.asarray(b, dtype=float) / diag
# CG algorithm to solve an equotion like A*x = b
# input: matrix s.p.d. A , n x 1 vector b, iteration maximum itmax, tolerance eps, startpoint x
# output: vector x solving the equotion
def CG(A, b, itmax, eps, x):
    """Conjugate-gradient solver for A x = b with a symmetric p.d. matrix A.

    input: s.p.d. matrix A, right-hand side b, iteration cap itmax,
           tolerance eps, starting point x
    output: approximate solution x.  Diagonal systems are solved directly.
    If A turns out not to be positive definite, one exact line-search step
    is taken and the current iterate is returned early.
    """
    if is_diagonal(A):
        return solve_AIsDiag(A, b)
    it = 0
    r = np.subtract(b, A.dot(x))
    r_norm = np.dot(r, r)
    r_new_norm = np.dot(r, r)
    p = np.zeros(b.size)
    beta = 0
    # Seed r_old away from r so the relative-change stopping test passes once.
    r_old = np.copy(r) + np.ones_like(b)
    while (math.sqrt(r_new_norm) > eps) and (np.linalg.norm(r_old - r) > eps * (1 + np.linalg.norm(r))):
        if(it == itmax):
            print("Maximale Iteration erreicht!")
            return x
        if it > 0: # only from the second iteration onwards
            beta = r_new_norm/r_norm # beta = r*r / r_old*r_old
            r_norm = r_new_norm
        p = np.add(r, beta*p) # p = r + beta*p
        if np.dot(p, np.dot(A, p)) <= 0: # abort if A is not positive definite
            x = x + (np.dot(b - np.dot(A, x), p) / np.dot(p, np.dot(A, p))) * p
            return x
        alpha = r_norm/(np.dot(p, np.dot(A, p))) # alpha = r*r/(p*A*p)
        x = np.add(x, alpha*p) # x = x + alpha*p
        r_old = np.copy(r) # save r
        r = np.subtract(r, alpha*np.dot(A, p)) # r = r - alpha*A*p
        r_new_norm = np.dot(r, r) # r*r
        it = it + 1
    return x
| StarcoderdataPython |
1698582 | import numpy as np
import scipy.linalg as spla
from embedding import convert_to_graph
def get_degree_matrix(A):
    """Build the diagonal degree matrix D whose entries are A's row sums."""
    degrees = np.sum(A, axis=1)
    return np.diag(degrees, 0)
def get_laplacian(A):
    """Unnormalized graph Laplacian L = D - A, with D the diagonal degree matrix."""
    return np.diag(np.sum(A, axis=1), 0) - A
def _spectral_clustering_by_connected_components(L):
n,_ = L.shape[0]
u,v = spla.eigh(L)
number_of_components = 1 + np.where(np.abs(u)<1e-14)[0][-1]
clusters = np.zeros(n)
for x in range(number_of_components):
mask = np.abs(v[:,x]) > 1e-14
clusters[mask] = x
shift = number_of_components // 2
return clusters - shift
# Spectral clustering using the Fiedler vector (second-smallest eigenvector)
# Relaxing the integer conditions in minimizing the number of edges between partitions
# leads to the solution of the second-smallest eigenvector. (The eigenvector for the smallest
# eigenvalue assigns all nodes to the same partition, assuming the graph is connected).
#
# See www.blog.shriphani.com/2015/04/06/the-smallest-eigenvalues-of-a-graph-laplacian
#
def _spectral_clustering_by_fiedler_vector(L,kernel='sgn',explore=False):
u,v = spla.eigh(L,eigvals=(0,1))
assert u[1] > 1e-14, "Multiplicity of 0 eigenvalues is > 1. Multiple connected components exist."
clusters = v[:,1]
if explore:
return clusters
if kernel == 'sgn':
return np.sign(clusters)
elif kernel == 'mean':
return(v[:,1] > np.mean(v[:,1])).astype(int)*2 - 1
elif kernel == 'median':
return(v[:,1] > np.median(v[:,1])).astype(int)*2 - 1
def spectral_clustering(X, method='fiedler', affinity_measure='euclidean', epsilon=1, truncate=False, threshold=0.1, kernel='sgn', explore=False):
    """Cluster the rows of X via spectral methods on an affinity graph.

    method='fiedler' bipartitions a connected graph; method='components'
    labels graph components.

    Bug fix: ``epsilon`` is now forwarded to ``convert_to_graph`` instead of
    the hard-coded literal 1, so tuning epsilon actually changes the graph.
    An unknown ``method`` now raises ValueError instead of an
    UnboundLocalError at the return statement.
    """
    A = convert_to_graph(X, affinity_measure=affinity_measure, epsilon=epsilon, truncate=truncate, threshold=threshold)
    L = get_laplacian(A)
    if method == 'fiedler':
        clusters = _spectral_clustering_by_fiedler_vector(L, kernel=kernel, explore=explore)
    elif method == 'components':
        clusters = _spectral_clustering_by_connected_components(L)
    else:
        raise ValueError("method must be 'fiedler' or 'components', got %r" % (method,))
    return clusters
# shows histogram of affinity measurements to help tune epsilon and to help set an appropriate threshold if truncating
def explore_graph_formation(X, affinity_measure='euclidean', epsilon=1):
    """Show a histogram of affinity measurements to help tune epsilon/threshold.

    Bug fix: matplotlib was never imported in this module, so this function
    raised NameError on ``plt``; it is imported locally here to keep module
    import free of a plotting dependency.
    """
    import matplotlib.pyplot as plt
    As = convert_to_graph(X, affinity_measure=affinity_measure, epsilon=epsilon, explore=True)
    plt.figure(figsize=(12, 8))
    plt.hist(As, bins=30, color='b')
    plt.xlabel('affinity measurement')
    plt.ylabel('frequency')
    plt.title('Distribution of affinity measurements with epsilon=%.3f' % epsilon)
    plt.show()
# returns histogram of pre-bucketed clusters to aid in deciding which kernel to use
# if distribution is not centered about 0, mean may be best
# if distribution is skewed, median may be best
def explore_spectral_clustering(X, affinity_measure='euclidean', epsilon=1, truncate=False, threshold=0.1):
    """Show a histogram of raw (pre-discretized) Fiedler-vector cluster values.

    A distribution not centered at 0 suggests the 'mean' kernel; a skewed one
    suggests 'median'.  Bug fix: matplotlib was never imported in this module,
    so this function raised NameError on ``plt``; it is imported locally here.
    """
    import matplotlib.pyplot as plt
    vec = spectral_clustering(X, method='fiedler', affinity_measure=affinity_measure, epsilon=epsilon, truncate=truncate, threshold=threshold, explore=True)
    plt.figure(figsize=(12, 8))
    plt.hist(vec, bins=30, color='b')
    plt.xlabel('raw cluster assignment')
    plt.ylabel('frequency')
    plt.title('Distribution of pre-discretized cluster assignments with epsilon=%.3f and threshold=%.3f' % (epsilon, threshold))
    plt.show()
| StarcoderdataPython |
3388016 | <reponame>Zhylkaaa/nboost<filename>nboost/plugins/qa/base.py<gh_stars>0
from typing import Tuple
import time
from nboost.plugins import Plugin
from nboost.delegates import ResponseDelegate
from nboost.database import DatabaseRow
from nboost import defaults
from nboost.logger import set_logger
class QAModelPlugin(Plugin):
    """Base class for question-answering plugins.

    Subclasses implement :meth:`get_answer`; :meth:`on_response` then runs it
    over every candidate value in the search response and attaches the
    extracted answers (plus QA latency) to the response body.
    """
    def __init__(self,
                 max_query_length: type(defaults.max_query_length) = defaults.max_query_length,
                 model_dir: str = defaults.qa_model_dir,
                 max_seq_len: int = defaults.max_seq_len,
                 **kwargs):
        super().__init__(**kwargs)
        self.model_dir = model_dir  # location of the QA model weights
        self.max_query_length = max_query_length  # truncation limit for the query
        self.max_seq_len = max_seq_len  # truncation limit for query + context
        self.logger = set_logger('qamodel', verbose=True)
    def on_response(self, response: ResponseDelegate, db_row: DatabaseRow):
        """Answer the request query against each cvalue; record answers and timing."""
        if not response.cvalues:
            return
        start_time = time.perf_counter()
        responses = []
        # The original enumerated the cvalues but never used the index.
        for cvalue in response.cvalues:
            answer, start_pos, stop_pos, score = self.get_answer(response.request.query, cvalue)
            self.logger.info(f"{response.request.qa_threshold} \t {answer}, {start_pos}, {stop_pos}, {score}")
            responses.append({
                'answer_text': answer,
                'answer_start_pos': start_pos,
                'answer_stop_pos': stop_pos,
                'answer_score': score,
            })
        db_row.qa_time = time.perf_counter() - start_time
        response.set_path(f'body.nboost.qa', responses)
    def get_answer(self, query: str, cvalue: str) -> Tuple[str, int, int, float]:
        """Return answer, start_pos, end_pos, score"""
        raise NotImplementedError()
| StarcoderdataPython |
3424119 | from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
import csv
# Options
# Chrome options for a private, headless browsing session.
# NOTE(review): these options are built but never passed to webdriver.Chrome
# below -- confirm whether headless/incognito mode was actually intended.
chrome_options = Options()
chrome_options.add_argument("--incognito")
chrome_options.add_argument("--headless")
import re
#text_after = re.sub(regex_search_term, regex_replacement, text_before)
def convert(value):
    """Strip all non-digit characters from a price/spec string and return an int.

    Returns 0 when the string contains no digits at all (the original raised
    ValueError on ``int('')`` for digit-free inputs).
    """
    digits = re.sub(r'\D', "", value)
    return int(digits) if digits else 0
# One-off scraper: collect laptop product links from dienmayxanh.com, open each
# product page, parse its spec table, and append one CSV row per product.
product_links = []
url = 'https://www.dienmayxanh.com/laptop#c=44&o=9&pi=10'
browser = webdriver.Chrome(executable_path="chromedriver.exe")
browser.get(url)
links = browser.find_elements_by_xpath('//*[@class="main-contain"]')  # anchors holding product URLs
for i in links:
    link = i.get_attribute("href")  # collect each product page URL
    product_links.append(link)
sleep(20)
csv_columns = ['Product','Price','Brand','Core','RAM','ScrSize','GraphicCard','Drive_Type','Capacity','OperSystem','Weight','Madein', 'Since','Shop','URL']
for link in product_links:
    browser.get(link)
    # Bug fix: the original referenced an undefined name ``driver`` here.
    click_button = browser.find_element_by_xpath('//div[@class="ng-star-inserted"]//button[@class="text-center btn btn-link"]')
    click_button.click()
    # Bug fix: ``time`` was never imported (only ``from time import sleep``),
    # so ``time.sleep(5)`` raised NameError.
    sleep(5)
    try:
        ram = convert(browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[2]/div/span[1]').text)
        scr = browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[4]/div/span[1]').text
        capacity = browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[3]/div/span').text
        weight = re.sub(r'(?:Dài.* - Rộng.*Nặng )', "", browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[10]/div/span').text)
        if 'TB' in capacity:
            capacity = 1000
        else:
            capacity = convert(capacity[:6])
        brand = browser.find_element_by_xpath('/html/body/section[1]/ul/li[2]/a').text
        if 'MacBook' in brand:
            opsys = 'Mac OS'
        else:
            opsys = 'Window'
        data = {
            "Product": browser.find_element_by_xpath('/html/body/section[1]/h1').text,
            "Price": convert(browser.find_element_by_xpath('//*[@class="giamsoc-ol-price"]').text),
            "Brand": brand[7:],
            "Core": browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[1]/div').text,
            "RAM": ram,
            "ScrSize": re.sub(r'(?: ")', '', scr[:4]),
            "GraphicCard": browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[5]/div/span[2]').text,
            "Drive_Type": 'SDD',
            "Capacity": capacity,
            "OperSystem": opsys,
            "Weight": re.sub(r'(?: kg)', '', weight),
            "Madein": "",
            "Since": browser.find_element_by_xpath('//*[@class="parameter"]/ul/li[11]/div/span').text,
            "Shop": 'Dienmayxanh',
            "URL": link,
        }
        with open('data.csv', "a", encoding="utf8") as f:
            writer = csv.DictWriter(f, fieldnames=csv_columns)
            writer.writerow(data)
    except:
        # Best-effort scrape: skip any product page whose layout differs.
        pass
browser.close()
1729099 | <filename>webapp/api.py
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from http import HTTPStatus
from typing import Union, Tuple
from uuid import UUID
from flask import Blueprint, Response, jsonify, request, current_app
from liquidity import create_liquidity_provider
from liquidity.types.currency import Currency, CurrencyPair
from liquidity.types.errors import AlreadySettled
from liquidity.types.quote import QuoteId
from liquidity.types.settlement import DebtId
from liquidity.types.trade import Direction, TradeId
# Blueprint holding the liquidity provider's public REST API (mounted at "/").
api = Blueprint("api/v1", __name__, url_prefix="/")
def json_response(json):
    """Wrap an already-serialized JSON string in a 200 application/json response."""
    response_cls = current_app.response_class
    return response_cls(
        response=json,
        status=HTTPStatus.OK,
        mimetype="application/json",
    )
@api.route("/details", methods=["GET"])
def lp_details() -> Union[str, Response]:
    """Return the liquidity provider's static details as JSON."""
    current_app.logger.info("/details start")
    provider = create_liquidity_provider()
    json = provider.lp_details().to_json()
    current_app.logger.info(f"/details response: {json}")
    return json_response(json)
@api.route("/quote", methods=["POST"])
def get_quote() -> Union[str, Response]:
    """Price a quote for the requested currency pair and amount."""
    provider = create_liquidity_provider()
    data = request.get_json()
    current_app.logger.info(f"request for quote {data}")
    pair = CurrencyPair(
        base=Currency[data["base_currency"]],
        quote=Currency[data["quote_currency"]],
    )
    quote = provider.get_quote(pair=pair, amount=int(data["amount"]))
    json = quote.to_json()
    current_app.logger.info(f"quote {json}")
    return json_response(json)
@api.route("/trade", methods=["POST"])
def trade_and_execute() -> Union[str, Response]:
    """Execute a trade for a previously issued quote and return its trade id.

    Optional body fields ``diem_deposit_address`` and ``tx_version`` now use
    ``dict.get`` instead of the verbose ``in``-checked ternaries; an explicit
    JSON null for ``tx_version`` is treated the same as an absent field
    (the original crashed on ``int(None)``).
    """
    lp = create_liquidity_provider()
    data = request.get_json()
    quote_id = QuoteId(UUID(data["quote_id"]))
    direction = Direction[data["direction"]]
    diem_deposit_address = data.get("diem_deposit_address")
    tx_version = data.get("tx_version")
    if tx_version is not None:
        tx_version = int(tx_version)
    trade_id = lp.trade_and_execute(
        quote_id=quote_id,
        direction=direction,
        diem_bech32_deposit_address=diem_deposit_address,
        tx_version=tx_version,
    )
    return jsonify({"trade_id": str(trade_id)})
@api.route("/trade/<uuid:trade_id_param>", methods=["GET"])
def trade_info(trade_id_param: UUID) -> Union[str, Response]:
    """Look up a trade by its id and return its details as JSON."""
    provider = create_liquidity_provider()
    payload = provider.trade_info(trade_id=TradeId(trade_id_param)).to_json()
    current_app.logger.info(f"Trade info {payload}")
    return json_response(payload)
@api.route("/debt", methods=["GET"])
def get_debt() -> Union[str, Response]:
    """List every outstanding debt as a JSON array."""
    provider = create_liquidity_provider()
    serializable_debts = [debt.to_dict() for debt in provider.get_debt()]
    current_app.logger.info(f"Debt info {serializable_debts}")
    return jsonify({"debts": serializable_debts})
@api.route("/debt/<uuid:debt_id_param>", methods=["PUT"])
def settle(debt_id_param: UUID) -> Union[str, Response, Tuple[str, int]]:
    """Mark a debt as settled: 404 for unknown ids, 409 if already settled."""
    confirmation = request.get_json()["settlement_confirmation"]
    provider = create_liquidity_provider()
    try:
        provider.settle(DebtId(debt_id_param), confirmation)
    except KeyError as err:
        return str(err), 404
    except AlreadySettled as err:
        return str(err), 409
    return "OK"
| StarcoderdataPython |
5075768 | from django import forms
from mighty.models import PaymentMethod
from mighty.applications.shop.forms.widgets import (
CBNumberInput, CBCVCInput, CBDateInput,
IbanInput, BicInput
)
class PaymentMethodForm(forms.ModelForm):
    """ModelForm for PaymentMethod exposing the bank (IBAN/BIC) and
    card (number/CVC/expiry) fields with dedicated shop widgets."""
    class Meta:
        model = PaymentMethod
        fields = ('owner', 'iban', 'bic', 'cb', 'cvc', 'date_valid')
        # Custom widgets render the card number, CVC, expiry date, IBAN and
        # BIC inputs with their appropriate masks/formatting.
        widgets = {
            'cb': CBNumberInput(),
            'cvc': CBCVCInput(),
            'date_valid': CBDateInput(),
            'iban': IbanInput(),
            'bic': BicInput(),
        }
11231717 | <reponame>bcdarwin/pydpiper
#!/usr/bin/env python
import networkx as nx
import Queue
import cPickle as pickle
import os
import sys
import socket
import time
from datetime import datetime
from subprocess import call
from shlex import split
from multiprocessing import Process, Event
import file_handling as fh
import pipeline_executor as pe
import logging
import threading
import Pyro4
LOOP_INTERVAL = 5.0
RESPONSE_LATENCY = 100
logger = logging.getLogger(__name__)
sys.excepthook = Pyro4.util.excepthook
class PipelineFile():
    """Filename wrapper that tags a file with its role in a pipeline stage.

    Subclasses override setType() so CmdStage.parseArgs() can sort command
    arguments into input and output file lists; the base class carries no role.
    """
    def __init__(self, filename):
        self.filename = filename
        self.setType()

    def setType(self):
        # Base files have no role; subclasses set "input" / "output" / "log".
        self.fileType = None

    def __repr__(self):
        return self.filename
class InputFile(PipelineFile):
    """A file read by a stage; collected into CmdStage.inputFiles."""
    def setType(self):
        self.fileType = "input"
class OutputFile(PipelineFile):
    """A file produced by a stage; collected into CmdStage.outputFiles."""
    def setType(self):
        self.fileType = "output"
class LogFile(PipelineFile):
    """A log file attached to a stage."""
    def setType(self):
        self.fileType = "log"
class PipelineStage():
    """Base class for one unit of work in the pipeline.

    Tracks resource requirements (mem/procs), input/output files, a single
    log file, retry count, and a status of None / "running" / "finished" /
    "failed".  Two stages compare equal when their input and output file
    lists match.
    """
    def __init__(self):
        self.mem = 2.0 # default memory allotted per stage
        self.procs = 1 # default number of processors per stage
        self.inputFiles = [] # the input files for this stage
        self.outputFiles = [] # the output files for this stage
        self.logFile = None # each stage should have only one log file
        self.status = None
        self.name = ""
        self.colour = "black" # used when a graph is created of all stages to colour the nodes
        self.number_retries = 0

    def isFinished(self):
        return self.status == "finished"
    def setRunning(self):
        self.status = "running"
    def setFinished(self):
        self.status = "finished"
    def setFailed(self):
        self.status = "failed"
    def setNone(self):
        self.status = None

    def setMem(self, mem):
        self.mem = mem
    def getMem(self):
        return self.mem
    def setProcs(self, num):
        self.procs = num
    def getProcs(self):
        return self.procs

    def getHash(self):
        # Uniqueness for base stages is defined by the in/out file lists.
        return(hash("".join(self.outputFiles) + "".join(self.inputFiles)))
    def __eq__(self, other):
        return self.inputFiles == other.inputFiles and self.outputFiles == other.outputFiles
    def __ne__(self, other):
        # BUG FIX: the original called the bare name __eq__, which is not in
        # scope inside a method and raised NameError on any != comparison.
        return not self.__eq__(other)

    def getNumberOfRetries(self):
        return self.number_retries
    def incrementNumberOfRetries(self):
        self.number_retries += 1
class CmdStage(PipelineStage):
    """A PipelineStage built from a shell command.

    The argument array may mix plain strings with InputFile/OutputFile
    wrappers; parseArgs() uses each wrapper's fileType tag to populate
    inputFiles/outputFiles, and the first token becomes the stage name.
    """
    def __init__(self, argArray):
        PipelineStage.__init__(self)
        self.argArray = argArray # the raw input array
        self.cmd = [] # the input array converted to strings
        self.parseArgs()
        self.checkLogFile()
    def parseArgs(self):
        """Split argArray into command tokens and input/output file lists."""
        if self.argArray:
            for a in self.argArray:
                # fileType is only present on PipelineFile wrappers; plain
                # strings fall through with ft == None.
                ft = getattr(a, "fileType", None)
                if ft == "input":
                    self.inputFiles.append(str(a))
                elif ft == "output":
                    self.outputFiles.append(str(a))
                self.cmd.append(str(a))
            self.name = self.cmd[0]
    def checkLogFile(self):
        """Derive a default log file name from the command and a timestamp."""
        # NOTE(review): isoformat() contains ':' characters, which some
        # filesystems/tools handle poorly — confirm this is intentional.
        if not self.logFile:
            self.logFile = self.name + "." + datetime.isoformat(datetime.now()) + ".log"
    def setLogFile(self, logFileName):
        self.logFile = str(logFileName)
    def execStage(self):
        """Run the command, appending stdout/stderr to the log file.

        Skips execution (returncode 0) when all in/out files already exist.
        Returns the subprocess return code.
        """
        of = open(self.logFile, 'a')
        of.write("Running on: " + socket.gethostname() + " at " + datetime.isoformat(datetime.now(), " ") + "\n")
        of.write(repr(self) + "\n")
        of.flush()
        if self.is_effectively_complete():
            of.write("All output files exist. Skipping stage.\n")
            returncode = 0
        else:
            args = split(repr(self))
            returncode = call(args, stdout=of, stderr=of, shell=False)
        of.close()
        return(returncode)
    def is_effectively_complete(self):
        """Return True when every output AND input file of the stage exists on disk."""
        for output in self.outputFiles + self.inputFiles:
            if not os.path.exists(output):
                return False
        return True
    def getHash(self):
        # Uniqueness for command stages is the entire command line.
        return(hash(" ".join(self.cmd)))
    def __repr__(self):
        return(" ".join(self.cmd))
class Pipeline():
def __init__(self):
# the core pipeline is stored in a directed graph. The graph is made
# up of integer indices
self.G = nx.DiGraph()
# an array of the actual stages (PipelineStage objects)
self.stages = []
self.nameArray = []
# a queue of the stages ready to be run - contains indices
self.runnable = Queue.Queue()
# a list of currently running stages
self.currently_running_stages = []
# the current stage counter
self.counter = 0
# hash to keep the output to stage association
self.outputhash = {}
# a hash per stage - computed from inputs and outputs or whole command
self.stagehash = {}
# an array containing the status per stage
self.processedStages = []
# location of backup files for restart if needed
self.backupFileLocation = None
# list of registered clients
self.clients = []
# map from clients to jobs running on each client
# TODO in principle this subsumes self.clients[] ...
self.client_running_stages = {}
self.clientTimestamps = {}
# number of clients (executors) that have been launched by the server
# we need to keep track of this because even though no (or few) clients
# are actually registered, a whole bunch of them could be waiting in the
# queue
self.number_launched_and_waiting_clients = 0
# clients we've lost contact with due to crash, etc.
self.failed_executors = 0
# main option hash, needed for the pipeline (server) to launch additional
# executors during run time
self.main_options_hash = None
self.programName = None
# Initially set number of skipped stages to be 0
self.skipped_stages = 0
self.failed_stages = 0
self.verbose = 0
def getTotalNumberOfStages(self):
return len(self.stages)
def getNumberProcessedStages(self):
return len(self.processedStages)
def getNumberOfRunningClients(self):
return len(self.clients)
def getNumberOfQueuedClients(self):
return self.number_launched_and_waiting_clients
def setVerbosity(self, verbosity):
self.verbose = verbosity
def getCurrentlyRunningStages(self):
return self.currently_running_stages
def getNumberRunnableStages(self):
return self.runnable.qsize()
def pipelineFullyCompleted(self):
return (len(self.stages) == len(self.processedStages))
def addStage(self, stage):
"""adds a stage to the pipeline"""
# check if stage already exists in pipeline - if so, don't bother
# check if stage exists - stage uniqueness defined by in- and outputs
# for base stages and entire command for CmdStages
h = stage.getHash()
if self.stagehash.has_key(h):
self.skipped_stages += 1
#stage already exists - nothing to be done
else: #stage doesn't exist - add it
# add hash to the dict
self.stagehash[h] = self.counter
#self.statusArray[self.counter] = 'notstarted'
# add the stage itself to the array of stages
self.stages.append(stage)
self.nameArray.append(stage.name)
# add all outputs to the output dictionary
for o in stage.outputFiles:
self.outputhash[o] = self.counter
# add the stage's index to the graph
self.G.add_node(self.counter, label=stage.name,color=stage.colour)
# increment the counter for the next stage
self.counter += 1
def selfPickle(self):
"""Pickles pipeline in case future restart is needed"""
if (self.backupFileLocation == None):
self.setBackupFileLocation()
pickle.dump(self.G, open(str(self.backupFileLocation) + '/G.pkl', 'wb'))
pickle.dump(self.stages, open(str(self.backupFileLocation) + '/stages.pkl', 'wb'))
pickle.dump(self.nameArray, open(str(self.backupFileLocation) + '/nameArray.pkl', 'wb'))
pickle.dump(self.counter, open(str(self.backupFileLocation) + '/counter.pkl', 'wb'))
pickle.dump(self.outputhash, open(str(self.backupFileLocation) + '/outputhash.pkl', 'wb'))
pickle.dump(self.stagehash, open(str(self.backupFileLocation) + '/stagehash.pkl', 'wb'))
pickle.dump(self.processedStages, open(str(self.backupFileLocation) + '/processedStages.pkl', 'wb'))
logger.info("Pipeline pickled")
def restart(self):
"""Restarts the pipeline from previously pickled backup files."""
if (self.backupFileLocation == None):
self.setBackupFileLocation()
logger.info("Backup location not specified. Looking in the current directory.")
try:
self.G = pickle.load(open(str(self.backupFileLocation) + '/G.pkl', 'rb'))
self.stages = pickle.load(open(str(self.backupFileLocation) + '/stages.pkl', 'rb'))
self.nameArray = pickle.load(open(str(self.backupFileLocation) + '/nameArray.pkl', 'rb'))
self.counter = pickle.load(open(str(self.backupFileLocation) + '/counter.pkl', 'rb'))
self.outputhash = pickle.load(open(str(self.backupFileLocation) + '/outputhash.pkl', 'rb'))
self.stagehash = pickle.load(open(str(self.backupFileLocation) + '/stagehash.pkl', 'rb'))
self.processedStages = pickle.load(open(str(self.backupFileLocation) + '/processedStages.pkl', 'rb'))
logger.info('Successfully reimported old data from backups.')
except:
logger.exception("Backup files are not recoverable. Pipeline restart required.")
sys.exit()
done = []
for i in self.G.nodes_iter():
if self.stages[i].isFinished():
done.append(i)
else:
if i in self.processedStages:
self.processedStages.remove(i)
logger.info('Previously completed stages (of ' + str(len(self.stages)) + ' total): ' + str(len(done)))
def setBackupFileLocation(self, outputDir=None):
"""Sets location of backup files."""
if (outputDir == None):
# set backups in current directory if directory doesn't currently exist
outputDir = os.getcwd()
self.backupFileLocation = fh.createBackupDir(outputDir)
def addPipeline(self, p):
if p.skipped_stages > 0:
self.skipped_stages += p.skipped_stages
for s in p.stages:
self.addStage(s)
def printStages(self, name):
"""Prints stages to a file, stage info to stdout"""
fileForPrinting = os.path.abspath(os.curdir + "/" + str(name) + "-pipeline-stages.txt")
pf = open(fileForPrinting, "w")
for i in range(len(self.stages)):
pf.write(str(i) + " " + str(self.stages[i]) + "\n")
pf.close()
print "Total number of stages in the pipeline: ", len(self.stages)
def printNumberProcessedStages(self):
print "Number of stages already processed: ", len(self.processedStages)
def createEdges(self):
"""computes stage dependencies by examining their inputs/outputs"""
starttime = time.time()
# iterate over all nodes
for i in self.G.nodes_iter():
for ip in self.stages[i].inputFiles:
# if the input to the current stage was the output of another
# stage, add a directional dependence to the DiGraph
if self.outputhash.has_key(ip):
self.G.add_edge(self.outputhash[ip], i)
endtime = time.time()
logger.info("Create Edges time: " + str(endtime-starttime))
def computeGraphHeads(self):
"""adds stages with no incomplete predecessors to the runnable queue"""
graphHeads = []
for i in self.G.nodes_iter():
if self.stages[i].isFinished() == False:
""" either it has 0 predecessors """
if len(self.G.predecessors(i)) == 0:
self.runnable.put(i)
graphHeads.append(i)
""" or all of its predecessors are finished """
if len(self.G.predecessors(i)) != 0:
predfinished = True
for j in self.G.predecessors(i):
if self.stages[j].isFinished() == False:
predfinished = False
if predfinished == True:
self.runnable.put(i)
graphHeads.append(i)
logger.info("Graph heads: " + str(graphHeads))
def getStage(self, i):
"""given an index, return the actual pipelineStage object"""
return(self.stages[i])
# getStage<...> are currently used instead of getStage due to previous bug; could revert:
def getStageMem(self, i):
return(self.stages[i].mem)
def getStageProcs(self,i):
return(self.stages[i].procs)
def getStageCommand(self,i):
return(repr(self.stages[i]))
def getStage_is_effectively_complete(self,i):
return(self.stages[i].is_effectively_complete())
def getStageLogfile(self,i):
return(self.stages[i].logFile)
"""Given client information, issue commands to the client (along similar
lines to getRunnableStageIndex) and update server's internal view of client.
This is highly stateful, being a resource-tracking wrapper around
getRunnableStageIndex and hence a glorified Queue().get()."""
def getCommand(self, clientURIstr, clientMemFree, clientProcsFree):
flag, i = self.getRunnableStageIndex()
if flag == "run_stage":
if ((self.getStageMem(i) <= clientMemFree) and (self.getStageProcs(i) <= clientProcsFree)):
return (flag, i)
else:
logger.debug("Not enough resources for %s to run stage %d.", clientURIstr, i)
self.requeue(i)
# TODO search the queue for something this client can run?
return ("wait", None)
else:
return (flag, i)
"""Return a tuple of a command ("shutdown_normally" if all stages are finished,
"wait" if no stages are currently runnable, or "run_stage" if a stage is
available) and the next runnable stage if the flag is "run_stage", otherwise
None"""
def getRunnableStageIndex(self):
if self.allStagesComplete():
return ("shutdown_normally", None)
elif self.runnable.empty():
return ("wait", None)
else:
index = self.runnable.get()
return ("run_stage", index)
def allStagesComplete(self):
return len(self.processedStages) == len(self.stages)
def setStageStarted(self, index, clientURI):
URIstring = "(" + str(clientURI) + ")"
logger.debug("Starting Stage " + str(index) + ": " + str(self.stages[index]) +
URIstring)
self.client_running_stages[clientURI].add(index)
self.currently_running_stages.append(index)
self.stages[index].setRunning()
def checkIfRunnable(self, index):
"""stage added to runnable queue if all predecessors finished"""
canRun = True
logger.debug("Checking if stage " + str(index) + " is runnable ...")
if self.stages[index].isFinished():
canRun = False
else:
for i in self.G.predecessors(index):
s = self.getStage(i)
if not s.isFinished():
canRun = False
logger.debug("Stage " + str(index) + " Runnable: " + str(canRun))
return canRun
def setStageFinished(self, index, clientURI, save_state = True, checking_pipeline_status = False):
"""given an index, sets corresponding stage to finished and adds successors to the runnable queue"""
logger.info("Finished Stage " + str(index) + ": " + str(self.stages[index]))
# this function can be called when a pipeline is restarted, and
# we go through all stages and set the finished ones to... finished... :-)
# in that case, we can not remove the stage from the list of running
# jobs, because there is none.
if checking_pipeline_status:
self.stages[index].status = "finished"
else:
self.removeFromRunning(index, clientURI, new_status = "finished")
self.processedStages.append(index)
# disableing the pickling for now. On larger pipelines this can take
# 10-15 seconds after inital stages finished, which increases in duration
# later on.
#if save_state:
# self.selfPickle()
for i in self.G.successors(index):
if self.checkIfRunnable(i):
self.runnable.put(i)
def removeFromRunning(self, index, clientURI, new_status):
self.currently_running_stages.remove(index)
self.client_running_stages[clientURI].remove(index)
self.stages[index].status = new_status
def setStageLost(self, index, clientURI):
"""Clean up a stage lost due to unresponsive client"""
self.removeFromRunning(index, clientURI, new_status = None)
self.requeue(index)
def setStageFailed(self, index, clientURI):
# given an index, sets stage to failed, adds to processed stages array
# But... only if this stage has already been retried twice (<- for now static)
# Once in while retrying a stage makes sense, because of some odd I/O
# read write issue (NFS race condition?). At least that's what I think is
# happening, so trying this to see whether it solves the issue.
num_retries = self.stages[index].getNumberOfRetries()
if num_retries < 2:
self.removeFromRunning(index, clientURI, new_status = None)
self.stages[index].incrementNumberOfRetries()
logger.debug("RETRYING: ERROR in Stage " + str(index) + ": " + str(self.stages[index]))
logger.debug("RETRYING: adding this stage back to the runnable queue.")
logger.debug("RETRYING: Logfile for Stage " + str(self.stages[index].logFile))
self.requeue(index)
else:
self.removeFromRunning(index, clientURI, new_status = "failed")
logger.info("ERROR in Stage " + str(index) + ": " + str(self.stages[index]))
# This is something we should also directly report back to the user:
print("\nERROR in Stage %s: %s" % (str(index), str(self.stages[index])))
print("Logfile for (potentially) more information:\n%s\n" % self.stages[index].logFile)
sys.stdout.flush()
self.processedStages.append(index)
self.failed_stages += 1
for i in nx.dfs_successor(self.G, index).keys():
self.processedStages.append(i)
def requeue(self, i):
"""If stage cannot be run due to insufficient mem/procs, executor returns it to the queue"""
logger.debug("Requeueing stage %d", i)
self.runnable.put(i)
def initialize(self):
"""called once all stages have been added - computes dependencies and adds graph heads to runnable queue"""
self.runnable = Queue.Queue()
self.createEdges()
self.computeGraphHeads()
"""
Returns True unless all stages are finished, then False
This function also checks to see whether executors can be launched. The
server keeps track of how many executors are launched/registered. If the
server set to launch executors itself, it will do so if there are runnable
stages and the addition of launched/registered executors is smaller than
the max number of executors it can launch
"""
def continueLoop(self):
# exit if there are still stages that need to be run,
# but when there are no runnable nor any running stages left
if (self.runnable.empty() and
len(self.currently_running_stages) == 0
and self.failed_stages > 0):
# nothing running, nothing can be run, but we're
# also not done processing all stages
logger.info("ERROR: no more runnable stages, however not all stages have finished. Going to shut down.")
# This is something we should also directly report back to the user:
print("\nERROR: no more runnable stages, however not all stages have finished. Going to shut down.\n")
sys.stdout.flush()
return False
# TODO return False if all executors have died but not spawning new ones...
if not self.allStagesComplete():
return True
#elif len(self.clients) > 0:
# this branch is to allow clients asking for more jobs to shutdown
# gracefully when the server has no more jobs
# since it might hang the server if a client has become unresponsive
# it's currently commented. We might turn it back on once the server
# has a way to detect unresponsive clients.
# (also, before shutting down, we currently sleep for longer
# than the interval between client connections in order for
# clients to realize they need to shut down)
# TODO what if a launched_and_waiting client registers here?
# return True
else:
return False
def updateClientTimestamp(self, clientURI):
self.clientTimestamps[clientURI] = time.time() # use server clock for consistency
def mainLoop(self):
while self.continueLoop():
# check to see whether new executors need to be launched
executors_to_launch = self.numberOfExecutorsToLaunch()
if executors_to_launch > 0:
self.launchExecutorsFromServer(executors_to_launch)
# look for dead clients and requeue their jobs
# copy() is used because otherwise client_running_stages may change size
# during the iteration, throwing an exception,
# but if we haven't heard from a client in some time,
# that particular client isn't likely to be the source of the
# change, so using a slightly stale copy shouldn't miss
# anything interesting
# FIXME there are potential race conditions here with
# requeue, unregisterClient ... take locks for this whole section?
t = time.time()
for client, stages in self.client_running_stages.copy().iteritems():
if t - self.clientTimestamps[client] > pe.HEARTBEAT_INTERVAL + RESPONSE_LATENCY:
logger.warn("Executor at %s has died!", client)
print("\nWarning: there has been no contact with %s, for %f seconds. Considering the executor as dead!\n" % (client, 5 + RESPONSE_LATENCY))
if self.failed_executors > self.main_options_hash.max_failed_executors:
logger.warn("Too many executors lost to spawn new ones")
self.failed_executors += 1
for s in stages.copy():
self.setStageLost(s,client)
self.unregisterClient(client)
time.sleep(LOOP_INTERVAL)
logger.debug("Server loop shutting down")
"""
Returns an integer indicating the number of executors to launch
This function first verifies whether the server can launch executors
on its own (self.main_options_hash.nums_exec != 0). Then it checks to
see whether the executors are able to kill themselves. If they are,
it's possible that new executors need to be launched. This happens when
there are runnable stages, but the number of active executors is smaller
than the number of executors the server is able to launch
"""
def numberOfExecutorsToLaunch(self):
if self.failed_executors > self.main_options_hash.max_failed_executors:
return 0
executors_to_launch = 0
if self.main_options_hash.num_exec != 0:
# Server should launch executors itself
# This should happen regardless of whether or not executors
# can kill themselves, because the server is now responsible
# for the inital launches as well.
active_executors = self.number_launched_and_waiting_clients + len(self.clients)
max_num_executors = self.main_options_hash.num_exec
executor_launch_room = max_num_executors - active_executors
if self.runnable.qsize() > 0 and executor_launch_room > 0:
# there are runnable stages, and there is room to launch
# additional executors
executors_to_launch = min(self.runnable.qsize(), executor_launch_room)
return executors_to_launch
def launchExecutorsFromServer(self, number_to_launch):
# As the function name suggests, here we launch executors!
try:
logger.debug("Launching %i executors", number_to_launch)
processes = [Process(target=launchPipelineExecutor, args=(self.main_options_hash,self.programName,)) for i in range(number_to_launch)]
for p in processes:
p.start()
self.incrementLaunchedClients()
except:
logger.exception("Failed launching executors from the server.")
def getProcessedStageCount(self):
return len(self.processedStages)
def registerClient(self, client):
# Adds new client (represented by a URI string)
# to array of registered clients. If the server launched
# its own clients, we should remove 1 from the number of launched and waiting
# clients (It's possible though that users launch clients themselves. In that
# case we should not decrease this variable)
self.clients.append(client)
self.client_running_stages[client] = set([])
self.clientTimestamps[client] = time.time()
if self.number_launched_and_waiting_clients > 0:
self.number_launched_and_waiting_clients -= 1
if self.verbose:
print("Client registered (banzai!): %s" % client)
def unregisterClient(self, client):
# removes a client URI string from the array of registered clients. An executor
# calls this method when it decides on its own to shut down,
# and the server may call it when a client is unresponsive
if client in self.clients:
self.clients.remove(client)
del self.client_running_stages[client]
del self.clientTimestamps[client]
if self.verbose:
print("Client un-registered (seppuku!): " + client)
else:
if self.verbose:
print("Unable to un-register client: " + client)
def incrementLaunchedClients(self):
self.number_launched_and_waiting_clients += 1
def printShutdownMessage(self):
# it is possible that pipeline.continueLoop returns false, even though the
# pipeline is not completed (for instance, when stages failed, and no more stages
# can be run) so check that in order to provide the correct feedback to the user
print("\n\n######################################################")
if self.pipelineFullyCompleted():
print("All pipeline stages have been processed. \nDaemon unregistering "
+ str(len(self.clients)) + " client(s) and shutting down...\n\n")
else:
print("Not all pipeline stages have been processed,")
print("however there are no more stages that can be run.")
print("Daemon unregistering " + str(len(self.clients)) + " client(s) and shutting down...\n\n\n")
print("Objects successfully unregistered and daemon shutdown.")
if self.pipelineFullyCompleted():
print("Pipeline finished successfully!")
else:
print("Pipeline failed...")
print("######################################################\n")
sys.stdout.flush()
def launchPipelineExecutor(options, programName=None):
    """Start a single pipeline executor, via SGE submission or as a local process."""
    executor = pe.pipelineExecutor(options)
    if options.queue != "sge":
        pe.launchExecutor(executor)
    else:
        executor.submitToQueue(programName)
def skip_completed_stages(pipeline):
    """Drain the runnable queue, mark CmdStages whose files already exist as
    finished, and requeue everything that still has to run.

    Only CmdStages know how to check their files on disk; other stage types
    are always requeued.
    """
    runnable = []
    while True:
        flag, i = pipeline.getRunnableStageIndex()
        # BUG-PRONE IDIOM FIX: compare to None with `is`, not `==`.
        if i is None:
            break
        s = pipeline.getStage(i)
        if not isinstance(s, CmdStage) or not s.is_effectively_complete():
            runnable.append(i)
            continue
        pipeline.setStageFinished(i, "fake_client_URI", save_state = False, checking_pipeline_status = True)
        logger.debug("skipping stage %i", i)
    for i in runnable:
        pipeline.requeue(i)
def launchServer(pipeline, options):
    """Register the pipeline with a Pyro4 daemon, publish its URI (via the
    Pyro nameserver or a URI file), then run the pipeline's main loop while
    the daemon serves requests on a background thread.
    """
    # first follow up on the previously reported total number of
    # stages in the pipeline with how many have already finished:
    pipeline.printNumberProcessedStages()
    # is the server going to be verbose or not?
    if options.verbose:
        def verboseprint(*args):
            # Print each argument separately so caller doesn't need to
            # stuff everything to be printed into a single string
            for arg in args:
                print arg,
            print
    else:
        verboseprint = lambda *a: None
    # getIpAddress is similar to socket.gethostbyname(...)
    # but uses a hack to attempt to avoid returning localhost (127....)
    network_address = Pyro4.socketutil.getIpAddress(socket.gethostname(),
                                                    workaround127 = True, ipVersion = 4)
    daemon = Pyro4.core.Daemon(host=network_address)
    pipelineURI = daemon.register(pipeline)
    if options.use_ns:
        # in the future we might want to launch a nameserver here
        # instead of relying on a separate executable running
        ns = Pyro4.locateNS()
        ns.register("pipeline", pipelineURI)
    else:
        # If not using Pyro NameServer, must write uri to file for reading by client.
        uf = open(options.urifile, 'w')
        uf.write(pipelineURI.asString())
        uf.close()
    # set the verbosity of the pipeline before running it
    pipeline.setVerbosity(options.verbose)
    verboseprint("Daemon is running at: %s" % daemon.locationStr)
    verboseprint("The pipeline's uri is: %s" % str(pipelineURI))
    try:
        # serve Pyro requests on a daemon thread; the pipeline's main loop
        # runs in this (the main) thread
        t = threading.Thread(target=daemon.requestLoop)
        t.daemon = True
        t.start()
        pipeline.mainLoop()
    except KeyboardInterrupt:
        logger.exception("Caught keyboard interrupt, killing executors and shutting down server.")
        print("\nKeyboardInterrupt caught: cleaning up, shutting down executors.\n")
        sys.stdout.flush()
    except:
        logger.exception("Failed running server in daemon.requestLoop. Server shutting down.")
    finally:
        # allow time for all clients to contact the server and be told to shut down
        # (we could instead add a way for the server to notify its registered clients):
        # also, currently this doesn't happen until all jobs are finished (see getCommand),
        # but if we instead decided to shut down once the queue is empty, then
        # various notifyStageTerminated calls could fail
        # NOTE(review): `t` is unbound here if Thread() itself raised — confirm acceptable
        time.sleep(pe.WAIT_TIMEOUT + 1)
        daemon.shutdown()
        t.join()
    pipeline.printShutdownMessage()
def flatten_pipeline(p):
    """return a list of tuples for each stage.
    Each item in the list is (id, command, [dependencies])
    where dependencies is a list of stages depend on this stage to be complete before they run.
    """
    def post(x, y):
        # NOTE(review): this comparator only orders pairs connected by a
        # direct predecessor edge (everything else compares "equal"), so it
        # is not transitive and the sort is not a full topological order —
        # confirm callers rely only on direct-dependency ordering.
        if y[0] in x[2]:
            return 1
        elif x[0] in y[2]:
            return -1
        else:
            return 0
    # py2-only: sorted(..., cmp=...) was removed in Python 3
    return sorted([(i, str(p.stages[i]), p.G.predecessors(i)) for i in p.G.nodes_iter()],cmp=post)
def sge_script(p):
qsub = "sge_batch_hold -l vf=2G"
flat = flatten_pipeline(p)
subs = []
alter = []
unhold = []
f = lambda x: "MAGeT_%i" % x
script = []
skipped_stages = 0
for i in flat:
job_id,cmd,depends = i
stage = p.getStage(job_id)
if isinstance(stage, CmdStage):
if stage.is_effectively_complete():
skipped_stages += 1
continue
name = f(job_id)
deps = ",".join(map(f,depends))
job_cmd="%s -J %s %s" % (qsub, name, cmd)
script.append(job_cmd)
if depends:
depend_cmd="qalter -hold_jid %s %s" % (deps,name)
script.append(depend_cmd)
unhold_cmd = "qalter -h U %s" % name
script.append(unhold_cmd)
print skipped_stages, "stages skipped (outputs exist).", len(subs), "stages to run."
return script #subs + alter + unhold
def pipelineDaemon(pipeline, options=None, programName=None):
"""Launches Pyro server and (if specified by options) pipeline executors"""
#check for valid pipeline
if pipeline.runnable.empty():
print "Pipeline has no runnable stages. Exiting..."
sys.exit()
if options.queue == "sge_script":
script = open("sge_script", "w")
script.write("\n".join(sge_script(pipeline)))
script.close()
print "SGE job submission script for this pipeline written to sge_script"
sys.exit()
if options.urifile==None:
options.urifile = os.path.abspath(os.curdir + "/" + "uri")
logger.debug("Examining filesystem to determine skippable stages...")
skip_completed_stages(pipeline)
logger.debug("Prior to starting server, total stages %i. Number processed: %i.",
len(pipeline.stages), len(pipeline.processedStages))
logger.debug("Number of stages in runnable index (size of queue): %i",
pipeline.runnable.qsize())
# provide the pipeline with the main option hash. The server when started
# needs access to information in there in order to (re)launch executors
# during run time
pipeline.main_options_hash = options
pipeline.programName = programName
logger.debug("Starting server...")
process = Process(target=launchServer, args=(pipeline,options))
process.start()
try:
process.join()
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt; exiting\n"
sys.exit(0)
| StarcoderdataPython |
3386179 | """
Custom HTTP responses in JSON and standardized exceptions.
"""
from settings import *
from django.shortcuts import render
from django.http import HttpResponse
import json
class JsonResponse(HttpResponse):
    """
    A JSON response converts a dictionary @d into JavaScript
    Object Notation (JSON) and uses the "application/json"
    content type.
    """
    def __init__(self, d):
        # (Removed two blocks of commented-out debug code that deliberately
        # broke the response — dead code should not ship.)
        super(JsonResponse, self).__init__(json.dumps(d),
            content_type="application/json")
class ErrorResponse(JsonResponse):
    """
    Generates a JsonResponse with the specified @error message.
    """
    def __init__(self, message, code=ERROR_CODE_GENERAL):
        payload = {
            "errorMessage": message,
            "errorCode": code,
        }
        super(ErrorResponse, self).__init__(payload)
class ServiceException(Exception):
    """
    Indicates that a critical error has occurred and that the request can no
    longer be processed. Includes an appropriate ErrorResponse to send to the
    client.
    """
    def __init__(self, message, code=ERROR_CODE_GENERAL):
        # Pre-built JSON response a view can return directly to the client.
        self.errorResponse = ErrorResponse(message, code)
1610981 | <filename>app/apps/reporting/migrations/0004_taskreport_gpu_cost.py
# Generated by Django 2.2.23 on 2021-09-20 14:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the nullable float field
    # ``gpu_cost`` to the TaskReport model (mirrors 0003's ``cpu_cost``).
    dependencies = [
        ('reporting', '0003_taskreport_cpu_cost'),
    ]
    operations = [
        migrations.AddField(
            model_name='taskreport',
            name='gpu_cost',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
1727738 | # This file is used to initialize the Supervisors package, it can be left blank unless otherwise needed.
# The supervisors package should include pages that supervisors can access. | StarcoderdataPython |
70037 | from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import torch
import torch.nn.functional as F
from tests import utils
class SimpleConvTranspose2dModule(torch.nn.Module):
    """Thin functional wrapper: F.conv_transpose2d followed by ReLU.

    The convolution hyper-parameters are fixed at construction time;
    the input tensor, filter weights and optional bias are supplied
    per forward call.
    """

    def __init__(self, stride=1, padding=0, output_padding=0, dilation=1, groups=1):
        super(SimpleConvTranspose2dModule, self).__init__()
        # Hyper-parameters forwarded verbatim to F.conv_transpose2d.
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.groups = groups
        self.dilation = dilation

    def forward(self, inputs, filters, bias=None):
        conv_kwargs = dict(
            stride=self.stride,
            padding=self.padding,
            output_padding=self.output_padding,
            groups=self.groups,
            dilation=self.dilation,
        )
        deconv_out = F.conv_transpose2d(inputs, filters, bias=bias, **conv_kwargs)
        return F.relu(deconv_out)
class TestConvTranpose2d(utils.TorchGlowTestCase):
    # NOTE(review): class/method names are missing the "s" in "Transpose";
    # kept as-is since test discovery may reference them.
    @utils.deterministic_expand(
        [
            lambda: (
                "basic",
                SimpleConvTranspose2dModule(padding=1),
                torch.randn(1, 4, 5, 5),
                torch.randn(4, 8, 3, 3),
            ),
            lambda: (
                "with_bias",
                SimpleConvTranspose2dModule(padding=1),
                torch.randn(1, 4, 5, 5),
                torch.randn(4, 8, 3, 3),
                torch.randn(4),
            ),
        ]
    )
    def test_convTranpose2d(self, _, module, inputs, filters, bias=None):
        """Basic test of the PyTorch conv_transpose2d node on Glow."""
        # NOTE(review): *bias* is provided by the "with_bias" case but never
        # forwarded to compare_tracing_methods (and a length-4 bias would not
        # match the 8 output channels anyway) -- confirm this is intended.
        utils.compare_tracing_methods(
            module, inputs, filters, fusible_ops={"aten::_convolution"}
        )
| StarcoderdataPython |
8012625 | from .. import bp_location
from flask import g
from flask_login import current_user, login_required
from ..forms.location import LocationForm
from database.models import Location, Alert, Access, Role_dict
from web_app.modules._base_views import BaseUserItems, BasePanel, BaseLister, BaseAdder, BaseDeleter, BaseEditer
from web_app.helpers.decorators import has_role
from web_app.helpers import save_plan
from ._base import BaseLocPanel
class UserItems(BaseUserItems):
    """Item provider view: exposes the logged-in user's locations."""
    def get_items(self):
        # Locations reachable by the current user (relationship on the model).
        return current_user.locations
class Lister(BaseLister):
    """List view: table of the current user's locations."""
    def get_items(self):
        return current_user.locations
class Panel(BaseLocPanel):
    """Detail panel for a single location, with its alert feed."""
    def get_item(self, id):
        # Single Location looked up by primary key.
        return Location.query.get(id)
    def get_items(self):
        return current_user.locations
    def get_alerts(self, id, offset=None, limit=10):
        # Paged alerts scoped to this location (defaults to 10 per page).
        return Alert.get_alerts(id=id, for_name="location", offset=offset, limit=limit)
class LocationSpecialAttrsMixin(object):
    """Hooks shared by the Location add and edit views."""
    def set_special_attributes(self, form, record, **kwargs):
        # Tie the new/edited location to the current user's company.
        record.company_id = current_user.company_id
    def preprocess_form(self, form):
        # Persist the uploaded floor plan and swap the form value for the
        # stored filename; presumably save_plan returns a falsy value on
        # failure/no upload -- TODO confirm.
        filename = save_plan(form.plan.data)
        if filename:
            form.plan.data = filename
        else:
            # Drop the field so the record's existing plan is not clobbered.
            del form.plan
class Adder(LocationSpecialAttrsMixin, BaseAdder):
    """View that creates a new Location."""
    def post_transaction(self, form, record, **kwargs):
        # add current user to access list for location
        access = Access()
        access.location_id = record.id
        access.user_id = current_user.id
        g.session.add(access)
        g.session.commit()
    def get_template_name(self):
        # Template used to render the add-location form.
        return 'entity/adder.pug'
class Editer(LocationSpecialAttrsMixin, BaseEditer):
    # Edit view; all Location-specific behaviour comes from the mixin.
    pass
class Deleter(BaseDeleter):
    """View that deletes locations belonging to the current user."""
    def get_items(self):
        return current_user.locations
class LocationVars(object):
    """Static configuration shared by the Location CRUD views."""
    title = "Location"
    # Table header labels shown in the list view.
    column_titles= ["ID", "Entity Name", "Location", "# Cameras"]
    # Model attributes backing each column, in display order.
    column_ids= ["id","name","address", "cameras_count"]
    form= LocationForm
    model= Location
    kind = bp_location.name
    # CRUD views require login plus the ADMIN role.
    role_decorator = has_role(Role_dict.ADMIN)
    decorators = [login_required ,role_decorator]
# Register the admin-only CRUD views, then the read-only views (which
# only need a logged-in user), on the location blueprint.
# NOTE(review): ``decorators`` here resolves at module scope -- if it is
# defined only as a class attribute of LocationVars this line raises
# NameError at import time; confirm the original indentation.
for view in [Lister, Adder, Editer, Deleter]:
    view.add_url_rule(bp_location, LocationVars, decorators)
for view in [UserItems, Panel]:
    view.add_url_rule(bp_location, LocationVars, [login_required])
229163 | <gh_stars>0
import time
import board
import adafruit_si7021
import sys
# Open the Si7021 temperature/humidity sensor on the board's default I2C bus,
# print one reading of each value, then exit successfully.
sensor = adafruit_si7021.SI7021(board.I2C())
print("\nTemperature: %0.1f C" % sensor.temperature)
print("Humidity: %0.1f %%" % sensor.relative_humidity)
sys.exit(0)
4847937 | <filename>module2-sql-for-analysis/assignment/main.py
"""Script entry point."""
from titanic import init_script
if __name__ == "__main__":
    # Run the Titanic pipeline entry point only when invoked as a script.
    init_script()
| StarcoderdataPython |
8091260 |
# _return_guides_in_regions.py
__module_name__ = "_return_guides_in_regions.py"
__author__ = ", ".join(["<NAME>"])
__email__ = ", ".join(["<EMAIL>",])
# package imports #
# --------------- #
import pandas as pd
import regex
import seq_toolkit
def _id_PAMs_in_sequence(sequence, PAM, motif_key="pam", verbose=True):
    """Locate every occurrence of the PAM motif in *sequence*.

    Delegates to seq_toolkit.query_motif; presumably the returned DataFrame
    carries ``<motif_key>.start`` / ``<motif_key>.end`` columns (the caller
    below relies on 'pam.start' / 'pam.end') -- TODO confirm.
    """
    PAM_df = seq_toolkit.query_motif(sequence, PAM, motif_key, verbose)
    return PAM_df
def _build_region_interval_idx(df, region_extension):
RegionIntervals = []
for i in range(len(df)):
start = int(df.filter(regex="tart").iloc[i].values[0]) - region_extension
end = int(df.filter(regex="nd").iloc[i].values[0]) + region_extension
RegionIntervals.append(pd.Interval(left=start, right=end, closed="right"))
return pd.IntervalIndex(RegionIntervals)
def _return_guides_in_regions(
    sequence,
    df,
    region_column=False,
    region_specification=False,
    PAM="NGG",
    global_start=0,
    region_extension=0,
):
    """Return PAM sites that fall inside the regions described by *df*.

    If *region_column* is given, *df* is first filtered to rows where that
    column equals *region_specification*. PAM coordinates are shifted by
    *global_start* and binned into the (optionally extended) region
    intervals; only PAMs landing in a region survive the merge.
    """
    if region_column:
        region_df = df.loc[df[region_column] == region_specification].reset_index(drop=True)
    else:
        # NOTE(review): without a region_column the input df is mutated in
        # place below (a "range" column is added) -- confirm acceptable.
        region_df = df
    pam_df = _id_PAMs_in_sequence(sequence, PAM, motif_key="pam", verbose=True)
    # One interval per region row; `ranges` keeps the IntervalIndex for pd.cut.
    region_df["range"] = ranges = _build_region_interval_idx(
        region_df, region_extension
    )
    # Shift PAM coordinates from sequence-local to global positions.
    pam_df['pam.start'] += global_start
    pam_df['pam.end'] += global_start
    # Assign each PAM to the region interval containing its start.
    pam_df["range"] = pd.cut(x=pam_df["pam.start"].values, bins=ranges)
    target_region_df = pd.merge(region_df, pam_df, on="range").drop("range", axis=1)
    return target_region_df
5179170 | """
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import torch.nn.functional as F
from torch import nn
class Market1501TripletModel(nn.Module):
    """Hand-rolled CNN for triplet training on Market-1501.

    forward() embeds an (anchor, positive, negative) image triple with a
    shared backbone + MLP head and returns the two pairwise embedding
    distances. Note: layer construction order determines parameter-init
    RNG consumption, so it must not be reordered.
    """
    MODEL_NAME = "Self-made cnn"
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size,
        max_pool_kernel_size,
        dropout_rate,
        filters=64,
    ):
        super(Market1501TripletModel, self).__init__()
        blocks = []
        # First block adapts the input channel count (input_shape[1]) to
        # `filters`; the remaining conv_blocks - 1 blocks keep it constant.
        start_block = self.__conv_block(
            in_channels=input_shape[1],
            out_channels=filters,
            conv_kernel_size=conv_kernel_size,
            max_pool_kernel_size=max_pool_kernel_size,
            dropout_rate=dropout_rate
        )
        for _ in range(conv_blocks - 1):
            blocks.append(self.__conv_block(
                in_channels=filters,
                out_channels=filters,
                conv_kernel_size=conv_kernel_size,
                max_pool_kernel_size=max_pool_kernel_size,
                dropout_rate=dropout_rate
            ))
        self.backbone = nn.Sequential(
            start_block,
            *blocks,
            nn.Flatten(),
        )
        # Probe the backbone with a dummy batch to size the first FC layer.
        backbone_output_features = self.backbone(torch.rand(input_shape)).shape[-1]
        self.fully_connected = nn.Sequential(
            nn.Linear(in_features=backbone_output_features, out_features=256),
            nn.ReLU(),
            nn.Linear(in_features=256, out_features=128),
            nn.ReLU(),
            nn.Linear(in_features=128, out_features=embedding_dim)
        )
        self.model = nn.Sequential(self.backbone, self.fully_connected)
    @staticmethod
    def __conv_block(in_channels, out_channels, conv_kernel_size, max_pool_kernel_size, dropout_rate):
        # Conv -> ReLU -> MaxPool -> Dropout ('same' padding keeps H/W).
        return nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=conv_kernel_size,
                padding='same',
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=max_pool_kernel_size),
            nn.Dropout(dropout_rate),
        )
    def forward(self, anchor, true_positive, false_positive):
        """Embed the triple and return (anchor-positive, anchor-negative)
        pairwise L2 distances."""
        embedding_anchor = self.model(anchor)
        embedding_true = self.model(true_positive)
        embedding_false = self.model(false_positive)
        anchor_positive_dist = F.pairwise_distance(embedding_anchor, embedding_true)
        anchor_negative_dist = F.pairwise_distance(embedding_anchor, embedding_false)
        return anchor_positive_dist, anchor_negative_dist
class Market1501TripletMiniVGG(nn.Module):
    """Mini-VGG style triplet network (Conv->ReLU->BN blocks, pooled every
    second block, filter count stepped up every two blocks).

    NOTE(review): the `filters` parameter is accepted but never used; the
    filter schedule is driven by INITIAL_FILTERS -- confirm intended.
    """
    INITIAL_FILTERS = 32
    MODEL_NAME = "Mini-VGG"
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size=(3, 3),
        max_pool_kernel_size=(2, 2),
        dropout_rate=0.03,
        filters=64
    ):
        super(Market1501TripletMiniVGG, self).__init__()
        self.model_name = "Mini-VGG"
        start_block = self.__conv_block(
            in_channels=input_shape[1],
            out_channels=Market1501TripletMiniVGG.INITIAL_FILTERS,
            conv_kernel_size=conv_kernel_size,
        )
        self.backbone = nn.Sequential(start_block)
        assert conv_blocks % 2 == 0, "Conv blocks must be an even number in MiniVGGNet"
        for idx in range(2, conv_blocks + 1):
            # Filter multiplier grows by 1 every two blocks (1, 1, 2, 2, ...).
            current_filters_multiply = int(round((idx / 2) + 0.1, 0))
            current_filters = Market1501TripletMiniVGG.INITIAL_FILTERS * current_filters_multiply
            # Probe the backbone so far to get the current channel count.
            _, last_output_channels, _, _ = self.__get_last_shape(input_shape, self.backbone)
            self.backbone.add_module(f'Block:{idx}', self.__conv_block(
                in_channels=last_output_channels,
                out_channels=current_filters,
                conv_kernel_size=conv_kernel_size,
            ))
            if idx % 2 == 0:
                # Pool + dropout after every pair of conv blocks.
                self.backbone.add_module(f'Pool:{current_filters_multiply}', self.__pool_block(
                    max_pool_kernel_size=max_pool_kernel_size,
                    dropout_rate=dropout_rate
                ))
        self.backbone.add_module('Flatten', nn.Flatten())
        backbone_output_features = self.__get_last_shape(input_shape, self.backbone)[-1]
        self.fully_connected = nn.Sequential(
            nn.Linear(in_features=backbone_output_features, out_features=512),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=512),
            nn.Linear(in_features=512, out_features=128),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=128),
            nn.Linear(in_features=128, out_features=embedding_dim)
        )
        self.model = nn.Sequential(self.backbone, self.fully_connected)
    @staticmethod
    def __get_last_shape(input_shape, block):
        # Shape of the block's output for a dummy batch of input_shape.
        return block(torch.rand(input_shape)).shape
    @staticmethod
    def __pool_block(max_pool_kernel_size, dropout_rate):
        return nn.Sequential(
            nn.MaxPool2d(kernel_size=max_pool_kernel_size),
            nn.Dropout(p=dropout_rate)
        )
    @staticmethod
    def __conv_block(in_channels, out_channels, conv_kernel_size):
        # Conv ('same' padding) -> ReLU -> BatchNorm.
        return nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=conv_kernel_size,
                padding='same',
                bias=True
            ),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=out_channels)
        )
    def forward(self, anchor, positive, negative):
        """Embed the triple and return (anchor-positive, anchor-negative)
        pairwise L2 distances."""
        anchor_emd = self.model(anchor)
        positive_emd = self.model(positive)
        negative_emd = self.model(negative)
        anchor_positive_dist = F.pairwise_distance(anchor_emd, positive_emd)
        anchor_negative_dist = F.pairwise_distance(anchor_emd, negative_emd)
        return anchor_positive_dist, anchor_negative_dist
class Market1501TripletModelEval(Market1501TripletModel):
    """Inference variant of Market1501TripletModel: identical architecture
    and weights, but forward() embeds a single image batch instead of a
    triplet (no distance computation)."""
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size=(3, 3),
        max_pool_kernel_size=(2, 2),
        dropout_rate=0.03,
        filters=64
    ):
        super(Market1501TripletModelEval, self).__init__(
            input_shape=input_shape,
            embedding_dim=embedding_dim,
            conv_blocks=conv_blocks,
            conv_kernel_size=conv_kernel_size,
            max_pool_kernel_size=max_pool_kernel_size,
            dropout_rate=dropout_rate,
            filters=filters
        )
    def forward(self, image):
        # Return the embedding for a batch of images.
        return self.model(image)
class Market1501TripletMiniVGGEval(Market1501TripletMiniVGG):
    """Inference variant of Market1501TripletMiniVGG: forward() returns the
    embedding of a single image batch instead of triplet distances."""
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size=(3, 3),
        max_pool_kernel_size=(2, 2),
        dropout_rate=0.03,
        filters=64
    ):
        super(Market1501TripletMiniVGGEval, self).__init__(
            input_shape=input_shape,
            embedding_dim=embedding_dim,
            conv_blocks=conv_blocks,
            conv_kernel_size=conv_kernel_size,
            max_pool_kernel_size=max_pool_kernel_size,
            dropout_rate=dropout_rate,
            filters=filters
        )
    def forward(self, image):
        # Return the embedding for a batch of images.
        return self.model(image)
class Market1501TripletMiniVGGReluBeforeBN(nn.Module):
    """Variant of Market1501TripletMiniVGG with BatchNorm placed BEFORE the
    ReLU in both the conv blocks and the FC head (despite the class name,
    the code order here is Conv->BN->ReLU / Linear->BN->ReLU).

    NOTE(review): reuses Market1501TripletMiniVGG.INITIAL_FILTERS (the
    sibling class's constant) rather than its own -- confirm intended.
    """
    INITIAL_FILTERS = 32
    MODEL_NAME = "Mini-VGG"
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size=(3, 3),
        max_pool_kernel_size=(2, 2),
        dropout_rate=0.03,
        filters=64
    ):
        super(Market1501TripletMiniVGGReluBeforeBN, self).__init__()
        self.model_name = "Mini-VGG"
        start_block = self.__conv_block(
            in_channels=input_shape[1],
            out_channels=Market1501TripletMiniVGG.INITIAL_FILTERS,
            conv_kernel_size=conv_kernel_size,
        )
        self.backbone = nn.Sequential(start_block)
        assert conv_blocks % 2 == 0, "Conv blocks must be an even number in MiniVGGNet"
        for idx in range(2, conv_blocks + 1):
            # Filter multiplier grows by 1 every two blocks (1, 1, 2, 2, ...).
            current_filters_multiply = int(round((idx / 2) + 0.1, 0))
            current_filters = Market1501TripletMiniVGG.INITIAL_FILTERS * current_filters_multiply
            _, last_output_channels, _, _ = self.__get_last_shape(input_shape, self.backbone)
            self.backbone.add_module(f'Block:{idx}', self.__conv_block(
                in_channels=last_output_channels,
                out_channels=current_filters,
                conv_kernel_size=conv_kernel_size,
            ))
            if idx % 2 == 0:
                self.backbone.add_module(f'Pool:{current_filters_multiply}', self.__pool_block(
                    max_pool_kernel_size=max_pool_kernel_size,
                    dropout_rate=dropout_rate
                ))
        self.backbone.add_module('Flatten', nn.Flatten())
        backbone_output_features = self.__get_last_shape(input_shape, self.backbone)[-1]
        self.fully_connected = nn.Sequential(
            nn.Linear(in_features=backbone_output_features, out_features=512),
            nn.BatchNorm1d(num_features=512),
            nn.ReLU(),
            nn.Linear(in_features=512, out_features=128),
            nn.BatchNorm1d(num_features=128),
            nn.ReLU(),
            nn.Linear(in_features=128, out_features=embedding_dim)
        )
        self.model = nn.Sequential(self.backbone, self.fully_connected)
    @staticmethod
    def __get_last_shape(input_shape, block):
        # Shape of the block's output for a dummy batch of input_shape.
        return block(torch.rand(input_shape)).shape
    @staticmethod
    def __pool_block(max_pool_kernel_size, dropout_rate):
        return nn.Sequential(
            nn.MaxPool2d(kernel_size=max_pool_kernel_size),
            nn.Dropout(p=dropout_rate)
        )
    @staticmethod
    def __conv_block(in_channels, out_channels, conv_kernel_size):
        # Conv ('same' padding) -> BatchNorm -> ReLU.
        return nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=conv_kernel_size,
                padding='same',
                bias=True
            ),
            nn.BatchNorm2d(num_features=out_channels),
            nn.ReLU()
        )
    def forward(self, anchor, positive, negative):
        """Embed the triple and return (anchor-positive, anchor-negative)
        pairwise L2 distances."""
        anchor_emd = self.model(anchor)
        positive_emd = self.model(positive)
        negative_emd = self.model(negative)
        anchor_positive_dist = F.pairwise_distance(anchor_emd, positive_emd)
        anchor_negative_dist = F.pairwise_distance(anchor_emd, negative_emd)
        return anchor_positive_dist, anchor_negative_dist
class Market1501TripletMiniVGGReluBeforeBnEval(Market1501TripletMiniVGGReluBeforeBN):
    """Inference variant of Market1501TripletMiniVGGReluBeforeBN: forward()
    returns the embedding of a single image batch."""
    def __init__(
        self,
        input_shape,
        embedding_dim,
        conv_blocks,
        conv_kernel_size=(3, 3),
        max_pool_kernel_size=(2, 2),
        dropout_rate=0.03,
        filters=64
    ):
        super(Market1501TripletMiniVGGReluBeforeBnEval, self).__init__(
            input_shape=input_shape,
            embedding_dim=embedding_dim,
            conv_blocks=conv_blocks,
            conv_kernel_size=conv_kernel_size,
            max_pool_kernel_size=max_pool_kernel_size,
            dropout_rate=dropout_rate,
            filters=filters
        )
    def forward(self, image):
        # Return the embedding for a batch of images.
        return self.model(image)
if __name__ == '__main__':
    # Smoke test: build a 2-block Mini-VGG triplet model and print its
    # torchinfo summary for one (anchor, positive, negative) input triple.
    from torchinfo import summary
    input_shape = (1, 3, 128, 64)
    model = Market1501TripletMiniVGG(
        input_shape,
        embedding_dim=32,
        conv_blocks=2,
    )
    summary(model, (input_shape, input_shape, input_shape))
| StarcoderdataPython |
355363 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "15/09/2016"
import sys
from silx.gui import qt as Qt
print('Using Qt {0}'.format(Qt.qVersion()))
from .XsocsGui import XsocsGui
from .process.MergeWidget import MergeWidget
from .process.QSpaceWidget import QSpaceWidget
def _run_qt_window(window_class, *args, **kwargs):
    """Create a QApplication, show one *window_class* instance and run the
    Qt event loop until the last window is closed.

    Shared by the three launchers below, which previously triplicated
    this boilerplate.
    """
    app = Qt.QApplication(sys.argv)
    app.setQuitOnLastWindowClosed(True)
    window = window_class(*args, **kwargs)
    window.show()
    app.exec_()


def merge_window(*args, **kwargs):
    """Launch the stand-alone scan-merge window."""
    _run_qt_window(MergeWidget, *args, **kwargs)


def conversion_window(*args, **kwargs):
    """Launch the stand-alone Q-space conversion window."""
    _run_qt_window(QSpaceWidget, *args, **kwargs)


def xsocs_main(*args, **kwargs):
    """Launch the main X-SOCS GUI window."""
    _run_qt_window(XsocsGui, *args, **kwargs)
| StarcoderdataPython |
173176 | from torch import nn
import torch
from model.Swin import SwinTransformer3D
import copy
class swin_encoder(nn.Module):
    """Video Swin Transformer (Swin-B config) feature encoder.

    Loads backbone weights from a checkpoint whose keys are prefixed with
    'backbone.' and exposes a forward() that returns pooled per-clip
    features.
    """
    def __init__(self , device , drop , checkpoint_encoder):
        super().__init__()
        checkpoint = checkpoint_encoder
        self.device=device
        # Kinetics-400 label map path; not used in forward() as shown here.
        self.label= 'demo/label_map_k400.txt'
        # Swin-B 3D configuration (embed_dim=128, depths 2/2/18/2).
        self.model=SwinTransformer3D(
            embed_dim=128,
            depths=[2, 2, 18, 2],
            num_heads=[4, 8, 16, 32],
            window_size=(8, 7, 7),
            patch_size=(2, 4, 4),
            drop_path_rate=0.1,
            mlp_ratio=4.,
            qkv_bias=True,
            qk_scale=None,
            drop_rate=drop,
            attn_drop_rate=drop,
            patch_norm=True)
        checkpoint = torch.load(checkpoint, map_location=device)
        # The checkpoint stores backbone weights under 'backbone.<name>';
        # strip that prefix so keys match SwinTransformer3D's state dict.
        state_dict2 = copy.deepcopy(checkpoint['state_dict'])
        state_dict = dict()
        for key in checkpoint['state_dict']:
            if 'backbone.' in key:
                new_key = key.replace('backbone.', '')
                state_dict[new_key] = state_dict2.pop(key)
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(device)
        self.max_testing_views = None
    def forward(self, imgs):
        """Encode a batch of clips and return pooled features.

        Assumes imgs is (batch, clips, C, T, H, W) -- the first two dims are
        flattened before the backbone -- TODO confirm against callers.
        """
        batches = imgs.shape[0]
        # Fold the per-sample clip dimension into the batch dimension.
        imgs = imgs.reshape((-1,) + imgs.shape[2:])
        feat = self.model.forward(imgs)
        # perform spatio-temporal pooling
        avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        feat = avg_pool(feat)
        # squeeze dimensions
        feat = feat.view(batches, feat.shape[1], feat.shape[2])
        # Reorder to (batch, tokens, channels).
        feat = feat.permute(0, 2, 1)
        return feat
| StarcoderdataPython |
1900085 | <reponame>KuangChih/Design-for-IoT-Middleware<filename>Lab2/Lab2-2.py
// Arduino sketch setup: touch pad wired to digital pin 2, on-board LED on 13.
void setup() {
  pinMode(2, INPUT);
  pinMode(13, OUTPUT); //pin 13 for the Led on board
}
// Mirror the touch pad state onto the LED on every iteration.
void loop() {
  int touchPadState = digitalRead(2);
  if (touchPadState == HIGH) { //touched
    digitalWrite(13, HIGH);
  } else {
    digitalWrite(13, LOW);
  }
}
| StarcoderdataPython |
236173 | <filename>Problem_100_199/euler_125.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 125
The palindromic number 595 is interesting because it can be written as the sum of consecutive squares: 62 + 72 + 82 + 92 + 102 + 112 + 122.
There are exactly eleven palindromes below one-thousand that can be written as consecutive square sums, and the sum of these palindromes is 4164.
Note that 1 = 02 + 12 has not been included as this problem is concerned with the squares of positive integers.
Find the sum of all the numbers less than 108 that are both palindromic and can be written as the sum of consecutive squares.
'''
from util import is_palindromic
def p125():
    """Project Euler 125: sum of all palindromic numbers below 10**8 that
    are sums of at least two consecutive positive squares."""
    # Answer: 2906969179, 0.29s
    LIMIT = 100000000
    squares = [k * k for k in range(1, 10001)]
    palindromic_sums = set()
    for start in range(len(squares)):
        running = squares[start]
        # Extend the run of consecutive squares until the sum exceeds LIMIT.
        for end in range(start + 1, len(squares)):
            running += squares[end]
            if running > LIMIT:
                break
            if is_palindromic(str(running)):
                # A set de-duplicates sums reachable from several runs.
                palindromic_sums.add(running)
    print(sum(palindromic_sums))
    return

p125()
| StarcoderdataPython |
36452 | import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import numpy as np
import time
class MPManager(object):
    """Fan work out over a pool of processes (SIMD-style helper)."""

    def __init__(self, num_workers):
        """
        manage a single-instruction-multiple-data (SIMD) scheme

        :param int num_workers: The number of processors to run.
        """
        # set_start_method() raises RuntimeError if the start method was
        # already fixed for this interpreter (e.g. by an earlier MPManager);
        # in that case keep the existing method instead of crashing.
        try:
            mp.set_start_method('spawn')
        except RuntimeError:
            pass
        # Number of worker processes in the pool.
        self.num_workers = num_workers
        # A pool of processes
        self.pool = mp.Pool(processes=num_workers)

    def run(self, function, arguments):
        """
        :param function : the instruction
        :param arguments : list of things processors loop over
                           can be anything the function works on, e.g. model + data
        :return: list with one result per element of *arguments*
        """
        return self.pool.map(function, arguments)

    def close(self):
        """Shut the pool down and wait for the workers to exit.

        New helper: the original never released the pool's processes.
        """
        self.pool.close()
        self.pool.join()
| StarcoderdataPython |
12851714 | from collections import deque
from itertools import islice
from .base import RollingObject
class Apply(RollingObject):
    """Rolling-window iterator applying a callable to each window.

    Parameters
    ----------
    iterable : any iterable object
    window_size : int, size of the window moving over the iterable
    operation : callable (default sum), applied to each window

    Complexity
    ----------
    Update time depends on *operation*; memory usage is O(k) where k is
    the window size.

    Examples
    --------
    >>> import rolling
    >>> r_sum = rolling.Apply((8, 1, 1, 3, 6, 5), 3, operation=sum)
    >>> next(r_sum)
    10
    >>> next(r_sum)
    5
    >>> r_rev = rolling.Apply((8, 1, 1, 3), 4, operation=lambda x: list(reversed(x)))
    >>> list(r_rev)
    [[3, 1, 1, 8]]
    """

    def _init_fixed(self, iterable, window_size, operation=sum, **kwargs):
        # Pre-load window_size - 1 items so the first update completes
        # the initial fixed-size window.
        initial_items = islice(self._iterator, window_size - 1)
        self._buffer = deque(initial_items, maxlen=window_size)
        self._operation = operation

    def _init_variable(self, iterable, window_size, operation=sum, **kwargs):
        # Variable windows start empty and grow up to window_size.
        self._buffer = deque(maxlen=window_size)
        self._operation = operation

    @property
    def current_value(self):
        # Re-apply the operation to the whole buffer on each access.
        return self._operation(self._buffer)

    def _add_new(self, new):
        self._buffer.append(new)

    def _remove_old(self):
        self._buffer.popleft()

    def _update_window(self, new):
        # maxlen on the deque evicts the oldest item automatically.
        self._buffer.append(new)

    @property
    def _obs(self):
        # Number of observations currently held in the window.
        return len(self._buffer)

    def __repr__(self):
        op_name = self._operation.__name__
        return (
            "Rolling(operation='{}', window_size={}, window_type='{}')"
            .format(op_name, self.window_size, self.window_type)
        )
| StarcoderdataPython |
3545783 | #!/usr/bin/env python3
# Copyright © 2019-2020 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Generates release notes for a given version of mesa."""
import asyncio
import datetime
import os
import pathlib
import re
import subprocess
import sys
import textwrap
import typing
import urllib.parse
import aiohttp
from mako.template import Template
from mako import exceptions
CURRENT_GL_VERSION = '4.6'
CURRENT_VK_VERSION = '1.2'
TEMPLATE = Template(textwrap.dedent("""\
${header}
${header_underline}
%if not bugfix:
Mesa ${this_version} is a new development release. People who are concerned
with stability and reliability should stick with a previous release or
wait for Mesa ${this_version[:-1]}1.
%else:
Mesa ${this_version} is a bug fix release which fixes bugs found since the ${previous_version} release.
%endif
Mesa ${this_version} implements the OpenGL ${gl_version} API, but the version reported by
glGetString(GL_VERSION) or glGetIntegerv(GL_MAJOR_VERSION) /
glGetIntegerv(GL_MINOR_VERSION) depends on the particular driver being used.
Some drivers don't support all the features required in OpenGL ${gl_version}. OpenGL
${gl_version} is **only** available if requested at context creation.
Compatibility contexts may report a lower version depending on each driver.
Mesa ${this_version} implements the Vulkan ${vk_version} API, but the version reported by
the apiVersion property of the VkPhysicalDeviceProperties struct
depends on the particular driver being used.
SHA256 checksum
---------------
::
TBD.
New features
------------
%for f in features:
- ${rst_escape(f)}
%endfor
Bug fixes
---------
%for b in bugs:
- ${rst_escape(b)}
%endfor
Changes
-------
%for c, author_line in changes:
%if author_line:
${rst_escape(c)}
%else:
- ${rst_escape(c)}
%endif
%endfor
"""))
def rst_escape(unsafe_str: str) -> str:
    """Backslash-escape rST special characters that directly follow
    start-of-string/whitespace or directly precede whitespace/end-of-string."""
    char_class = re.escape(r'`<>*_#[]|')
    leading = re.compile(r'(^|\s)([' + char_class + r'])')
    trailing = re.compile(r'([' + char_class + r'])(\s|$)')
    escaped = leading.sub(r'\1\\\2', unsafe_str)
    return trailing.sub(r'\\\1\2', escaped)
async def gather_commits(version: str) -> str:
    """Return the one-line git log of commits since tag mesa-<version>
    whose body contains a "Closes: <url>" or "Closes: #<id>" reference."""
    p = await asyncio.create_subprocess_exec(
        'git', 'log', '--oneline', f'mesa-{version}..', '--grep', r'Closes: \(https\|#\).*',
        stdout=asyncio.subprocess.PIPE)
    out, _ = await p.communicate()
    assert p.returncode == 0, f"git log didn't work: {version}"
    return out.decode().strip()
async def gather_bugs(version: str) -> typing.List[str]:
    """Return the titles of all GitLab issues closed since mesa-<version>.

    Walks each "Closes:"-tagged commit, extracts the issue id from the last
    Closes: line of its body, then resolves titles via the GitLab API.
    Returns ['None'] when no issues were closed.
    """
    commits = await gather_commits(version)
    issues: typing.List[str] = []
    for commit in commits.split('\n'):
        sha, message = commit.split(maxsplit=1)
        # Fetch only the body (%b) of this single commit.
        p = await asyncio.create_subprocess_exec(
            'git', 'log', '--max-count', '1', r'--format=%b', sha,
            stdout=asyncio.subprocess.PIPE)
        _out, _ = await p.communicate()
        out = _out.decode().split('\n')
        # Take the last "Closes:" line in the body.
        for line in reversed(out):
            if line.startswith('Closes:'):
                # NOTE(review): lstrip('Closes:') strips a character SET, not
                # the prefix; it works here only because the value is
                # separated by a space -- confirm before reuse.
                bug = line.lstrip('Closes:').strip()
                break
        else:
            raise Exception('No closes found?')
        if bug.startswith('h'):
            # This means we have a bug in the form "Closes: https://..."
            issues.append(os.path.basename(urllib.parse.urlparse(bug).path))
        else:
            issues.append(bug.lstrip('#'))
    loop = asyncio.get_event_loop()
    async with aiohttp.ClientSession(loop=loop) as session:
        results = await asyncio.gather(*[get_bug(session, i) for i in issues])
        typing.cast(typing.Tuple[str, ...], results)
        bugs = list(results)
        if not bugs:
            bugs = ['None']
        return bugs
async def get_bug(session: aiohttp.ClientSession, bug_id: str) -> str:
    """Query gitlab to get the name of the issue that was closed."""
    # Mesa's gitlab id is 176,
    url = 'https://gitlab.freedesktop.org/api/v4/projects/176/issues'
    params = {'iids[]': bug_id}
    async with session.get(url, params=params) as response:
        content = await response.json()
    # First (and only) match for this iid; its title is the issue name.
    return content[0]['title']
async def get_shortlog(version: str) -> str:
    """Call git shortlog."""
    # Shortlog of everything since the mesa-<version> tag.
    p = await asyncio.create_subprocess_exec('git', 'shortlog', f'mesa-{version}..',
                                             stdout=asyncio.subprocess.PIPE)
    out, _ = await p.communicate()
    assert p.returncode == 0, 'error getting shortlog'
    assert out is not None, 'just for mypy'
    return out.decode()
def walk_shortlog(log: str) -> typing.Generator[typing.Tuple[str, bool], None, None]:
    """Yield (text, is_author_line) pairs from git-shortlog output.

    Indented lines are commit descriptions (yielded stripped of their
    leading whitespace, flagged False); non-blank unindented lines are
    author headers (flagged True); blank lines are skipped.
    """
    for line in log.split('\n'):
        if line.startswith(' '):
            # Indented: a patch description under the current author.
            yield line.lstrip(), False
        elif line.strip():
            # Unindented and non-blank: an author header line.
            yield line, True
def calculate_next_version(version: str, is_point: bool) -> str:
    """Calculate the version about to be released.

    For a point release the patch component is bumped; otherwise (an -rc
    promoting to final) the version with any pre-release suffix removed
    is returned unchanged.
    """
    # Drop any pre-release suffix such as "-rc3".
    plain = version.split('-')[0]
    if not is_point:
        return plain
    parts = plain.split('.')
    parts[2] = str(int(parts[2]) + 1)
    return '.'.join(parts)
def calculate_previous_version(version: str, is_point: bool) -> str:
    """Calculate the previous version to compare against.

    For -rc-to-final that is the previous .0 release (19.3.0 before
    20.0.0); for point releases it is the version itself. Assumes a
    four-quarter release cadence, so minor 0 wraps back to <major-1>.3.
    """
    # Drop any pre-release suffix such as "-rc1".
    plain = version.split('-')[0]
    if is_point:
        return plain
    parts = plain.split('.')
    if parts[1] == '0':
        # First release of a year: previous is last year's .3 series.
        parts[0] = str(int(parts[0]) - 1)
        parts[1] = '3'
    else:
        parts[1] = str(int(parts[1]) - 1)
    return '.'.join(parts)
def get_features(is_point_release: bool) -> typing.Generator[str, None, None]:
    """Yield the new-features lines for the release notes.

    Reads docs/relnotes/new_features.txt if present (deleting it afterwards
    so features are announced only once); yields "None" when absent.
    """
    p = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / 'new_features.txt'
    if p.exists():
        if is_point_release:
            print("WARNING: new features being introduced in a point release", file=sys.stderr)
        with p.open('rt') as f:
            for line in f:
                yield line
            # NOTE(review): this is a for/else with no break, so the else
            # branch ALWAYS runs and "None" is yielded after the file's
            # lines -- confirm whether that trailing "None" is intended.
            else:
                yield "None"
        p.unlink()
    else:
        yield "None"
async def main() -> None:
    """Generate docs/relnotes/<next-version>.rst and commit it.

    Reads VERSION, derives the previous/next version numbers, gathers the
    shortlog and closed bugs concurrently, renders the Mako template and
    stages + commits the resulting file with git.
    """
    v = pathlib.Path(__file__).parent.parent / 'VERSION'
    with v.open('rt') as f:
        raw_version = f.read().strip()
    # No "-rc" suffix means we are preparing a point release.
    is_point_release = '-rc' not in raw_version
    assert '-devel' not in raw_version, 'Do not run this script on -devel'
    version = raw_version.split('-')[0]
    previous_version = calculate_previous_version(version, is_point_release)
    this_version = calculate_next_version(version, is_point_release)
    today = datetime.date.today()
    header = f'Mesa {this_version} Release Notes / {today}'
    header_underline = '=' * len(header)
    # Shortlog and bug titles are independent; fetch them concurrently.
    shortlog, bugs = await asyncio.gather(
        get_shortlog(previous_version),
        gather_bugs(previous_version),
    )
    final = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / f'{this_version}.rst'
    with final.open('wt') as f:
        try:
            f.write(TEMPLATE.render(
                bugfix=is_point_release,
                bugs=bugs,
                changes=walk_shortlog(shortlog),
                features=get_features(is_point_release),
                gl_version=CURRENT_GL_VERSION,
                this_version=this_version,
                header=header,
                header_underline=header_underline,
                previous_version=previous_version,
                vk_version=CURRENT_VK_VERSION,
                rst_escape=rst_escape,
            ))
        except:
            # NOTE(review): bare except prints the Mako traceback but then
            # falls through and still commits the (possibly empty) file --
            # confirm this is the intended failure behaviour.
            print(exceptions.text_error_template().render())
    subprocess.run(['git', 'add', final])
    subprocess.run(['git', 'commit', '-m',
                    f'docs: add release notes for {this_version}'])
if __name__ == "__main__":
    # Drive the async entry point to completion on the default event loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| StarcoderdataPython |
11359010 | import unittest
import sys
# If PATH is properly configured by your IDE you don't
# need this weird fix to dynamically add solutions to PATH
sys.path.append("../solutions")
from proth_theorem import proth
# from solutions.proth_theorem import proth
class ProthTest(unittest.TestCase):
    """Tests for proth(): asserts whether each sampled odd number is a
    Proth prime (a prime of the form k * 2**n + 1 with k < 2**n).

    NOTE(review): these repetitive cases could be collapsed into one
    data-driven test with subTest, at the cost of changing the reported
    test names.
    """
    def test_3_proth_prime(self):
        self.assertEqual(proth(3), True)
    def test_5_proth_prime(self):
        self.assertEqual(proth(5), True)
    def test_9_proth_prime(self):
        self.assertEqual(proth(9), False)
    def test_13_proth_prime(self):
        self.assertEqual(proth(13), True)
    def test_17_proth_prime(self):
        self.assertEqual(proth(17), True)
    def test_25_proth_prime(self):
        self.assertEqual(proth(25), False)
    def test_33_proth_prime(self):
        self.assertEqual(proth(33), False)
    def test_41_proth_prime(self):
        self.assertEqual(proth(41), True)
    def test_49_proth_prime(self):
        self.assertEqual(proth(49), False)
    def test_57_proth_prime(self):
        self.assertEqual(proth(57), False)
    def test_65_proth_prime(self):
        self.assertEqual(proth(65), False)
    def test_81_proth_prime(self):
        self.assertEqual(proth(81), False)
    def test_97_proth_prime(self):
        self.assertEqual(proth(97), True)
    def test_113_proth_prime(self):
        self.assertEqual(proth(113), True)
    def test_129_proth_prime(self):
        self.assertEqual(proth(129), False)
    def test_145_proth_prime(self):
        self.assertEqual(proth(145), False)
    def test_161_proth_prime(self):
        self.assertEqual(proth(161), False)
    def test_177_proth_prime(self):
        self.assertEqual(proth(177), False)
    def test_193_proth_prime(self):
        self.assertEqual(proth(193), True)
    def test_209_proth_prime(self):
        self.assertEqual(proth(209), False)
    def test_225_proth_prime(self):
        self.assertEqual(proth(225), False)
    def test_241_proth_prime(self):
        self.assertEqual(proth(241), True)
    def test_257_proth_prime(self):
        self.assertEqual(proth(257), True)
    def test_289_proth_prime(self):
        self.assertEqual(proth(289), False)
    def test_321_proth_prime(self):
        self.assertEqual(proth(321), False)
    def test_353_proth_prime(self):
        self.assertEqual(proth(353), True)
    def test_385_proth_prime(self):
        self.assertEqual(proth(385), False)
    def test_417_proth_prime(self):
        self.assertEqual(proth(417), False)
    def test_449_proth_prime(self):
        self.assertEqual(proth(449), True)
    def test_481_proth_prime(self):
        self.assertEqual(proth(481), False)
    def test_513_proth_prime(self):
        self.assertEqual(proth(513), False)
    def test_545_proth_prime(self):
        self.assertEqual(proth(545), False)
    def test_577_proth_prime(self):
        self.assertEqual(proth(577), True)
    def test_609_proth_prime(self):
        self.assertEqual(proth(609), False)
    def test_641_proth_prime(self):
        self.assertEqual(proth(641), True)
    def test_673_proth_prime(self):
        self.assertEqual(proth(673), True)
    def test_705_proth_prime(self):
        self.assertEqual(proth(705), False)
    def test_737_proth_prime(self):
        self.assertEqual(proth(737), False)
    def test_769_proth_prime(self):
        self.assertEqual(proth(769), True)
    def test_801_proth_prime(self):
        self.assertEqual(proth(801), False)
if __name__ == "__main__":
    # Discover and run the ProthTest cases via unittest's CLI runner.
    unittest.main()
| StarcoderdataPython |
9611696 | <filename>grammartest.py
######importe######
import nltk
from nltk import word_tokenize
import nltk.corpus.reader.tagged as tagged
import nltk.tag.hmm as hmm
from grammardefs import *
# Train an HMM POS tagger on the TIGER corpus (CoNLL-format file 'tiger9').
corpus_tiger= nltk.corpus.ConllCorpusReader('.', 'tiger9', ['ignore', 'words', 'ignore', 'ignore', 'pos'],encoding='utf-8')
trainer = hmm.HiddenMarkovModelTrainer()
model = trainer.train(corpus_tiger.tagged_sents())
######code#######
# NOTE(review): everything below the tagging step is pseudocode, not valid
# Python -- `get_def VAFIN(...)`, the top-level `return` statements, the bare
# `.` lines and `=` used inside the final `if` condition will all raise
# SyntaxError. This file documents intended logic rather than running code.
eingabe= input(">>> ")
token= word_tokenize(eingabe)
pos= model.tag(token)
for word in pos: # iterate over every tagged word of the input
    if "VAFIN" in word:
        get_def VAFIN(word[1]) # if the word is a finite auxiliary verb, call the matching function
        return verb # the function assigns features to the verb, e.g. verb=[S]
        # (if we leave out personal pronouns, we only need singular/plural)
    if "NN" in word:
        get_def NN # assignment here, e.g. nomen=[M, S]
        return nomen
    if "PDS" in word:
        get_def PDS # assignment here, e.g. artikel=[M, S]
        return artikel
    .
    .
    .
    if artikel[0] = nomen[0] and artikel[1]= nomen[1] and nomen[1] = verb [0]: # if all features agree, the sentence is correct
        print ("Der Satz ist korrekt!")
    else:
        print ("Der Satz ist inkorrekt!")
# for article and noun, [0] = gender and [1] = number
# for the verb, [0] = number
| StarcoderdataPython |
6689718 | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
    """Sign-up form extending Django's UserCreationForm with name and
    email fields and friendlier labels for username/email."""

    class Meta:
        # 'password1'/'password2' are the password/confirmation pair
        # declared by UserCreationForm (form fields, not model fields);
        # the previous '<PASSWORD>' placeholders were redaction artifacts
        # and not valid (duplicate) field names.
        fields = ('first_name', 'last_name', 'username', 'email', 'password1', 'password2')
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        # Let the parent ModelForm build self.fields before relabeling.
        super().__init__(*args, **kwargs)
        # Friendlier labels on the rendered sign-up form.
        self.fields['username'].label = 'Display Name'
        self.fields['email'].label = 'Email Address'
| StarcoderdataPython |
210177 | <reponame>PeakerBee/aioms
import json
from kazoo.client import KazooClient
from discovery.instance import ServiceInstance
from discovery.service import ServiceProvider
class ZookeeperServiceRegistry(ServiceProvider):
    """Service registry backed by ZooKeeper.

    Each instance is published under
    ``<root_path>/<service_id>/<instance_id>`` as a JSON document built
    from the :class:`ServiceInstance` object's ``__dict__``.
    """

    def __init__(self, zookeeper: 'KazooClient', root_path: str):
        self.zookeeper = zookeeper  # kazoo client (session managed via start/stop)
        self.root_path = root_path  # base znode path for all services

    def register_service(self, service: 'ServiceInstance') -> None:
        """Serialize *service* to JSON and store it at its znode.

        NOTE(review): ``KazooClient.set`` requires the node to already
        exist -- confirm the path is pre-created elsewhere; otherwise
        ``create(..., ephemeral=True, makepath=True)`` would be needed.
        """
        path = f'{self.root_path}/{service.get_service_id()}/{service.get_instance_id()}'
        service_data = json.dumps(service, default=lambda x: x.__dict__)
        # kazoo only accepts bytes payloads; passing a str raises TypeError.
        self.zookeeper.set(path, service_data.encode('utf-8'))

    def start(self) -> None:
        """Open the ZooKeeper session."""
        self.zookeeper.start()

    def stop(self) -> None:
        """Close the ZooKeeper session."""
        self.zookeeper.stop()
114882 | <filename>docs/pyplots/volumetrics.py
import matplotlib.pyplot as plt
import numpy as np
# Tunables for the slice-to-depth mapping used by volumeZToDepth below.
exp = 2.0  # exponent of the z -> depth curve (depth grows as (z/volumeDepth)**exp)
near = 1.0  # constant depth offset: depth at volume z == 0
far = 10000.0  # depth scale: depth at z == volumeDepth is far + near
volumeDepth = 128.0  # number of volume slices along z
def volumeZToDepth(z, exp=2.0, near=1.0, far=10000.0, volumeDepth=128.0):
    """Map a volume slice index (scalar or array) to depth.

    depth = (z / volumeDepth) ** exp * far + near, i.e. depth resolution is
    biased toward small z (near the camera) for exp > 1.

    The keyword parameters default to the module-level constants' values,
    so existing single-argument calls behave exactly as before, while
    callers may now override the curve without editing globals.
    """
    return np.power(z / volumeDepth, exp) * far + near
t1 = np.arange(0.0, volumeDepth, 1.0)  # one sample per volume slice: 0..127
# Plot the mapping both as discrete blue markers ('bo') and a black line ('k').
plt.plot(t1, volumeZToDepth(t1), 'bo', t1, volumeZToDepth(t1), 'k')
plt.ylabel('Depth')
plt.xlabel('Volume Z')
plt.show()
| StarcoderdataPython |
11357836 | <reponame>RohanMiraje/DSAwithPython<filename>DSA/linked_list/single_linked_list/head_tail_linked_list.py
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value):
        # Payload plus the link to the following node (None while detached).
        self.data, self.next = value, None
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_at_beg(self, value):
new_node = Node(value)
if not self.head:
self.tail = new_node
self.head = new_node
return
new_node.next = self.head
self.head = new_node
def insert_at_last(self, value):
new_node = Node(value)
if not self.tail:
self.tail = new_node
self.head = new_node
return
self.tail.next = new_node
self.tail = new_node
def print_list(self):
temp = self.head
while temp:
print(temp.data, end=" ")
temp = temp.next
print("\n")
if __name__ == '__main__':
    # Demo: append 1..5 at the tail, then prepend 11..15 at the head.
    # Expected output: 15 14 13 12 11 1 2 3 4 5
    ll = LinkedList()
    ll.insert_at_last(1)
    ll.insert_at_last(2)
    ll.insert_at_last(3)
    ll.insert_at_last(4)
    ll.insert_at_last(5)
    ll.insert_at_beg(11)
    ll.insert_at_beg(12)
    ll.insert_at_beg(13)
    ll.insert_at_beg(14)
    ll.insert_at_beg(15)
    ll.print_list()
| StarcoderdataPython |
8142040 | from mp_api.routes.thermo.query_operators import IsStableQuery
from monty.tempfile import ScratchDir
from monty.serialization import loadfn, dumpfn
def test_is_stable_operator():
    """IsStableQuery builds the expected criteria and survives a JSON
    round trip via monty's dumpfn/loadfn."""
    expected = {"criteria": {"is_stable": True}}

    operator = IsStableQuery()
    assert operator.query(is_stable=True) == expected

    with ScratchDir("."):
        # Serialize, reload, and confirm the restored operator behaves
        # identically to the original.
        dumpfn(operator, "temp.json")
        restored = loadfn("temp.json")
        assert restored.query(is_stable=True) == expected
| StarcoderdataPython |
11348128 | <gh_stars>10-100
# Created by MechAviv
# Valentine Damage Skin | (2439897)
# `sm` is presumably the script-manager object injected by the game server's
# scripting host -- it is not defined in this file; confirm against the host.
# Try to add damage skin 2439897 and notify the player only on success.
if sm.addDamageSkin(2439897):
    sm.chat("'Valentine Damage Skin' Damage Skin has been added to your account's damage skin collection.")
# Consume the triggering item regardless of the outcome above.
sm.consumeItem()
3389272 | import itertools
from spw_pkg_guard import spw_pkg_guard
from nMigen_test import mytest, runtests, helper, uut_iface
DEBUG = False  # passed to runtests(debug=...) in the __main__ block below
VERBOSE = DEBUG  # forwarded to uut_iface() by each test case
addr_width = 8  # NOTE(review): unused in this file -- confirm before removing
cnt_width = 32  # NOTE(review): unused in this file -- confirm before removing
@mytest
class handshake_test(helper):
    """Raise iHandshake and wait until the DUT raises oHandshake."""
    def get_test_processes(self):
        self.ui = ui = uut_iface(spw_pkg_guard(), VERBOSE)
        # NOTE(review): pkg0/pkg1 are generated but never used in this test.
        pkg0 = self.gen(20)
        pkg1 = self.gen(20)
        def rd():
            # Assert the handshake request, then poll once per tick for the ack.
            yield ui.uut.iHandshake.eq(1)
            while (yield ui.uut.oHandshake) == 0:
                yield from self.ticks(1)
        return (ui.uut, [rd])
@mytest
class one_pkg_test(helper):
    """Write one packet, then read it back under a full handshake cycle."""
    def get_test_processes(self):
        self.ui = ui = uut_iface(spw_pkg_guard(), VERBOSE)
        # pkg0 is written; pkg1 is handed to Read() -- presumably the
        # expected read-back data (confirm in uut_iface).
        pkg0 = self.gen(20)
        pkg1 = self.gen(20)
        def wr():
            # Writer process: push the packet into the guard.
            yield from self.wr_pkg(pkg0)
        def rd():
            # Request the handshake and wait for the DUT to acknowledge.
            yield ui.uut.iHandshake.eq(1)
            while (yield ui.uut.oHandshake) == 0:
                yield from self.ticks(1)
            yield from ui.di_rd.Read(pkg1)
            # Drop the request and wait for the acknowledge to fall.
            yield ui.uut.iHandshake.eq(0)
            while (yield ui.uut.oHandshake) == 1:
                yield from self.ticks(1)
            yield ui.uut.iHandshake.eq(0)  # NOTE(review): redundant -- already 0
        return (ui.uut, [wr, rd])
@mytest
class two_pkg_test(helper):
    """Write two packets back to back, then read each one under its own
    handshake cycle."""
    def get_test_processes(self):
        self.ui = ui = uut_iface(spw_pkg_guard(), VERBOSE)
        # pkg0/pkg2 are written; pkg1/pkg3 are handed to Read() --
        # presumably the expected read-back data (confirm in uut_iface).
        pkg0 = self.gen(20)
        pkg1 = self.gen(20)
        pkg2 = self.gen(20)
        pkg3 = self.gen(20)
        def wr():
            # Writer process: push both packets into the guard consecutively.
            yield from self.wr_pkg(pkg0)
            yield from self.wr_pkg(pkg2)
        def rd():
            # First handshake cycle: request, wait for ack, read packet 1.
            yield ui.uut.iHandshake.eq(1)
            while (yield ui.uut.oHandshake) == 0:
                yield from self.ticks(1)
            yield from ui.di_rd.Read(pkg1)
            # Drop the request and wait for the ack to fall.
            yield ui.uut.iHandshake.eq(0)
            while (yield ui.uut.oHandshake) == 1:
                yield from self.ticks(1)
            # Second handshake cycle for packet 2.
            yield ui.uut.iHandshake.eq(1)
            while (yield ui.uut.oHandshake) == 0:
                yield from self.ticks(1)
            yield from ui.di_rd.Read(pkg3)
            yield ui.uut.iHandshake.eq(0)
            while (yield ui.uut.oHandshake) == 1:
                yield from self.ticks(1)
        return (ui.uut, [wr, rd])
if __name__ == "__main__":
    # Run every @mytest-registered test case in this module.
    print("start")
    runtests(debug=DEBUG)
    print("done")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.