id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3386335 | import time
from django.core.wsgi import get_wsgi_application
import os
import subprocess
import logging
from loghandler.loghandler import setup_logging
setup_logging()
logger = logging.getLogger(__name__)
# Django specific settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# Ensure settings are read
application = get_wsgi_application()
# Imported only after the WSGI app is created so Django's app registry is ready.
from database.models import Settings
# Fixed delay before reading the start command -- presumably waiting for a
# dependent service (e.g. the database) to come up; TODO confirm why 25s.
logger.info("sleeping ...")
time.sleep(25)
logger.info("waking ...")
# The "startCommando" Settings row holds the command line to launch.
startCommand = Settings.objects.get(name="startCommando")
startCommand = startCommand.value.split(" ")
logger.info(startCommand)
p = subprocess.Popen(startCommand) | StarcoderdataPython |
184922 | import numpy as np
import torch.nn as nn
from networks.ResidualBlocks import ResidualBlock1dTransposeConv
def make_res_block_decoder_feature_generator(channels_in, channels_out, a_val=2.0, b_val=0.3):
    """Build one transposed-conv residual block wrapped in nn.Sequential.

    When the channel count changes, a 1x1 ConvTranspose1d + BatchNorm1d
    shortcut is supplied so the residual addition stays shape-compatible.
    """
    if channels_in == channels_out:
        shortcut = None
    else:
        shortcut = nn.Sequential(
            nn.ConvTranspose1d(channels_in, channels_out,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1,
                               output_padding=0),
            nn.BatchNorm1d(channels_out),
        )
    block = ResidualBlock1dTransposeConv(channels_in, channels_out,
                                         kernelsize=1,
                                         stride=1,
                                         padding=0,
                                         dilation=1,
                                         o_padding=0,
                                         upsample=shortcut,
                                         a=a_val, b=b_val)
    return nn.Sequential(block)
def make_layers_resnet_decoder_feature_generator(start_channels, end_channels, a=2.0, b=0.3, l=1):
    """Stack residual decoder blocks growing the channel count from
    ``start_channels`` towards ``end_channels`` by a factor of ``2**l`` per block.
    """
    layers = [];
    # NOTE(review): np.log is the natural log; for counting doublings one would
    # expect log2 here. The shortfall is compensated by the catch-up block
    # below, but confirm the resulting depth matches the intended design.
    num_decompr_layers = int(1/float(l)*np.floor(np.log(end_channels / float(start_channels))))
    for k in range(0, num_decompr_layers):
        in_channels = start_channels*(2 ** (l*k))
        out_channels = start_channels*(2 ** (l*(k+1)))
        resblock = make_res_block_decoder_feature_generator(in_channels, out_channels, a_val=a, b_val=b);
        layers.append(resblock)
    # If the geometric progression undershoots end_channels, append one final
    # block that jumps straight to the target channel count.
    if start_channels*(2 ** (l*num_decompr_layers)) < end_channels:
        resblock = make_res_block_decoder_feature_generator(start_channels*(2 ** (l*num_decompr_layers)), end_channels, a_val=a, b_val=b);
        layers.append(resblock)
    return nn.Sequential(*layers)
class FeatureGenerator(nn.Module):
    def __init__(self, in_channels, out_channels, a, b, generation_power):
        """Residual decoder that expands latent channels into feature channels.

        Args:
            in_channels: channel count of the latent input.
            out_channels: target channel count of the generated features.
            a, b: shape parameters forwarded to the residual blocks.
            generation_power: channel growth exponent per block (``l``).
        """
        super(FeatureGenerator, self).__init__()
        self.in_channels = in_channels;
        self.out_channels = out_channels;
        self.a = a;
        self.b = b;
        self.generation_power = generation_power;
        self.feature_generator = make_layers_resnet_decoder_feature_generator(self.in_channels,
                                                                              self.out_channels,
                                                                              a=self.a,
                                                                              b=self.b,
                                                                              l=self.generation_power)
    def forward(self, z):
        """Map the latent tensor ``z`` to the generated feature tensor."""
        features = self.feature_generator(z);
return features; | StarcoderdataPython |
3300086 | # x_4_9
#
# 1~15までの数字について
# 「3」で割り切れる場合は「Fizz」、「5」で割り切れる場合は「Buzz」
# 「3」でも「5」でも割り切れる場合は「FizzBuzz」
# それ以外はそのまま数字を表示するようにコードを修正してください
# FizzBuzz for 1..15, as requested by the exercise comments above:
# multiples of 3 -> Fizz, of 5 -> Buzz, of both -> FizzBuzz, else the number.
number = 1
while number <= 15:
    if number % 15 == 0:
        print('FizzBuzz')
    elif number % 3 == 0:
        print('Fizz')
    elif number % 5 == 0:
        print('Buzz')
    else:
        print(number)
    number += 1
| StarcoderdataPython |
1738636 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) <NAME> <<EMAIL>>
## This program is published under a GPLv2 license
from base_classes import *
from config import *
from dadict import *
from data import *
from error import *
from themes import *
from arch import *
from plist import *
from fields import *
from packet import *
from asn1fields import *
from asn1packet import *
from utils import *
from route import *
if conf.ipv6_enabled:
from utils6 import *
from route6 import *
from sendrecv import *
from supersocket import *
from volatile import *
from as_resolvers import *
from ansmachine import *
from automaton import *
from autorun import *
from main import *
from layers.all import *
from layers.igmpv1v2 import *
from layers.igmpv3report import *
from layers.igmpv3query import *
from layers.vrrp import *
from layers.vrrpv3 import *
from layers.ospf import *
from layers.lldp import *
from layers.pim import *
from layers.pim6 import *
from layers.bfd import *
from layers.mrpp import *
from layers.uldp import *
from layers.lacp import *
from layers.ulpparp import *
from layers.ulppmac import *
from layers.msdp import *
from layers.oam import *
from asn1.asn1 import *
from asn1.ber import *
from asn1.mib import *
| StarcoderdataPython |
135807 | from ._pycdb import CDB, CDBMake
| StarcoderdataPython |
3365708 | import argparse
import Bio.SeqIO
from collections import OrderedDict
import numpy as np
import pandas as pd
import re
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--alignment", help="a fasta file")
    parser.add_argument("--gaps", type=int, help="value of gaps per site to remove")
    parser.add_argument("--output-dataframe", help="a csv file")
    parser.add_argument("--output-fasta", help="a fasta file output")
    args = parser.parse_args()
    # Read the alignment, preserving input order of sequences.
    sequences_by_name = OrderedDict()
    for sequence in Bio.SeqIO.parse(args.alignment, "fasta"):
        sequences_by_name[sequence.id] = str(sequence.seq)
    sequence_names = list(sequences_by_name.keys())
    numbers = list(sequences_by_name.values())[:]
    # Recode bases numerically: A=1, G=2, C=3, T=4, anything else (gap/N)=5.
    for i in range(0,len(list(sequences_by_name.values()))):
        numbers[i] = re.sub(r'[^AGCT]', '5', numbers[i])
        numbers[i] = list(numbers[i].replace('A','1').replace('G','2').replace('C', '3').replace('T','4'))
        numbers[i] = [int(j) for j in numbers[i]]
    genomes_df = pd.DataFrame(numbers)
    # NOTE(review): uses `i` left over from the loop above -- assumes all
    # sequences have equal (aligned) length; confirm inputs are aligned.
    genomes_df.columns = ["Site " + str(k) for k in range(0,len(numbers[i]))]
    # Drop columns with more than (number user picks) of bases missing in given site.
    # NOTE(review): .iloc[4] picks the 5th row of the per-column value counts,
    # which is the count of code 5 (gaps) only if all codes 1-5 occur in the
    # alignment; verify this assumption, and note pd.value_counts is deprecated.
    dist_df = genomes_df.apply(pd.value_counts).iloc[4]
    dist_df = pd.DataFrame(dist_df)
    dist_df.columns = ["gaps"]
    print(dist_df)
    index_to_drop = dist_df.where(dist_df["gaps"] > args.gaps).dropna().index.values.tolist()
    print(index_to_drop)
    genomes_df = genomes_df.drop(index_to_drop, axis=1)
    if args.output_dataframe is not None:
        genomes_df.to_csv(args.output_dataframe)
    genomes = genomes_df.values.tolist()
    #converting to FASTA
    new_genomes = []
    if args.output_fasta is not None:
        # Decode the numeric codes back to bases; code 5 becomes 'N'.
        for i in range(0, len(genomes)):
            string_numbers = ''.join(map(str, genomes[i]))
            string_numbers = string_numbers.replace('1','A').replace('2','G').replace('3', 'C').replace('4','T').replace('5', "N")
            new_genomes.append(string_numbers)
        fasta_file = open(args.output_fasta, "w")
        for i in range(len(new_genomes)):
            fasta_file.write(">" + sequence_names[i] + "\n" + new_genomes[i] + "\n")
fasta_file.close() | StarcoderdataPython |
1710232 | <filename>scripts/load_as_rst.py
#!/usr/bin/env python
"""
Used to convert Markdown files to RST for use in sphinx and PyPi.
"""
from __future__ import print_function
import os
import sys
import warnings
# This is called during setup, so we can't be sure six is installed.
# The only thing to pull in is raise_from, though, so we'll just warn
# about the missing exception chain if it's not installed.
try:
from six import raise_from
except ImportError:
warnings.warn('six not found: Exception chaining will not be used in '
'python 3', ImportWarning)
def raise_from(value, from_value):
raise value
# See https://stackoverflow.com/a/23265673/467366 for general approach
def _fallback_load(fname, strict=False, parent_exception=None):
failure_msg = ('pypandoc module not available, returning text in its '
'original format')
if strict and parent_exception is not None:
raise raise_from(ImportError(failure_msg), parent_exception)
else:
warnings.warn(failure_msg, ImportWarning)
with open(fname, 'r') as f:
return f.read()
# Prefer pypandoc for a real conversion to RST; if pypandoc is not even
# importable, bind load_text to the raw-text fallback instead.
try:
    from pypandoc import convert as pypandoc_convert
    def load_text(fname, strict=False):
        """Convert *fname* to RST, falling back to the raw text on failure."""
        try:
            return pypandoc_convert(fname, 'rst')
        except (ImportError, OSError) as e:
            # OSError typically means the pandoc binary itself is missing.
            return _fallback_load(fname, strict=strict, parent_exception=e)
except ImportError as e:
    __err = e
    def load_text(fname, strict=False):
        """pypandoc unavailable: always return the raw text (or raise in strict mode)."""
        return _fallback_load(fname, strict=strict, parent_exception=__err)
def convert(fname_in, fname_out=None, preamble=None, strict=False):
    """
    Converts a file at ``fname_in`` from a format understood by pypandoc
    to ReStructuredText.

    :param fname_in:
        Filename to read.
    :param fname_out:
        Filename to write to - this will be overwritten. If unspecified,
        the converted text is returned.
    :param preamble:
        A preamble to add to the file (e.g. "#This is an auto-generated file")
    :param strict:
        Forwarded to the loader; when True a missing pypandoc raises.
    """
    if fname_in is None:
        raise ValueError('Must specify an input file')
    if not os.path.exists(fname_in):
        raise IOError('File not found: {}'.format(fname_in))
    header = preamble if preamble else ''
    body = load_text(fname_in, strict=strict)
    text = header + '\n' + body
    if fname_out is None:
        return text
    with open(fname_out, 'w') as f:
        print(text, file=f)
    return None  # explicit: file was written, nothing to return
if __name__ == "__main__":
    # Re-imported for when this runs as a script; harmless duplicates of the
    # module-level imports above.
    import argparse
    import os
    desc = ('Loads a file supported by pypandoc as RST and outputs it to '
            'either the stdout or a specified file.')
    parser = argparse.ArgumentParser(desc)
    parser.add_argument('input', type=str, default=None,
                        help='The file to convert to RST.')
    parser.add_argument('-o', '--output', type=str, default=None,
                        help=('The file to which to output the converted text '
                              '(Warning: this will be overwritten if exists)'))
    args = parser.parse_args()
    # convert() returns the text only when no output file was given.
    output = convert(args.input, args.output)
    if output is not None:
        print(output)
| StarcoderdataPython |
1758777 | """
Orthogonal Projection on Latent Structure (O-PLS)
"""
import numpy as np
from numpy import linalg as la
from typing import Tuple, Any, Union
from base import nipals
class OPLS:
"""
Orthogonal Projection on Latent Structure (O-PLS).
Methods
----------
predictive_scores: np.ndarray
First predictive score.
predictive_loadings: np.ndarray
Predictive loadings.
weights_y: np.ndarray
y weights.
orthogonal_loadings: np.ndarray
Orthogonal loadings.
orthogonal_scores: np.ndarray
Orthogonal scores.
"""
    def __init__(self):
        """
        TODO:
        1. add arg for specifying the method for performing PLS
        """
        # All attributes below start as None and are populated by fit().
        # orthogonal score matrix
        self._Tortho: np.ndarray = None
        # orthogonal loadings
        self._Portho: np.ndarray = None
        # loadings
        self._Wortho: np.ndarray = None
        # covariate weights
        self._w: np.ndarray = None
        # predictive scores
        self._T: np.ndarray = None
        self._P: np.ndarray = None
        self._C: np.ndarray = None
        # coefficients
        self.coef: np.ndarray = None
        # total number of components
        self.npc: int = None
    def fit(self, x, y, n_comp=None, dot=np.dot) -> None:
        """
        Fit PLS model.
        Parameters
        ----------
        x: np.ndarray
            Variable matrix with size n samples by p variables.
            NOTE(review): x is deflated in place (``x -= ...`` below), so the
            caller's array is mutated -- confirm this is intended.
        y: np.ndarray
            Dependent matrix with size n samples by 1, or a vector
        n_comp: int
            Number of components, default is None, which indicates that
            largest dimension which is smaller value between n and p
            will be used.
        Returns
        -------
        OPLS object
        Reference
        ---------
        [1] <NAME>, <NAME>. Projection on Latent Structure (OPLS).
        J Chemometrics. 2002, 16, 119-128.
        [2] <NAME>, <NAME>. O2-PLS, a two-block (X-Y) latent variable
        regression (LVR) method with a integral OSC filter.
        <NAME>. 2003, 17, 53-64.
        """
        n, p = x.shape
        npc = min(n, p)
        if n_comp is not None and n_comp < npc:
            npc = n_comp
        # initialization
        Tortho = np.empty((n, npc))
        Portho = np.empty((p, npc))
        Wortho = np.empty((p, npc))
        T, P, C = np.empty((n, npc)), np.empty((p, npc)), np.empty(npc)
        # X-y variations
        tw = dot(y, x) / dot(y, y)
        tw /= la.norm(tw)
        # predictive scores
        tp = dot(x, tw)
        # components
        w, u, _, t = nipals(x, y)
        # NOTE(review): `p` is rebound here from the variable count to the
        # loading vector -- confusing shadowing, but load-bearing below.
        p = dot(t, x) / dot(t, t)
        for nc in range(npc):
            # orthoganol weights
            w_ortho = p - (dot(tw, p) * tw)
            w_ortho /= la.norm(w_ortho)
            # orthoganol scores
            t_ortho = dot(x, w_ortho)
            # orthoganol loadings
            p_ortho = dot(t_ortho, x) / dot(t_ortho, t_ortho)
            # update X to the residue matrix
            x -= t_ortho[:, np.newaxis] * p_ortho
            # save to matrix
            Tortho[:, nc] = t_ortho
            Portho[:, nc] = p_ortho
            Wortho[:, nc] = w_ortho
            # predictive scores
            tp -= t_ortho * dot(p_ortho, tw)
            T[:, nc] = tp
            C[nc] = dot(y, tp) / dot(tp, tp)
            # next component
            w, u, _, t = nipals(x, y)
            p = dot(t, x) / dot(t, t)
            P[:, nc] = p
        self._Tortho = Tortho
        self._Portho = Portho
        self._Wortho = Wortho
        # covariate weights
        self._w = tw
        # coefficients and predictive scores
        self._T = T
        self._P = P
        self._C = C
        self.coef = tw * C[:, np.newaxis]
        self.npc = npc
def predict(
self, X, n_component=None, return_scores=False
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
""" Predict the new coming data matrx. """
if n_component is None or n_component > self.npc:
n_component = self.npc
coef = self.coef[n_component - 1]
y = np.dot(X, coef)
if return_scores:
return y, np.dot(X, self._w)
return y
def correct(
self, x, n_component=None, return_scores=False, dot=np.dot
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Correction of X
Parameters
----------
x: np.ndarray
Data matrix with size n by c, where n is number of
samples, and c is number of variables
n_component: int | None
Number of components. If is None, the number of components
used in fitting the model is used. Default is None.
return_scores: bool
Return orthogonal scores. Default is False.
Returns
-------
xc: np.ndarray
Corrected data, with same matrix size with input X.
t: np.ndarray
Orthogonal score, n by n_component.
"""
# TODO: Check X type and dimension consistencies between X and
# scores in model.
xc = x.copy()
if n_component is None:
n_component = self.npc
if xc.ndim == 1:
t = np.empty(n_component)
for nc in range(n_component):
t_ = dot(xc, self._Wortho[:, nc])
xc -= t_ * self._Portho[:, nc]
t[nc] = t_
else:
n, c = xc.shape
t = np.empty((n, n_component))
# scores
for nc in range(n_component):
t_ = dot(xc, self._Wortho[:, nc])
xc -= t_[:, np.newaxis] * self._Portho[:, nc]
t[:, nc] = t_
if return_scores:
return xc, t
return xc
def predictive_score(self, n_component=None) -> np.ndarray:
"""
Parameters
----------
n_component: int
The component number.
Returns
-------
np.ndarray
The first predictive score.
"""
if n_component is None or n_component > self.npc:
n_component = self.npc
return self._T[:, n_component-1]
def ortho_score(self, n_component=None) -> np.ndarray:
"""
Parameters
----------
n_component: int
The component number.
Returns
-------
np.ndarray
The first orthogonal score.
"""
if n_component is None or n_component > self.npc:
n_component = self.npc
return self._Tortho[:, n_component-1]
    @property
    def predictive_scores(self):
        """ Predictive scores (was mislabelled "Orthogonal loadings"). """
        return self._T
    @property
    def predictive_loadings(self):
        """ Predictive loadings. """
        return self._P
    @property
    def weights_y(self):
        """ y weights. """
        return self._C
    @property
    def orthogonal_loadings(self):
        """ Orthogonal loadings. """
        return self._Portho
    @property
    def orthogonal_scores(self):
        """ Orthogonal scores. """
        return self._Tortho
| StarcoderdataPython |
3347290 | # Generated by Django 2.0.2 on 2018-03-07 20:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Moves ``name`` from protocol to step, and ``date_range``/``start_date``/
    ``end_date`` from step to protocol (also dropping step's FK to protocol).
    Do not hand-edit field definitions; generate a new migration instead.
    """
    dependencies = [
        ('protocols', '0002_auto_20180307_1953'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='protocol',
            name='name',
        ),
        migrations.RemoveField(
            model_name='step',
            name='date_range',
        ),
        migrations.RemoveField(
            model_name='step',
            name='end_date',
        ),
        migrations.RemoveField(
            model_name='step',
            name='protocol',
        ),
        migrations.RemoveField(
            model_name='step',
            name='start_date',
        ),
        migrations.AddField(
            model_name='protocol',
            name='date_range',
            field=models.CharField(default='', max_length=15),
        ),
        migrations.AddField(
            model_name='protocol',
            name='end_date',
            field=models.DateField(default=datetime.date.today),
        ),
        migrations.AddField(
            model_name='protocol',
            name='start_date',
            field=models.DateField(default=datetime.date.today),
        ),
        migrations.AddField(
            model_name='step',
            name='name',
            field=models.CharField(default='MyProtocol', max_length=255),
        ),
    ]
| StarcoderdataPython |
1714678 | import numpy as np
import torch
def parameter_number(model):
    """Count the trainable (requires_grad) parameters of *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def normal2unit(vertices: "(vertice_num, 3)"):
    """Center the point cloud and scale it into the unit sphere.

    Note: *vertices* is modified in place and also returned.
    """
    centroid = vertices.mean(dim=0)
    vertices -= centroid
    radius = vertices.norm(dim=1).max()
    vertices /= radius
    return vertices
def rotate(points, degree: float, axis: int):
    """Rotate *points* by *degree* degrees about the given axis (0, 1 or 2)."""
    assert axis in (0, 1, 2)
    theta = np.pi * degree / 180.0
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # The two remaining axes (in ascending order) span the rotation plane.
    i, j = [d for d in (0, 1, 2) if d != axis]
    rot = torch.eye(3)
    rot[i, i] = cos_t
    rot[i, j] = -sin_t
    rot[j, i] = sin_t
    rot[j, j] = cos_t
    return points @ rot
class Transform():
    """Composable point-cloud augmentation: unit-sphere normalization,
    shift, scale, and rotation about one axis. With ``random=True`` the
    shift and rotation amounts are sampled uniformly in ``[-x, x]``.
    """
    def __init__(self,
                 normal: bool,
                 shift: float = None,
                 scale: float = None,
                 rotate: float = None,
                 axis: int = 0,
                 random: bool = False):
        self.normal = normal
        self.shift = shift
        self.scale = scale
        self.rotate = rotate
        self.axis = axis
        self.random = random
    def __call__(self, points: "(point_num, 3)"):
        if self.normal:
            points = normal2unit(points)
        if self.shift:
            if self.random:
                offset = (torch.rand(3) * 2 - 1) * self.shift
            else:
                offset = self.shift
            points += offset
        if self.scale:
            points *= self.scale
        if self.rotate:
            if self.random:
                degree = (torch.rand(1).item() * 2 - 1) * self.rotate
            else:
                degree = self.rotate
            points = rotate(points, degree, self.axis)
        return points
def test():
    """Smoke test: push a random cloud through a randomized Transform."""
    cloud = torch.randn(1024, 3)
    pipeline = Transform(normal=True, scale=10.0, axis=1, random=True)
    cloud = pipeline(cloud)
    print(cloud.size())
if __name__ == '__main__':
test() | StarcoderdataPython |
3226987 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
#from scrapy.exception import DropItem
import codecs
import json
from datetime import datetime
from hashlib import md5
from scrapy import log
from twisted.enterprise import adbapi
class JsonWriterPipeline(object):
    """Scrapy pipeline that appends each item to ``ans.json`` as one JSON
    object per line (JSON Lines format)."""
    def __init__(self):
        self.file = codecs.open('ans.json', 'wb', encoding='utf-8')
    def process_item(self, item, spider):
        """Serialize the item to a JSON line and pass it on unchanged."""
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item
    def spider_closed(self, spider):
        """Close the output file when the spider finishes."""
        self.file.close()
    # Removed: a stray class-level ``file = codecs.open(filename, ...)`` that
    # referenced an undefined name and raised NameError at import time.
class V2ExPipeline(object):
    """No-op pipeline placeholder: passes every item through unchanged."""
    def process_item(self, item, spider):
        # Returning the item keeps the pipeline chain going.
        return item
class MySQLStorePipeline(object):
    """A pipeline to store the item in a MySQL database.
    This implementation uses Twisted's asynchronous database API.
    """
    def __init__(self, dbpool):
        # dbpool: an adbapi.ConnectionPool built by from_settings().
        self.dbpool = dbpool
    @classmethod
    def from_settings(cls, settings):
        """Alternate constructor: build the connection pool from Scrapy settings."""
        dbargs = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)
    def process_item(self, item, spider):
        # run db query in the thread pool
        d = self.dbpool.runInteraction(self._do_upsert, item, spider)
        d.addErrback(self._handle_error, item, spider)
        # at the end return the item in case of success or failure
        d.addBoth(lambda _: item)
        # return the deferred instead the item. This makes the engine to
        # process next item (according to CONCURRENT_ITEMS setting) after this
        # operation (deferred) has finished.
        return d
    def _do_upsert(self, conn, item, spider):
        """Perform an insert or update."""
        guid = self._get_guid(item)
        # NOTE(review): `now` is computed but never used below -- dead code?
        now = datetime.utcnow().replace(microsecond=0).isoformat(' ')
        conn.execute("""SELECT EXISTS(
            SELECT 1 FROM v2ex WHERE guid = %s
        )""", (guid, ))
        ret = conn.fetchone()[0]
        if ret:
            conn.execute("""
                UPDATE v2ex
                SET link=%s
                WHERE guid=%s
            """, (item['link'], guid))
            spider.log("Item updated in db: %s %r" % (guid, item))
        else:
            conn.execute("""
                INSERT INTO v2ex (guid,link)
                VALUES (%s,%s)
            """, (guid, item['link']))
            spider.log("Item stored in db: %s %r" % (guid, item))
    def _handle_error(self, failure, item, spider):
        """Handle occurred on db interaction."""
        # do nothing, just log
        log.err(failure)
    def _get_guid(self, item):
        """Generates an unique identifier for a given item."""
        # hash based solely in the url field
        # NOTE(review): Python-2-era idiom; on Python 3, md5() requires bytes,
        # so item['link'] would need .encode() -- confirm target version.
        return md5(item['link']).hexdigest()
| StarcoderdataPython |
85674 | import csv
import os
import sys
import typing
import keras
import librosa
import numpy as np
sys.path.append(os.path.dirname(os.path.realpath(__file__))) # TODO(TK): replace this with a correct import when mevonai is a package
import bulkDiarize as bk
default_model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model', 'lstm_cnn_rectangular_lowdropout_trainedoncustomdata.h5')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
classes = ['Neutral', 'Happy', 'Sad', 'Angry', 'Fearful', 'Disgusted', 'Surprised']
class EmotionRecognizer:
    """Wraps the pre-trained Keras speech-emotion model and exposes
    per-emotion probability prediction for a single audio clip."""
    def __init__(self, model_file: typing.Optional[str] = None):
        """Load the Keras model from *model_file*, or the bundled default."""
        if model_file is not None:
            self._model = keras.models.load_model(model_file)
        else:
            self._model = keras.models.load_model(default_model_path)
        self._classes = ('Neutral', 'Happy', 'Sad', 'Angry', 'Fearful', 'Disgusted', 'Surprised')
    def predict_proba(
        self,
        audio_data: typing.Any,  # TODO(TK): replace with np.typing.ArrayLike when numpy upgrades to 1.20+ (conditional on TensorFlow support)
        sample_rate: int,
    ) -> typing.Dict[str, float]:
        """Return {emotion: probability} for one audio signal.

        The 13 MFCC rows are zero-padded to the fixed (13, 216) input shape
        the network was trained on.
        """
        mfccs = librosa.feature.mfcc(y=audio_data, sr=sample_rate, n_mfcc=13)
        result = np.zeros((13, 216))
        result[:mfccs.shape[0], :mfccs.shape[1]] = mfccs
        temp = np.zeros((1, 13, 216))  # np.expand_dims(result, axis=0)
        temp[0] = result
        t = np.expand_dims(temp, axis=3)
        ans = self._model.predict(t).flatten()
        return {emotion: prob for emotion, prob in zip(self._classes, ans)}
def predict(folder, classes, model):
    """Run the emotion model over every diarized clip under *folder*.

    Each subdirectory of *folder* is one recording whose files are speaker
    segments. Returns ``(solutions, filenames)`` where ``solutions[i]`` is
    the list of predicted class labels for ``filenames[i]``, padded with
    'None' so there are at least two entries per recording.
    """
    solutions = []
    filenames = []
    for subdir in os.listdir(folder):
        filenames.append(subdir)
        labels = []
        subdir_path = f'{folder}/{subdir}'
        for clip in os.listdir(subdir_path):
            # Load up to 2.5s of audio and compute 13 MFCCs, zero-padded to
            # the fixed (13, 216) input shape the network was trained on.
            audio, sample_rate = librosa.load(os.path.join(f'{subdir_path}/', clip), res_type='kaiser_fast', duration=2.5, sr=22050*2, offset=0.5)
            mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=13)
            padded = np.zeros((13, 216))
            padded[:mfccs.shape[0], :mfccs.shape[1]] = mfccs
            batch = np.expand_dims(np.expand_dims(padded, axis=0), axis=3)
            scores = model.predict(batch).flatten()
            if scores.shape[0] != len(classes):
                raise RuntimeError("Unexpected number of classes encountered")
            labels.append(classes[np.argmax(scores)])
        if len(labels) < 2:
            labels.append('None')
        solutions.append(labels)
    return solutions, filenames
if __name__ == '__main__':
    model = keras.models.load_model(default_model_path)
    INPUT_FOLDER_PATH = "input/"
    OUTPUT_FOLDER_PATH = "output/"
    # bk.diarizeFromFolder(INPUT_FOLDER_PATH,OUTPUT_FOLDER_PATH)
    # Diarize each recording folder into per-speaker segment files.
    for subdir in os.listdir(INPUT_FOLDER_PATH):
        bk.diarizeFromFolder(f'{INPUT_FOLDER_PATH}{subdir}{"/"}',(f'{OUTPUT_FOLDER_PATH}{subdir}{"/"}'))
        print("Diarized",subdir)
    folder = OUTPUT_FOLDER_PATH
    # Classify emotions per recording and write one CSV per output folder.
    for subdir in os.listdir(folder):
        predictions,filenames = predict(f'{folder}{"/"}{subdir}', classes, model)
        # print("filename:",filenames,",Predictions:",predictions)
        with open('SER_'+subdir+'.csv', 'w') as csvFile:
            writer = csv.writer(csvFile)
            for i in range(len(filenames)):
                csvData = [filenames[i], 'person01',predictions[i][0],'person02',predictions[i][1]]
                print("filename:",filenames[i],",Predicted Emotion := Person1:",predictions[i][0],",Person2:",predictions[i][1])
                writer.writerow(csvData)
        # NOTE(review): redundant -- the with-block above already closed the file.
        csvFile.close()
        os.remove("filterTemp.wav")
| StarcoderdataPython |
38435 | <gh_stars>1-10
from __future__ import print_function
import numpy as np
import dataprocessing as proc
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Training settings
parser = argparse.ArgumentParser(description='BASE Model')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=40, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=40, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
class Net(nn.Module):
    """Wide & Deep model: columns 0-6 feed the wide (linear) path directly,
    columns 10-15 are embedded categoricals and columns 16-20 are numeric
    features for the deep MLP path; both paths are concatenated for the
    final 2-class output. Column indices presumably match the layout
    produced by dataprocessing.get_data -- confirm against that module."""
    def __init__(self):
        super(Net, self).__init__()
        # One embedding per categorical column; cardinalities 9/16/2/6/42/15,
        # each embedded into 8 dims with padding_idx=0.
        self.emb1 = nn.Embedding(9, 8, 0)
        self.emb2 = nn.Embedding(16, 8, 0)
        self.emb3 = nn.Embedding(2, 8, 0)
        self.emb4 = nn.Embedding(6, 8, 0)
        self.emb5 = nn.Embedding(42, 8, 0)
        self.emb6 = nn.Embedding(15, 8, 0)
        # Deep path: 6*8 embedded + 5 numeric = 53 inputs.
        self.linear1 = nn.Linear(53, 100)
        self.linear2 = nn.Linear(100, 50)
        # Final layer: 7 wide + 50 deep = 57 inputs, 2 classes.
        self.linear3 = nn.Linear(57, 2)
    def forward(self, x):
        wide_indices = Variable(torch.LongTensor([0, 1, 2, 3, 4, 5, 6]))
        wide = torch.index_select(x, 1, wide_indices).float()
        deep_indices = Variable(torch.LongTensor([16, 17, 18, 19, 20]))
        x1 = self.emb1(x.select(1, 10))
        x2 = self.emb2(x.select(1, 11))
        x3 = self.emb3(x.select(1, 12))
        x4 = self.emb4(x.select(1, 13))
        x5 = self.emb5(x.select(1, 14))
        x6 = self.emb6(x.select(1, 15))
        x7 = torch.index_select(x.float(), 1, deep_indices).float()
        # NOTE(review): wrapping .data in fresh Variables detaches gradients
        # from the embeddings -- pre-0.4 PyTorch idiom; confirm intended.
        deep = Variable(torch.cat([x1.data, x2.data, x3.data, x4.data, x5.data, x6.data, x7.data], 1))
        deep = F.relu(self.linear1(deep))
        deep = F.relu(self.linear2(deep))
        x = Variable(torch.cat([wide.data, deep.data], 1))
        x = self.linear3(x)
        return F.log_softmax(x)
model = Net()
if args.cuda:
model.cuda()
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def generate_data(data, label, batchSize, data_type='train', shuffle=True):
    """Yield (data, label) mini-batches of size *batchSize*.

    Parameters
    ----------
    data: np.ndarray
        Sample matrix, batched along axis 0.
    label: np.ndarray
        Labels aligned with *data*.
    batchSize: int
        Samples per batch; the last batch may be smaller.
    data_type: str
        Kept for interface compatibility; train and test batches were
        already produced identically.
    shuffle: bool
        When True, permute samples and labels consistently first.
    """
    assert batchSize > 0
    data_len = data.shape[0]
    # Integer division: the old float `/` made range(total_batch) raise
    # TypeError on Python 3.
    total_batch = data_len // batchSize + (1 if data_len % batchSize != 0 else 0)
    if shuffle:
        indices = np.random.permutation(data_len)
        data = data[indices]
        label = label[indices]
    for idx in range(total_batch):
        start = idx * batchSize
        end = min((idx + 1) * batchSize, data_len)
        # Both branches of the old if/else yielded the same tuple; keep one path.
        yield data[start:end], label[start:end]
def train(epoch, train_data, train_labels, use_data_len=32561):
    """Run one training epoch over the first *use_data_len* samples.

    Relies on the module-level ``model``, ``optimizer`` and ``args`` globals.
    """
    model.train() # set to training mode
    batch_idx = 1
    for (_data, _target) in generate_data(train_data[:use_data_len], train_labels[:use_data_len], batchSize=args.batch_size, shuffle=True):
        data = torch.from_numpy(_data)
        target = torch.from_numpy(_target).long()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model.forward(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom; on
            # modern PyTorch this should be loss.item() -- confirm version.
            print('Train Epoch: {} [Iteration {}] [{:5d}/{} ({:2d}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx, batch_idx * len(data), use_data_len,
                int(100. * batch_idx * len(data) / use_data_len), loss.data[0]))
        batch_idx += 1
def test(epoch, test_data, test_labels):
    """Evaluate on the full test set and print average loss and accuracy.

    Relies on the module-level ``model`` and ``args`` globals.
    """
    model.eval() # set to evaluation mode
    test_loss = 0
    correct = 0
    for (data, target) in generate_data(test_data, test_labels,
                                        batchSize=args.batch_size, shuffle=True):
        data = torch.from_numpy(data)
        target = torch.from_numpy(target).long()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # NOTE(review): volatile=True is the pre-0.4 no-grad idiom; modern
        # PyTorch uses torch.no_grad() -- confirm target version.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model.forward(data)
        test_loss += F.nll_loss(output, target).data[0]
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()
    # NOTE(review): self-assignment below is a no-op.
    test_loss = test_loss
    test_loss /= test_data.shape[0] # loss function already averages over batch size
    print('\nEpoch {} Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
        epoch, test_loss, correct, test_data.shape[0], 100. * correct / test_data.shape[0]))
def go():
    """Entry point: load the train/test splits and run the training loop."""
    tr_x, tr_y = proc.get_data("train")
    te_x, te_y = proc.get_data("test")
    for ep in range(1, args.epochs + 1):
        train(ep, tr_x, tr_y, 32561)
        test(ep, te_x, te_y)
if __name__ == '__main__':
go()
| StarcoderdataPython |
3249312 | #!/usr/bin/env python3
import os, sys
import pyqrcode, yaml
BASEURL = "http://knizky.cf/#"
OUT_DIR = sys.argv[1] if len(sys.argv) > 1 else '.'
# safe_load avoids arbitrary-object construction on untrusted YAML and works
# on PyYAML >= 5, where plain yaml.load() without a Loader is deprecated
# (and a TypeError on PyYAML >= 6). The with-block also closes the file.
with open("_data/books.yml", "r") as fh:
    books = yaml.safe_load(fh)
# Render one QR code PNG per book id, linking to its page.
for book in books:
    qr = pyqrcode.create(BASEURL + book)
    qr.png(os.path.join(OUT_DIR, book + ".png"), scale=10)
| StarcoderdataPython |
159096 | <reponame>RosettaCommons/jade2<filename>jade2/deep_learning/torch/lightning_modules/__init__.py<gh_stars>1-10
from .GraphTask import *
from .GeneralTask import * | StarcoderdataPython |
3398235 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
from . import Indicator, arctan, SEED_ZERO
import collections
from math import atan
import numpy as np
RAD2DEG = 180.0 / (4.0 * atan(1))
class mama(Indicator):
'''
Quoting Ehlers: "The MESA Adaptive Moving Average (MAMA) adapts to price
movement in an entirely new and unique way. The adapation is based on the
rate change of phase as measured by the Hilbert Transform Discriminator"
Formula:
- The formula is overly complex. See the linked PDF from Ehlers himself
See:
- https://www.mesasoftware.com/papers/MAMA.pdf
'''
group = 'overlap'
alias = 'MAMA', 'MesaAdaptiveMovingAverage'
inputs = 'high', 'low'
allowinputs = 1
outputs = 'mama', 'fama'
params = (
('fastlimit', 0.5, 'Fast Limit'),
('slowlimit', 0.05, 'Fast Limit'),
)
LOOKBACK_TOTAL = 33
LOOKBACK_SMOOTH = 4
LOOKBACK_HT = 7
LOOKBACK_HT_SKIP = 0 # skip before applying ht
LOOKBACK_SMOOTH_EXTRA = LOOKBACK_HT - LOOKBACK_SMOOTH
LOOKBACK_REST = LOOKBACK_TOTAL - LOOKBACK_HT
def __init__(self):
# Choose p0, depending on passed number o inputs
p0 = (self.i.high + self.i.low) / 2.0 if len(self.i) > 1 else self.i0
# smooth p0
p0smooth = (4.0*p0 + 3.0*p0(-1) + 2.0*p0(-2) + 1.0*p0(-3)) / 10.0
# Add the needed lookback for HT, not yet offered by the smoothing
p0smooth._period(self.LOOKBACK_SMOOTH_EXTRA)
# zero-fill to ensure ht lookbacks deliver results (nan would break)
i1, q1 = p0smooth(val=0.0), p0smooth(val=0.0)
# use applymulti to get more than one result back
i1, q1 = p0smooth._applymulti(self._periodize, i1, q1, raw=True)
# i1 carries a -3 from detrender and q1 a -6 (LOOKBACK_HT - 1). Add the
# largest dominant to q1, because both work together now
q1._period(self.LOOKBACK_HT - 1)
# where i1 == 0 fore arctan to return also 0, with Ehlers formula
atanq1num = q1.mask(i1 == 0.0, 0.0)
phase = RAD2DEG * arctan(atanq1num / i1).fillna(0.0)
# ta-lib cals deltaphase starting as soon as phase has 1 value in spite
# of using phase(-1) which would be void, but it is considered as
# 0.0. Reduce the phase period by 1 and set the initial value to 0.
# This behavior matches all other calculations in _speriodize
phase._period(-1, val=0.0) # at minper rel idx 0 => 0.0
deltaphase = (phase(-1) - phase).clip(lower=1.0)
alpha = (self.p.fastlimit / deltaphase).clip(lower=self.p.slowlimit)
# span set to use p0, but let alpha dominate if period is greater
_mama = p0._ewm(alpha=alpha, span=1, _seed=SEED_ZERO)._mean()
# Add no span, to let the fama calculation use the entire _mama range
_fama = _mama._ewm(alpha=alpha*0.5, _seed=SEED_ZERO)._mean()
# _periodize - no auto period. Add non-count period, filled with nan
# removing what was already added to q1
_mama._period(self.LOOKBACK_REST - self.LOOKBACK_HT + 1, val=np.nan)
_fama._period(self.LOOKBACK_REST - self.LOOKBACK_HT + 1, val=np.nan)
self.o.mama = _mama
self.o.fama = _fama
def _ht(self, x, adjperiod, i):
ht0 = 0.0962*x[i] + 0.5769*x[i - 2] - 0.5769*x[i - 4] - 0.0962*x[i - 6]
return ht0 * adjperiod
    def _periodize(self, price, i1, q1):
        """Ehlers Hilbert-transform loop: fill ``i1``/``q1`` in place.

        Runs the detrender + homodyne discriminator over ``price`` and
        writes the in-phase/quadrature components into the preallocated
        ``i1``/``q1`` buffers (assumed raw numpy arrays — applied with
        ``raw=True`` by the caller). Returns the same two buffers.
        """
        # period 7 needed in _periodize for hilbert transform
        # p0smooth has: 4 and needs additional 3 before applying _periodize
        # actual "return" values to be used in __init__ for phase calculations
        LOOKBACK = self.LOOKBACK_HT
        LOOKIDX = LOOKBACK - 1
        LOOKSTART = LOOKIDX + self.LOOKBACK_HT_SKIP
        # circular buffer for the detrender history used by the HT taps
        detrender = collections.deque([0.0] * LOOKBACK, maxlen=LOOKBACK)
        # the first LOOKBACK elements of the input are ignored in the ta-lib
        # calculations for the detrender. Nullify them.
        price[0:LOOKSTART] = 0.0
        # running state: smoothed I/Q, homodyne real/imag parts, dominant period
        i2, q2, re, im, period = 0.0, 0.0, 0.0, 0.0, 0.0
        for i in range(LOOKSTART, len(price)):
            adjperiod = 0.075*period + 0.54  # adj period_1 for ht transform
            detrender.append(self._ht(price, adjperiod, i))
            # New detrender val pushed, append to the right -1 is actual value
            i1[i] = i10 = detrender[-4]  # 3 periods ago: -2, -3, -4
            q1[i] = q10 = self._ht(detrender, adjperiod, LOOKIDX)
            # advance I/Q by 90 degrees via another HT pass
            ji = self._ht(i1, adjperiod, i)
            jq = self._ht(q1, adjperiod, i)
            i21, q21 = i2, q2  # keep for next round
            i2 = i10 - jq
            q2 = q10 + ji
            i2 = 0.2*i2 + 0.8*i21  # smooth
            q2 = 0.2*q2 + 0.8*q21  # smooth
            # homodyne discriminator: conjugate product of current and prior I/Q
            re0 = i2*i21 + q2*q21
            im0 = i2*q21 - q2*i21
            re = 0.2*re0 + 0.8*re  # smooth
            im = 0.2*im0 + 0.8*im  # smooth
            period1 = period
            # dominant cycle from the phase angle; keep previous if degenerate
            period = 360 / (RAD2DEG*atan(im / re)) if re and im else period1
            # clamp period change rate and absolute range, then smooth
            period = min(period, period1 * 1.5)
            period = max(period, period1 * 0.67)
            period = max(period, 6)
            period = min(period, 50)
            period = 0.2*period + 0.8*period1  # smooth
        # return the results
        return i1, q1
| StarcoderdataPython |
3244837 | <gh_stars>0
# x_5_6
#
# Exercise: using a for loop, append the double of each number in
# "numbers" to the list "numbers_x_2".
# (Previously the loop was missing, so an empty list was printed.)
numbers = [2, 5, 7, 1, 3, 8, 1, 8, 2, 3]
numbers_x_2 = []
for number in numbers:
    numbers_x_2.append(number * 2)
print(numbers_x_2)
| StarcoderdataPython |
1699419 | <gh_stars>0
import sys
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from Bio import AlignIO, SeqIO
from Bio.Seq import Seq
from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
# Command-line interface: input alignment (FASTA) and output consensus path.
alignment_file = sys.argv[1]
output_file = sys.argv[2]

alignment = AlignIO.read(alignment_file, "fasta")
alignment_array = np.array(alignment)

# Consensus computation: for each column keep the most frequent symbol.
# np.unique returns sorted values, so ties resolve to the lexicographically
# smallest symbol, and gap characters ('-') are counted like any other
# symbol — same behavior as before.
# (Fix: the previous version rebuilt the list with ``lst + [x]`` on every
# column, which is O(n**2); append via a comprehension instead.)
consensus_chars = [
    values[np.argmax(counts)]
    for values, counts in (
        np.unique(alignment_array[:, ii], return_counts=True)
        for ii in range(alignment_array.shape[-1])
    )
]
consensus_sequence = Seq("".join(consensus_chars))
consensus_sequence = SeqRecord(seq=consensus_sequence, id=f"Consensus_{alignment_file}", name="", description="")

with open(output_file, "w") as handle:
    SeqIO.write([consensus_sequence], handle, "fasta")
# Distance to consensus sequence
# distance_matrix = (alignment_array != consensus_sequence)
# distance = np.sum(distance_matrix, axis=1, dtype=int) / distance_matrix.shape[-1]
| StarcoderdataPython |
167707 | <reponame>cocoaaa/ReprLearn<filename>reprlearn/evaluator/qualitative.py
from pathlib import Path
from typing import List, Set, Dict, Tuple, Optional, Iterable, Mapping, Union, Callable, TypeVar
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch import linalg as LA
import torchvision
from torchvision.utils import make_grid
import pytorch_lightning as pl
from src.data.transforms.transforms import LinearRescaler
from src.data.transforms.functional import unnormalize
from src.models.plmodules import BiVAE
from src.visualize.utils import show_timg, show_timgs, show_batch
from src.utils.misc import now2str, info, get_next_version_path
def show_recon(model: pl.LightningModule,
               dm: pl.LightningDataModule,
               tb_writer: SummaryWriter = None,
               global_step: int = 0,
               unnorm: bool = True,
               to_show: bool = True,
               verbose: bool = False):
    """Show and optionally TensorBoard-log reconstructions of one batch.

    For each available dataloader (``test`` if the datamodule has a
    ``test_ds``, otherwise ``train`` and ``val``) one batch is pushed
    through ``model.reconstruct`` and displayed/logged next to the inputs.

    :param model: trained module exposing ``reconstruct`` and ``device``.
    :param dm: datamodule providing dataloaders and ``train_mean``/``train_std``.
    :param tb_writer: optional TensorBoard writer for an inputs|recons grid.
    :param global_step: step index for the TensorBoard entry.
    :param unnorm: undo train-statistics normalization before display/logging.
    :param to_show: display the images (matplotlib).
    :param verbose: print tensor summaries via ``info``.

    Bug fix: the TensorBoard branch previously referenced ``x_unnormed``
    unconditionally, raising NameError when called with ``unnorm=False``;
    an unused ``recon_grid`` was also computed. Both display and logging
    now consistently respect ``unnorm``.
    """
    model.eval()
    cmap = 'gray' if dm.size()[0] == 1 else None
    train_mean, train_std = dm.train_mean, dm.train_std
    dl_modes = ['test'] if hasattr(dm, 'test_ds') else ['train', 'val']

    with torch.no_grad():
        for mode in dl_modes:
            dl = getattr(dm, f"{mode}_dataloader")()
            batch = next(iter(dl))
            if isinstance(batch, dict):
                # dict-style dataset sample; labels are not needed here
                x = batch['img']
            else:
                x, _ = batch
            x = x.to(model.device)
            x_recon = model.reconstruct(x)

            # Move to cpu for visualization
            x = x.cpu()
            x_recon = x_recon.cpu()
            if verbose:
                info(x, f"{mode}_x")
                info(x_recon, f"{mode}_x_recon")

            # Tensors actually displayed/logged: optionally unnormalized.
            _x, _x_recon = x, x_recon
            if unnorm:
                _x = unnormalize(x, train_mean, train_std)
                _x_recon = unnormalize(x_recon, train_mean, train_std)
                if verbose:
                    print("===After unnormalize===")
                    info(_x, f"{mode}_x_unnormed")
                    info(_x_recon, f"{mode}_x_recon_unnormed")

            if to_show:
                show_timgs(_x, title=f"Input: {mode}", cmap=cmap)
                # rescale recons to [0, 1] for better visual contrast
                show_timgs(LinearRescaler()(_x_recon), title=f"Recon(linearized): {mode}", cmap=cmap)

            # Log input-recon grid to TB
            if tb_writer is not None:
                input_grid = make_grid(_x)  # (C, gridh, gridw)
                normed_recon_grid = make_grid(LinearRescaler()(_x_recon))
                grid = torch.cat([input_grid, normed_recon_grid], dim=-1)  # inputs | recons
                tb_writer.add_image(f"{mode}/recons", grid, global_step=global_step)
# ------------------------------------------------------------------------
# Latent Space
# ------------------------------------------------------------------------
def get_class_reps(dl: DataLoader) -> Dict[Union[str, int], torch.Tensor]:
    """Collect one representative image per digit class from ``dl``'s dataset.

    Iterates the dataset in order and keeps the first image seen for each
    digit label, stopping once 10 distinct classes have been collected.

    :param dl: dataloader whose dataset items are dicts with at least
        the keys ``'img'`` and ``'digit'``.
    :return: mapping from digit label (stringified) to its first image.

    Change: the unused ``label_s = batch['color']`` local was removed.
    """
    class_reps = {}
    for i in range(len(dl.dataset)):
        # Stop as soon as all 10 digit classes have a representative.
        if len(class_reps) >= 10:
            break
        batch = dl.dataset[i]
        x = batch['img']
        label_c = batch['digit']  # digit/content label
        if isinstance(label_c, torch.Tensor):
            label_c = label_c.item()
        label_c = str(label_c)
        if label_c in class_reps:
            continue
        class_reps[label_c] = x
    return class_reps
def evaluate_transfers(model: BiVAE,
                       constant_code: str,  # "c" or "s"
                       class_reps: Dict[str, torch.Tensor],
                       log_dir: Path,
                       train_mean: List,  # to unnormalize output tensor
                       train_std: List,  # to unnormalize
                       linearlize: bool = True):
    """Run content- or style-transfer over all pairs of class representatives.

    :param model: Trained BiVAE model
    :param constant_code: 'c' to hold content fixed, 's' to hold style fixed
    :param class_reps: a dictionary of string class_id <-> a single 3dim Tensor (C,H,W)
    :param log_dir: Path to the model.logger's log_dir (Eg. '..../exp_name/version7')
    :param train_mean: Original datamodule's training set's mean
    :param train_std: Original datamodule's training set std
    :param linearlize: (bool). If true, linearlize the output image to range [0,1] for better viz. contrast
    :return: list of per-class-id image grids (one 3-dim tensor per id)
    """
    assert constant_code in ['c', 's'], "constant_code must be 'c' or 's' (for content, style, respectively"
    model.eval()
    ids = sorted(class_reps.keys())
    grids = {}
    # For every ordered pair (id_a, id_b): encode both images, keep one
    # factor (content or style) from the first sample and the other factor
    # from the second, then decode the recombined code.
    for i, id_a in enumerate(ids):
        grids[id_a] = []
        for j, id_b in enumerate(ids):
            img_a = class_reps[id_a]
            img_b = class_reps[id_b]
            img_pair = torch.stack([img_a, img_b], dim=0)
            unnormed_img_pair = unnormalize(img_pair, train_mean, train_std)
            with torch.no_grad():
                dict_qparams = model.encode(img_pair)
                dict_z = model.rsample(dict_qparams)
                if constant_code == 'c':
                    # Fix content to c[0]
                    content = dict_z["c"][[0]]
                    style = dict_z["s"][[1]]
                elif constant_code == 's':
                    # Fix style to s[0]
                    content = dict_z["c"][[1]]
                    style = dict_z["s"][[0]]
                test_dict_z = {"c": content, "s": style}
                # Reconstruct
                z = model.combine_content_style(test_dict_z)
                recons = model.decode(z)
                # Optional: for better viz, unnormalize or/and linearlize
                unnormed_recons = unnormalize(recons, train_mean, train_std)
                if linearlize:
                    # NOTE(review): the linearized ``img_pair`` is never used
                    # afterwards (the grid below uses ``unnormed_img_pair``);
                    # original carried a '#todo' here — confirm intent.
                    img_pair = LinearRescaler()(img_pair)  # todo
                    unnormed_recons = LinearRescaler()(unnormed_recons)
                grid = make_grid(
                    torch.cat([unnormed_img_pair, unnormed_recons], dim=0)
                )  # (3, gridh, gridw)
                grids[id_a].append(grid)
    # grids[id_a] is a list of each row result, i.e length = 10
    # Save each content's transferred image as:
    # log_dir/content_transfers/{next_version}/"content_transfers_{content-id}.png"
    constant_type = "content" if constant_code == "c" else "style"
    save_dir = get_next_version_path(log_dir, name=f'{constant_type}_transfers')
    if not save_dir.exists():
        save_dir.mkdir()
        print("Created: ", save_dir)
    all_recons = []
    for id_a, recons in grids.items():
        recons = torch.cat(recons, dim=1)  # recons is a 3dim tensor (3, gridh, gridw) for each id_a
        all_recons.append(recons)
        # Show and save this id's transfers
        save_path = save_dir/f'{constant_type}_transfers_{id_a}.png'
        show_timg(recons,
                  title=id_a,
                  save_path=save_path,
                  )
    # Put all per-id transfer results into a single list (one grid per id)
    return all_recons  # make_grid(all_recons, nrow=10, padding=30)
def save_content_transfers(model: BiVAE, *args, **kwargs):
    """Run :func:`evaluate_transfers` with the content code held fixed.

    :param model: Trained BiVAE model
    :param args/kwargs: forwarded to :func:`evaluate_transfers`
        (class_reps, log_dir, train_mean, train_std, linearlize).
    :return: list of per-class-id image grids.

    Bug fix: the previous call ``evaluate_transfers(model,
    constant_code='c', *args, **kwargs)`` raised ``TypeError: got multiple
    values for argument 'constant_code'`` whenever the remaining arguments
    were passed positionally, because ``*args`` unpacks into positional
    slots before keyword arguments bind. Passing ``'c'`` positionally
    avoids the clash.
    """
    return evaluate_transfers(model, 'c', *args, **kwargs)
def save_style_transfers(model: BiVAE, *args, **kwargs):
    """Run :func:`evaluate_transfers` with the style code held fixed.

    :param model: Trained BiVAE model
    :param args/kwargs: forwarded to :func:`evaluate_transfers`
        (class_reps, log_dir, train_mean, train_std, linearlize).
    :return: list of per-class-id image grids.

    Bug fix: the previous call ``evaluate_transfers(model,
    constant_code='s', *args, **kwargs)`` raised ``TypeError: got multiple
    values for argument 'constant_code'`` whenever the remaining arguments
    were passed positionally, because ``*args`` unpacks into positional
    slots before keyword arguments bind. Passing ``'s'`` positionally
    avoids the clash.
    """
    return evaluate_transfers(model, 's', *args, **kwargs)
def run_both_transfers(model: BiVAE, *args, **kwargs):
    """Run both the content-transfer and the style-transfer evaluation.

    All extra positional/keyword arguments are forwarded unchanged to
    :func:`save_content_transfers` and :func:`save_style_transfers`
    (class_reps, log_dir, train_mean, train_std, linearlize).

    :param model: Trained BiVAE model
    :return: 2-tuple ``(content_transfer_results, style_transfer_results)``.
    """
    content_results = save_content_transfers(model, *args, **kwargs)
    style_results = save_style_transfers(model, *args, **kwargs)
    return (content_results, style_results)
def compute_avg_codes(
        model: pl.LightningModule,
        dm: pl.LightningDataModule,
        batch_size: Optional[int] = None,
        mode: str = 'train'
) -> Dict[str, torch.Tensor]:
    """Compute per-class statistics of the latent codes on one batch.

    Given a model and datamodule: fetch one (shuffled) batch from the
    ``mode`` dataset, encode it to the posterior means ``mu_qc``/``mu_qs``,
    sample content/style codes, and compute, per content id and per style
    id, the dimension-wise mean/min/max of the posterior means plus the
    mean of the sampled codes.

    :param model: encoder model exposing ``encode``/``rsample``.
    :param dm: datamodule with ``n_styles``/``n_contents``/``unpack`` and
        ``{mode}_ds`` datasets.
    :param batch_size: defaults to the full dataset length.
    :param mode: which dataset to draw from ('train', 'val', ...).
    :return: dict with keys ``content_avgs``, ``mu_qc_avgs``, ``mu_qc_mins``,
        ``mu_qc_maxs``, ``style_avgs``, ``mu_qs_avgs``, ``mu_qs_mins``,
        ``mu_qs_maxs``.
    """
    n_styles = dm.n_styles
    n_contents = dm.n_contents
    ds = getattr(dm, f"{mode}_ds")
    batch_size = batch_size or len(ds)
    dl = DataLoader(ds, batch_size=batch_size, num_workers=16, pin_memory=True, shuffle=True)
    with torch.no_grad():
        batch = next(iter(dl))
        x, label_c, label_s = dm.unpack(batch)
        dict_qparams = model.encode(x)
        mu_qc = dict_qparams['mu_qc']
        mu_qs = dict_qparams['mu_qs']
        dict_z = model.rsample(dict_qparams)
        c = dict_z['c']
        s = dict_z['s']
        # NOTE(review): ``z`` is computed but never used below — confirm
        # whether combine_content_style has a needed side effect.
        z = model.combine_content_style(dict_z)
        # mean norm of each code
        norm_c = LA.norm(c, dim=-1)
        norm_s = LA.norm(s, dim=-1)
        print("Avg. norm of content content: ", norm_c.mean())
        print("Avg. norm of style content: ", norm_s.mean())
        # Per-content-id statistics of mu_qc and of the sampled codes.
        content_avgs = {}
        mu_qc_avgs = {}
        mu_qc_mins = {}
        mu_qc_maxs = {}
        for content_id in range(n_contents):
            this_mu_qc = mu_qc[label_c == content_id]
            mu_qc_avgs[content_id] = this_mu_qc.mean(dim=0)
            mu_qc_mins[content_id] = this_mu_qc.min(dim=0).values
            mu_qc_maxs[content_id] = this_mu_qc.max(dim=0).values
            # mean of samples
            mean_c = c[label_c == content_id].mean(dim=0)
            content_avgs[content_id] = mean_c
        # Per-style-id statistics of mu_qs and of the sampled codes.
        style_avgs = {}
        mu_qs_avgs = {}
        mu_qs_mins = {}
        mu_qs_maxs = {}
        for style_id in range(n_styles):
            this_mu_qs = mu_qs[label_s == style_id]
            mu_qs_avgs[style_id] = this_mu_qs.mean(dim=0)
            mu_qs_mins[style_id] = mu_qs[label_s == style_id].min(dim=0).values
            mu_qs_maxs[style_id] = mu_qs[label_s == style_id].max(dim=0).values
            # mean of samples
            mean_s = s[label_s == style_id].mean(dim=0)
            style_avgs[style_id] = mean_s
    return {"content_avgs": content_avgs,
            "mu_qc_avgs": mu_qc_avgs,
            "mu_qc_mins": mu_qc_mins,
            "mu_qc_maxs": mu_qc_maxs,
            "style_avgs": style_avgs,
            "mu_qs_avgs": mu_qs_avgs,
            "mu_qs_mins": mu_qs_mins,
            "mu_qs_maxs": mu_qs_maxs}
def get_traversals(
    vec: torch.Tensor,
    dim_i: int,
    min_value: float,
    max_value: float,
    n_samples: int
) -> torch.Tensor:
    """Linearly sweep one dimension of a 1-dim vector.

    Returns a batch of ``n_samples`` copies of ``vec`` in which the
    ``dim_i``-th entry is replaced by ``n_samples`` evenly spaced values
    from ``min_value`` to ``max_value``.

    Returns
    -------
    torch.Tensor of shape ``(n_samples, len(vec))``.
    """
    sweep = torch.linspace(min_value, max_value, n_samples)
    out = torch.zeros((n_samples, len(vec)))
    for row, value in zip(out, sweep):
        row[:] = vec        # start from a copy of the base vector
        row[dim_i] = value  # then overwrite the swept dimension
    return out
def run_content_traversal(
        model: pl.LightningModule,
        content_code: torch.Tensor,
        style_code: torch.Tensor,
        traversal_start: Union[float, Iterable[float]],
        traversal_end: Union[float, Iterable[float]],
        n_traversals: int,
        show: bool = True,
        title: str = '',
        to_save: bool = True,
        out_path: Optional[Path] = None,
        verbose: bool = False,
) -> torch.Tensor:
    """Given a fixed style code, traverse the content code each dimension $j$
    independently, from `traversal_start[j]` to `traversal_end[j]` at
    `n_traversals` steps.

    Parameters
    ----------
    model : pl.LightningModule
    content_code : torch.Tensor; shape (dim_content, )
    style_code : torch.Tensor; shape (dim_style,)
    traversal_start : float or Iterable of length dim_content
        starting point of the traversal per content dimension
        (a scalar is broadcast to all dimensions)
    traversal_end : float or Iterable of length dim_content
        ending point of the traversal per content dimension
    n_traversals : int
        how many traversal steps per dimension
    show : bool
        True to show the result in a grid where $j$th col is the content dim $j$,
        and the $i$th row shows the $i$th step in that direction.
    title : str

    NOTE(review): despite the annotated return type, this function currently
    returns ``None`` — neither ``recons`` nor the assembled ``grids`` tensor
    is returned. Confirm the intended return value before changing it.
    """
    # Remember the training flag so it can be restored at the end.
    is_training = model.training
    model.eval()
    content_dim = content_code.shape[-1]
    # Broadcast scalar start/end bounds to one value per content dimension
    # (indexing a scalar raises TypeError, which signals "not a sequence").
    try:
        traversal_start[0]
    except TypeError:
        traversal_start = torch.zeros(content_dim).fill_(traversal_start)
    try:
        traversal_end[0]
    except TypeError:
        traversal_end = torch.zeros(content_dim).fill_(traversal_end)
    with torch.no_grad():
        # Traverse for each dim
        grids = []  # one image grid per content dimension
        for dim_i in range(content_dim):
            min_dim_i = traversal_start[dim_i]
            max_dim_i = traversal_end[dim_i]
            if verbose:
                print(min_dim_i, max_dim_i)
            c_traversals = get_traversals(content_code, dim_i, min_dim_i, max_dim_i, n_traversals)
            # Pass to the decoder with the style code repeated per step
            dict_z = {
                "c": c_traversals,
                "s": style_code.repeat((n_traversals, 1))
            }
            z = model.combine_content_style(dict_z)
            recons = model.decode(z)
            grid = torchvision.utils.make_grid(recons, nrow=1)  # Caveat: nrow is num of colms!
            grids.append(grid)
        # Concatenate per-dimension columns side by side
        grids = torch.cat(grids, dim=2)
    if show:
        show_timg(grids, title=title)
    if to_save:
        out_path = out_path or Path(f'./content_traversal_{now2str()}.png')
        torchvision.utils.save_image(grids, out_path)
    # Restore the caller's train/eval mode.
    model.train(is_training)
def run_style_traversal(
        model: pl.LightningModule,
        content_code: torch.Tensor,
        style_code: torch.Tensor,
        traversal_start: Union[float, Iterable[float]],
        traversal_end: Union[float, Iterable[float]],
        n_traversals: int,
        show: bool = True,
        title: str = '',
        to_save: bool = True,
        out_path: Optional[Path] = None,
        verbose: bool = False,
) -> torch.Tensor:
    """Given a fixed content code, traverse the style code each dimension $j$
    independently, from `traversal_start[j]` to `traversal_end[j]` at
    `n_traversals` steps.

    Parameters
    ----------
    model : pl.LightningModule
    content_code : torch.Tensor; shape (dim_content, )
    style_code : torch.Tensor; shape (dim_style,)
    traversal_start : float or Iterable of length dim_style
        starting point of the traversal per style dimension
        (a scalar is broadcast to all dimensions)
    traversal_end : float or Iterable of length dim_style
        ending point of the traversal per style dimension
    n_traversals : int
        how many traversal steps per dimension
    show : bool
        True to show the result in a grid where $j$th col is the style dim $j$,
        and the $i$th row shows the $i$th step in that direction.
    title : str

    Bug fix: the per-dimension ``print`` ignored the ``verbose`` flag
    (inconsistent with :func:`run_content_traversal`); it is now gated.
    NOTE(review): like the content version, this currently returns ``None``
    despite the annotated return type — confirm the intended return value.
    """
    # Remember the training flag so it can be restored at the end.
    is_training = model.training
    model.eval()
    style_dim = style_code.shape[-1]
    # Broadcast scalar start/end bounds to one value per style dimension.
    try:
        traversal_start[0]
    except TypeError:
        traversal_start = torch.zeros(style_dim).fill_(traversal_start)
    try:
        traversal_end[0]
    except TypeError:
        traversal_end = torch.zeros(style_dim).fill_(traversal_end)
    with torch.no_grad():
        # Traverse for each dim
        grids = []  # one image grid per style dimension
        for dim_i in range(style_dim):
            min_dim_i = traversal_start[dim_i]
            max_dim_i = traversal_end[dim_i]
            if verbose:
                print(min_dim_i, max_dim_i)
            traversals = get_traversals(style_code, dim_i, min_dim_i, max_dim_i, n_traversals)
            # Pass to the decoder with the content code repeated per step
            dict_z = {
                "c": content_code.repeat((n_traversals, 1)),
                "s": traversals,
            }
            z = model.combine_content_style(dict_z)
            recons = model.decode(z)
            grid = torchvision.utils.make_grid(recons, nrow=1)  # Caveat: nrow is num of colms!
            grids.append(grid)
        # Concatenate per-dimension columns side by side
        grids = torch.cat(grids, dim=2)
    if show:
        show_timg(grids, title=title)
    if to_save:
        out_path = out_path or Path(f'./style_traversal_{now2str()}.png')
        torchvision.utils.save_image(grids, out_path)
    # Restore the caller's train/eval mode.
    model.train(is_training)
| StarcoderdataPython |
33933 | <reponame>jmbjorndalen/pycsp_classic<gh_stars>0
#!/usr/bin/env python
# -*- coding: latin-1 -*-
from common import *
from pycsp import *
from pycsp.plugNplay import *
from pycsp.net import *
@process
def test1():
    """Poison scenario 1: the peer poisons 'foo1' after our first write."""
    print("Test1")
    # Block until the peer signals its named channels are registered.
    waitForSignal()
    c = getNamedChannel("foo1")
    print("- Trying to write to channel")
    print("-", c.write("I'm here"))
    print("- Trying next write (should be poisoned)")
    # Expected to raise a poison exception; reaching the line below
    # therefore indicates a failed test.
    c.write("I'm here")
    print("---poison failed !!!!!")
@process
def test2():
    """Poison scenario 2: write once, then poison the channel ourselves."""
    print("Test2")
    # Block until the peer signals its named channels are registered.
    waitForSignal()
    c = getNamedChannel("foo2")
    print("- Trying to write to channel")
    c.write("I'm here")
    time.sleep(2)
    print("- poisoning channel method")
    time.sleep(1)
    # Poison the read end: subsequent operations on the channel should
    # raise poison exceptions in the peer.
    poisonChannel(c.read)
@process
def test3():
    """Alt scenario: write on channel a, then select/read on channel b."""
    print("Test3")
    # Block until the peer signals its named channels are registered.
    waitForSignal()
    ca = getNamedChannel("foo3a")
    cb = getNamedChannel("foo3b")
    print("- Trying to write to channel")
    ca.write("I'm here")
    print("- Trying to use Alt on channel b")
    # Alternative.select() returns the ready guard (a callable);
    # calling it performs the actual channel operation.
    alt = Alternative(cb.read)
    ret = alt.select()
    print("- returned from alt.select():", ret)
    print("- reading :", ret())
    print("- Done")
def waitForSignal():
    """Block until the other side has registered its named channels.

    The peer writes one token on the module-level ``ctrl`` channel once
    its channels are available; the value itself is discarded.
    """
    # Fix: the previous ``global ctrl`` declaration was unnecessary —
    # the name is only read, never rebound, so normal name lookup
    # already finds the module global.
    ctrl.read()
# Control channel used by waitForSignal() to synchronize with the peer.
ctrl = getNamedChannel("foo")
# Run the three poison/alt scenarios one after another.
Sequence(test1())
Sequence(test2())
Sequence(test3())
# Wait for the peer's final handshake before declaring success.
ctrl.read()
print("all tests done")
time.sleep(1)
| StarcoderdataPython |
#!/usr/bin/env bash
# Convert every *.lp.bz2 file under the directory given as $1 into a .gr
# graph file in ../gr/subgraphs/ using ./lp2dgf.py.
#
# Fixes over the previous version:
#   - ERR_MSG was never defined, so the ERR trap printed an empty line.
#   - Unquoted $1/$file broke on paths containing whitespace.
#   - `for f in $(find ...)` word-splits filenames; stream them
#     NUL-delimited instead (process substitution keeps the loop in the
#     main shell so `exit 1` still terminates the script).
set -u

err_msg='ERROR stopping...'
trap 'ret=$?; printf "%s\n" "$err_msg" >&2; exit "$ret"' ERR

while IFS= read -r -d '' file; do
    echo "$file"
    outputname="../gr/subgraphs/$(basename "$file").gr"
    if ! ./lp2dgf.py -f "$file" > "$outputname"; then
        echo 'ERROR stopping...'
        exit 1
    fi
done < <(find "$1" -name '*.lp.bz2' -print0)
| StarcoderdataPython |
# Sample unsorted input for the demo call at the bottom of the file.
data=[6,5,3,1,8,7,2,4]
def merge_sort(array):
    """Sort ``array`` in place with top-down merge sort and return it.

    The list is split in half, each half is sorted recursively, and the
    sorted halves are merged back into ``array``. Runs in O(n log n) time
    with O(n) extra space for the temporary halves. Ties take the element
    from the left half first, so equal elements keep their relative order.

    Fixes: removed leftover debug ``print`` calls, and the trailing
    ``return array`` now sits at function level so lists of length 0 or 1
    are also returned (instead of ``None``).
    """
    if len(array) > 1:
        # Split at the midpoint.
        mid = len(array) // 2
        left_half = array[:mid]
        right_half = array[mid:]
        # Recursively sort both halves (in place).
        merge_sort(left_half)
        merge_sort(right_half)
        # Merge: repeatedly copy the smaller head back into ``array``.
        left_idx = right_idx = merge_idx = 0
        while right_idx < len(right_half) and left_idx < len(left_half):
            if right_half[right_idx] < left_half[left_idx]:
                array[merge_idx] = right_half[right_idx]
                right_idx += 1
            else:
                array[merge_idx] = left_half[left_idx]
                left_idx += 1
            merge_idx += 1
        # Drain whichever half still has elements.
        while right_idx < len(right_half):
            array[merge_idx] = right_half[right_idx]
            right_idx += 1
            merge_idx += 1
        while left_idx < len(left_half):
            array[merge_idx] = left_half[left_idx]
            left_idx += 1
            merge_idx += 1
    return array
# Demo: sort the sample data (also sorts ``data`` in place).
d=merge_sort(data)
190041 | # -*- coding: utf-8 -*-
import os
# Count opcodes across the per-function dump files of each coreutils binary
# and report the average number of opcodes per valid function.
bin_names = 'md5sum, sha512sum, comm, uniq, nl, b2sum, sum, wc, sha256sum, ptx, sha1sum, join, dir, shuf, tail, tsort, ls, sort, base64, base32'
bin_names = bin_names.split(', ')

total_opcodes = 0
num_valid_funcs = 0
for name in bin_names:
    op_dir = './%s_ops_info/' % name
    # Fix: removed the unused ``html_dir`` variable.
    for func_f in os.listdir(op_dir):
        # Function dumps are named like '<func>'; skip everything else.
        if not func_f.startswith('<'):
            continue
        print('file: ' + op_dir + func_f)
        with open(op_dir + func_f, 'r') as f:
            opcodes = f.read()
        # Ignore near-empty dumps (less than 20 characters of content).
        if len(opcodes) < 20:
            continue
        num_valid_funcs += 1
        total_opcodes += len(opcodes.split())

# Fix: the first label was missing its ': ' separator, and the average
# previously raised ZeroDivisionError when no valid function was found.
print('total opcodes: ' + str(total_opcodes))
print('functions: ' + str(num_valid_funcs))
if num_valid_funcs:
    print('avg per function: ' + str(total_opcodes / num_valid_funcs))
else:
    print('avg per function: n/a (no valid functions found)')
| StarcoderdataPython |
4819663 | <filename>ppms/__init__.py
from astropy.io.ascii import basic, core
from astropy.table import Table, MaskedColumn
from astropy import units as u, constants as c
import numpy as np
import dateutil.parser as dparser
from scipy.ndimage import median_filter
class MaglabHeader(basic.CsvHeader):
    """Header reader for Oxford Instruments Maglab data files.

    Line 0 is skipped; line ``start_line`` holds the column names and the
    following line holds the per-column unit strings.
    """
    comment = r'\s*;'
    write_comment = ';'
    start_line = 1

    def get_cols(self, lines):
        """Initialize ``self.cols`` from the name line and the unit line."""
        lines = self.process_lines(lines)
        start_line = self.start_line
        for i, line in enumerate(lines):
            if i == start_line:
                break
        else:  # for/else: iterator exhausted without reaching start_line
            raise ValueError('No header line found in table')

        self.names = [x.strip() for x in next(self.splitter([line]))]
        # The unit row immediately follows the name row.
        self.units = next(self.splitter([next(lines)]))
        self._set_cols_from_names()
        # Fix: loop variables renamed from ``c, u``, which shadowed the
        # module-level astropy ``constants``/``units`` import aliases.
        for col, unit in zip(self.cols, self.units):
            col.unit = unit
class MaglabData(core.BaseData):
comment = r'\s*;'
write_comment = ';'
start_line = 5
class Maglab(basic.Csv):
    """Reads an Oxford Instruments Maglab data file."""
    _format_name = 'maglab'
    _io_registry_can_write = False  # registered as a reader only
    _description = 'Oxford Instruments Maglab data file reader'
    header_class = MaglabHeader
    data_class = MaglabData
def normalize(table):
    """Return a copy of ``table`` without fully-masked columns.

    A column is dropped only if it is a :class:`MaskedColumn` whose mask
    is True for every row (i.e. it carries no data at all).
    """
    kept_columns = [
        col for col in table.columns.values()
        if not (isinstance(col, MaskedColumn) and col.mask.all())
    ]
    return Table(kept_columns)
class PPMSHeader(basic.CsvHeader):
    """Header reader for Quantum Design PPMS data files.

    Column headers look like ``"Name (unit)"``; the unit token is mapped
    to an astropy-parsable unit string via :attr:`UNITS`.
    """
    # Maps unit strings found in PPMS headers to astropy unit strings.
    # ``None`` means "dimensionless / no unit".
    # Fix: removed a duplicate 'Seconds' entry (it appeared in both the
    # Heat Capacity and ACMS groups with the same value).
    UNITS = {
        # Heat Capacity option units
        'Seconds': 'second',
        'seconds': 'second',
        'Oersted': '0.0001 * T',  # 1 Oe = 1e-4 T (CGS -> SI)
        'Kelvin': 'K',
        'µJ/K': 'uJ/K',
        'µJ/K/K': 'uJ/K/K',
        # ACMS option units
        'sec': 'second',
        'emu': 'erg/gauss',
        'Oe': '0.0001 * T',
        'code': None,
    }
    comment = r'\s*;'
    write_comment = ';'

    def start_line(self, lines):
        """Header (column description) starts right after '[Data]'."""
        return list(lines).index('[Data]') + 1

    def _set_cols_from_names(self):
        """Split ``"Name (unit)"`` headers into names and astropy units."""
        names, units = [], []
        for header in self.names:
            if '(' in header:
                name, unit_str = [x.strip() for x in header.replace(')', '').split('(')]
            else:
                name, unit_str = header.strip(), None
            names.append(name)
            # Translate known unit spellings; keep unknown ones verbatim.
            units.append(self.UNITS.get(unit_str, unit_str))
        self.names = names
        super()._set_cols_from_names()
        for col, unit in zip(self.cols, units):
            if unit:
                col.unit = unit
class PPMSData(basic.CsvData):
    """Data reader for Quantum Design PPMS files."""
    comment = r'\s*;'
    write_comment = ';'

    def start_line(self, lines):
        # First data row: skip the '[Data]' marker plus the header row.
        return list(lines).index('[Data]') + 2
class PPMSOutputter(core.TableOutputter):
    """Table outputter that drops columns containing no values at all."""
    def __call__(self, cols, meta):
        # Keep only columns with at least one non-empty string value,
        # then drop fully-masked columns via normalize().
        cols = [c for c in cols if any(c.str_vals)]
        return normalize(super(PPMSOutputter, self).__call__(cols, meta))
class PPMS(basic.Csv):
    """Reads a Quantum Design PPMS data file."""
    _format_name = 'ppms'
    _io_registry_can_write = False  # registered as a reader only
    _description = 'Quantum Design PPMS data file reader'
    header_class = PPMSHeader
    data_class = PPMSData
    outputter_class = PPMSOutputter
# "formula unit": dimensionless helper unit for per-formula-unit quantities.
fu = u.def_unit(['f.u.', 'formula unit'], u.dimensionless_unscaled)
def acms_legacy(path, volume=None, formula_units=None, mode='acdc'):
    """Reduce and preprocess an ACMS dataset (legacy column schema).

    ..note::
        The colnames were changed. This function still uses the old
        schema; see :func:`acms` for the current one.

    :param path: path of the PPMS ACMS data file.
    :param volume: The sample volume (astropy quantity).
    :param formula_units: The number of formula units of the sample.
    :param mode: Data modes, either 'acdc', 'ac' or 'dc'.
    """
    if volume:
        if not isinstance(volume, u.Quantity):
            raise ValueError('Missing type of volume parameter.')
    source = Table.read(path, format='ascii.ppms')
    # Boolean masks: measure type 0 = DC magnetisation, 5 = AC measurement.
    dc_mask = source['Measure Type'] == 0
    ac_mask = source['Measure Type'] == 5
    # Bug fix: the check previously compared against ``np.sum(~ac_mask)``
    # ("everything that is not AC"), which always equals the DC count when
    # only types 0 and 5 occur — so mismatches were never detected. Compare
    # the AC count directly, as :func:`acms` does.
    if mode == 'acdc' and (np.sum(dc_mask) != np.sum(ac_mask)):
        raise ValueError('Nonequal number of ac ({}) and dc ({}) measurements'.format(np.sum(ac_mask), np.sum(dc_mask)))
    data = Table(masked=False)
    if mode == 'ac':
        data['B'] = source[ac_mask]['Magnetic Field'].to(u.T).round(4)
        data['T'] = source[ac_mask]['Temperature']
    else:
        data['B'] = source[dc_mask]['Magnetic Field'].to(u.T).round(4)
        data['T'] = source[dc_mask]['Temperature']
    if (mode == 'ac') or (mode == 'acdc'):
        data['B_ac'] = source[ac_mask]["Amplitude"].to(u.T)
        data['m_ac'] = source[ac_mask]["M'"]
        data["m'_ac"] = source[ac_mask]["M''"]
        if volume:
            # Convert measured moments to (complex) volume susceptibility.
            H = data['B_ac'].quantity / c.mu0
            M = data['m_ac'].quantity / volume
            M_imag = data["m'_ac"].quantity / volume
            data["χ"] = (M / H).si
            data["χ'"] = (M_imag / H).si
    if (mode == 'dc') or (mode == 'acdc'):
        data['m'] = source[dc_mask]['M-DC']
        if formula_units:
            # calculate magnetic moment per formula unit
            data['m_fu'] = data['m'].to(c.muB) / formula_units
        if volume:
            # calculate magnetisation.
            data['M'] = (data['m'].quantity / volume).si
    data.meta['temperature'] = np.round(data['T'].mean(), 1)
    if volume:
        data.meta['volume'] = volume
    data.meta['z'] = source['Sample Center'].quantity[0].value
    if mode == 'ac' or mode == 'acdc':
        data.meta['frequency'] = source[ac_mask]['Frequency'][0]
    data.meta['path'] = path
    try:
        # Try to extract date information from filepath
        data.meta['date'] = dparser.parse(path, fuzzy=True)
    except ValueError:
        pass
    return data
def acms(path, volume=None, formula_units=None, demag=None, mode='acdc', scan=None, masked=False):
    """Reduce and preprocess acms dataset.

    :param path: path of the PPMS ACMS data file.
    :param volume: The sample volume (astropy quantity).
    :param formula_units: The numer of formula units of the sample.
    :param demag: The demagnetizing factor. To calculate the demagnetizing correction,
        the magnetisation M is needed. Therefore the volume is mandatory and it only
        works with modes 'dc' or 'acdc'.
    :param mode: Data modes, either 'acdc', 'ac' or 'dc'.
    :param scan: The scan variable. if scan is 'B' then dM/dH can be calculated.
    :param masked: if True, keep every source row (no measure-type split)
        and build a masked table.
    """
    # --- argument validation ------------------------------------------
    if mode not in {'ac', 'dc', 'acdc'}:
        raise ValueError("invalid mode. Must be one of 'ac', 'dc' or 'acdc'")
    if volume:
        if not isinstance(volume, u.Quantity):
            raise ValueError('Missing type of volume parameter.')
    if demag:
        if volume is None:
            raise ValueError(
                'volume parameter is neccessary to calculate the'
                'magnetisation used for demagnetizing correction.')
        if mode == 'ac':
            raise ValueError(
                "Can't calculate demagnetizing correction with mode"
                "'ac'. Magnetisation is neccessary.")
    source = Table.read(path, format='ascii.ppms')
    if masked:
        data = Table()
        # keep every row: the masks select everything
        dc_mask = ac_mask = np.ones(len(source), dtype=bool)
    else:
        data = Table(masked=False)
        # Boolean mask, True for DC magnetisation measurements
        dc_mask = source['Measure Type'] == 0
        ac_mask = source['Measure Type'] == 5
    if mode == 'acdc' and (np.sum(dc_mask) != np.sum(ac_mask)):
        raise ValueError('Nonequal number of ac ({}) and dc ({}) measurements'.format(np.sum(ac_mask), np.sum(dc_mask)) )
    if mode == 'ac':
        data['B'] = source[ac_mask]['Magnetic Field'].to(u.T).round(4)
        data['T'] = source[ac_mask]['Temperature']
    else:
        data['B'] = source[dc_mask]['Magnetic Field'].to(u.T).round(4)
        data['T'] = source[dc_mask]['Temperature']
    # external field H = B / mu0
    data['H'] = data['B'] / c.mu0
    if (mode == 'ac') or (mode == 'acdc'):
        data['t_ac'] = source[ac_mask]['Time Stamp']
        data['f'] = source[ac_mask]['Frequency']
        data['B_ac'] = source[ac_mask]["Amplitude"].to(u.T)
        data["m'_ac"] = source[ac_mask]["M'"]
        data["m''_ac"] = source[ac_mask]["M''"]
        if volume:
            # complex volume susceptibility from the fundamental response
            H = data['B_ac'].quantity / c.mu0
            M = data["m'_ac"].quantity / volume
            M_imag = data["m''_ac"].quantity / volume
            data["χ'"] = (M / H).si
            data["χ''"] = (M_imag / H).si
        # Handle higher harmonic susceptibilities (columns like "M '[2]")
        harmonics_real = [x for x in source[ac_mask].columns if x.startswith("M '[")]
        for colname in harmonics_real:
            i = int(colname[4:-1])  # harmonic index between the brackets
            data["m'_ac[{}]".format(i)] = source[ac_mask][colname]
            if volume:
                M_i = data["m'_ac[{}]".format(i)].quantity / volume
                data["χ'[{}]".format(i)] = (M_i / H).si
        harmonics_imag = [x for x in source[ac_mask].columns if x.startswith("M ''[")]
        for colname in harmonics_imag:
            i = int(colname[5:-1])
            data["m''_ac[{}]".format(i)] = source[ac_mask][colname]
            if volume:
                M_imag_i = data["m''_ac[{}]".format(i)].quantity / volume
                data["χ''[{}]".format(i)] = (M_imag_i / H).si
    if (mode == 'dc') or (mode == 'acdc'):
        data['t_dc'] = source[dc_mask]['Time Stamp']
        data['m'] = source[dc_mask]['M-DC']
        if formula_units:
            # calculate magnetic moment per formula unit
            data['m_fu'] = data['m'].to(c.muB) / formula_units
        if volume:
            # calculate magnetisation.
            data['M'] = M = (data['m'].quantity / volume).si
            if scan == 'B':
                # numerical differential susceptibility along the field scan
                data['dM/dH'] = np.gradient(M) / np.gradient(data['H'])
    if demag:
        demagnetizing_correction(data, demag=demag)
    data.meta['temperature'] = np.round(data['T'].mean(), 1)
    if volume:
        data.meta['volume'] = volume
    data.meta['z'] = source['Sample Center'].quantity[0].value
    data.meta['path'] = path
    try:
        # Try to extract date information from filepath
        data.meta['date'] = dparser.parse(path, fuzzy=True)
    except ValueError:
        pass
    return data
def demagnetizing_correction(data, demag):
    """Calculates the demagnetizing correction.

    The ac susceptibility is corrected according to [1]; corrected
    quantities are appended to ``data`` as new ``*_int`` columns.

    [1]: <NAME>., <NAME>. & <NAME>. AC magnetic
    susceptibility technique for the characterization of high
    temperature superconductors. Egyptian Journal of Solids 23,
    231–250 (2000).

    NOTE(review): new columns are added to ``data`` while iterating
    ``data.columns.items()`` — confirm astropy tolerates mutation during
    iteration here.
    """
    Hext, M = data['H'], data['M']
    # internal field: external field minus the demagnetizing field N*M
    data['H_int'] = Hint = Hext - demag * M
    data['B_int'] = c.mu0 * Hint
    #scale = Hext / Hint
    #idx = Hext == 0
    #scale[idx] = median_filter(scale, size=5)[idx]
    for name, col in data.columns.items():
        if name == 'dM/dH':
            data['dM/dH_int'] = np.gradient(M) / np.gradient(data['H_int'])
        elif name == "χ'" or name.startswith("χ'["):
            # real part: pairs with its imaginary counterpart column
            chi_r = col
            chi_i = data[name.replace("'", "''")]
            data[name + '_int'] = (chi_r - demag * (chi_r**2 + chi_i**2)) / (demag**2 * (chi_r**2 + chi_i**2) - 2 * demag * chi_r + 1)
        elif name == "χ''" or name.startswith("χ''["):
            # imaginary part: pairs with its real counterpart column
            chi_i = col
            chi_r = data[name.replace("''", "'")]
            data[name + '_int'] = chi_i / (demag**2 * (chi_r**2 + chi_i**2) - 2 * demag * chi_r + 1)
            #data[name + '_int'] = col * scale
def magnetic_moment_in_fu(m, formula_units):
    """Converts the magnetic moment from si units to Bohr magneton per formula
    units.

    :param m: Magnetic moment.
    :param formula_units: The number of formula units.
    :return: moment expressed in Bohr magnetons per formula unit.
    """
    moment_in_muB = m.to(c.muB)
    return moment_in_muB / formula_units
def heatcapacity(path):
    """Read a PPMS heat-capacity data file into an astropy Table.

    :param path: path to the PPMS data file (cp1252 encoded).
    :return: Table with field, temperature, and heat-capacity columns.
    """
    # The HC option sometimes creates comment lines without commas; drop any
    # line mentioning 'Error' before handing the buffer to the ascii reader.
    # (Iterating the file directly avoids materializing it twice via
    # readlines().)
    with open(path, 'r', encoding='cp1252', newline='') as f:
        buffer = ''.join(l for l in f if 'Error' not in l)
    source = Table.read(buffer, format='ascii.ppms')
    data = Table(masked=False)
    data['B'] = source['Field'].to(u.T).round(4)
    data['T'] = source['System Temp']
    data['Tsample'] = source['Sample Temp']
    data['Tpuck'] = source['Puck Temp']
    data['C'] = source['Total HC']
    data['Csample'] = source['Samp HC']
    data['Caddenda'] = source['Addenda HC']
    data['Coffset'] = source['Addenda Offset HC']
    return data
import amass
from amass.commands import CommonArgs, Arg
class Command(amass.commands.DjangoCommand):
usage = CommonArgs("""
Delete resource from AMASS
""",
[
Arg("resource", "Name of resource to delete"),
], [])
def __init__(self):
amass.commands.Command.__init__(self)
self.file = __file__
def run(self, config, args):
arg_vals = self.parse_args(args)
[resource] = self.get_django_models(config, "Resource")
r = None
try:
r = resource.objects.get(name=arg_vals["resource"])
except:
amass.abort("Resource '%s' does not exist" % arg_vals["resource"])
try:
r.delete()
print "Resource '%s' sucessfully deleted" % arg_vals["resource"]
except Exception as e:
amass.abort("Problem deleting resource '%s': %s" % (arg_vals["resource"], str(e))) | StarcoderdataPython |
# repo: vinissimus/guillotina_s3storage
import os
import aiohttp
import pytest
from guillotina import task_vars
from guillotina import testing
def settings_configurator(settings):
    """Register guillotina_s3storage and wire up the S3 blob-store utility."""
    # Append to the applications list, creating it when absent.
    settings.setdefault("applications", []).append("guillotina_s3storage")
    s3_settings = {
        "bucket": os.environ.get("S3CLOUD_BUCKET", "testbucket"),
        "aws_client_id": os.environ.get("S3CLOUD_ID", "x" * 10),
        "aws_client_secret": os.environ.get("S3CLOUD_SECRET", "x" * 10),  # noqa
    }
    # Without real cloud credentials, point the client at a local minio.
    if "S3CLOUD_ID" not in os.environ:
        s3_settings.update(
            {
                "endpoint_url": "http://localhost:19000",
                "verify_ssl": False,
                "ssl": False,
            }
        )
    settings["load_utilities"]["s3"] = {
        "provides": "guillotina_s3storage.interfaces.IS3BlobStore",
        "factory": "guillotina_s3storage.storage.S3BlobStore",
        "settings": s3_settings,
    }
# Hook the configurator into guillotina's test settings machinery.
testing.configure_with(settings_configurator)
class PatchedBaseRequest(aiohttp.web_request.Request):
    """aiohttp Request patched for tests: exposes the raw payload as
    ``content`` and works as a (sync or async) context manager that installs
    itself as the current guillotina request."""
    @property
    def content(self):
        # Return the raw stream payload instead of aiohttp's managed body.
        return self._payload
    def __enter__(self):
        # Make this request the task-local "current request".
        task_vars.request.set(self)
    def __exit__(self, *args):
        # NOTE(review): the previous request is not restored on exit --
        # confirm tests never nest request contexts.
        pass
    async def __aenter__(self):
        return self.__enter__()
    async def __aexit__(self, *args):
        return self.__exit__()
@pytest.fixture(scope="function")
def own_dummy_request(dummy_request, minio):
    """Re-class the stock dummy request as PatchedBaseRequest.

    Depends on the ``minio`` fixture so a local object store is running.
    """
    dummy_request.__class__ = PatchedBaseRequest
    yield dummy_request
| StarcoderdataPython |
from django.core.management.base import CommandError
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DjCrontabSchedule(models.Model):
    """Mirror of django-celery's crontab schedule table (crontab-style fields)."""
    minute = models.CharField(max_length=64, default="*")
    hour = models.CharField(max_length=64, default="*")
    day_of_week = models.CharField(max_length=64, default="*",)
    day_of_month = models.CharField(max_length=64, default="*")
    month_of_year = models.CharField(max_length=64, default="*")
    class Meta:
        db_table = "djcelery_crontabschedule"
class DjPeriodicTasks(models.Model):
    """Single-row bookkeeping table recording when periodic tasks last changed."""
    ident = models.SmallIntegerField(default=1, primary_key=True, unique=True)
    last_update = models.DateTimeField(null=False)
    class Meta:
        db_table = "djcelery_periodictasks"
class DjIntervalSchedule(models.Model):
    """Mirror of django-celery's interval schedule ("every N <period>")."""
    every = models.IntegerField(_("every"), null=False)
    period = models.CharField(_("period"), max_length=24)
    class Meta:
        db_table = "djcelery_intervalschedule"
        verbose_name = _("interval")
        verbose_name_plural = _("intervals")
        ordering = ["period", "every"]
class DjPeriodicTask(models.Model):
    """Mirror of django-celery's periodic task table.

    Exactly one of ``interval``/``crontab`` is expected to be set (see the
    crontab field's help text).
    """
    name = models.CharField(_("name"), max_length=200, unique=True, help_text=_("Useful description"),)
    task = models.CharField(_("task name"), max_length=200)
    interval = models.ForeignKey(
        DjIntervalSchedule, null=True, blank=True, verbose_name=_("interval"), on_delete=models.CASCADE,
    )
    crontab = models.ForeignKey(
        DjCrontabSchedule,
        null=True,
        blank=True,
        verbose_name=_("crontab"),
        on_delete=models.CASCADE,
        help_text=_("Use one of interval/crontab"),
    )
    args = models.TextField(_("Arguments"), blank=True, default="[]", help_text=_("JSON encoded positional arguments"),)
    kwargs = models.TextField(
        _("Keyword arguments"), blank=True, default="{}", help_text=_("JSON encoded keyword arguments"),
    )
    queue = models.CharField(
        _("queue"), max_length=200, blank=True, null=True, default=None, help_text=_("Queue defined in CELERY_QUEUES"),
    )
    exchange = models.CharField(_("exchange"), max_length=200, blank=True, null=True, default=None,)
    routing_key = models.CharField(_("routing key"), max_length=200, blank=True, null=True, default=None,)
    expires = models.DateTimeField(_("expires"), blank=True, null=True,)
    enabled = models.BooleanField(_("enabled"), default=True,)
    last_run_at = models.DateTimeField(auto_now=False, auto_now_add=False, editable=False, blank=True, null=True,)
    total_run_count = models.PositiveIntegerField(default=0, editable=False,)
    date_changed = models.DateTimeField(auto_now=True)
    description = models.TextField(_("description"), blank=True)
    # NOTE(review): class-level flag carried over from djcelery's model --
    # confirm whether any scheduler code still reads it.
    no_changes = False
    class Meta:
        db_table = "djcelery_periodictask"
        verbose_name = _("periodic task")
        verbose_name_plural = _("periodic tasks")
class DjWorkerState(models.Model):
    """Mirror of django-celery's worker state table (one row per worker host)."""
    hostname = models.CharField(_("hostname"), max_length=255, unique=True)
    last_heartbeat = models.DateTimeField(_("last heartbeat"), null=True, db_index=True)
    class Meta:
        """Model meta-data."""
        verbose_name = _("worker")
        verbose_name_plural = _("workers")
        get_latest_by = "last_heartbeat"
        ordering = ["-last_heartbeat"]
class DjTaskState(models.Model):
    """Mirror of django-celery's task event/state table."""
    state = models.CharField(_("state"), max_length=64)
    task_id = models.CharField(_("UUID"), max_length=36, unique=True)
    name = models.CharField(_("name"), max_length=200, null=True, db_index=True,)
    tstamp = models.DateTimeField(_("event received at"), db_index=True)
    args = models.TextField(_("Arguments"), null=True)
    kwargs = models.TextField(_("Keyword arguments"), null=True)
    eta = models.DateTimeField(_("ETA"), null=True)
    expires = models.DateTimeField(_("expires"), null=True)
    result = models.TextField(_("result"), null=True)
    traceback = models.TextField(_("traceback"), null=True)
    runtime = models.FloatField(_("execution time"), null=True, help_text=_("in seconds if task succeeded"),)
    retries = models.IntegerField(_("number of retries"), default=0)
    worker = models.ForeignKey(DjWorkerState, null=True, verbose_name=_("worker"), on_delete=models.CASCADE,)
    hidden = models.BooleanField(editable=False, default=False, db_index=True)
    class Meta:
        """Model meta-data."""
        verbose_name = _("task")
        verbose_name_plural = _("tasks")
        get_latest_by = "tstamp"
        ordering = ["-tstamp"]
def execute(new_table, old_table, tz=None):  # pylint: disable=invalid-name
    """Copy every row of *old_table* into *new_table* (schema-migration helper).

    :param new_table: destination model class; instantiated once per source row.
    :param old_table: source model class; all rows are read via ``objects.all()``.
    :param tz: optional value stored as ``timezone`` on each new row
        (only set when truthy).
    """
    for old_data in old_table.objects.all():
        new_data = new_table()
        if tz:
            new_data.timezone = tz
        for field in old_data._meta.fields:
            field_name = field.name
            # Check whether the field is a foreign key; those are copied by
            # raw id so the copy does not depend on related-row lookups.
            if field_name in ("crontab", "interval", "worker"):
                try:
                    # Write the related object's primary key directly.
                    setattr(new_data, field_name + "_id",
                            getattr(old_data, field_name).id)
                except AttributeError:
                    # NULL foreign key on the source row.
                    setattr(new_data, field_name + "_id", None)
            else:
                setattr(new_data, field_name, getattr(old_data, field_name))
        new_data.save()
| StarcoderdataPython |
# examples/decrypt.py
#!/usr/bin/env python
# Copyright (c) 2020 Janky <<EMAIL>>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Inspired by https://github.com/rnpgp/rnp/blob/master/src/examples/decrypt.c
from pyrop.bind import RopBind
from pyrop.error import RopError
message = "Dummy"
def example_pass_provider(session, app_ctx, key, pgp_context, buf_len):
    """Password callback for RNP: supply the password matching the context.

    Returns ``(handled, password)`` -- ``(False, None)`` for contexts this
    provider does not serve.
    """
    known_passwords = {
        'decrypt (symmetric)': 'encpassword',
        'decrypt': 'password',
    }
    password = known_passwords.get(pgp_context)
    return (password is not None), password
def decrypt(rop, usekeys):
    """Decrypt ``encrypted.asc`` with either the secret keyring or a password.

    :param rop: an open RopBind instance.
    :param usekeys: True -> public-key decryption using secring.pgp,
        False -> symmetric (password) decryption.
    """
    alt = rop.tagging()
    try:
        # initialize FFI object
        ses = rop.create_session(rop.KEYSTORE_GPG, rop.KEYSTORE_GPG)
        # check whether we want to use key or password for decryption
        if usekeys:
            keyfile = None
            try:
                # load secret keyring, as it is required for public-key decryption. However, you may
                # need to load public keyring as well to validate key's signatures.
                keyfile = rop.create_input(path="secring.pgp")
                # we may use secret=True and public=True as well
                ses.load_keys(rop.KEYSTORE_GPG, keyfile, secret=True)
            except RopError:
                print("Failed to read secring")
                raise
            finally:
                # Fixed: drop the input only if it was actually created.
                # Previously a failure inside create_input() raised
                # UnboundLocalError here, masking the real error.
                if keyfile is not None:
                    rop.drop(object_=keyfile)
        # set the password provider
        ses.set_pass_provider(example_pass_provider, None)
        try:
            # create file input and memory output objects for the encrypted message and decrypted
            # message
            input_ = rop.create_input(path="encrypted.asc")
            output = rop.create_output(max_alloc=0)
            ses.decrypt(input_, output)
            # get the decrypted message from the output structure
            buf = output.memory_get_str(False)
        except RopError:
            print("Public-key decryption failed")
            raise
        print("Decrypted message ({}):\n{}\n".format("with key" if usekeys else \
            "with password", buf))
        global message
        message = buf
    finally:
        rop.drop(from_=alt)
def execute():
    """Run the key-based and password-based decryption demos in turn."""
    rop = RopBind()
    try:
        for with_keys in (True, False):
            decrypt(rop, with_keys)
    finally:
        rop.close()
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    execute()
| StarcoderdataPython |
#-*-coding:utf8-*-
from __future__ import print_function # Python 2/3 compatibility
import boto3
import time
import json
import decimal
import datetime
import json
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders Decimal values as plain numbers.

    Integral decimals serialize as ints, everything else as floats.
    """
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # Fixed: the old test ``o % 1 > 0`` mishandled negative
            # non-integral decimals -- Decimal remainder keeps the dividend's
            # sign (Decimal('-1.5') % 1 == Decimal('-0.5')), so -1.5 was
            # silently truncated to -1.
            if o % 1 != 0:
                return float(o)
            return int(o)
        return super(DecimalEncoder, self).default(o)
class DetailInfoOnline(object):
    """Thin wrapper around one DynamoDB table with query/scan helpers.

    All methods swallow errors, print them, and return [] so callers can
    treat failures as empty result sets.
    """
    def __init__(self, table_kind, region_name, endpoint_url, table_name):
        """Connect to DynamoDB and bind the named table.

        :param table_kind: boto3 resource name, normally 'dynamodb'.
        :param region_name: AWS region of the table.
        :param endpoint_url: service endpoint (lets tests target DynamoDB local).
        :param table_name: name of the table to operate on.
        """
        self._dynamodb = boto3.resource(table_kind, region_name = region_name, endpoint_url = endpoint_url)
        self._table = self._dynamodb.Table(table_name)
    def query_item(self, docid):
        """Return all items whose hash key 'id' equals docid ([] on miss/error)."""
        try:
            response = self._table.query(
                KeyConditionExpression=Key('id').eq(docid)
            )
            # Fixed: ``len(response) > 0`` was always true (the response is a
            # metadata dict); inspect the 'Items' payload instead.
            return response.get('Items', [])
        except Exception as e:
            print (str(e))
            return []
    def scan_all_with_wait(self):
        """Scan the whole table, sleeping between pages to limit throughput use."""
        try:
            pe = "id, info"
            response = self._table.scan(
                ProjectionExpression=pe,
            )
            result = []
            # Follow LastEvaluatedKey pagination until the final page.
            while "Items" in response and 0 < len(response["Items"]):
                result += response["Items"]
                if "LastEvaluatedKey" not in response:
                    break
                lek = response["LastEvaluatedKey"]
                response = self._table.scan(
                    ProjectionExpression=pe,
                    ExclusiveStartKey=lek
                )
                time.sleep(2)
            return result
        except Exception as e:
            print (str(e))
            return []
    def scan_all(self):
        """Scan the whole table without throttling pauses.

        NOTE(review): projects 'userId, anonymId' while the other helpers use
        'id' -- confirm this matches the target table's schema.
        """
        try:
            pe = "userId, anonymId"
            response = self._table.scan(
                ProjectionExpression=pe,
            )
            result = []
            while "Items" in response and 0 < len(response["Items"]):
                result += response["Items"]
                if "LastEvaluatedKey" not in response:
                    break
                lek = response["LastEvaluatedKey"]
                response = self._table.scan(
                    ProjectionExpression=pe,
                    ExclusiveStartKey=lek
                )
            return result
        except Exception as e:
            print (str(e))
            return []
    def query_mul_item(self, docid_list):
        """Scan for all items whose 'id' is in docid_list, following pagination."""
        try:
            fe = Attr("id").is_in(docid_list)
            response = self._table.scan(
                FilterExpression=fe
            )
            result = []
            while "Items" in response and 0 < len(response["Items"]):
                result += response["Items"]
                if "LastEvaluatedKey" not in response:
                    break
                lek = response['LastEvaluatedKey']
                response = self._table.scan(
                    FilterExpression=fe,
                    ExclusiveStartKey=lek
                )
            return result
        except Exception as e:
            print(str(e))
            return []
# Manual smoke test: scans the production table and prints the first item.
if __name__ =="__main__":
    print("start...")
    data_obj = DetailInfoOnline('dynamodb', 'ap-northeast-2', "https://dynamodb.ap-northeast-2.amazonaws.com", 'MXNewsUserDev')
    res = data_obj.scan_all()
    print(res[0])
    print("end...")
# src/strategy.py
class Strategy:
    """Skeleton pick-and-place / paste strategy driver (mostly unimplemented).

    The method bodies below are bare string statements acting as pseudocode
    placeholders; they currently do nothing and return None.
    """
    def __init__(self,config):
        # Configuration object consumed by the (stubbed) strategy methods.
        self.config=config
    # NOTE(review): pickAndPlace and paste are missing the ``self`` parameter
    # -- calling them on an instance would bind the instance to
    # ``outputFile``.  Confirm the intended calling convention.
    def pickAndPlace(outputFile):
        "create all the necessary modules"
        "use the picker as an interator to get a list of parts"
        "for each board"
        "current board = this board"
        "pick a part"
        "place a part"
    def paste(outputFile):
        "create all the necessary modules"
        "paste.generate(outputFile)"
    # General
    #  - Needs to be able to generate multiple files
    #  - Needs to be able to prompt for input
    # Load the conf file
    # Load the requested drivers
    # Feed the input files into the drivers
    #
    # - Paster
    # Runs a paster to put paste on the board
    #   - Loads board mask
    #   - Loads boards extends
    #   - Generates gcode
    #
    # - Board
    #   - Does it define one or more boards
    #   - Should be able to ask for the origin of a board
    #
    #
    #
    # - Placer
    # Places part on board
    #   - Loads board mask
    #   - Loads part list
    #   - responsible for figuring out where to place the part
    #   - potentially validating the part was placed correct (vision)
    #
    #
    # - Picker
    # Picks the part off of the board
    #   - Provides a list of parts to place in order
    #   - Load parts list
    #   - Needs a model of how to pick parts
    #   - Must be able to request more part strips
    #   - responsible for figuring out the gcode to pick the part
    #   - potentially validating the part was picked up (vision)
    #
    pass
| StarcoderdataPython |
from .technews_helper import TechNews
from .mail_helper import EmailContentHelper
| StarcoderdataPython |
# repo: DLeinHellios/GrudgeMatch
import os, json, sys, sqlite3
class Config:
    """Loads, holds, and persists application settings (data/config.json)."""
    def __init__(self):
        '''Holds configuration options'''
        self.path = os.path.join('data', 'config.json')
        self.load()
    def create_default(self):
        '''Creates the default config file if config is missing or damaged'''
        default = {"settings": {
            "hide_sidebar": False,
        }}
        # Fixed: open(self.path) directly (os.path.join on a single argument
        # was a no-op).
        with open(self.path, 'w', encoding='utf-8') as cFile:
            json.dump(default, cFile, ensure_ascii=False, indent=2)
        return default
    def load(self):
        '''Reads data/config.json and builds dict as self.settings'''
        try:
            with open(self.path, encoding='utf-8') as cFile:
                c = json.load(cFile)
            self.settings = c['settings']
        except (OSError, ValueError, KeyError):
            # Was a bare ``except:``; narrowed to missing/unreadable file,
            # invalid JSON, or a JSON document without a "settings" key.
            # (Previously a missing "settings" key escaped the handler.)
            self.settings = self.create_default()['settings']
    def save(self):
        '''Saves config data to data/config.json'''
        data = {'settings': self.settings}
        with open(self.path, 'w', encoding='utf-8') as cFile:
            json.dump(data, cFile, ensure_ascii=False, indent=2)
class Query:
    """Read-only SQL queries against the GrudgeMatch sqlite database."""
    def __init__(self, conn, cursor):
        '''Contains methods to query database

        :param conn: open sqlite3 connection
        :param cursor: cursor created from that connection
        '''
        self.db = conn
        self.c = cursor
    def all_player_names(self, isActive):
        '''Returns a list of all player names'''
        if isActive: # Only active players
            self.c.execute('SELECT Name FROM Players WHERE IsActive=1;')
        else: # All players
            self.c.execute('SELECT Name FROM Players;')
        # fetchall yields 1-tuples; unwrap to bare strings
        return [row[0] for row in self.c.fetchall()]
    def all_game_names(self, isActive):
        '''Returns a list of all game names'''
        if isActive: # Only active games
            self.c.execute('SELECT Name FROM Games WHERE IsActive=1;')
        else: # All games
            self.c.execute('SELECT Name FROM Games;')
        return [row[0] for row in self.c.fetchall()]
    def all_player_ids(self):
        '''Returns a dictionary of all players, where key=name, value=id'''
        self.c.execute('SELECT Id, Name FROM Players;')
        return {name: pid for pid, name in self.c.fetchall()}
    def all_game_ids(self):
        '''Returns a dictionary of all games, where key=name, value=id'''
        self.c.execute('SELECT Id, Name FROM Games;')
        return {name: gid for gid, name in self.c.fetchall()}
    def all_player_status(self):
        '''Returns a dict where key=name, value=IsActive for all players'''
        self.c.execute('SELECT Name, IsActive FROM Players;')
        return {name: active for name, active in self.c.fetchall()}
    def all_game_status(self):
        '''Returns a dict where key=name, value=IsActive for all games'''
        self.c.execute('SELECT Name, IsActive FROM Games;')
        return {name: active for name, active in self.c.fetchall()}
    def all_player_details(self, isActive):
        '''Returns a list of tuples (Id, Name, Wins, Matches, LastMatch) for each player'''
        if isActive:
            active = ('1',)
        else:
            active = ('%',)
        # Fixed: the placeholder is compared with LIKE (not =) so the '%'
        # wildcard actually matches every row when isActive is False;
        # with '=' the all-players branch returned nothing.
        self.c.execute('''
            SELECT
                Players.Id AS Id,
                Players.Name AS Name,
                (SELECT COUNT(*) FROM MatchRecords WHERE WinnerId = Players.Id) as WinCount,
                (SELECT COUNT(*) FROM MatchRecords WHERE Player1Id = Players.Id OR Player2Id = Players.Id) as MatchCount,
                (SELECT MAX(Date) FROM MatchRecords WHERE Player1Id = Players.Id OR Player2Id = Players.Id) as LastMatch
            FROM Players
            WHERE Players.IsActive LIKE ?
            GROUP BY Players.Id;''', active)
        return self.c.fetchall()
    def all_game_details(self, isActive):
        '''Returns a list of tuples (name,nMatches,lastMatch) for each game'''
        if isActive:
            active = ('1',)
        else:
            active = ('%',)
        # Same LIKE fix as all_player_details.
        self.c.execute('''
            SELECT
                Games.Id AS Id,
                Games.Name AS Name,
                (SELECT COUNT(*) FROM MatchRecords WHERE GameId = Games.Id) as MatchCount,
                (SELECT MAX(Date) FROM MatchRecords WHERE GameId = Games.Id) as LastMatch
            FROM Games
            WHERE Games.IsActive LIKE ?
            GROUP BY Games.Id;''', active)
        return self.c.fetchall()
    def game_info(self, gameName):
        '''Returns game developer, platform and release year for game specified by name'''
        self.c.execute('''
            SELECT Games.Developer, Games.Platform, Games.ReleaseYear
            FROM Games
            WHERE Games.Name = ?''', (gameName,))
        return self.c.fetchall()[0]
    def match_folders(self):
        '''Returns a sorted list of all games found in match records (id, name)'''
        # NOTE(review): the join against Games only requires *some* active
        # game to exist; it does not restrict records to active games --
        # confirm whether that filter was intended.
        self.c.execute('''
            SELECT DISTINCT Records.GameId as GameId,
            (SELECT Games.Name FROM Games WHERE Games.Id = Records.GameId) AS GameName
            FROM MatchRecords AS Records, Games
            WHERE Games.IsActive = 1
            ORDER BY Games.Name;''')
        return self.c.fetchall()
    def match_records(self, p1, p2, game):
        '''Accepts ids for p1, p2, and game, returns all matching results from records table, pass '%' for wildcard'''
        args = (p1, p1, p2, p2, game)
        self.c.execute('''
            SELECT
                Records.Id AS RecordId,
                Records.Date AS Date,
                (SELECT Players.Name FROM Players WHERE Players.Id = Records.Player1Id) AS Player1,
                (SELECT Players.Name FROM Players WHERE Players.Id = Records.Player2Id) AS Player2,
                (SELECT Players.Name FROM Players WHERE Players.Id = Records.WinnerId) AS Winner,
                (SELECT Games.Name FROM Games WHERE Games.Id = Records.GameId) AS Game
            FROM MatchRecords as Records
            WHERE (Records.Player1Id LIKE ? OR Records.Player2ID LIKE ?)
            AND (Records.Player1Id LIKE ? OR Player2Id LIKE ?)
            AND Records.GameId LIKE ?
            ORDER BY Game, Date;''', args)
        return self.c.fetchall()
class Data:
    """Top-level data manager: owns the sqlite connection plus the Query and
    Config helpers, and performs all writes to the records database."""
    def __init__(self):
        '''Top-level data management object, holds data on players, games, tags, config, and records'''
        self.init_dir()
        self.path = os.path.join('data', 'records.db')
        if os.path.isfile(self.path):
            self.db = sqlite3.connect(self.path) # Connection
            self.c = self.db.cursor() # Cursor
        else:
            self.init_db()
        self.query = Query(self.db, self.c)
        self.config = Config()
    def init_dir(self):
        '''Creates data dir if missing'''
        try:
            if not os.path.exists("data"):
                os.makedirs("data")
        except Exception:
            # Was a bare ``except:`` -- narrowed (here and below) so Ctrl-C /
            # SystemExit are not swallowed by the catch-print-exit handlers.
            print('Unable to create data directory! Exiting...')
            sys.exit()
    def init_db(self):
        '''Creates initial db tables'''
        try:
            self.db = sqlite3.connect(self.path) # Connection
            self.c = self.db.cursor() # Cursor
            with open(os.path.join("src", "sql", "create_db_tables.sql")) as script:
                cmd = script.read()
            self.c.executescript(cmd)
            self.db.commit()
        except Exception:
            print("Unable to initialize database! Exiting...")
            sys.exit()
    def new_player(self, name):
        '''Creates a new player record with the provided name'''
        try:
            self.c.execute('INSERT INTO "Players" ("Name") VALUES (?)', (name,))
            self.db.commit()
        except Exception:
            print("Unable to add player! Exiting...")
            sys.exit()
    def new_game(self, name, developer, platform, release):
        '''Creates a new game record with the provided name and metadata'''
        try:
            self.c.execute('INSERT INTO "Games" ("Name","Developer","Platform","ReleaseYear") VALUES (?,?,?,?)', (name, developer, platform, release))
            self.db.commit()
        except Exception:
            print("Unable to add game! Exiting...")
            sys.exit()
    def activate_player(self, name):
        '''Flips players.is_active to True for provided player'''
        try:
            self.c.execute('UPDATE Players SET IsActive=1 WHERE Name=?', (name,))
            self.db.commit()
        except Exception:
            print("Unable to activate player! Exiting...")
            sys.exit()
    def activate_game(self, name):
        '''Flips games.is_active to True for provided game'''
        try:
            self.c.execute('UPDATE Games SET IsActive=1 WHERE Name=?', (name,))
            self.db.commit()
        except Exception:
            print("Unable to activate game! Exiting...")
            sys.exit()
    def deactivate_player(self, name):
        '''Flips players.is_active to False for provided player'''
        try:
            self.c.execute('UPDATE Players SET IsActive=0 WHERE Name=?', (name,))
            self.db.commit()
        except Exception:
            print("Unable to deactivate player! Exiting...")
            sys.exit()
    def deactivate_game(self, name):
        '''Flips games.is_active to False for provided game'''
        try:
            self.c.execute('UPDATE Games SET IsActive=0 WHERE Name=?', (name,))
            self.db.commit()
        except Exception:
            print("Unable to deactivate game! Exiting...")
            sys.exit()
    def update_game_info(self, name, values):
        '''Updates single game info for specified name

        :param values: list of [developer, platform, release_year]
        '''
        self.c.execute('''
            UPDATE Games
            SET Developer = ?, Platform = ?, ReleaseYear = ?
            WHERE Name = ?''', tuple(values + [name]))
        self.db.commit()
    def validate_player_name(self, name):
        '''Returns 0 for valid names, 1+ for error codes:
        1=name in use, 2=in use but inactive, 3=reserved word,
        4=too long (>10 chars), 5=illegal character'''
        reserved = ['0','1','2','3','4','5','6','7','8','9','default','player','name','game','data']
        illegal = [',', '\\', '.', "/", "`", "~"]
        err = 0
        if name in self.query.all_player_names(True): # Name in use
            err = 1
        elif name in self.query.all_player_names(False): # Name in-use, but inactive
            err = 2
        elif name.lower() in reserved: # Name is on reserved list
            err = 3
        elif len(name) > 10: # Name is too long
            err = 4
        elif not err: # Name uses illegal character
            for i in illegal:
                if i in name:
                    err = 5
        return err
    def validate_game_name(self, name):
        '''Returns 0 for valid names, 1+ for error codes:
        1=name in use, 2=in use but inactive, 3=reserved word,
        4=illegal character, 5=too long (>30 chars)

        NOTE(review): codes 4/5 are swapped relative to
        validate_player_name -- kept as-is because callers may map them.'''
        reserved = ['0','1','2','3','4','5','6','7','8','9','default','player','name','game','date']
        illegal = [',', '\\', '.', "/", "`", "~"]
        err = 0
        if name in self.query.all_game_names(True): # Name in-use
            err = 1
        elif name in self.query.all_game_names(False): # Name in-use, but inactive
            err = 2
        elif name.lower() in reserved: # Name is on reserved list
            err = 3
        elif len(name) > 30: # Name is too long
            err = 5
        elif not err:
            for i in illegal:
                if i in name: # Name uses an illegal character
                    err = 4
        return err
    def convert_match(self, match):
        '''Accepts match dict containing names, returns dict of ids'''
        playerIDs = self.query.all_player_ids()
        gameIDs = self.query.all_game_ids()
        converted = {}
        converted['game'] = gameIDs[match['game']]
        converted['p1'] = playerIDs[match['p1']]
        converted['p2'] = playerIDs[match['p2']]
        converted['win'] = playerIDs[match['win']]
        converted['date'] = match['date']
        return converted
    def record_match(self, match):
        '''Records a single match to the Records table'''
        record = self.convert_match(match)
        # (removed unused local ``args = ()``)
        self.c.execute('''
            INSERT INTO "MatchRecords" (GameId, Player1Id, Player2Id, WinnerId, Date)
            VALUES (?,?,?,?,?)''', (record['game'], record['p1'], record['p2'], record['win'], record['date']))
        self.db.commit()
| StarcoderdataPython |
# repo: Alfon-sec/client-python
# coding: utf-8
import json
from dateutil.parser import parse
class Report:
    def __init__(self, opencti):
        """Bind the OpenCTI API client and declare the default projection.

        ``self.properties`` is the GraphQL selection set fetched for a
        Report whenever a caller does not override it via
        ``customAttributes``.
        """
        self.opencti = opencti
        # Default GraphQL fields (runtime string -- keep in sync with the
        # OpenCTI schema).
        self.properties = """
            id
            standard_id
            entity_type
            parent_types
            spec_version
            created_at
            updated_at
            createdBy {
                ... on Identity {
                    id
                    standard_id
                    entity_type
                    parent_types
                    spec_version
                    name
                    description
                    roles
                    contact_information
                    x_opencti_aliases
                    created
                    modified
                    objectLabel {
                        edges {
                            node {
                                id
                                value
                                color
                            }
                        }
                    }
                }
                ... on Organization {
                    x_opencti_organization_type
                    x_opencti_reliability
                }
                ... on Individual {
                    x_opencti_firstname
                    x_opencti_lastname
                }
            }
            objectMarking {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        definition_type
                        definition
                        created
                        modified
                        x_opencti_order
                        x_opencti_color
                    }
                }
            }
            objectLabel {
                edges {
                    node {
                        id
                        value
                        color
                    }
                }
            }
            externalReferences {
                edges {
                    node {
                        id
                        standard_id
                        entity_type
                        source_name
                        description
                        url
                        hash
                        external_id
                        created
                        modified
                    }
                }
            }
            revoked
            confidence
            created
            modified
            name
            description
            report_types
            published
            x_opencti_graph_data
            x_opencti_report_status
            objects {
                edges {
                    node {
                        ... on BasicObject {
                            id
                            entity_type
                            parent_types
                        }
                        ... on BasicRelationship {
                            id
                            entity_type
                            parent_types
                        }
                        ... on StixObject {
                            standard_id
                            spec_version
                            created_at
                            updated_at
                        }
                        ... on AttackPattern {
                            name
                        }
                        ... on Campaign {
                            name
                        }
                        ... on CourseOfAction {
                            name
                        }
                        ... on Individual {
                            name
                        }
                        ... on Organization {
                            name
                        }
                        ... on Sector {
                            name
                        }
                        ... on Indicator {
                            name
                        }
                        ... on Infrastructure {
                            name
                        }
                        ... on IntrusionSet {
                            name
                        }
                        ... on Position {
                            name
                        }
                        ... on City {
                            name
                        }
                        ... on Country {
                            name
                        }
                        ... on Region {
                            name
                        }
                        ... on Malware {
                            name
                        }
                        ... on ThreatActor {
                            name
                        }
                        ... on Tool {
                            name
                        }
                        ... on Vulnerability {
                            name
                        }
                        ... on XOpenCTIIncident {
                            name
                        }
                        ... on StixCoreRelationship {
                            standard_id
                            spec_version
                            created_at
                            updated_at
                            relationship_type
                        }
                    }
                }
            }
        """
"""
List Report objects
:param filters: the filters to apply
:param search: the search keyword
:param first: return the first n rows from the after ID (or the beginning if not set)
:param after: ID of the first row for pagination
:return List of Report objects
"""
    def list(self, **kwargs):
        """List Report objects.

        :param filters: the filters to apply
        :param search: the search keyword
        :param first: return the first n rows from the after ID
            (or the beginning if not set)
        :param after: ID of the first row for pagination
        :param orderBy/orderMode: ordering options
        :param customAttributes: GraphQL selection overriding self.properties
        :param withPagination: return the paginated structure instead of a list
        :return: list of Report dicts (or paginated dict)
        """
        filters = kwargs.get("filters", None)
        search = kwargs.get("search", None)
        first = kwargs.get("first", 500)
        after = kwargs.get("after", None)
        order_by = kwargs.get("orderBy", None)
        order_mode = kwargs.get("orderMode", None)
        custom_attributes = kwargs.get("customAttributes", None)
        get_all = kwargs.get("getAll", False)
        with_pagination = kwargs.get("withPagination", False)
        # NOTE(review): getAll only caps the page size at 500 here; it does
        # not iterate over subsequent pages -- confirm against upstream pycti.
        if get_all:
            first = 500
        self.opencti.log(
            "info", "Listing Reports with filters " + json.dumps(filters) + "."
        )
        query = (
            """
                query Reports($filters: [ReportsFiltering], $search: String, $first: Int, $after: ID, $orderBy: ReportsOrdering, $orderMode: OrderingMode) {
                    reports(filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {
                        edges {
                            node {
                                """
            + (custom_attributes if custom_attributes is not None else self.properties)
            + """
                            }
                        }
                        pageInfo {
                            startCursor
                            endCursor
                            hasNextPage
                            hasPreviousPage
                            globalCount
                        }
                    }
                }
            """
        )
        result = self.opencti.query(
            query,
            {
                "filters": filters,
                "search": search,
                "first": first,
                "after": after,
                "orderBy": order_by,
                "orderMode": order_mode,
            },
        )
        return self.opencti.process_multiple(result["data"]["reports"], with_pagination)
"""
Read a Report object
:param id: the id of the Report
:param filters: the filters to apply if no id provided
:return Report object
"""
    def read(self, **kwargs):
        """Read a single Report object.

        :param id: the id of the Report
        :param filters: the filters to apply if no id provided
        :param customAttributes: GraphQL selection overriding self.properties
        :return: Report dict, or None when filters match nothing
            (implicitly None when neither id nor filters is given)
        """
        id = kwargs.get("id", None)
        filters = kwargs.get("filters", None)
        custom_attributes = kwargs.get("customAttributes", None)
        if id is not None:
            self.opencti.log("info", "Reading Report {" + id + "}.")
            query = (
                """
                    query Report($id: String!) {
                        report(id: $id) {
                            """
                + (
                    custom_attributes
                    if custom_attributes is not None
                    else self.properties
                )
                + """
                    }
                }
            """
            )
            result = self.opencti.query(query, {"id": id})
            return self.opencti.process_multiple_fields(result["data"]["report"])
        elif filters is not None:
            # Fall back to list() and take the first match, if any.
            result = self.list(filters=filters)
            if len(result) > 0:
                return result[0]
            else:
                return None
"""
Read a Report object by stix_id or name
:param type: the Stix-Domain-Entity type
:param stix_id: the STIX ID of the Stix-Domain-Entity
:param name: the name of the Stix-Domain-Entity
:return Stix-Domain-Entity object
"""
    def get_by_stix_id_or_name(self, **kwargs):
        """Resolve a Report by STIX id, falling back to (name, published day).

        :param stix_id: the STIX ID of the report
        :param name: the report name (used with ``published``)
        :param published: publication timestamp; normalised to its day
        :param customAttributes: GraphQL selection overriding self.properties
        :return: Report dict or None
        """
        stix_id = kwargs.get("stix_id", None)
        name = kwargs.get("name", None)
        published = kwargs.get("published", None)
        custom_attributes = kwargs.get("customAttributes", None)
        object_result = None
        if stix_id is not None:
            object_result = self.read(id=stix_id, customAttributes=custom_attributes)
        if object_result is None and name is not None and published is not None:
            # Reports are keyed on name + publication *day*, so normalise the
            # timestamp to YYYY-MM-DD before filtering.
            published_final = parse(published).strftime("%Y-%m-%d")
            object_result = self.read(
                filters=[
                    {"key": "name", "values": [name]},
                    {"key": "published_day", "values": [published_final]},
                ],
                customAttributes=custom_attributes,
            )
        return object_result
"""
Check if a report already contains a thing (Stix Object or Stix Relationship)
:return Boolean
"""
    def contains_stix_object_or_stix_relationship(self, **kwargs):
        """Check if a report already contains a given object or relationship.

        :param id: the id of the Report
        :param stixObjectOrStixRelationshipId: id of the contained thing
        :return: bool, or None (after logging) when a parameter is missing
        """
        id = kwargs.get("id", None)
        stix_object_or_stix_relationship_id = kwargs.get(
            "stixObjectOrStixRelationshipId", None
        )
        if id is not None and stix_object_or_stix_relationship_id is not None:
            self.opencti.log(
                "info",
                "Checking StixObjectOrStixRelationship {"
                + stix_object_or_stix_relationship_id
                + "} in Report {"
                + id
                + "}",
            )
            query = """
                query ReportContainsStixObjectOrStixRelationship($id: String!, $stixObjectOrStixRelationshipId: String!) {
                    reportContainsStixObjectOrStixRelationship(id: $id, stixObjectOrStixRelationshipId: $stixObjectOrStixRelationshipId)
                }
            """
            result = self.opencti.query(
                query,
                {
                    "id": id,
                    "stixObjectOrStixRelationshipId": stix_object_or_stix_relationship_id,
                },
            )
            return result["data"]["reportContainsStixObjectOrStixRelationship"]
        else:
            self.opencti.log(
                "error",
                "[opencti_report] Missing parameters: id or entity_id",
            )
"""
Create a Report object
:param name: the name of the Report
:return Report object
"""
    def create(self, **kwargs):
        """Create (or upsert) a Report object via the ReportAdd mutation.

        Kwargs:
            name (required), published (required): core report fields.
            description: report description; defaults to "" (so the
                mandatory-parameter check below always passes for it).
            stix_id, createdBy, objectMarking, objectLabel,
            externalReferences, revoked, confidence, lang, created,
            modified, report_types, x_opencti_graph_data,
            x_opencti_report_status: optional STIX / OpenCTI fields.
            update: when True, asks the platform to upsert an existing report.

        Returns:
            The created Report (with multiple fields processed), or None when
            mandatory parameters are missing (an error is logged).
        """
        stix_id = kwargs.get("stix_id", None)
        created_by = kwargs.get("createdBy", None)
        object_marking = kwargs.get("objectMarking", None)
        object_label = kwargs.get("objectLabel", None)
        external_references = kwargs.get("externalReferences", None)
        revoked = kwargs.get("revoked", None)
        confidence = kwargs.get("confidence", None)
        lang = kwargs.get("lang", None)
        created = kwargs.get("created", None)
        modified = kwargs.get("modified", None)
        name = kwargs.get("name", None)
        description = kwargs.get("description", "")
        report_types = kwargs.get("report_types", None)
        published = kwargs.get("published", None)
        x_opencti_graph_data = kwargs.get("x_opencti_graph_data", None)
        x_opencti_report_status = kwargs.get("x_opencti_report_status", None)
        update = kwargs.get("update", False)
        # name, description and published are mandatory for a report.
        if name is not None and description is not None and published is not None:
            self.opencti.log("info", "Creating Report {" + name + "}.")
            query = """
                mutation ReportAdd($input: ReportAddInput) {
                    reportAdd(input: $input) {
                        id
                        standard_id
                        entity_type
                        parent_types
                    }
                }
            """
            result = self.opencti.query(
                query,
                {
                    "input": {
                        "stix_id": stix_id,
                        "createdBy": created_by,
                        "objectMarking": object_marking,
                        "objectLabel": object_label,
                        "externalReferences": external_references,
                        "revoked": revoked,
                        "confidence": confidence,
                        "lang": lang,
                        "created": created,
                        "modified": modified,
                        "name": name,
                        "description": description,
                        "report_types": report_types,
                        "published": published,
                        "x_opencti_graph_data": x_opencti_graph_data,
                        "x_opencti_report_status": x_opencti_report_status,
                        "update": update,
                    }
                },
            )
            return self.opencti.process_multiple_fields(result["data"]["reportAdd"])
        else:
            self.opencti.log(
                "error",
                "[opencti_report] Missing parameters: name and description and published and report_class",
            )
"""
Add a Stix-Entity object to Report object (object_refs)
:param id: the id of the Report
:param entity_id: the id of the Stix-Entity
:return Boolean
"""
def add_stix_object_or_stix_relationship(self, **kwargs):
id = kwargs.get("id", None)
stix_object_or_stix_relationship_id = kwargs.get(
"stixObjectOrStixRelationshipId", None
)
if id is not None and stix_object_or_stix_relationship_id is not None:
self.opencti.log(
"info",
"Adding StixObjectOrStixRelationship {"
+ stix_object_or_stix_relationship_id
+ "} to Report {"
+ id
+ "}",
)
query = """
mutation ReportEditRelationAdd($id: ID!, $input: StixMetaRelationshipAddInput) {
reportEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"input": {
"toId": stix_object_or_stix_relationship_id,
"relationship_type": "object",
},
},
)
return True
else:
self.opencti.log(
"error", "[opencti_report] Missing parameters: id and entity_id"
)
return False
"""
Remove a Stix-Entity object to Report object (object_refs)
:param id: the id of the Report
:param entity_id: the id of the Stix-Entity
:return Boolean
"""
def remove_stix_object_or_stix_relationship(self, **kwargs):
id = kwargs.get("id", None)
stix_object_or_stix_relationship_id = kwargs.get(
"stixObjectOrStixRelationshipId", None
)
if id is not None and stix_object_or_stix_relationship_id is not None:
self.opencti.log(
"info",
"Removing StixObjectOrStixRelationship {"
+ stix_object_or_stix_relationship_id
+ "} to Report {"
+ id
+ "}",
)
query = """
mutation ReportEditRelationDelete($id: ID!, $toId: String!, $relationship_type: String!) {
reportEdit(id: $id) {
relationDelete(toId: $toId, relationship_type: $relationship_type) {
id
}
}
}
"""
self.opencti.query(
query,
{
"id": id,
"toId": stix_object_or_stix_relationship_id,
"relationship_type": "object",
},
)
return True
else:
self.opencti.log(
"error", "[opencti_report] Missing parameters: id and entity_id"
)
return False
"""
Import a Report object from a STIX2 object
:param stixObject: the Stix-Object Report
:return Report object
"""
    def import_from_stix2(self, **kwargs):
        """Import a Report from a STIX2 object dict.

        Kwargs:
            stixObject: the STIX2 Report object (dict) to import.
            extras: dict of pre-resolved OpenCTI ids (created_by_id,
                object_marking_ids, object_label_ids, external_references_ids).
            update: when True, upsert an existing report.

        Returns:
            The created/updated Report, or None when stixObject is missing
            (an error is logged).
        """
        stix_object = kwargs.get("stixObject", None)
        extras = kwargs.get("extras", {})
        update = kwargs.get("update", False)
        if stix_object is not None:
            # TODO: Compatibility with OpenCTI 3.X to be REMOVED
            # Map legacy 3.X attribute names onto their 4.X equivalents
            # before delegating to create().
            if "report_types" not in stix_object:
                stix_object["report_types"] = (
                    [stix_object["x_opencti_report_class"]]
                    if "x_opencti_report_class" in stix_object
                    else None
                )
            if "confidence" not in stix_object:
                stix_object["confidence"] = (
                    stix_object["x_opencti_source_confidence_level"]
                    if "x_opencti_source_confidence_level" in stix_object
                    else 0
                )
            if "x_opencti_report_status" not in stix_object:
                stix_object["x_opencti_report_status"] = (
                    stix_object["x_opencti_object_status"]
                    if "x_opencti_object_status" in stix_object
                    else 0
                )
            # Every optional field falls back to None/[]/"" so create() can
            # decide what is actually sent to the platform.
            return self.create(
                stix_id=stix_object["id"],
                createdBy=extras["created_by_id"]
                if "created_by_id" in extras
                else None,
                objectMarking=extras["object_marking_ids"]
                if "object_marking_ids" in extras
                else None,
                objectLabel=extras["object_label_ids"]
                if "object_label_ids" in extras
                else [],
                externalReferences=extras["external_references_ids"]
                if "external_references_ids" in extras
                else [],
                revoked=stix_object["revoked"] if "revoked" in stix_object else None,
                confidence=stix_object["confidence"]
                if "confidence" in stix_object
                else None,
                lang=stix_object["lang"] if "lang" in stix_object else None,
                created=stix_object["created"] if "created" in stix_object else None,
                modified=stix_object["modified"] if "modified" in stix_object else None,
                name=stix_object["name"],
                description=self.opencti.stix2.convert_markdown(
                    stix_object["description"]
                )
                if "description" in stix_object
                else "",
                report_types=stix_object["report_types"]
                if "report_types" in stix_object
                else None,
                published=stix_object["published"],
                x_opencti_graph_data=stix_object["x_opencti_graph_data"]
                if "x_opencti_graph_data" in stix_object
                else None,
                x_opencti_report_status=stix_object["x_opencti_report_status"]
                if "x_opencti_report_status" in stix_object
                else None,
                update=update,
            )
        else:
            self.opencti.log("error", "[opencti_report] Missing parameters: stixObject")
| StarcoderdataPython |
86561 | <reponame>chetat/market-research
from .. import db
class TrackingScript(db.Model):
    """SQLAlchemy model storing a named tracking-script snippet."""
    __tablename__ = "tracking_script"
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Human-readable name of the script (required, max 50 chars).
    name = db.Column(db.String(50), nullable=False)
    # The script content itself (required, max 150 chars).
    script = db.Column(db.String(150), nullable=False)
| StarcoderdataPython |
1768562 | <reponame>bntumb/Neural-Networks-Module-CE889
import pygame
'''
Class created using code from https://www.pygame.org/wiki/IntersectingLineDetection
Mathematic explanation: https://www.mathopenref.com/coordintersection.html
'''
class CollisionUtility:
    """Static 2D line-segment intersection helpers for lander/surface
    collision detection (see module header for the math references)."""
    @staticmethod
    def check_lander_collision_with_surface(lander, surface):
        """Return True when the lander's bounding box touches (or has sunk
        below) the surface polyline directly underneath it."""
        # The four edges of the lander's bounding rectangle, each as a
        # [start, end] point pair.
        lander_bottom_line = [lander.rect.bottomleft, lander.rect.bottomright]
        lander_top_line = [lander.rect.topleft, lander.rect.topright]
        lander_left_line = [lander.rect.topleft, lander.rect.bottomleft]
        lander_right_line = [lander.rect.topright, lander.rect.bottomright]
        surface_points = CollisionUtility.surface_points_below_lander(lander, surface)
        intersection_point_found = False
        # Test every surface segment under the lander against all four edges.
        for i in range(len(surface_points)-1):
            top_intersect_point = CollisionUtility.calculateIntersectPoint(lander_top_line[0], lander_top_line[1], surface_points[i], surface_points[i+1])
            bottom_intersect_point = CollisionUtility.calculateIntersectPoint(lander_bottom_line[0], lander_bottom_line[1], surface_points[i], surface_points[i+1])
            left_intersect_point = CollisionUtility.calculateIntersectPoint(lander_left_line[0], lander_left_line[1], surface_points[i], surface_points[i+1])
            right_intersect_point = CollisionUtility.calculateIntersectPoint(lander_right_line[0], lander_right_line[1], surface_points[i], surface_points[i+1])
            if (bottom_intersect_point != None or top_intersect_point != None or left_intersect_point != None or right_intersect_point != None):
                intersection_point_found = True
        if (not intersection_point_found):
            # No edge crossing: the lander may already be fully below the
            # surface. Compare its lowest corner (y grows downward in pygame)
            # with the lowest surface point beneath it.
            lowest_lander_point = max(lander_bottom_line[0][1], lander_bottom_line[1][1], lander_top_line[0][1], lander_top_line[1][1])
            lowest_surface_point = 0
            for p in surface_points:
                lowest_surface_point = max(lowest_surface_point, p[1])
            intersection_point_found = (lowest_surface_point < lowest_lander_point)
        return intersection_point_found
    # Calc the gradient 'm' of a line between p1 and p2
    @staticmethod
    def calculateGradient(p1, p2):
        """Return the slope of the line p1-p2, or None for a vertical line."""
        # Ensure that the line is not vertical
        if (p1[0] != p2[0]):
            m = (p1[1] - p2[1]) / (p1[0] - p2[0])
            return m
        else:
            return None
    # Calc the point 'b' where line crosses the Y axis
    @staticmethod
    def calculateYAxisIntersect(p, m):
        """Return the y-intercept b of a line with slope m through point p."""
        return p[1] - (m * p[0])
    # Calc the point where two infinitely long lines (p1 to p2 and p3 to p4) intersect.
    # Handle parallel lines and vertical lines (the later has infinate 'm').
    # Returns a point tuple of points like this ((x,y),...) or None
    # In non parallel cases the tuple will contain just one point.
    # For parallel lines that lay on top of one another the tuple will contain
    # all four points of the two lines
    @staticmethod
    def getIntersectPoint(p1, p2, p3, p4):
        """Intersect the infinite lines through p1-p2 and p3-p4.

        Returns a tuple of points ((x, y),) in the general case, all four
        input points when the lines are parallel and coincident, or None
        when they are parallel and distinct.
        """
        m1 = CollisionUtility.calculateGradient(p1, p2)
        m2 = CollisionUtility.calculateGradient(p3, p4)
        # See if the the lines are parallel
        if (m1 != m2):
            # Not parallel
            # See if either line is vertical
            if (m1 is not None and m2 is not None):
                # Neither line vertical
                b1 = CollisionUtility.calculateYAxisIntersect(p1, m1)
                b2 = CollisionUtility.calculateYAxisIntersect(p3, m2)
                x = (b2 - b1) / (m1 - m2)
                y = (m1 * x) + b1
            else:
                # Line 1 is vertical so use line 2's values
                if (m1 is None):
                    b2 = CollisionUtility.calculateYAxisIntersect(p3, m2)
                    x = p1[0]
                    y = (m2 * x) + b2
                # Line 2 is vertical so use line 1's values
                elif (m2 is None):
                    b1 = CollisionUtility.calculateYAxisIntersect(p1, m1)
                    x = p3[0]
                    y = (m1 * x) + b1
                else:
                    assert False
            return ((x,y),)
        else:
            # Parallel lines with same 'b' value must be the same line so they intersect
            # everywhere in this case we return the start and end points of both lines
            # the calculateIntersectPoint method will sort out which of these points
            # lays on both line segments
            # NOTE(review): when both lines are vertical, b1 and b2 stay None
            # and compare equal, so two *distinct* vertical lines are also
            # reported as coincident here — confirm whether callers can hit
            # that case.
            b1, b2 = None, None # vertical lines have no b value
            if m1 is not None:
                b1 = CollisionUtility.calculateYAxisIntersect(p1, m1)
            if m2 is not None:
                b2 = CollisionUtility.calculateYAxisIntersect(p3, m2)
            # If these parallel lines lay on one another
            if b1 == b2:
                return p1,p2,p3,p4
            else:
                return None
    # For line segments (ie not infinitely long lines) the intersect point
    # may not lay on both lines.
    #
    # If the point where two lines intersect is inside both line's bounding
    # rectangles then the lines intersect. Returns intersect point if the line
    # intesect o None if not
    @staticmethod
    def calculateIntersectPoint(p1, p2, p3, p4):
        """Return the intersection point of segments p1-p2 and p3-p4 as
        [int x, int y], or None when the segments do not intersect."""
        p = CollisionUtility.getIntersectPoint(p1, p2, p3, p4)
        if p is not None:
            # Build the bounding rectangle of each segment; a candidate point
            # must lie inside both for the *segments* (not just the infinite
            # lines) to intersect.
            width = p2[0] - p1[0]
            height = p2[1] - p1[1]
            r1 = pygame.Rect(p1, (width , height))
            r1.normalize()
            width = p4[0] - p3[0]
            height = p4[1] - p3[1]
            r2 = pygame.Rect(p3, (width, height))
            r2.normalize()
            # Ensure both rects have a width and height of at least 'tolerance' else the
            # collidepoint check of the Rect class will fail as it doesn't include the bottom
            # and right hand side 'pixels' of the rectangle
            tolerance = 1
            if r1.width < tolerance:
                r1.width = tolerance
            if r1.height < tolerance:
                r1.height = tolerance
            if r2.width < tolerance:
                r2.width = tolerance
            if r2.height < tolerance:
                r2.height = tolerance
            for point in p:
                try:
                    res1 = r1.collidepoint(point)
                    res2 = r2.collidepoint(point)
                    if res1 and res2:
                        point = [int(pp) for pp in point]
                        return point
                except:
                    # sometimes the value in a point are too large for PyGame's Rect class
                    str = "point was invalid ", point
                    print(str)
            # This is the case where the infinately long lines crossed but
            # the line segments didn't
            return None
        else:
            return None
    @staticmethod
    def surface_points_below_lander(lander, surface):
        """Return the slice of surface.polygon_points spanning the lander's
        horizontal extent (including one point just outside each side)."""
        lander_leftmost_point = lander.rect.bottomleft[0]
        lander_rightmost_point = lander.rect.bottomright[0]
        points_below_lander = []
        leftmost_point_found = False
        rightmost_point_found = False
        for i in range(len(surface.polygon_points)-1):
            if (not leftmost_point_found):
                # Start collecting at the last point left of the lander.
                p = surface.polygon_points[i]
                p1 = surface.polygon_points[i+1]
                if (p[0] <= lander_leftmost_point and p1[0] > lander_leftmost_point):
                    points_below_lander.append(p)
                    leftmost_point_found = True
            elif (not rightmost_point_found):
                p = surface.polygon_points[i]
                if (p[0] < lander_rightmost_point):
                    points_below_lander.append(p)
                else:
                    # Include the first point past the lander's right edge.
                    points_below_lander.append(p)
                    rightmost_point_found = True
        return points_below_lander
    @staticmethod
    def check_gameobject_window_collision(gameobject, screen_dimensions):
        """Return True when the game object has fully left the window on the
        left, right or top side (bottom is handled by surface collision)."""
        gameobject_leftmost_point = gameobject.rect.topleft[0]
        gameobject_rightmost_point = gameobject.rect.topright[0]
        gameobject_bottommost_point = gameobject.rect.bottomleft[1]
        # Check left side of the window
        if (gameobject_rightmost_point < 0):
            return True
        # Check right side of the window
        elif (gameobject_leftmost_point > screen_dimensions[0]):
            return True
        # Check top side of the window
        # there is no need to check bottom side since there will be collision with surface
        elif (gameobject_bottommost_point < 0):
            return True
        else:
            return False
1692912 | # Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015 SignalFx, Inc.
#
# Docker container orchestration utility.
from __future__ import print_function
import collections
import json
import time
from docker import auth
import os
try:
import urlparse
except ImportError:
# Try for Python3
from urllib import parse as urlparse
from .. import audit
from .. import exceptions
from ..termoutput import green, blue, red, time_ago
# Fixed-width column for the container id/tag in task output lines.
CONTAINER_STATUS_FMT = '{:<25s} '
# Fixed-width column for the short task result (e.g. 'done', 'removed').
TASK_RESULT_FMT = '{:<10s}'
class Task:
    """Base class for tasks acting on containers."""
    def __init__(self, action, o, container):
        """Initialize the base task parameters.
        Args:
            o (termoutput.OutputFormatter): the output formatter used for task
            output.
            container (entities.Container): the container the task operates on.
        """
        self.action = action
        self.o = o
        self.container = container
    def _wait_for_status(self, cond, retries=10):
        """Wait for the container's status to comply to the given condition."""
        # Poll every 0.5s, up to (retries + 1) inspections in total.
        while retries >= 0:
            status = self.container.status(refresh=True)
            if cond(status):
                return True
            retries -= 1
            if retries >= 0:
                time.sleep(0.5)
        return False
    def _check_for_state(self, state, cond):
        """Wait for the container to reach the given lifecycle state by executing
        the corresponding, configured lifecycle checks, taking into account the
        container state (through _wait_for_status) while the checks wait for
        the target status to be reached.
        Args:
            state (string): the target lifecycle state.
            cond (lambda): a lambda function that takes in the container's
                status (from inspection) and returns True if it conforms to the
                target desired lifecycle state.
        """
        checks = self.container.start_lifecycle_checks(state)
        if not checks:
            # No lifecycle checks configured: fall back to status polling.
            return self._wait_for_status(cond)
        # Wait for all checks to complete
        while not checks.ready():
            checks.wait(1)
            # Bail out early if the container left the expected state.
            if not self._wait_for_status(cond, retries=1):
                return False
        # Check results
        for check in checks.get():
            if not check:
                return False
        return True
    def run(self, auditor=None):
        """Execute the task (_run), reporting start/success/error to the
        optional auditor; re-raises any exception from _run."""
        if auditor:
            auditor.action(action=self.action, level=audit.DEBUG,
                           what=self.container)
        try:
            self._run()
            if auditor:
                auditor.success(action=self.action, level=audit.DEBUG,
                                what=self.container)
        except Exception as e:
            if auditor:
                auditor.error(action=self.action, what=self.container,
                              message=e)
            # Re-raise with the original traceback preserved.
            exceptions.raise_with_tb()
    def _run(self):
        """Task body; subclasses must implement."""
        raise NotImplementedError
class StatusTask(Task):
    """Task that reports whether a container is running or down."""
    def __init__(self, o, container):
        Task.__init__(self, 'status', o, container)
    def _run(self):
        out = self.o
        target = self.container
        out.reset()
        out.pending('checking...')
        try:
            if target.is_running():
                out.commit(green(CONTAINER_STATUS_FMT.format(
                    target.shortid_and_tag)))
                out.commit(green('running{}'.format(
                    time_ago(target.started_at))))
            else:
                out.commit(CONTAINER_STATUS_FMT.format(
                    target.shortid_and_tag))
                out.commit(red('down{}'.format(
                    time_ago(target.finished_at))))
        except Exception:
            # Any failure talking to the ship is reported as a down host.
            out.commit(CONTAINER_STATUS_FMT.format('-'))
            out.commit(red(TASK_RESULT_FMT.format('host down')))
            return
class StartTask(Task):
    """Start a container, refreshing the image if requested.
    If reuse is True, the container will not be removed and re-created
    if it exists."""
    def __init__(self, o, container, registries={}, refresh=False,
                 reuse=False):
        Task.__init__(self, 'start', o, container)
        self._registries = registries
        self._refresh = refresh
        self._reuse = reuse
    def _run(self):
        """Drive the container start and report the outcome; raises
        ContainerOrchestrationException when the service fails to start."""
        self.o.reset()
        error = None
        try:
            # TODO: None is used to indicate that no action was performed
            # because the container and its application were already
            # running. This makes the following code not very nice and this
            # could be improved.
            result = self._create_and_start_container()
            if result is None:
                self.o.commit(blue('up{}'.format(
                    time_ago(self.container.started_at))))
            elif result:
                self.o.commit(green('started'))
            else:
                self.o.commit(red('service did not start!'))
            if result is False:
                # Attach the container logs to the error to help diagnosis.
                error = [
                    ('Halting start sequence because {} failed to start!'
                     .format(self.container)),
                    self.container.ship.backend.logs(self.container.id)]
                raise exceptions.ContainerOrchestrationException(
                    self.container, '\n'.join(error).strip())
        except Exception:
            self.o.commit(red('failed to start container!'))
            raise
    def _create_and_start_container(self):
        """Start the container.
        If the container and its application are already running, no action is
        performed and the function returns None to indicate that. Otherwise, a
        new container must be created and started. To achieve this, any
        existing container of the same name is first removed. Then, if
        necessary or if requested, the container image is pulled from its
        registry. Finally, the container is created and started, configured as
        necessary. We then wait for the application to start and return True or
        False depending on whether the start was successful."""
        self.o.pending('checking service...')
        if self.container.is_running():
            self.o.commit(blue(CONTAINER_STATUS_FMT.format(
                self.container.shortid_and_tag)))
            # We use None as a special marker showing the container and the
            # application were already running.
            return None
        # Otherwise we need to start it.
        if (not self._reuse) or (not self.container.status()):
            CleanTask(self.o, self.container, standalone=False).run()
        # Check if the image is available, or if we need to pull it down.
        # BUGFIX: the previous code tested `not filter(...)`, which under
        # Python 3 is always False (filter() returns a lazy, always-truthy
        # iterator), so a missing image was never pulled. Use any() instead.
        image = self.container.get_image_details()
        image_present = any(
            self.container.image in (i['RepoTags'] or [])
            for i in self.container.ship.backend.images(image['repository']))
        if self._refresh or not image_present:
            PullTask(self.o, self.container, self._registries,
                     standalone=False).run()
        # Create and start the container.
        ports = self.container.ports \
            and list(map(lambda p: tuple(p['exposed'].split('/')),
                         self.container.ports.values())) \
            or None
        self.o.pending('creating container from {}...'.format(
            self.container.short_image))
        self.container.ship.backend.create_container(
            image=self.container.image,
            name=self.container.name,
            hostname=self.container.hostname,
            environment=self.container.env,
            volumes=list(self.container.get_volumes()),
            cpu_shares=self.container.cpu_shares,
            host_config=self.container.host_config,
            ports=ports,
            detach=True,
            working_dir=self.container.workdir,
            command=self.container.command)
        self.o.pending('waiting for container...')
        if not self._wait_for_status(lambda x: x):
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Container status could not be obtained after creation!')
        self.o.commit(green(CONTAINER_STATUS_FMT.format(
            self.container.shortid_and_tag)))
        # Map exposed port -> list of (external interface, external port).
        # NOTE(review): this mapping is built but not used afterwards —
        # presumably a leftover from an older start() API; confirm before
        # removing.
        ports = collections.defaultdict(list) if self.container.ports else None
        if ports is not None:
            for port in self.container.ports.values():
                ports[port['exposed']].append(
                    (port['external'][0], port['external'][1].split('/')[0]))
        self.o.pending('starting container {}...'
                       .format(self.container.id[:7]))
        self.container.ship.backend.start(
            self.container.id)
        # Waiting one second and checking container state again to make sure
        # initialization didn't fail.
        self.o.pending('waiting for initialization...')
        def check_running(x):
            return x and x['State']['Running']
        if not self._wait_for_status(check_running):
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Container status could not be obtained after start!')
        # Wait up for the container's application to come online.
        self.o.pending('waiting for service...')
        return self._check_for_state('running', check_running)
class StopTask(Task):
    """Stop a container."""
    def __init__(self, o, container):
        Task.__init__(self, 'stop', o, container)
    def _run(self):
        """Stop the container if it is running; stop failures are reported
        but not raised."""
        self.o.reset()
        self.o.pending('checking container...')
        try:
            status = self.container.status(refresh=True)
            if not status or not status['State']['Running']:
                # Nothing to do: container absent or already stopped.
                self.o.commit(CONTAINER_STATUS_FMT.format(
                    self.container.shortid_and_tag))
                self.o.commit(blue(TASK_RESULT_FMT.format('down')))
                return
        except:
            # Could not reach the ship at all.
            self.o.commit(CONTAINER_STATUS_FMT.format('-'))
            self.o.commit(red(TASK_RESULT_FMT.format('host down')))
            return
        self.o.commit(green(CONTAINER_STATUS_FMT.format(
            self.container.shortid_and_tag)))
        try:
            self.o.pending('stopping service...')
            self.container.ship.backend.stop(
                self.container.id, timeout=self.container.stop_timeout)
            # Run the configured 'stopped' lifecycle checks; a missing status
            # (container removed) also counts as stopped.
            if not self._check_for_state('stopped',
                                         lambda x: not x or
                                         (x and not x['State']['Running'])):
                raise Exception('failed stopped lifecycle checks')
            self.o.commit(green(TASK_RESULT_FMT.format('stopped')))
        except Exception as e:
            # Stop failures are non-fatal, usually it's just the container
            # taking more time to stop than the timeout allows.
            self.o.commit(red('failed: {}'.format(e)))
class RestartTask(Task):
    """Task that restarts a container."""
    def __init__(self, o, container, registries={}, refresh=False,
                 step_delay=0, stop_start_delay=0, reuse=False,
                 only_if_changed=False):
        Task.__init__(self, 'restart', o, container)
        self._registries = registries
        self._refresh = refresh
        # Delay before acting on this container (staggers rolling restarts).
        self._step_delay = step_delay
        # Delay between the stop and the subsequent start.
        self._stop_start_delay = stop_start_delay
        self._reuse = reuse
        # When True, skip the restart if the running container already uses
        # the current image.
        self._only_if_changed = only_if_changed
    def _run(self):
        """Optionally pull the image, then stop and start the container,
        honoring the configured delays and the only-if-changed shortcut."""
        self.o.reset()
        if self._refresh:
            PullTask(self.o, self.container, self._registries,
                     standalone=False).run()
        if self._only_if_changed:
            if self.container.is_running():
                self.o.pending('checking image...')
                images = self.container.ship.get_image_ids()
                # Compare the current image id with the one the running
                # container was started from; skip if identical.
                if images.get(self.container.image) == \
                        self.container.status()['Image']:
                    self.o.commit(CONTAINER_STATUS_FMT.format(
                        self.container.shortid_and_tag))
                    self.o.commit(blue('up to date'))
                    return
        if self._step_delay:
            self.o.pending('waiting {}s before restart...'
                           .format(self._step_delay))
            time.sleep(self._step_delay)
        StopTask(self.o, self.container).run()
        self.o.reset()
        if self._stop_start_delay:
            self.o.pending('waiting {}s before starting...'
                           .format(self._stop_start_delay))
            time.sleep(self._stop_start_delay)
        StartTask(self.o, self.container, self._registries,
                  False, self._reuse).run()
class LoginTask(Task):
    """Log in with the registry hosting the image a container is based on.
    Extracts the registry name from the image needed for the container, and if
    authentication data is provided for that registry, login to it so a
    subsequent pull operation can be performed.
    """
    def __init__(self, o, container, registries={}):
        Task.__init__(self, 'login', o, container)
        self._registries = registries
    def _run(self):
        registry = LoginTask.registry_for_container(self.container,
                                                    self._registries)
        if not registry:
            return
        # FIX: the username/password resolution below was duplicated inline
        # (with a mislabeled variable holding the password), and an
        # empty-string credential could slip past the final presence checks.
        # Both credentials now go through one helper that either fills the
        # value from the Docker config file or raises.
        self._ensure_credential(registry, 'username')
        self._ensure_credential(registry, 'password')
        self.o.reset()
        self.o.pending('logging in to {}...'.format(registry['registry']))
        try:
            self.container.ship.backend.login(**registry)
        except Exception as e:
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Login to {} as {} failed: {}'
                .format(registry['registry'], registry['username'], e))
    @staticmethod
    def _ensure_credential(registry, key):
        """Ensure registry[key] is a non-empty credential.
        When missing or empty, attempt to resolve it from the Docker client
        configuration file; raise if it still cannot be determined."""
        if registry.get(key):
            return
        dockercfg_path = os.path.expanduser(
            os.path.join('/root/.docker', 'config.json'))
        if dockercfg_path and os.path.exists(dockercfg_path):
            auth_configs = auth.load_config(dockercfg_path)
            authcfg = auth.resolve_authconfig(auth_configs,
                                              registry['registry'])
            value = authcfg.get(key, None)
            if value:
                registry[key] = value
                return
        raise Exception(
            "Missing login ({}) credentials for registry {}"
            .format(key, registry['registry']))
    @staticmethod
    def registry_for_container(container, registries={}):
        """Return the registry configuration dict matching the container's
        image repository, or None when the image has no registry prefix or
        no configuration matches."""
        image = container.get_image_details()
        if image['repository'].find('/') <= 0:
            return None
        registry, repo_name = image['repository'].split('/', 1)
        if registry not in registries:
            # If the registry defined name doesn't match, try to find a
            # matching registry by registry FQDN.
            for name, info in registries.items():
                fqdn = urlparse.urlparse(info['registry']).netloc
                if registry == fqdn or registry == fqdn.split(':')[0]:
                    registry = name
                    break
        return registries.get(registry)
class PullTask(Task):
    """Pull (download) the image a container is based on."""
    def __init__(self, o, container, registries={}, standalone=True):
        Task.__init__(self, 'pull', o, container)
        self._registries = registries
        self._standalone = standalone
        # Per-layer download percentage, keyed by layer id.
        self._progress = {}
    def _run(self):
        """Log in if needed, then stream the image pull, showing aggregate
        download progress."""
        self.o.reset()
        # First, attempt to login if we can/need to.
        LoginTask(self.o, self.container, self._registries).run()
        self.o.pending('pulling image {}...'
                       .format(self.container.short_image))
        registry = LoginTask.registry_for_container(self.container,
                                                    self._registries)
        # Plain-http registries must be pulled with insecure_registry.
        insecure = (urlparse.urlparse(registry['registry']).scheme == 'http'
                    if registry else False)
        image = self.container.get_image_details()
        # Pull the image (this may be a no-op, but that's fine).
        for dlstatus in self.container.ship.backend.pull(
                stream=True, insecure_registry=insecure, **image):
            if dlstatus:
                percentage = self._update_pull_progress(dlstatus)
                self.o.pending('... {:.1f}%'.format(percentage))
        if self._standalone:
            self.o.commit(CONTAINER_STATUS_FMT.format(''))
            self.o.commit(green(TASK_RESULT_FMT.format('done')))
    def _update_pull_progress(self, last):
        """Update an image pull progress map with latest download progress
        information for one of the image layers, and return the average of the
        download progress of all layers as an indication of the overall
        progress of the pull."""
        last = json.loads(last.decode('utf-8'))
        if 'error' in last:
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Pull of image {} failed: {}'.format(
                    self.container.image,
                    last['errorDetail']['message'].encode('utf-8')))
        try:
            self._progress[last['id']] = (
                100 if last['status'] == 'Download complete' else
                (100.0 * last['progressDetail']['current'] /
                 last['progressDetail']['total']))
        except (KeyError, TypeError, ZeroDivisionError):
            # FIX: was a bare `except: pass`. Status lines without an 'id' or
            # progress details (or with a zero/None total) simply don't update
            # the progress map; anything else now propagates.
            pass
        total = 0
        if len(self._progress):
            for downloaded in self._progress.values():
                total += downloaded
            total /= len(self._progress)
        return total
class CleanTask(Task):
    """Remove a container from Docker if it exists."""
    def __init__(self, o, container, standalone=True):
        Task.__init__(self, 'clean', o, container)
        self._standalone = standalone
    def _run(self):
        out = self.o
        target = self.container
        out.reset()
        status = target.status()
        if not status:
            # Nothing to remove; only report when run standalone.
            if self._standalone:
                out.commit(CONTAINER_STATUS_FMT.format('-'))
                out.commit(blue(TASK_RESULT_FMT.format('absent')))
            return
        if status['State']['Running']:
            # Never remove a running container.
            out.commit(CONTAINER_STATUS_FMT.format(target.shortid_and_tag))
            out.commit(red(TASK_RESULT_FMT.format('skipped')))
            return
        out.pending('removing container {}...'.format(target.shortid))
        # v=True also removes the container's anonymous volumes.
        target.ship.backend.remove_container(target.id, v=True)
        if self._standalone:
            out.commit(CONTAINER_STATUS_FMT.format(target.shortid))
            out.commit(green(TASK_RESULT_FMT.format('removed')))
| StarcoderdataPython |
118520 | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
Test suite to automatically test emulator REST API endpoints.
"""
import os
import unittest
import requests
import simplejson as json
import time
from emuvim.test.api_base_openstack import ApiBaseOpenStack
class testRestApi(ApiBaseOpenStack):
"""
Tests to check the REST API endpoints of the emulator.
"""
    def setUp(self):
        """Build the emulated topology and start the APIs and Mininet.

        Topology: 3 auto-linked switches, 2 datacenters, 2 hosts, with
        h0-dc0, h1-dc1 and dc0-dc1 links.
        """
        # create network
        self.createNet(nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, autolinkswitches=True)
        # setup links
        self.net.addLink(self.dc[0], self.h[0])
        self.net.addLink(self.h[1], self.dc[1])
        self.net.addLink(self.dc[0], self.dc[1])
        # start api
        self.startApi()
        # start Mininet network
        self.startNet()
    @unittest.skip("temporarily disabled")
    def testChainingDummy(self):
        """End-to-end check of the chaining and load-balancing REST API.

        Talks to the emulator's SDN-chaining endpoint on port 4000:
        deploys a stack through the Heat-like API, then creates, lists
        and deletes chains (both the legacy ``vnf/intf`` form and the
        fully qualified ``dc/stack/vnf/port`` form), verifies the
        error codes returned for unknown interfaces (501) and unknown
        DCs/VNFs (500), and finally exercises the load-balancer
        endpoints including floating LBs and LBs with a custom path.

        NOTE(review): currently skipped ("temporarily disabled");
        requires the live topology created in setUp().
        """
        print('->>>>>>> test Chaining Class->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        print(" ")
        headers = {'Content-type': 'application/json'}
        # deploy the chaining test stack via the Heat-like API first
        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_chaining.json")).read()
        url = "http://0.0.0.0:8004/v1/tenantabc123/stacks"
        requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)), headers=headers)
        print('->>>>>>> test Chaining Versions ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/"
        listapiversionstackresponse = requests.get(url, headers=headers)
        self.assertEqual(listapiversionstackresponse.status_code, 200)
        self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v1")
        print(" ")
        # both the chain list and the LB list must start out empty
        print('->>>>>>> test Chaining List ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"], [])
        print(" ")
        print('->>>>>>> test Loadbalancing List ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/list"
        lblistresponse = requests.get(url, headers=headers)
        self.assertEqual(lblistresponse.status_code, 200)
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"], [])
        print(" ")
        # create a chain in legacy "src_vnf/src_intf/dst_vnf/dst_intf" form;
        # a successful PUT returns a non-negative flow cookie
        testchain = "dc0_s1_firewall1/fire-out-0/dc0_s1_iperf1/iper-in-0"
        print('->>>>>>> test Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" %(testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 200)
        self.assertGreaterEqual(json.loads(chainvnfresponse.content)["cookie"], 0)
        print(" ")
        print('->>>>>>> test Chaining List ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["dst_vnf"], "dc0_s1_firewall1")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["dst_intf"], "fire-out-0")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["src_vnf"], "dc0_s1_iperf1")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["src_intf"], "iper-in-0")
        print(" ")
        print('->>>>>>> test Chain VNF Delete Chain ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        deletechainvnfresponse = requests.delete(url)
        self.assertEqual(deletechainvnfresponse.status_code, 200)
        self.assertEqual(deletechainvnfresponse.content, "true")
        print(" ")
        print('->>>>>>> test Chaining List If Empty Again ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"], [])
        print(" ")
        # create a chain via POST with an explicit switch path payload
        testchain = "dc0_s1_firewall1/fire-out-0/dc0_s1_iperf1/iper-in-0"
        print('->>>>>>> test Stack Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        stackchainvnfresponse = requests.post(url, data=json.dumps(json.loads('{"path":["dc1.s1", "s1","s2","s3","s1","dc1.s1"]}')), headers=headers)
        self.assertEqual(stackchainvnfresponse.status_code, 200)
        print (stackchainvnfresponse.content)
        self.assertGreaterEqual(json.loads(stackchainvnfresponse.content)["cookie"], 0)
        print(" ")
        print('->>>>>>> test Stack Chaining List ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        print (chainlistresponse.content)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["dst_vnf"], "dc0_s1_firewall1")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["dst_intf"], "fire-out-0")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["src_vnf"], "dc0_s1_iperf1")
        self.assertEqual(json.loads(chainlistresponse.content)["chains"][0]["src_intf"], "iper-in-0")
        # NOTE(review): assertItemsEqual exists only in Python 2 unittest
        # (renamed assertCountEqual in Python 3) -- confirm target runtime
        self.assertItemsEqual(json.loads(chainlistresponse.content)["chains"][0]["path"],['dc1.s1', 's1', 's2', 's3', 's1', 'dc1.s1'])
        print(" ")
        print('->>>>>>> test Chain VNF Delete Chain ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        deletechainvnfresponse = requests.delete(url)
        self.assertEqual(deletechainvnfresponse.status_code, 200)
        self.assertEqual(deletechainvnfresponse.content, "true")
        print(" ")
        print('->>>>>>> test Chaining List If Empty Again ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"], [])
        print(" ")
        # unknown interfaces must be rejected with 501 (PUT and DELETE)
        testchain = "dc0_s1_firewall1/non-existing-interface/dc0_s1_iperf1/iper-in-0"
        print('->>>>>>> test Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 501)
        print(" ")
        testchain = "dc0_s1_firewall1/fire-out-0/dc0_s1_iperf1/non-existing-interface"
        print('->>>>>>> test Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 501)
        print(" ")
        testchain = "dc0_s1_firewall1/non-existing-interface/dc0_s1_iperf1/iper-in-0"
        print('->>>>>>> test Chain VNF Delete Chain ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        deletechainvnfresponse = requests.delete(url)
        self.assertEqual(deletechainvnfresponse.status_code, 501)
        print(" ")
        testchain = "dc0_s1_firewall1/fire-out-0/dc0_s1_iperf1/non-existing-interface"
        print('->>>>>>> test Chain VNF Delete Chain ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        deletechainvnfresponse = requests.delete(url)
        self.assertEqual(deletechainvnfresponse.status_code, 501)
        print(" ")
        # fully qualified "dc/stack/vnf/port" form: unknown DC or VNF -> 500
        testchain = "non-existent-dc/s1/firewall1/firewall1:cp03:output/dc0/s1/iperf1/iperf1:cp02:input"
        print('->>>>>>> test Chain VNF Non-Existing DC ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 500)
        print(" ")
        testchain = "dc0/s1/firewall1/non-existent:cp03:output/dc0/s1/iperf1/iperf1:cp02:input"
        print('->>>>>>> test Chain VNF Non-Existing Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 500)
        print(" ")
        testchain = "dc0/s1/firewall1/firewall1:cp03:output/dc0/s1/iperf1/non-existent:cp02:input"
        print('->>>>>>> test Chain VNF Non-Existing Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        self.assertEqual(chainvnfresponse.status_code, 500)
        print(" ")
        # valid fully qualified chain: create, delete, verify list empty
        testchain = "dc0/s1/firewall1/firewall1:cp03:output/dc0/s1/iperf1/iperf1:cp02:input"
        print('->>>>>>> test Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        chainvnfresponse = requests.put(url)
        print (chainvnfresponse.content)
        self.assertEqual(chainvnfresponse.status_code, 200)
        self.assertGreaterEqual(json.loads(chainvnfresponse.content)["cookie"], 0)
        print(" ")
        print('->>>>>>> test Chain VNF Delete Chain ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        deletechainvnfresponse = requests.delete(url)
        self.assertEqual(deletechainvnfresponse.status_code, 200)
        self.assertEqual(deletechainvnfresponse.content, "true")
        print(" ")
        print('->>>>>>> test Chaining List If Empty Again ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/list"
        chainlistresponse = requests.get(url, headers=headers)
        self.assertEqual(chainlistresponse.status_code, 200)
        self.assertEqual(json.loads(chainlistresponse.content)["chains"], [])
        print(" ")
        # fully qualified chain with explicit path, then delete it again
        testchain = "dc0/s1/firewall1/firewall1:cp03:output/dc0/s1/iperf1/iperf1:cp02:input"
        print('->>>>>>> test Stack Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        stackchainvnfresponse = requests.post(url, data=json.dumps(
            json.loads('{"path":["dc1.s1", "s1","s2","s3","s1","dc1.s1"]}')), headers=headers)
        self.assertEqual(stackchainvnfresponse.status_code, 200)
        print (stackchainvnfresponse.content)
        self.assertGreaterEqual(json.loads(stackchainvnfresponse.content)["cookie"], 0)
        print(" ")
        testchain = "dc0/s1/firewall1/firewall1:cp03:output/dc0/s1/iperf1/iperf1:cp02:input"
        print('->>>>>>> test Stack Chain VNF Interfaces ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/chain/%s" % (testchain)
        stackchainvnfresponse = requests.delete(url, headers=headers)
        self.assertEqual(stackchainvnfresponse.status_code, 200)
        print(" ")
        # load balancer: create, list, delete
        print('->>>>>>> test Loadbalancing ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0/s1/firewall1/firewall1:cp03:output"
        lblistresponse = requests.post(url, data=json.dumps(
            {"dst_vnf_interfaces":[{"pop":"dc0","stack":"s1","server":"iperf1","port":"iperf1:cp02:input"}]})
            , headers=headers)
        print (lblistresponse.content)
        self.assertEqual(lblistresponse.status_code, 200)
        self.assertIn("dc0_s1_firewall1:fire-out-0", lblistresponse.content)
        print(" ")
        print('->>>>>>> test Loadbalancing List ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/list"
        lblistresponse = requests.get(url, headers=headers)
        self.assertEqual(lblistresponse.status_code, 200)
        print (lblistresponse.content )
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["paths"][0]["dst_vnf"], "dc0_s1_iperf1")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["paths"][0]["dst_intf"], "iper-in-0")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["src_vnf"], "dc0_s1_firewall1")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["src_intf"],"fire-out-0")
        print(" ")
        print('->>>>>>> test delete Loadbalancing ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0/s1/firewall1/firewall1:cp03:output"
        lbdeleteresponse = requests.delete(url, headers=headers)
        print (lbdeleteresponse.content)
        self.assertEqual(lbdeleteresponse.status_code, 200)
        print(" ")
        # floating LB: response carries a cookie and a floating IP;
        # the cookie is reused to delete the LB afterwards
        print('->>>>>>> testFloatingLoadbalancer ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0/floating/bla/blubb"
        lblistresponse = requests.post(url, data=json.dumps(
            {"dst_vnf_interfaces":[{"pop":"dc0","stack":"s1","server":"iperf1","port":"iperf1:cp02:input"}]})
            , headers=headers)
        print (lblistresponse.content)
        self.assertEqual(lblistresponse.status_code, 200)
        resp = json.loads(lblistresponse.content)
        self.assertIsNotNone(resp.get('cookie'))
        self.assertIsNotNone(resp.get('floating_ip'))
        cookie = resp.get('cookie')
        print(" ")
        print('->>>>>>> testDeleteFloatingLoadbalancer ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0/floating/%s/blubb" % cookie
        lblistresponse = requests.delete(url, headers=headers)
        print (lblistresponse.content)
        self.assertEqual(lblistresponse.status_code, 200)
        print(" ")
        # LB with an explicitly supplied custom switch path
        print('->>>>>>> testLoadbalancingCustomPath ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0_s1_firewall1/fire-out-0"
        lblistresponse = requests.post(url, data=json.dumps(
            {"dst_vnf_interfaces":{"dc0_s1_iperf1":"iper-in-0"},
             "path": {"dc0_s1_iperf1": {"iper-in-0": ["dc1.s1", "s1","s2","s3","s1","dc1.s1"]}}}), headers=headers)
        print (lblistresponse.content)
        self.assertEqual(lblistresponse.status_code, 200)
        self.assertIn("dc0_s1_firewall1:fire-out-0", lblistresponse.content)
        print(" ")
        print('->>>>>>> testLoadbalancingListCustomPath ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/list"
        lblistresponse = requests.get(url, headers=headers)
        self.assertEqual(lblistresponse.status_code, 200)
        print (lblistresponse.content )
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["paths"][0]["dst_vnf"], "dc0_s1_iperf1")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["paths"][0]["dst_intf"], "iper-in-0")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["paths"][0]["path"],
                         ["dc1.s1", "s1","s2","s3","s1","dc1.s1"] )
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["src_vnf"], "dc0_s1_firewall1")
        self.assertEqual(json.loads(lblistresponse.content)["loadbalancers"][0]["src_intf"],"fire-out-0")
        print(" ")
        print('->>>>>>> test Delete Loadbalancing ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/lb/dc0_s1_firewall1/fire-out-0"
        lblistresponse = requests.delete(url, headers=headers)
        self.assertEqual(lblistresponse.status_code, 200)
        print(" ")
        # finally the topology query endpoint must answer
        print('->>>>>>> test Query Topology ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:4000/v1/topo"
        topolistresponse = requests.get(url, headers=headers)
        print(topolistresponse.content)
        self.assertEqual(topolistresponse.status_code, 200)
        print(" ")
    def testNovaDummy(self):
        """End-to-end check of the Nova-like compute REST API.

        Talks to the emulator's Nova endpoint on port 18774 after
        deploying a stack via the Heat-like API: version listing and
        show, server list/delete (incl. 404 for unknown ids), flavor
        list/add/show, image list/show (incl. 404), server creation
        from the ubuntu image (incl. 409 on duplicate name), detailed
        server listing and per-server details.

        NOTE(review): requires the live topology created in setUp()
        and a local "ubuntu:trusty" docker image -- otherwise
        ubuntu_image_id stays unbound below.
        """
        print('->>>>>>> test Nova Dummy Class->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        print(" ")
        headers = {'Content-type': 'application/json'}
        # deploy the test stack via the Heat-like API first
        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
        url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
        requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)),
                      headers=headers)
        print('->>>>>>> test Nova List Versions ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/"
        listapiversionnovaresponse = requests.get(url, headers=headers)
        self.assertEqual(listapiversionnovaresponse.status_code, 200)
        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["id"], "v2.1")
        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["status"], "CURRENT")
        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["version"], "2.38")
        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["min_version"], "2.1")
        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["updated"], "2013-07-23T11:33:21Z")
        print(" ")
        print('->>>>>>> test Nova Version Show ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla"
        listapiversion21novaresponse = requests.get(url, headers=headers)
        self.assertEqual(listapiversion21novaresponse.status_code, 200)
        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["id"], "v2.1")
        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["status"], "CURRENT")
        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["version"], "2.38")
        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["min_version"], "2.1")
        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["updated"], "2013-07-23T11:33:21Z")
        print(" ")
        # server list, then delete the first server by its id
        print('->>>>>>> test Nova Version List Server APIs ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
        listserverapisnovaresponse = requests.get(url, headers=headers)
        self.assertEqual(listserverapisnovaresponse.status_code, 200)
        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
        print(" ")
        print('->>>>>>> test Nova Delete Server APIs ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
        deleteserverapisnovaresponse = requests.delete(url, headers=headers)
        self.assertEqual(deleteserverapisnovaresponse.status_code, 204)
        print(" ")
        print('->>>>>>> test Nova Delete Non-Existing Server APIs ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non-existing-ix"
        deleteserverapisnovaresponse = requests.delete(url, headers=headers)
        self.assertEqual(deleteserverapisnovaresponse.status_code, 404)
        print(" ")
        print('->>>>>>> testNovaVersionListServerAPIs_withPortInformation ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/andPorts"
        listserverapisnovaresponse = requests.get(url, headers=headers)
        self.assertEqual(listserverapisnovaresponse.status_code, 200)
        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
        print(" ")
        # flavor endpoints: list, add, detail list, add via detail, show by id
        print('->>>>>>> test Nova List Flavors ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
        listflavorsresponse = requests.get(url, headers=headers)
        self.assertEqual(listflavorsresponse.status_code, 200)
        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        print(" ")
        print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
        addflavorsresponse = requests.post(url,
                                           data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
                                           headers=headers)
        self.assertEqual(addflavorsresponse.status_code, 200)
        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
        print(" ")
        print('->>>>>>> test Nova List Flavors Detail ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
        listflavorsdetailresponse = requests.get(url, headers=headers)
        self.assertEqual(listflavorsdetailresponse.status_code, 200)
        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][1]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][2]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
        print(" ")
        print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
        addflavorsresponse = requests.post(url,
                                           data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
                                           headers=headers)
        self.assertEqual(addflavorsresponse.status_code, 200)
        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
        print(" ")
        print('->>>>>>> test Nova List Flavor By Id ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
        listflavorsbyidresponse = requests.get(url, headers=headers)
        self.assertEqual(listflavorsbyidresponse.status_code, 200)
        self.assertEqual(json.loads(listflavorsbyidresponse.content)["flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
        print(" ")
        # image endpoints: list, detail, show by id, 404 on unknown id
        print('->>>>>>> test Nova List Images ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/images"
        listimagesresponse = requests.get(url, headers=headers)
        self.assertEqual(listimagesresponse.status_code, 200)
        print(listimagesresponse.content)
        # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
        #self.assertIn(json.loads(listimagesresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        #self.assertIn(json.loads(listimagesresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        #self.assertIn(json.loads(listimagesresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        print(" ")
        print('->>>>>>> test Nova List Images Details ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/images/detail"
        listimagesdetailsresponse = requests.get(url, headers=headers)
        self.assertEqual(listimagesdetailsresponse.status_code, 200)
        # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
        self.assertEqual(json.loads(listimagesdetailsresponse.content)["images"][0]["metadata"]["architecture"],"x86_64")
        print(" ")
        print('->>>>>>> test Nova List Image By Id ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
        listimagebyidresponse = requests.get(url, headers=headers)
        self.assertEqual(listimagebyidresponse.status_code, 200)
        self.assertEqual(json.loads(listimagebyidresponse.content)["image"]["id"],json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
        print(" ")
        print('->>>>>>> test Nova List Image By Non-Existend Id ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/images/non_existing_id"
        listimagebynonexistingidresponse = requests.get(url, headers=headers)
        self.assertEqual(listimagebynonexistingidresponse.status_code, 404)
        print(" ")
        # find the id of the local ubuntu:trusty image; used to boot a server
        #find ubuntu id
        for image in json.loads(listimagesresponse.content)["images"]:
            if image["name"] == "ubuntu:trusty":
                ubuntu_image_id = image["id"]
        print('->>>>>>> test Nova Create Server Instance ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
        createserverinstance = requests.post(url, data=data, headers=headers)
        self.assertEqual(createserverinstance.status_code, 200)
        self.assertEqual(json.loads(createserverinstance.content)["server"]["image"]["id"], ubuntu_image_id)
        print(" ")
        # a second server with the same name must be rejected with 409
        print('->>>>>>> test Nova Create Server Instance With Already Existing Name ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
        createserverinstance = requests.post(url, data=data, headers=headers)
        self.assertEqual(createserverinstance.status_code, 409)
        print(" ")
        print('->>>>>>> test Nova Version List Server APIs Detailed ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/detail"
        listserverapisdetailedresponse = requests.get(url, headers=headers)
        self.assertEqual(listserverapisdetailedresponse.status_code, 200)
        self.assertEqual(json.loads(listserverapisdetailedresponse.content)["servers"][0]["status"], "ACTIVE")
        print(" ")
        print('->>>>>>> test Nova Show Server Details ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
        listserverdetailsresponse = requests.get(url, headers=headers)
        self.assertEqual(listserverdetailsresponse.status_code, 200)
        self.assertEqual(json.loads(listserverdetailsresponse.content)["server"]["flavor"]["links"][0]["rel"], "bookmark")
        print(" ")
        print('->>>>>>> test Nova Show Non-Existing Server Details ->>>>>>>>>>>>>>>')
        print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non_existing_server_id"
        listnonexistingserverdetailsresponse = requests.get(url, headers=headers)
        self.assertEqual(listnonexistingserverdetailsresponse.status_code, 404)
        print(" ")
def testNeutronDummy(self):
    """Exercise the dummy Neutron (networking) API end to end.

    Hits a locally running emulated Neutron endpoint on port 19696 and
    covers: version listing, network/subnet/port listing (with name/id
    filters), show, create, update and delete — including the negative
    paths for non-existing and duplicate resources.  A Heat stack is
    created first so that networks, subnets and ports exist to query.
    The steps are order-dependent: later sections reuse ids captured
    from earlier responses.
    """
    print('->>>>>>> test Neutron Dummy Class->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(" ")
    headers = {'Content-type': 'application/json'}
    # Seed the dummy backend: create a Heat stack from a JSON fixture that
    # lives next to this test module, so Neutron has resources to serve.
    test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)), headers=headers)
    # test_heatapi_keystone_get_token = open("test_heatapi_keystone_get_token.json").read()
    print('->>>>>>> test Neutron List Versions ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/"
    listapiversionstackresponse = requests.get(url, headers=headers)
    self.assertEqual(listapiversionstackresponse.status_code, 200)
    self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v2.0")
    print(" ")
    print('->>>>>>> test Neutron Show API v2.0 ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0"
    listapiversionv20response = requests.get(url, headers=headers)
    self.assertEqual(listapiversionv20response.status_code, 200)
    self.assertEqual(json.loads(listapiversionv20response.content)["resources"][0]["name"], "subnet")
    self.assertEqual(json.loads(listapiversionv20response.content)["resources"][1]["name"], "network")
    self.assertEqual(json.loads(listapiversionv20response.content)["resources"][2]["name"], "ports")
    print(" ")
    print('->>>>>>> test Neutron List Networks ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks"
    listnetworksesponse1 = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse1.status_code, 200)
    self.assertEqual(json.loads(listnetworksesponse1.content)["networks"][0]["status"], "ACTIVE")
    # Capture ids/name of the first two networks for the filter tests below.
    listNetworksId = json.loads(listnetworksesponse1.content)["networks"][0]["id"]
    listNetworksName = json.loads(listnetworksesponse1.content)["networks"][0]["name"]
    listNetworksId2 = json.loads(listnetworksesponse1.content)["networks"][1]["id"]
    print(" ")
    print('->>>>>>> test Neutron List Non-Existing Networks ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks?name=non_existent_network_name"
    listnetworksesponse2 = requests.get(url,headers=headers)
    self.assertEqual(listnetworksesponse2.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron List Networks By Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName #tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
    listnetworksesponse3 = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse3.status_code, 200)
    self.assertEqual(json.loads(listnetworksesponse3.content)["networks"][0]["name"], listNetworksName)
    print(" ")
    print('->>>>>>> test Neutron List Networks By Id ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
    listnetworksesponse4 = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse4.status_code, 200)
    self.assertEqual(json.loads(listnetworksesponse4.content)["networks"][0]["id"], listNetworksId)
    print(" ")
    print('->>>>>>> test Neutron List Networks By Multiple Ids ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id="+ listNetworksId2 # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
    listnetworksesponse5 = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse5.status_code, 200)
    self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][0]["id"], listNetworksId)
    self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][1]["id"], listNetworksId2)
    print(" ")
    print('->>>>>>> test Neutron Show Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks/"+listNetworksId
    shownetworksesponse = requests.get(url, headers=headers)
    self.assertEqual(shownetworksesponse.status_code, 200)
    self.assertEqual(json.loads(shownetworksesponse.content)["network"]["status"], "ACTIVE")
    print(" ")
    print('->>>>>>> test Neutron Show Network Non-ExistendNetwork ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks/non_existent_network_id"
    shownetworksesponse2 = requests.get(url, headers=headers)
    self.assertEqual(shownetworksesponse2.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Create Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks"
    createnetworkresponse = requests.post(url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
    self.assertEqual(createnetworkresponse.status_code, 201)
    self.assertEqual(json.loads(createnetworkresponse.content)["network"]["status"], "ACTIVE")
    print(" ")
    print('->>>>>>> test Neutron Create Network With Existing Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks"
    # Duplicate name must be rejected with 400.
    createnetworkresponsefailure = requests.post(url,data='{"network": {"name": "sample_network","admin_state_up": true}}',headers=headers)
    self.assertEqual(createnetworkresponsefailure.status_code, 400)
    print(" ")
    print('->>>>>>> test Neutron Update Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
    updatenetworkresponse = requests.put(url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}' , headers=headers)
    self.assertEqual(updatenetworkresponse.status_code, 200)
    self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["name"], "sample_network_new_name")
    self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["tenant_id"], "abcd123")
    print(" ")
    print('->>>>>>> test Neutron Update Non-Existing Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks/non-existing-name123"
    updatenetworkresponse = requests.put(url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
    self.assertEqual(updatenetworkresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron List Subnets ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets"
    listsubnetsresponse = requests.get(url, headers=headers)
    # Capture name/ids of the first two subnets for the filter tests below.
    listSubnetName = json.loads(listsubnetsresponse.content)["subnets"][0]["name"]
    listSubnetId = json.loads(listsubnetsresponse.content)["subnets"][0]["id"]
    listSubnetId2 = json.loads(listsubnetsresponse.content)["subnets"][1]["id"]
    self.assertEqual(listsubnetsresponse.status_code, 200)
    self.assertNotIn('None', listSubnetName)
    print(" ")
    print('->>>>>>> test Neutron List Subnets By Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets?name="+listSubnetName
    listsubnetByNameresponse = requests.get(url, headers=headers)
    self.assertEqual(listsubnetByNameresponse.status_code, 200)
    self.assertNotIn('None', json.loads(listsubnetByNameresponse.content)["subnets"][0]["name"])
    print(" ")
    print('->>>>>>> test Neutron List Subnets By Id ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId
    listsubnetsbyidresponse = requests.get(url, headers=headers)
    self.assertEqual(listsubnetsbyidresponse.status_code, 200)
    self.assertNotIn("None", json.loads(listsubnetsbyidresponse.content)["subnets"][0]["name"])
    print(" ")
    print('->>>>>>> test Neutron List Subnets By Multiple Id ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId +"&id="+listSubnetId2
    listsubnetsbymultipleidsresponse = requests.get(url, headers=headers)
    self.assertEqual(listsubnetsbymultipleidsresponse.status_code, 200)
    self.assertNotIn("None", json.loads(listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
    print(" ")
    print('->>>>>>> test Neutron Show Subnet->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
    showsubnetsresponse = requests.get(url, headers=headers)
    self.assertEqual(showsubnetsresponse.status_code, 200)
    self.assertNotIn("None", json.loads(showsubnetsresponse.content)["subnet"]["name"])
    print(" ")
    print('->>>>>>> test Neutron Show Non-Existing Subnet->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-id123"
    showsubnetsresponse = requests.get(url, headers=headers)
    self.assertEqual(showsubnetsresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Create Subnet ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets"
    createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
    createsubnetresponse = requests.post(url, data=createsubnetdata, headers=headers)
    self.assertEqual(createsubnetresponse.status_code, 201)
    self.assertEqual(json.loads(createsubnetresponse.content)["subnet"]["name"], "new_subnet")
    print(" ")
    print('->>>>>>> test Neutron Create Second Subnet ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets"
    # A second subnet on the same network must be rejected with 409.
    createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
    createsubnetfailureresponse = requests.post(url, data=createsubnetdata, headers=headers)
    self.assertEqual(createsubnetfailureresponse.status_code, 409)
    print(" ")
    print('->>>>>>> test Neutron Update Subnet ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(createsubnetresponse.content)["subnet"]["id"])
    updatesubnetdata = '{"subnet": {"name": "new_subnet_new_name", "network_id":"some_id", "tenant_id":"new_tenant_id", "allocation_pools":"change_me", "gateway_ip":"192.168.1.120", "ip_version":4, "cidr":"10.0.0.1/24", "id":"some_new_id", "enable_dhcp":true} }'
    updatesubnetresponse = requests.put(url, data=updatesubnetdata, headers=headers)
    self.assertEqual(updatesubnetresponse.status_code, 200)
    self.assertEqual(json.loads(updatesubnetresponse.content)["subnet"]["name"], "new_subnet_new_name")
    print(" ")
    print('->>>>>>> test Neutron Update Non-Existing Subnet ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-subnet-12345"
    updatenonexistingsubnetdata = '{"subnet": {"name": "new_subnet_new_name"} }'
    updatenonexistingsubnetresponse = requests.put(url, data=updatenonexistingsubnetdata, headers=headers)
    self.assertEqual(updatenonexistingsubnetresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron List Ports ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    listportsesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsesponse.status_code, 200)
    self.assertEqual(json.loads(listportsesponse.content)["ports"][0]["status"], "ACTIVE")
    # Capture name/ids of the first two ports for the filter tests below.
    listPortsName = json.loads(listportsesponse.content)["ports"][0]["name"]
    listPortsId1 = json.loads(listportsesponse.content)["ports"][0]["id"]
    listPortsId2 = json.loads(listportsesponse.content)["ports"][1]["id"]
    print(" ")
    print('->>>>>>> test Neutron List Ports By Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports?name=" + listPortsName
    listportsbynameesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsbynameesponse.status_code, 200)
    self.assertEqual(json.loads(listportsbynameesponse.content)["ports"][0]["name"], listPortsName)
    print(" ")
    print('->>>>>>> test Neutron List Ports By Id ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1
    listportsbyidesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsbyidesponse.status_code, 200)
    self.assertEqual(json.loads(listportsbyidesponse.content)["ports"][0]["id"], listPortsId1)
    print(" ")
    print('->>>>>>> test Neutron List Ports By Multiple Ids ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1 +"&id="+listPortsId2
    listportsbymultipleidsesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsbymultipleidsesponse.status_code, 200)
    self.assertEqual(json.loads(listportsbymultipleidsesponse.content)["ports"][0]["id"], listPortsId1)
    print(" ")
    print('->>>>>>> test Neutron List Non-Existing Ports ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports?id=non-existing-port-id"
    listportsbynonexistingidsesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsbynonexistingidsesponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Show Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(listportsesponse.content)["ports"][0]["id"])
    showportresponse = requests.get(url, headers=headers)
    self.assertEqual(showportresponse.status_code, 200)
    self.assertEqual(json.loads(showportresponse.content)["port"]["status"], "ACTIVE")
    print(" ")
    print('->>>>>>> test Neutron Show Non-Existing Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports/non-existing-portid123"
    shownonexistingportresponse = requests.get(url, headers=headers)
    self.assertEqual(shownonexistingportresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Create Port In Non-Existing Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    createnonexistingportdata = '{"port": {"name": "new_port", "network_id": "non-existing-id"} }'
    createnonexistingnetworkportresponse = requests.post(url, data=createnonexistingportdata, headers=headers)
    self.assertEqual(createnonexistingnetworkportresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Create Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
    createportresponse = requests.post(url, data=createportdata, headers=headers)
    self.assertEqual(createportresponse.status_code, 201)
    print (createportresponse.content)
    self.assertEqual(json.loads(createportresponse.content)["port"]["name"], "new_port")
    print(" ")
    print('->>>>>>> test Neutron Create Port With Existing Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
    createportwithexistingnameresponse = requests.post(url, data=createportwithexistingnamedata, headers=headers)
    self.assertEqual(createportwithexistingnameresponse.status_code, 500)
    print(" ")
    print('->>>>>>> test Neutron Create Port Without Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
    createportwithoutnameresponse = requests.post(url, data=createportdatawithoutname, headers=headers)
    self.assertEqual(createportwithoutnameresponse.status_code, 201)
    # A generated name containing "port:cp" is expected when none is given.
    self.assertIn("port:cp", json.loads(createportwithoutnameresponse.content)["port"]["name"])
    print(" ")
    print('->>>>>>> test Neutron Update Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(json.loads(createportresponse.content)["port"]["name"])
    # NOTE(review): the port is addressed by its *name* here, while delete
    # below uses the id — presumably the dummy API accepts both; confirm.
    url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["name"])
    updateportdata = '{"port": {"name": "new_port_new_name", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123", "network_id":"network_id123"} }'
    updateportresponse = requests.put(url, data=updateportdata, headers=headers)
    self.assertEqual(updateportresponse.status_code, 200)
    self.assertEqual(json.loads(updateportresponse.content)["port"]["name"], "new_port_new_name")
    print(" ")
    print('->>>>>>> test Neutron Update Non-Existing Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports/non-existing-port-ip"
    updatenonexistingportdata = '{"port": {"name": "new_port_new_name"} }'
    updatenonexistingportresponse = requests.put(url, data=updatenonexistingportdata, headers=headers)
    self.assertEqual(updatenonexistingportresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Delete Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["id"])
    deleterightportresponse = requests.delete(righturl, headers=headers)
    self.assertEqual(deleterightportresponse.status_code, 204)
    print(" ")
    print('->>>>>>> test Neutron Delete Non-Existing Port ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    wrongurl = "http://0.0.0.0:19696/v2.0/ports/unknownid"
    deletewrongportresponse = requests.delete(wrongurl, headers=headers)
    self.assertEqual(deletewrongportresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Neutron Delete Subnet ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    wrongurl = "http://0.0.0.0:19696/v2.0/subnets/unknownid"
    righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(updatesubnetresponse.content)["subnet"]["id"])
    deletewrongsubnetresponse = requests.delete(wrongurl, headers=headers)
    deleterightsubnetresponse = requests.delete(righturl, headers=headers)
    self.assertEqual(deletewrongsubnetresponse.status_code, 404)
    self.assertEqual(deleterightsubnetresponse.status_code, 204)
    print(" ")
    print('->>>>>>> test Neutron Delete Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
    deleterightnetworkresponse = requests.delete(righturl, headers=headers)
    self.assertEqual(deleterightnetworkresponse.status_code, 204)
    print(" ")
    print('->>>>>>> test Neutron Delete Non-Existing Network ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    wrongurl = "http://0.0.0.0:19696/v2.0/networks/unknownid"
    deletewrongnetworkresponse = requests.delete(wrongurl, headers=headers)
    self.assertEqual(deletewrongnetworkresponse.status_code, 404)
    print(" ")
def testKeystomeDummy(self):
    """Exercise the dummy Keystone (identity) API.

    Checks version listing, the v2.0 version document and token issuing
    against a locally running emulated Keystone endpoint on port 15000.
    NOTE(review): the method name looks like a typo for "Keystone";
    kept as-is since unittest discovers tests by their exact name.
    """
    print('->>>>>>> test Keystone Dummy Class->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(" ")
    headers = {'Content-type': 'application/json'}
    # Token-request body fixture, read from a file next to this module.
    test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(__file__), "test_heatapi_keystone_get_token.json")).read()
    print('->>>>>>> test Keystone List Versions ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:15000/"
    listapiversionstackresponse = requests.get(url, headers=headers)
    self.assertEqual(listapiversionstackresponse.status_code, 200)
    self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"]["values"][0]["id"], "v2.0")
    print(" ")
    print('->>>>>>> test Keystone Show ApiV2 ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:15000/v2.0"
    showapiversionstackresponse = requests.get(url, headers=headers)
    self.assertEqual(showapiversionstackresponse.status_code, 200)
    self.assertEqual(json.loads(showapiversionstackresponse.content)["version"]["id"], "v2.0")
    print(" ")
    print('->>>>>>> test Keystone Get Token ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:15000/v2.0/tokens"
    # loads + dumps round-trip also validates that the fixture is JSON.
    gettokenstackresponse = requests.post(url, data=json.dumps(json.loads(test_heatapi_keystone_get_token)), headers=headers)
    self.assertEqual(gettokenstackresponse.status_code, 200)
    self.assertEqual(json.loads(gettokenstackresponse.content)["access"]["user"]["name"], "tenantName")
    print(" ")
def testHeatDummy(self):
    """Exercise the dummy Heat (orchestration) API.

    Runs a full stack lifecycle against a locally running emulated Heat
    endpoint on port 18004: version listing, create (plus the duplicate
    name and unsupported-template-version failure paths), list, show,
    update and delete.  Later sections reuse the stack id returned by
    the create call, so the steps are order-dependent.
    """
    print('->>>>>>> test Heat Dummy Class->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(" ")
    headers = {'Content-type': 'application/json'}
    # JSON template fixtures located next to this test module.
    test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
    test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_update_stack.json")).read()
    print('->>>>>>> test Heat List API Versions Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/"
    listapiversionstackresponse = requests.get(url, headers=headers)
    self.assertEqual(listapiversionstackresponse.status_code, 200)
    self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v1.0")
    print(" ")
    print('->>>>>>> test Create Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    createstackresponse = requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)), headers=headers)
    self.assertEqual(createstackresponse.status_code, 201)
    self.assertNotEqual(json.loads(createstackresponse.content)["stack"]["id"], "")
    print(" ")
    print('->>>>>>> test Create Stack With Existing Name ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    # "s1" collides with the stack name from the template fixture -> 409.
    createstackwithexistingnameresponse = requests.post(url, data='{"stack_name" : "s1"}', headers=headers)
    self.assertEqual(createstackwithexistingnameresponse.status_code, 409)
    print(" ")
    print('->>>>>>> test Create Stack With Unsupported Version ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    createstackwitheunsupportedversionresponse = requests.post(url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
    self.assertEqual(createstackwitheunsupportedversionresponse.status_code, 400)
    print(" ")
    print('->>>>>>> test List Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    liststackresponse = requests.get(url, headers=headers)
    self.assertEqual(liststackresponse.status_code, 200)
    self.assertEqual(json.loads(liststackresponse.content)["stacks"][0]["stack_status"], "CREATE_COMPLETE")
    print(" ")
    print('->>>>>>> test Show Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
    liststackdetailsresponse = requests.get(url, headers=headers)
    self.assertEqual(liststackdetailsresponse.status_code, 200)
    self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "CREATE_COMPLETE")
    print(" ")
    print('->>>>>>> test Show Non-Exisitng Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/non_exisitng_id123"
    listnonexistingstackdetailsresponse = requests.get(url, headers=headers)
    self.assertEqual(listnonexistingstackdetailsresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Update Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
    updatestackresponse = requests.put(url, data=json.dumps(json.loads(test_heatapi_template_update_stack)),
                                       headers=headers)
    self.assertEqual(updatestackresponse.status_code, 202)
    liststackdetailsresponse = requests.get(url, headers=headers)
    self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "UPDATE_COMPLETE")
    print(" ")
    print('->>>>>>> test Update Non-Existing Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/non_existing_id_1234"
    updatenonexistingstackresponse = requests.put(url, data={"non": "sense"}, headers=headers)
    self.assertEqual(updatenonexistingstackresponse.status_code, 404)
    print(" ")
    print('->>>>>>> test Delete Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s" % \
        json.loads(createstackresponse.content)['stack']['id']
    deletestackdetailsresponse = requests.delete(url, headers=headers)
    self.assertEqual(deletestackdetailsresponse.status_code, 204)
    print(" ")
def test_CombinedTesting(self):
    """Cross-service workflow test combining Heat, Neutron and Nova.

    Creates a Heat stack, verifies the resulting Neutron port/network
    counts before and after a stack update, then runs a floating-IP /
    interface-attach / interface-detach workflow against the Nova API.
    All counts and ids depend on the fixture templates, so the steps
    are order-dependent.
    """
    print('->>>>>>> test Combinded tests->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    print(" ")
    headers = {'Content-type': 'application/json'}
    # Stack templates read from fixtures next to this test module.
    test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__),
                                                           "test_heatapi_template_create_stack.json")).read()
    test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(__file__),
                                                           "test_heatapi_template_update_stack.json")).read()
    print('->>>>>>> test Combined Create Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
    createstackresponse = requests.post(url,
                                        data=json.dumps(json.loads(test_heatapi_template_create_stack)),
                                        headers=headers)
    self.assertEqual(createstackresponse.status_code, 201)
    self.assertNotEqual(json.loads(createstackresponse.content)["stack"]["id"], "")
    print(" ")
    print('->>>>>>> test Combined Neutron List Ports ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    listportsesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsesponse.status_code, 200)
    # Port count expected from the create-stack template fixture.
    self.assertEqual(len(json.loads(listportsesponse.content)["ports"]), 9)
    for port in json.loads(listportsesponse.content)["ports"]:
        # Each port must carry a UUID-shaped (36-char) subnet id.
        self.assertEqual(len(str(port['fixed_ips'][0]['subnet_id'])), 36)
    print(" ")
    print('->>>>>>> test Combined Neutron List Networks ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks"
    listnetworksesponse = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse.status_code, 200)
    self.assertEqual(len(json.loads(listnetworksesponse.content)["networks"]), 10)
    for net in json.loads(listnetworksesponse.content)["networks"]:
        self.assertEqual(len(str(net['subnets'][0])), 36)
    print(" ")
    print('->>>>>>> test Combined Update Stack ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s"% \
        json.loads(createstackresponse.content)['stack']['id']
    updatestackresponse = requests.put(url,
                                       data=json.dumps(json.loads(test_heatapi_template_update_stack)),
                                       headers=headers)
    self.assertEqual(updatestackresponse.status_code, 202)
    liststackdetailsresponse = requests.get(url, headers=headers)
    self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "UPDATE_COMPLETE")
    print(" ")
    print('->>>>>>> test Combined Neutron List Ports ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/ports"
    listportsesponse = requests.get(url, headers=headers)
    self.assertEqual(listportsesponse.status_code, 200)
    # Port count expected after applying the update-stack template.
    self.assertEqual(len(json.loads(listportsesponse.content)["ports"]), 18)
    for port in json.loads(listportsesponse.content)["ports"]:
        self.assertEqual(len(str(port['fixed_ips'][0]['subnet_id'])), 36)
    print(" ")
    print('->>>>>>> test Combined Neutron List Networks ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/networks"
    listnetworksesponse = requests.get(url, headers=headers)
    self.assertEqual(listnetworksesponse.status_code, 200)
    self.assertEqual(len(json.loads(listnetworksesponse.content)["networks"]), 14)
    for net in json.loads(listnetworksesponse.content)["networks"]:
        self.assertEqual(len(str(net['subnets'][0])), 36)
    print(" ")
    # workflow create floating ip and assign it to a server
    print('->>>>>>> CombinedNeutronCreateFloatingIP ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:19696/v2.0/floatingips"
    createflip = requests.post(url, headers=headers,
                               data='{"floatingip":{"floating_network_id":"default"}}')
    self.assertEqual(createflip.status_code, 200)
    self.assertIsNotNone(json.loads(createflip.content)["floatingip"].get("port_id"))
    port_id = json.loads(createflip.content)["floatingip"].get("port_id")
    print(" ")
    print('->>>>>>> CombinedNovaGetServer ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18774/v2.1/id_bla/servers/detail"
    listserverapisdetailedresponse = requests.get(url, headers=headers)
    self.assertEqual(listserverapisdetailedresponse.status_code, 200)
    self.assertEqual(json.loads(listserverapisdetailedresponse.content)["servers"][0]["status"], "ACTIVE")
    server_id = json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"]
    print(" ")
    print('->>>>>>> CombinedNovaAssignInterface ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s/os-interface" % server_id
    assign = requests.post(url, headers=headers,
                           data='{"interfaceAttachment":{"net_id": "default"}}')
    self.assertEqual(assign.status_code, 202)
    self.assertIsNotNone(json.loads(assign.content)["interfaceAttachment"].get("port_id"))
    # Reuse the newly attached interface's port id for the detach call.
    port_id = json.loads(assign.content)["interfaceAttachment"].get("port_id")
    print(" ")
    print('->>>>>>> CombinedNovaDeleteInterface ->>>>>>>>>>>>>>>')
    print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s/os-interface/%s" % (server_id, port_id)
    getintfs = requests.delete(url, headers=headers)
    self.assertEqual(getintfs.status_code, 202)
    print(" ")
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1652266 | from typing import Tuple
import numpy as np
from pandas import DataFrame, Series
from spotify_confidence.analysis.constants import CI_LOWER, CI_UPPER, SFX1, SFX2
class BootstrapComputer(object):
    """Point estimates, variances and percentile confidence intervals
    derived from a column of pre-drawn bootstrap samples.

    Methods that are not meaningful for bootstrap results (standard
    error, p-value, achieved power) return sentinel values (None / -1).
    """

    def __init__(self, bootstrap_samples_column, interval_size):
        # Name of the column holding the array of bootstrap draws, and
        # the two-sided interval size (e.g. 0.95).
        self._bootstrap_samples = bootstrap_samples_column
        self._interval_size = interval_size

    def _point_estimate(self, row: Series) -> float:
        # The point estimate is the mean over the bootstrap draws.
        samples = row[self._bootstrap_samples]
        return samples.mean()

    def _variance(self, row: Series) -> float:
        sample_variance = row[self._bootstrap_samples].var()
        if sample_variance < 0:
            raise ValueError("Computed variance is negative. Please check your inputs.")
        return sample_variance

    def _std_err(self, row: Series) -> float:
        # Standard errors are not defined for the bootstrap computer.
        return None

    def _add_point_estimate_ci(self, row: DataFrame) -> Series:
        # Symmetric percentile interval: put interval_size probability
        # mass in the middle, half of the remainder in each tail.
        tail = (1 - self._interval_size) / 2
        samples = row[self._bootstrap_samples]
        row[CI_LOWER] = np.percentile(samples, 100 * tail)
        row[CI_UPPER] = np.percentile(samples, 100 * (1 - tail))
        return row

    def _p_value(self, row) -> float:
        # p-values are not computed for bootstrap results.
        return -1

    def _ci(self, row, alpha_column: str) -> Tuple[float, float]:
        # Percentile interval of the paired per-sample differences.
        deltas = row[self._bootstrap_samples + SFX2] - row[self._bootstrap_samples + SFX1]
        half_alpha = row[alpha_column] / 2
        lower_bound = np.percentile(deltas, 100 * half_alpha)
        upper_bound = np.percentile(deltas, 100 * (1 - half_alpha))
        return lower_bound, upper_bound

    def _achieved_power(self, df: DataFrame, mde: float, alpha: float) -> DataFrame:
        # Achieved power is not supported for bootstrap results.
        return None
| StarcoderdataPython |
class Solution(object):
    def countSubstrings(self, s, t):
        """Count pairs (substring of s, substring of t) that differ by
        exactly one character (LeetCode 1638).

        For every diagonal alignment of s against t we walk the diagonal
        once; at each position the number of one-mismatch substring pairs
        ending there equals the match-run length before the most recent
        mismatch, plus one for the mismatching character itself.

        Fixed: the original used Python 2's ``xrange``, which raises
        NameError on Python 3; ``range`` is the drop-in replacement.
        """
        n, m = len(s), len(t)

        def diagonal_count(i, j):
            # Count one-mismatch pairs along the diagonal starting at
            # s[i] / t[j].
            res = pre = cur = 0
            for k in range(min(n - i, m - j)):
                cur += 1
                if s[i + k] != t[j + k]:
                    # A mismatch: substrings ending here with exactly one
                    # difference = preceding match run + this character.
                    pre, cur = cur, 0
                res += pre
            return res

        # Every diagonal starts on s's first column or t's first row;
        # j starts at 1 so the shared origin is counted only once.
        return (sum(diagonal_count(i, 0) for i in range(n)) +
                sum(diagonal_count(0, j) for j in range(1, m)))
| StarcoderdataPython |
63730 | """Demo using test environment for grpc testing"""
import logging
from google.protobuf import json_format
from framework.config import settings
from tests.base_test import BaseTestCase
from utils.channel_factory import get_channel
from utils.builders.grpc_builders import build_number_from_file, build_number_from_dict
from services.doubler.doubler_pb2_grpc import DoublerStub
from services.doubler.doubler_pb2 import Number
# Module-level logger for this test module.
log = logging.getLogger(__name__)
# Metadata key/value pairs attached to every call on the shared channel.
METADATA = (('key1', 'val1'), ('key2', 'val2'),)
# Per-RPC deadline, in seconds.
TIMEOUT_SEC = 0.15
class ExampleGrpcTestCase(BaseTestCase):
    """Exercises the Doubler server from grpc-demo/doubler."""

    @classmethod
    def setUpClass(cls):
        """Open one channel/stub shared by every test in this class."""
        cls._channel = get_channel(settings["doubler_grpc_host"],
                                   settings["doubler_grpc_port"],
                                   metadata=METADATA)
        cls._stub = DoublerStub(cls._channel)

    @classmethod
    def tearDownClass(cls):
        """Release the shared channel after all tests have run."""
        cls._channel.close()

    def test_grpc_call1(self):
        """Double a number loaded from a JSON request file."""
        number = build_number_from_file('resources/requests/doubler/request1.json')
        # https://grpc.io/grpc/python/grpc.html#multi-callable-interfaces
        reply = self._stub.Double(number, timeout=TIMEOUT_SEC)
        log.debug(f'response: {json_format.MessageToJson(reply)}')
        self.assertEqual(reply.value, 10.0)

    def test_grpc_call2(self):
        """Double a number built from a plain dict."""
        number = build_number_from_dict({'value': -4.0})
        reply = self._stub.Double(number, timeout=TIMEOUT_SEC)
        self.assertEqual(reply.value, -8.0)

    def test_grpc_call3(self):
        """Double a Number message constructed directly."""
        number = Number(value=3.0)
        reply = self._stub.Double(number, timeout=TIMEOUT_SEC)
        self.assertEqual(reply.value, 6.0)
| StarcoderdataPython |
19462 | """
## ## ## ## ##
## ## ##
## ## ##
## ## ## ## ## ##
## ##
## ##
##
AUTHOR = <NAME> <<EMAIL>>
"""
import sys
import boto3
import click
import threading
from botocore.exceptions import ClientError
from secureaws import checkaws
from secureaws import setupaws
from secureaws import rsautil
# Important Variables - DO NOT change the values
# Maps friendly AWS region names to the API region identifiers.
REGION = {
    "N_VIRGINIA": "us-east-1",
    "OHIO": "us-east-2",
    "N_CALIFORNIA": "us-west-1",
    "OREGON": "us-west-2",
    "MUMBAI": "ap-south-1",
    "SEOUL": "ap-northeast-2",
    "SINGAPORE": "ap-southeast-1",
    "SYDNEY": "ap-southeast-2",
    "TOKYO": "ap-northeast-1",
    "CANADA": "ca-central-1",
    "FRANKFURT": "eu-central-1",
    "IRELAND": "eu-west-1",
    "LONDON": "eu-west-2",
    "PARIS": "eu-west-3",
    "SAO_PAULO": "sa-east-1",
    "BAHRAIN": "me-south-1",
    "STOCKHOLM": "eu-north-1",
    "HONG_KONG": "ap-east-1"
}
class secureaws:
    """Thin wrapper around a boto3 Session built from explicit keys, a
    named CLI profile, or the default credential chain."""

    # Class-level defaults, overridden per instance in __init__.
    region = ""
    session = None

    def __init__(self, access_key="", secret_key="", profile="", region=""):
        """Create a boto3 Session for the given credentials.

        Credential precedence: default chain (nothing supplied) ->
        named profile -> explicit access/secret key pair. On any boto3
        error the message is printed and the process exits with code 1.
        """
        self.region = region
        try:
            if access_key == "" and secret_key == "" and profile == "":
                # Default credential chain (env vars, shared config,
                # instance role, ...).
                self.session = boto3.Session(region_name=region)
            elif profile != "":
                self.session = boto3.Session(profile_name=profile, region_name=region)
            elif access_key != "" and secret_key != "":
                self.session = boto3.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=region)
        except Exception as e:
            print("Error: {}".format(e))
            # Fixed: use sys.exit instead of the site-provided exit();
            # exit() is meant for interactive use and is absent when
            # Python runs without the site module (python -S).
            sys.exit(1)

    def getSession(self):
        """Return the boto3 Session created in __init__ (None when the
        supplied credential combination matched no branch)."""
        return self.session
# Managing CLI
@click.group()
def chk_group():
    # Empty group: exists only so `check` can be attached and later
    # merged into the CommandCollection `sa`.
    pass
@chk_group.command()
@click.option('--access-key', help='AWS IAM User Access Key')
# Fixed: help text previously said "Access Key" (copy-paste error).
@click.option('--secret-key', help='AWS IAM User Secret Key')
@click.option('--profile', help='AWS CLI profile')
@click.option('--region', default='us-east-1', help='AWS region identifier. Default: us-east-1')
def check(access_key, secret_key, profile, region):
    '''
    This command will scan your AWS account to identify whether basic security services are enabled or not.
    \b
    IAM Policy:
    {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "cloudtrail:DescribeTrails",
                    "config:DescribeConfigurationRecorderStatus",
                    "ec2:DescribeFlowLogs",
                    "iam:GetAccountSummary",
                    "iam:GetAccountPasswordPolicy",
                    "macie:ListMemberAccounts",
                    "guardduty:ListDetectors",
                    "s3:ListAllMyBuckets",
                    "s3:GetEncryptionConfiguration",
                    "ec2:DescribeVolumes"
                ],
                "Resource": "*"
            }
        ]
    }
    \b
    Usage:
    - Scan AWS account using profile:
        secureaws check --profile xxx --region xxx
    - Scan AWS account using keys:
        secureaws check --access-key xxx --secret-key xxx --region xxx
    '''
    # Build a session from whichever credential combination was supplied,
    # then run the read-only security audit against it.
    secureaws_obj = secureaws(access_key, secret_key, profile, region)
    checkaws.check_account(secureaws_obj.getSession())
@click.group()
def setup_group():
    # Empty group container for the `setup` command (merged into `sa`).
    pass
@setup_group.command()
@click.option('--menu', is_flag=True, help='Display interactive menu to setup security services')
@click.option('--access-key', help='AWS IAM User Access Key')
# Fixed: help text previously said "Access Key" (copy-paste error).
@click.option('--secret-key', help='AWS IAM User Secret Key')
@click.option('--profile', help='AWS CLI profile')
@click.option('--region', default='us-east-1', help='AWS region identifier. Default: us-east-1')
@click.option('--yes', '-y', 'non_interactive', is_flag=True, help='Non-interactive mode')
@click.option('--service', '-s', 'svc', multiple=True, help='Specific service name to setup')
@click.option('--bucket-name', multiple=True, help='Bucket name to encrypt. Only applicable for s3-sse')
@click.option('--instance-id', multiple=True, help='Instance ID (Required only for ebs-sse)')
@click.option('--volume-id', multiple=True, help='Volume ID (Required only for ebs-sse)')
@click.option('--kms-id', help='Supports both KMS Key ID or Alias. Only supported for s3-sse and ebs-sse')
def setup(menu, access_key, secret_key, profile, region, non_interactive, svc, bucket_name, instance_id, volume_id, kms_id):
    '''
    \b
    This command supports securing following services on your AWS account:
    - CloudTrail
    - Config
    - Flow Logs
    - MFA (Default User: root)
    - S3 SSE (Default: AES256)
    - EBS SSE (Default: aws/ebs)
    - Password Policy
    \b
    It is recommended to further restrict down the policy as per your need.
    IAM Policy:
    {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "s3:CreateBucket",
                    "s3:PutEncryptionConfiguration",
                    "s3:ListAllMyBuckets",
                    "s3:PutBucketPolicy",
                    "s3:HeadBucket",
                    "cloudtrail:StartLogging",
                    "cloudtrail:CreateTrail",
                    "iam:CreateRole",
                    "iam:PassRole",
                    "iam:AttachRolePolicy",
                    "iam:CreatePolicy",
                    "iam:UpdateAccountPasswordPolicy",
                    "iam:CreateVirtualMFADevice",
                    "iam:EnableMFADevice",
                    "iam:GetUser",
                    "iam:ListMFADevices",
                    "config:StartConfigurationRecorder",
                    "config:PutDeliveryChannel",
                    "config:PutConfigurationRecorder",
                    "logs:CreateLogGroup",
                    "logs:DescribeLogGroups",
                    "ec2:CreateFlowLogs",
                    "ec2:DescribeVpcs",
                    "ec2:StopInstances",
                    "ec2:StartInstances",
                    "ec2:CreateSnapshot",
                    "ec2:CopySnapshot",
                    "ec2:CreateVolume",
                    "ec2:AttachVolume",
                    "ec2:DeleteVolume",
                    "ec2:DeleteSnapshot"
                ],
                "Resource": "*"
            }
        ]
    }
    \b
    Service Names:
    - cloudtrail
    - config
    - flowlogs
    - mfa
    - s3-sse
    - ebs-sse
    - password-policy
    \b
    Usage:
    - Setup all services using AWS profile:
        secureaws setup --profile xxx --region xxx
    - Setup all services using AWS keys in non-interactive mode (except ebs-sse):
        secureaws setup --access-key xxx --secret-key xxx --region xxx -y
    - Setup specific service(s):
        secureaws setup --profile xxx --service cloudtrail -s flowlogs -s mfa --region xxx
    - Setup MFA for a Root user:
        secureaws setup --profile xxx -s mfa
    - Setup MFA for an IAM user:
        secureaws setup --profile xxx -s mfa=username
    - Encrypt all S3 buckets using KMS Key ID:
        secureaws setup --profile xxx --region xxx -s s3-sse --kms-id xxx
    - Encrypt specific S3 buckets using default encryption:
        secureaws setup --profile xxx --region xxx -s s3-sse --bucket-name xxx --bucket-name xxx
    - Encrypt EBS Volumes using Instance ID(s):
        secureaws setup --profile xxx -s ebs-sse --instance-id xxx --region xxx
    - Encrypt EBS Volumes using Volume ID(s) and KMS Alias:
        secureaws setup --profile xxx -s ebs-sse --volume-id xxx --volume-id xxx --kms-id alias/xxx --region xxx
    '''
    # Build the session, then either show the interactive menu or apply
    # the requested services directly.
    secureaws_obj = secureaws(access_key, secret_key, profile, region)
    if menu:
        setupaws.secure_account_menu(secureaws_obj.getSession())
    else:
        setupaws.secure_account(secureaws_obj.getSession(), svc, buckets=bucket_name, instance_id=instance_id, volume_id=volume_id, kms_id=kms_id, non_interactive=non_interactive)
@click.group()
def rsa_group():
    # Empty group container for the `genrsa` command (merged into `sa`).
    pass
@rsa_group.command()
@click.option('--file-name', help='File name for private and public key')
@click.option('--key-size', default=4096, help='Key size (Default: 4096)')
def genrsa(file_name, key_size):
    '''
    This will generate RSA key pair
    '''
    # Key generation and file output are delegated entirely to rsautil.
    rsautil.create_rsa_key_pair(file_name, key_size)
# Map all click groups
# CommandCollection merges the three single-command groups into one CLI.
sa = click.CommandCollection(sources=[chk_group,setup_group,rsa_group])
def main():
    # Entry point for console-script installs.
    sa()
if __name__ == '__main__':
    sa()
| StarcoderdataPython |
1657869 | <filename>tools/efro/error.py<gh_stars>1-10
# Copyright (c) 2011-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality for dealing with errors."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
pass
class CleanError(Exception):
    """An error meant to be shown to the user as a plain message.

    These errors are self-explanatory: no traceback or extra context
    would help. A CleanError with an empty message tells a script to
    fail without printing anything.

    Reserve this for errors that will *always* be presented to the user
    (high level tool code); exceptions that other code may catch and
    handle should use more descriptive exception types.
    """

    def pretty_print(self, flush: bool = False) -> None:
        """Print the error to stdout, red-colored when available.

        An empty message prints nothing at all (not even a newline).
        """
        from efro.terminal import Clr
        message = str(self)
        if not message:
            return
        print(f'{Clr.SRED}{message}{Clr.RST}', flush=flush)
| StarcoderdataPython |
def get_prefix_table(pattern):
    """Build the KMP failure table for *pattern*.

    table[i] is the length of the longest proper prefix of
    pattern[:i + 1] that is also a suffix of it.

    Fixed: the previous implementation advanced j while
    pattern[i] == pattern[j], which walks off the end of the pattern
    (IndexError) whenever a character repeats the whole matched prefix
    (e.g. "AA"), and computes wrong values for patterns with partial
    self-overlap. This is the standard KMP failure-function construction.
    """
    table = [0]
    j = 0  # length of the currently matched proper prefix
    for i in range(1, len(pattern)):
        # Shrink the candidate prefix via failure links until it can
        # be extended by pattern[i] (or becomes empty).
        while j > 0 and pattern[i] != pattern[j]:
            j = table[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
        table.append(j)
    return table
def substring_index(needle, haystack):
    """Yield every start index of *needle* in *haystack* (KMP search).

    Overlapping occurrences are reported too, e.g. "ANA" in "BANANA"
    yields 1 and 3.

    Fixed: removed the dead statement that rebound the ``for`` loop
    variable ``i`` after a match - a ``for`` loop overwrites it on the
    next iteration, so it had no effect.
    """
    prefix_table = get_prefix_table(needle)
    haystack_length = len(haystack)
    j = 0  # number of needle characters currently matched
    for i in range(haystack_length):
        # Fall back through the failure links on a mismatch.
        while j > 0 and needle[j] != haystack[i]:
            j = prefix_table[j - 1]
        if needle[j] == haystack[i]:
            j += 1
        if len(needle) == j:
            yield i - (j - 1)
            # Continue via the failure link so overlapping matches
            # are found as well.
            j = prefix_table[j - 1]
    return None
# Demo: report each (possibly overlapping) occurrence of the pattern.
tests = [("TEST", "THIS IS A TEST TEXT"), ("ANA", "BANANA")]
for test in tests:
    for occurrence in substring_index(*test):
        print("Found {0} at {1} position".format(test[0], occurrence))
| StarcoderdataPython |
3266132 | <gh_stars>1-10
import shlex
import subprocess
import sys
import click
from ecstools.commands.service.env import container_selection
from ecstools.resources.service import Service
@click.command(short_help='Run ECS Exec')
@click.argument('cluster')
@click.argument('service')
@click.argument('command', default='/bin/bash')
@click.option('-c', '--container', type=str, default=None, help='Container name')
@click.option('-t', '--task', type=str, default=None, help='Task id')
@click.pass_context
def exec(ctx, cluster, service, command, container, task):
    """Run ECS Exec"""
    # Boto3 clients are created by the CLI entry point and shared via
    # the click context.
    ecs = ctx.obj['ecs']
    ecr = ctx.obj['ecr']
    try:
        if not task:
            # No task given: default to the service's first RUNNING task.
            tasks = ecs.list_tasks(cluster=cluster, serviceName=service, desiredStatus='RUNNING')
            task = tasks['taskArns'][0]
        task_desc = ecs.describe_tasks(cluster=cluster, tasks=[task])['tasks'][0]
    except Exception as e:
        # Covers "no running tasks" (IndexError) as well as AWS API errors.
        sys.exit(e)
    # Task ARN and task-definition ARN both end in .../<name-or-id>.
    task_id = task_desc['taskArn'].split('/')[-1]
    task_def = task_desc['taskDefinitionArn'].split('/')[-1]
    started_at = task_desc['startedAt'].replace(microsecond=0)
    click.echo(f'{task_id} {started_at} {task_def}')
    try:
        # List the task definition's containers/images so the user can
        # see what is available.
        srv = Service(ecs, ecr, cluster, service)
        for c in srv.task_definition().containers():
            click.echo(f"\t{c['name']}: {c['image']}")
    except Exception as e:
        sys.exit(e)
    if not container:
        # Prompt interactively when no container was passed on the CLI.
        container_desc = container_selection(srv.task_definition().containers())
        container = container_desc['name']
    # Delegate the interactive session to the AWS CLI
    # (requires the session-manager-plugin to be installed).
    cmd = f'aws ecs execute-command --cluster {cluster} --task {task_id} --container {container}' \
        f' --command "{command}" --interactive'
    try:
        subprocess.check_call(shlex.split(cmd))
    except Exception as e:
        sys.exit(e)
| StarcoderdataPython |
1778901 | from setup import *
def get_general_ts_all(test_type):
    """Export every row of one general touchscreen test to csv.

    Builds the cleaned dataframe for *test_type* and prompts the user to
    pick a save location. Used for Habituation 1/2, Initial Touch,
    Must Touch and Must Initiate.

    :param test_type: String naming one of the general touchscreen tests.
    """
    frame = data_setup(test_type)
    if frame is None:
        return
    save_file_message(frame)
def get_general_ts_first_day(test_type):
    """Export each animal's first day of one general touchscreen test.

    Keeps only rows where Day == 1 and prompts the user to save the
    resulting csv. Used for Habituation 1/2, Initial Touch, Must Touch
    and Must Initiate.

    :param test_type: String naming one of the general touchscreen tests.
    """
    frame = data_setup(test_type)
    if frame is None:
        return
    first_days = frame.loc[frame['Day'] == 1]
    save_file_message(first_days)
def get_general_ts_last_day(test_type):
    """Export each animal's final session of one general touchscreen test.

    Keeps the last recorded row per animal ID and prompts the user to
    save the resulting csv. Used for Habituation 1/2, Initial Touch,
    Must Touch and Must Initiate.

    :param test_type: String naming one of the general touchscreen tests.
    """
    frame = data_setup(test_type)
    if frame is None:
        return
    last_rows = frame.drop_duplicates(subset='ID', keep='last')
    save_file_message(last_rows)
def check_enter_day(enter_day):
    """Parse and validate the day Entry widget.

    :param enter_day: Entry widget whose text should be a whole number.
    :return: The day as an int, or None (after an error dialog) when the
             entry is empty or non-numeric.
    """
    try:
        return int(enter_day.get())
    except ValueError:
        mb.showerror('General Touchscreen Error',
                     'check_enter_day() error: Either the value is empty or the value is not numeric!')
        print('check_enter_day() error: Either the value is empty or the value is not numeric!')
        return None
def check_enter_id(enter_id):
    """Parse and validate the animal-id Entry widget.

    :param enter_id: Entry widget whose text should be a whole number.
    :return: The id as an int, or None (after an error dialog) when the
             entry is empty or non-numeric.
    """
    try:
        return int(enter_id.get())
    except ValueError:
        mb.showerror('General Touchscreen Error',
                     'check_enter_id() error: Either the value is empty or the value is not numeric!')
        print('check_enter_id() error: Either the value is empty or the value is not numeric!')
        return None
def get_general_ts_select_day(test_type, enter_day):
    """Export all rows of one general touchscreen test for a chosen day.

    Validates the day entry first; on invalid input an error dialog is
    shown and nothing is exported. Otherwise prompts the user to save
    the filtered csv.

    :param test_type: String naming one of the general touchscreen tests.
    :param enter_day: Entry widget holding the selected day value.
    """
    selected_day = check_enter_day(enter_day)
    if selected_day is None:
        mb.showerror('General Touchscreen Error',
                     'get_general_ts_select_day() error: Either the day value is empty or the value is not numeric!')
        print('get_general_ts_select_day() error: Either the day value is empty or the value is not numeric!')
        return
    frame = data_setup(test_type)
    if frame is None:
        return
    save_file_message(frame.loc[frame['Day'] == selected_day])
def get_general_ts_select_id(test_type, enter_id):
    """Export all rows of one general touchscreen test for a chosen animal.

    Validates the id entry first; on invalid input an error dialog is
    shown and nothing is exported. Otherwise prompts the user to save
    the filtered csv.

    :param test_type: String naming one of the general touchscreen tests.
    :param enter_id: Entry widget holding the selected animal id.
    """
    # check that the inputs to the criteria widgets are valid
    selected_id = check_enter_id(enter_id)
    if selected_id is None:
        mb.showerror('General Touchscreen Error',
                     'get_general_ts_select_id() error: Either the id value is empty or the value is not numeric!')
        print('get_general_ts_select_id() error: Either the id value is empty or the value is not numeric!')
        return
    df = data_setup(test_type)
    if df is not None:
        # Fixed: filter on the animal's 'ID' column; the original compared
        # df['Day'] against the entered id (copy-paste from the select-day
        # variant), exporting the wrong rows.
        df = df.loc[df['ID'] == selected_id]
        save_file_message(df)
def punish_incorrect_last_days(df, min_trial_req, criteria_one, criteria_two):
    """
    This function determines the last day (aka the first time the animal has met the PI criteria) for all animals. The
    function grabs all rows that have at least the minimum trials completed requirement and checks if the animal on the
    first day has at least criteria_one percent correctness and on the second day has at least criteria_two percent
    correctness. If it passes the criteria, the function will mark the day that it passed the criteria. At the end, the
    function will grab all the first occurrences of when the animal passed the criteria and return it as a new
    dataframe.
    :param df: A dataframe that represents cleaned Punish Incorrect data
    :param min_trial_req: A value that represents the minimum required trials to pass the criteria (int)
    :param criteria_one: A value that represents the minimum percent correctness for the first day (int)
    :param criteria_two: A value that represents the minimum percent correctness for the second day (int)
    :return: A dataframe that only contains the rows that the animals met their criteria on. If an animal did not reach
    the criteria, it will not show up.
    """
    df_copy = df.copy(deep=True)
    # Only sessions with enough completed trials can count toward the criteria.
    df_copy = df_copy.loc[df_copy['NumberOfTrial'] >= min_trial_req]
    df_copy.sort_values(['ID', 'Day'], inplace=True)
    df_copy.reset_index(drop=True, inplace=True)
    row_index = 0
    while row_index < df_copy.shape[0] - 1:
        rows_to_check = list()
        # compare two rows at a time with the same ID
        for row in range(2):
            row_to_add = df_copy.loc[row_index + row]
            # Skip ahead while the fetched row's ID differs from the anchor row.
            # NOTE(review): row_to_add is not re-fetched inside this loop, and
            # advancing row_index also moves the anchor it is compared against;
            # verify this behaves as intended at ID boundaries.
            while row_to_add['ID'] != df_copy.at[row_index, 'ID'] and row_index < df_copy.shape[0] - 1:
                row_index += 1
            rows_to_check.append(row_to_add)
        last_row_info = rows_to_check[-1]
        # NOTE(review): the range(2) loop always appends two rows, so this
        # guard appears unreachable - confirm before relying on it.
        if len(rows_to_check) < 2:
            continue
        if last_row_info['ID'] != rows_to_check[0]['ID']:
            continue
        # checks the correctness matches the requirement and that both days are only 1 day apart
        if rows_to_check[0]['PercentCorrect'] >= criteria_one and rows_to_check[1]['PercentCorrect'] >= criteria_two \
                and abs(rows_to_check[0]['Day'] - rows_to_check[1]['Day']) == 1:
            # Mark the second of the two qualifying consecutive days.
            df_copy.at[last_row_info.name, 'Criteria Passed?'] = 'yes'
        row_index += 1
    # only take the first occurrence of the rows that passed the criteria
    # NOTE(review): if no animal ever passes, the 'Criteria Passed?' column is
    # never created and the .loc below raises KeyError - confirm callers always
    # have at least one passing animal, or guard for the missing column.
    df_copy = df_copy.loc[df_copy['Criteria Passed?'] == 'yes']
    df_copy['Mice ID'] = df_copy['ID']
    df_copy = df_copy.groupby('Mice ID').first()
    return df_copy
def get_punish_incorrect_normal(df, min_trials, percent_one, percent_two):
    """Trim *df* in place so each animal's rows end at its criteria-met day.

    For every animal that met the Punish Incorrect criteria, rows dated
    after the criteria day are dropped (those are likely extra
    memory-retention sessions). Animals that never met the criteria keep
    all their rows.

    :param df: Cleaned Punish Incorrect dataframe (modified in place).
    :param min_trials: Minimum required trials to pass the criteria (int).
    :param percent_one: Minimum percent correct on the first day (int).
    :param percent_two: Minimum percent correct on the second day (int).
    """
    criteria_rows = punish_incorrect_last_days(df, min_trials, percent_one, percent_two)
    for _, criteria_row in criteria_rows.iterrows():
        later_rows = df.loc[(df['ID'] == criteria_row['ID']) & (df['Day'] > criteria_row['Day'])]
        df.drop(later_rows.index, inplace=True)
def get_punish_incorrect_criteria_days(df, min_trials, percent_one, percent_two):
    """
    This function will call the get_punish_incorrect_normal() and then get all the last days. For animals that passed
    the criteria, their last day is the day they met the criteria. For animals that did not pass the criteria, their
    last day is the last day the test was ran.
    :param df: A dataframe that represents cleaned Punish Incorrect data
    :param min_trials: A value that represents the minimum required trials to pass the criteria (int)
    :param percent_one: A value that represents the minimum percent correctness for the first day (int)
    :param percent_two: A value that represents the minimum percent correctness for the second day (int)
    """
    # Trim each animal to its start..criteria range, then keep only the
    # final remaining row per animal.
    get_punish_incorrect_normal(df, min_trials, percent_one, percent_two)
    df.drop_duplicates(subset='ID', keep='last', inplace=True)
def pi_widget_check(min_trials, percent_one, percent_two):
    """Validate the three Punish Incorrect criteria Entry widgets.

    :param min_trials: Entry widget holding the minimum required trials.
    :param percent_one: Entry widget holding day-1 minimum percent correct.
    :param percent_two: Entry widget holding day-2 minimum percent correct.
    :return: (minimum_trials, correct_one, correct_two) as ints, or None
             (after an error dialog) if any entry is empty/non-numeric.
    """
    checks = (
        (min_trials,
         'pi_widget_check() error: Either the trial value is empty of the value is not numeric!'),
        (percent_one,
         'pi_widget_check() error: Either the percent correctness 1 is empty of the value is not numeric!'),
        (percent_two,
         'pi_widget_check() error: Either the percent correctness 2 is empty of the value is not numeric!'),
    )
    parsed = []
    for widget, message in checks:
        try:
            parsed.append(int(widget.get()))
        except ValueError:
            mb.showerror('General Touchscreen Error', message)
            print(message)
            return None
    return tuple(parsed)
def pi_all_button(min_trials, percent_one, percent_two):
    """Create a csv of all Punish Incorrect rows up to each criteria day.

    Each animal keeps rows from its start date to its criteria-met date;
    an animal that never meets the criteria keeps all its rows. The user
    is then prompted to save the csv.

    :param min_trials: Entry widget holding the minimum required trials.
    :param percent_one: Entry widget holding day-1 minimum percent correct.
    :param percent_two: Entry widget holding day-2 minimum percent correct.
    """
    # Validate once and reuse the parsed values; the original called
    # pi_widget_check() a second time just to unpack the same result,
    # re-parsing every entry on the success path.
    criteria = pi_widget_check(min_trials, percent_one, percent_two)
    if criteria is None:
        mb.showerror('General Touchscreen Error',
                     'pi_all_button() error: One of the three criteria is either empty or non-numeric!')
        print('pi_all_button() error: One of the three criteria is either empty or non-numeric!')
        return
    minimum_trials, correct_one, correct_two = criteria
    df = data_setup('PI')
    if df is not None:
        get_punish_incorrect_normal(df, minimum_trials, correct_one, correct_two)
        save_file_message(df)
def pi_first_button(min_trials, percent_one, percent_two):
    """Create a csv with each animal's first Punish Incorrect day.

    :param min_trials: Entry widget holding the minimum required trials.
    :param percent_one: Entry widget holding day-1 minimum percent correct.
    :param percent_two: Entry widget holding day-2 minimum percent correct.
    """
    # Validate once and reuse the parsed values (the original called
    # pi_widget_check() twice, re-parsing every entry on success).
    criteria = pi_widget_check(min_trials, percent_one, percent_two)
    if criteria is None:
        mb.showerror('General Touchscreen Error',
                     'pi_first_button() error: One of the three criteria is either empty or non-numeric!')
        print('pi_first_button() error: One of the three criteria is either empty or non-numeric!')
        return
    minimum_trials, correct_one, correct_two = criteria
    df = data_setup('PI')
    if df is not None:
        get_punish_incorrect_normal(df, minimum_trials, correct_one, correct_two)
        df = df.loc[df['Day'] == 1]
        save_file_message(df)
def pi_last_button(min_trials, percent_one, percent_two):
    """Create a csv with each animal's last Punish Incorrect day.

    For animals that met the criteria this is the criteria-met day; for
    the rest it is the final day the test was run.

    :param min_trials: Entry widget holding the minimum required trials.
    :param percent_one: Entry widget holding day-1 minimum percent correct.
    :param percent_two: Entry widget holding day-2 minimum percent correct.
    """
    # Validate once and reuse the parsed values (the original called
    # pi_widget_check() twice, re-parsing every entry on success).
    criteria = pi_widget_check(min_trials, percent_one, percent_two)
    if criteria is None:
        mb.showerror('General Touchscreen Error',
                     'pi_last_button() error: One of the three criteria is either empty or non-numeric!')
        print('pi_last_button() error: One of the three criteria is either empty or non-numeric!')
        return
    minimum_trials, correct_one, correct_two = criteria
    df = data_setup('PI')
    if df is not None:
        get_punish_incorrect_criteria_days(df, minimum_trials, correct_one, correct_two)
        save_file_message(df)
def pi_select_day_button(min_trials, percent_one, percent_two, enter_day):
    """Create a csv of all Punish Incorrect rows for one selected day.

    :param min_trials: Entry widget holding the minimum required trials.
    :param percent_one: Entry widget holding day-1 minimum percent correct.
    :param percent_two: Entry widget holding day-2 minimum percent correct.
    :param enter_day: Entry widget holding the selected day value.
    """
    # Validate once and reuse the parsed values (the original called
    # pi_widget_check() twice, re-parsing every entry on success).
    criteria = pi_widget_check(min_trials, percent_one, percent_two)
    if criteria is None:
        mb.showerror('General Touchscreen Error',
                     'pi_select_day_button() error: One of the three criteria is either empty or non-numeric!')
        print('pi_select_day_button() error: One of the three criteria is either empty or non-numeric!')
        return
    minimum_trials, correct_one, correct_two = criteria
    selected_day = check_enter_day(enter_day)
    if selected_day is None:
        # Fixed: the dialog here previously reported a criteria problem
        # even though this branch is about the day entry (the print
        # statement already had the correct message).
        mb.showerror('General Touchscreen Error',
                     'pi_select_day_button() error: Either the day value is empty or the value is not numeric!')
        print('pi_select_day_button() error: Either the day value is empty or the value is not numeric!')
        return
    df = data_setup('PI')
    if df is not None:
        get_punish_incorrect_normal(df, minimum_trials, correct_one, correct_two)
        df = df.loc[df['Day'] == selected_day]
        save_file_message(df)
def pi_select_id_button(min_trials, percent_one, percent_two, enter_id):
    """
    This function creates a csv file for the Punish Incorrect test. Each row will be all the trials from start date to
    criteria date for a selected animal id. If the animal does not meet the criteria, then their last date will be the
    last day of the test. At the end, the function will ask the user to save the newly created csv file in a directory.
    :param min_trials: An entry with the value that represents the minimum required trials to pass the criteria (int)
    :param percent_one: An entry with the value that represents the minimum percent correctness for the first day (int)
    :param percent_two: An entry with the value that represents the minimum percent correctness for the second day (int)
    :param enter_id: A widget that contains the value that represents the selected id.
    """
    # check that the inputs to the criteria widgets are valid
    # (call pi_widget_check once instead of twice, as before)
    criteria = pi_widget_check(min_trials, percent_one, percent_two)
    if criteria is not None:
        minimum_trials, correct_one, correct_two = criteria
    else:
        mb.showerror('General Touchscreen Error',
                     'pi_select_id_button() error: One of the three criteria is either empty or non-numeric!')
        print('pi_select_id_button() error: One of the three criteria is either empty or non-numeric!')
        return
    # check that the input to the id widget is valid
    selected_id = check_enter_id(enter_id)
    if selected_id is None:
        mb.showerror('General Touchscreen Error',
                     'pi_select_id_button() error: Either the id value is empty or the value is not numeric!')
        print('pi_select_id_button() error: Either the id value is empty or the value is not numeric!')
        return
    df = data_setup('PI')
    if df is not None:
        get_punish_incorrect_normal(df, minimum_trials, correct_one, correct_two)
        # bug fix: previously filtered on df['Day'] with an animal id, which
        # selected the wrong rows. NOTE(review): column name assumed to be
        # 'ID' -- confirm against the frame produced by data_setup('PI').
        df = df.loc[df['ID'] == selected_id]
        save_file_message(df)
def make_general_ts_buttons(tk, root):
    """
    This function creates all the general touchscreen buttons found on the General Touchscreen sub-menu.
    :param tk: The TKinter library
    :param root: A specific frame where all the buttons will live on.
    """
    # creates hab 1 and hab 2 buttons
    hab_one_btn = tk.Button(root, text='Habituation 1', command=lambda: get_general_ts_all('Hab1'), width=30)
    hab_one_btn.grid(row=0, column=0)
    hab_two_btn = tk.Button(root, text='Habituation 2', command=lambda: get_general_ts_all('Hab2'), width=30)
    hab_two_btn.grid(row=1, column=0)
    # visual spacer between hab and it
    spacer_btn = tk.Label(root, text='', width=57, bg='#D6D6D6')
    spacer_btn.grid(row=2, columnspan=2)
    # creates all it buttons
    it_btn = tk.Button(root, text='Initial Touch (All)', command=lambda: get_general_ts_all('IT'), width=30)
    it_btn.grid(row=3, column=0)
    it_first_btn = tk.Button(root, text='Initial Touch (First Day)', command=lambda: get_general_ts_first_day('IT'),
                             width=30)
    it_first_btn.grid(row=4, column=0)
    it_last_btn = tk.Button(root, text='Initial Touch (Last Day)', command=lambda: get_general_ts_last_day('IT'),
                            width=30)
    it_last_btn.grid(row=5, column=0)
    it_sel_day_text = tk.Entry(root, width=30, justify='center')
    it_sel_day_text.grid(row=6, column=1)
    it_sel_day_btn = tk.Button(root, text='Initial Touch (Select Day)',
                               command=lambda: get_general_ts_select_day('IT', it_sel_day_text), width=30)
    it_sel_day_btn.grid(row=6, column=0)
    it_sel_id_text = tk.Entry(root, width=30, justify='center')
    it_sel_id_text.grid(row=7, column=1)
    it_sel_id_btn = tk.Button(root, text='Initial Touch (Select ID)',
                              command=lambda: get_general_ts_select_id('IT', it_sel_id_text), width=30)
    it_sel_id_btn.grid(row=7, column=0)
    # visual spacer between it and mi
    spacer_btn_two = tk.Label(root, text='', width=57, bg='#D6D6D6')
    spacer_btn_two.grid(row=8, columnspan=2)
    # creates all the mi buttons
    mi_btn = tk.Button(root, text='Must Initiate (All)', command=lambda: get_general_ts_all('MI'), width=30)
    mi_btn.grid(row=9, column=0)
    mi_first_btn = tk.Button(root, text='Must Initiate (First Day)', command=lambda: get_general_ts_first_day('MI'),
                             width=30)
    mi_first_btn.grid(row=10, column=0)
    mi_last_btn = tk.Button(root, text='Must Initiate (Last Day)', command=lambda: get_general_ts_last_day('MI'),
                            width=30)
    mi_last_btn.grid(row=11, column=0)
    mi_sel_day_text = tk.Entry(root, width=30, justify='center')
    mi_sel_day_text.grid(row=12, column=1)
    # bug fix: the MI select-day/select-id commands previously read the
    # Initial Touch entries (it_sel_day_text / it_sel_id_text)
    mi_sel_day_btn = tk.Button(root, text='Must Initiate (Select Day)',
                               command=lambda: get_general_ts_select_day('MI', mi_sel_day_text), width=30)
    mi_sel_day_btn.grid(row=12, column=0)
    mi_sel_id_text = tk.Entry(root, width=30, justify='center')
    mi_sel_id_text.grid(row=13, column=1)
    mi_sel_id_btn = tk.Button(root, text='Must Initiate (Select ID)',
                              command=lambda: get_general_ts_select_id('MI', mi_sel_id_text), width=30)
    mi_sel_id_btn.grid(row=13, column=0)
    # visual spacer between mi and mt
    spacer_btn_three = tk.Label(root, text='', width=57, bg='#D6D6D6')
    spacer_btn_three.grid(row=14, columnspan=2)
    # creates all the mt buttons
    mt_btn = tk.Button(root, text='Must Touch (All)', command=lambda: get_general_ts_all('MT'), width=30)
    mt_btn.grid(row=15, column=0)
    mt_first_btn = tk.Button(root, text='Must Touch (First Day)', command=lambda: get_general_ts_first_day('MT'),
                             width=30)
    mt_first_btn.grid(row=16, column=0)
    mt_last_btn = tk.Button(root, text='Must Touch (Last Day)', command=lambda: get_general_ts_last_day('MT'),
                            width=30)
    mt_last_btn.grid(row=17, column=0)
    mt_sel_day_text = tk.Entry(root, width=30, justify='center')
    mt_sel_day_text.grid(row=18, column=1)
    # bug fix: the MT select-day/select-id commands previously read the
    # Initial Touch entries (it_sel_day_text / it_sel_id_text)
    mt_sel_day_btn = tk.Button(root, text='Must Touch (Select Day)',
                               command=lambda: get_general_ts_select_day('MT', mt_sel_day_text), width=30)
    mt_sel_day_btn.grid(row=18, column=0)
    mt_sel_id_text = tk.Entry(root, width=30, justify='center')
    mt_sel_id_text.grid(row=19, column=1)
    mt_sel_id_btn = tk.Button(root, text='Must Touch (Select ID)',
                              command=lambda: get_general_ts_select_id('MT', mt_sel_id_text), width=30)
    mt_sel_id_btn.grid(row=19, column=0)
    # visual spacer between mt and pi
    spacer_btn_four = tk.Label(root, text='', width=57, bg='#D6D6D6')
    spacer_btn_four.grid(row=20, columnspan=2)
    # creates all the pi criteria widgets
    pi_min_trial_label = tk.Label(root, text='Enter the min req trial amount:')
    pi_min_trial_label.grid(row=21, column=0)
    pi_min_trial_text = tk.Entry(root, width=30, justify='center')
    pi_min_trial_text.grid(row=21, column=1)
    pi_correct_one_label = tk.Label(root, text='Enter the min % correct for first day:')
    pi_correct_one_label.grid(row=22, column=0)
    pi_correct_one_text = tk.Entry(root, width=30, justify='center')
    pi_correct_one_text.grid(row=22, column=1)
    pi_correct_two_label = tk.Label(root, text='Enter the min % correct for second day:')
    pi_correct_two_label.grid(row=23, column=0)
    pi_correct_two_text = tk.Entry(root, width=30, justify='center')
    pi_correct_two_text.grid(row=23, column=1)
    # creates all the pi buttons
    pi_btn = tk.Button(root, text='Punish Incorrect (All)',
                       command=lambda: pi_all_button(pi_min_trial_text, pi_correct_one_text, pi_correct_two_text),
                       width=30)
    pi_btn.grid(row=24, column=0)
    pi_first_btn = tk.Button(root, text='Punish Incorrect (First Day)',
                             command=lambda: pi_first_button(pi_min_trial_text, pi_correct_one_text,
                                                             pi_correct_two_text),
                             width=30)
    pi_first_btn.grid(row=25, column=0)
    pi_last_btn = tk.Button(root, text='Punish Incorrect (Last Day)',
                            command=lambda: pi_last_button(pi_min_trial_text, pi_correct_one_text, pi_correct_two_text),
                            width=30)
    pi_last_btn.grid(row=26, column=0)
    pi_sel_day_text = tk.Entry(root, width=30, justify='center')
    pi_sel_day_text.grid(row=27, column=1)
    pi_sel_day_btn = tk.Button(root, text='Punish Incorrect (Select Day)',
                               command=lambda: pi_select_day_button(pi_min_trial_text, pi_correct_one_text,
                                                                    pi_correct_two_text, pi_sel_day_text), width=30)
    pi_sel_day_btn.grid(row=27, column=0)
    pi_sel_id_text = tk.Entry(root, width=30, justify='center')
    pi_sel_id_text.grid(row=28, column=1)
    pi_sel_id_btn = tk.Button(root, text='Punish Incorrect (Select ID)',
                              command=lambda: pi_select_id_button(pi_min_trial_text, pi_correct_one_text,
                                                                  pi_correct_two_text, pi_sel_id_text), width=30)
    pi_sel_id_btn.grid(row=28, column=0)
| StarcoderdataPython |
"Tensorflow version 1.x modeling code."
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import math
import collections
import re
import six
from six.moves import range
import tensorflow as tf
def bilinear_classifier(in1_BTH, in2_BTH, keep_prob, output_size=1,
                        add_bias_1=True, add_bias_2=True, name='Bilinear'):
  """Score all pairs of positions in two [batch, time, hidden] inputs.

  Thin wrapper around biaffine_mapping() that squeezes/transposes the result
  into the layout downstream code expects.

  NOTE(review): despite the original docstring's claim, no dropout is applied
  here -- `keep_prob` is accepted but unused. It is kept for interface
  compatibility; confirm whether input dropout was intended.

  Args:
    in1_BTH: float Tensor [batch, time, hidden].
    in2_BTH: float Tensor [batch, time, hidden].
    keep_prob: unused (see note above).
    output_size: number of output labels per position pair.
    add_bias_1: whether to append a constant-1 feature to `in1_BTH`.
    add_bias_2: whether to append a constant-1 feature to `in2_BTH`.
    name: variable scope name.

  Returns:
    [batch, time, time] if output_size == 1, otherwise
    [batch, time, time, output_size].
  """
  # Dead locals from the original (input_size, batch_size, noise_shape) were
  # removed; they were only meaningful for the never-implemented dropout.
  with tf.variable_scope(name):
    biaffine = biaffine_mapping(
        in1_BTH,
        in2_BTH,
        output_size,
        add_bias_1=add_bias_1,
        add_bias_2=add_bias_2,
        initializer=tf.zeros_initializer())
    if output_size == 1:
      # [b, t, 1, t] -> [b, t, t]
      output = tf.squeeze(biaffine, axis=2)
    else:
      # [b, t, r, t] -> [b, t, t, r]
      output = tf.transpose(biaffine, [0, 1, 3, 2])
  return output
def biaffine_mapping(vector_set_1,
                     vector_set_2,
                     output_size,
                     add_bias_1=True,
                     add_bias_2=True,
                     initializer=None):
  """Bilinear mapping: maps two vector spaces to a third vector space.

  The input vector spaces are two 3d matrices: batch size x feature size x values
  A typical application of the function is to compute a square matrix
  representing a dependency tree. The output is for each feature a square
  matrix of the form [feature size, output size, feature size]. If the output size
  is set to 1 then results is [feature size, 1, feature size] equivalent to
  a square matrix where the feature for instance represent the tokens on
  the x-axis and y-axis. In this way represent the adjacency matrix of a
  dependency graph (see https://arxiv.org/abs/1611.01734).

  Args:
    vector_set_1: vectors of space one
    vector_set_2: vectors of space two
    output_size: number of output labels (e.g. edge labels)
    add_bias_1: Whether to add a bias for input one
    add_bias_2: Whether to add a bias for input two
    initializer: Initializer for the bilinear weight map

  Returns:
    Output vector space as 4d matrix:
    batch size x feature size x output size x feature size
    The output could represent an unlabeled dependency tree when
    the output size is 1 or a labeled tree otherwise.
  """
  with tf.variable_scope('Bilinear'):
    # Dynamic shape info
    batch_size = tf.shape(vector_set_1)[0]
    feature_size = tf.shape(vector_set_1)[1]
    # Optionally append a constant-1 feature so the map learns linear terms.
    if add_bias_1:
      vector_set_1 = tf.concat(
          [vector_set_1, tf.ones([batch_size, feature_size, 1])], axis=2)
    if add_bias_2:
      vector_set_2 = tf.concat(
          [vector_set_2, tf.ones([batch_size, feature_size, 1])], axis=2)
    # Static shape info (bias concat above changes the last dimension)
    vector_set_1_size = vector_set_1.get_shape().as_list()[-1]
    vector_set_2_size = vector_set_2.get_shape().as_list()[-1]
    if not initializer:
      initializer = tf.orthogonal_initializer()
    # Mapping matrix
    bilinear_map = tf.get_variable(
        'bilinear_map', [vector_set_1_size, output_size, vector_set_2_size],
        initializer=initializer)
    # Cleanup: the hand-rolled reshape/matmul pipeline that used to live here
    # (commented out) was removed; the single einsum below is equivalent.
    # [b, x, i] x [i, o, j] x [b, y, j] -> [b, x, o, y]
    bilinear_mapping = tf.einsum('bxi,ioj,byj->bxoy', vector_set_1, bilinear_map, vector_set_2)
  return bilinear_mapping
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
  """Compute the union of the current variables and checkpoint variables.

  Args:
    tvars: list of `tf.Variable` objects from the current graph.
    init_checkpoint: path to the checkpoint to initialize from.

  Returns:
    A tuple `(assignment_map, initialized_variable_names)`:
    `assignment_map` maps checkpoint variable names to (identical) graph
    variable names; `initialized_variable_names` marks each initialized
    variable name, both with and without the ':0' output suffix.
  """
  initialized_variable_names = {}
  # Strip the ':0' output suffix so graph names match checkpoint names.
  # (Cleanup: the redundant `assignment_map = {}` that was immediately
  # shadowed below has been removed.)
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  assignment_map = collections.OrderedDict()
  for x in init_vars:
    (name, var) = (x[0], x[1])
    if name not in name_to_variable:
      # Checkpoint variable has no counterpart in the current graph.
      continue
    assignment_map[name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[name + ":0"] = 1
  return (assignment_map, initialized_variable_names)
def get_assignment_map_from_albert_checkpoint(tvars, init_checkpoint, num_of_group=0):
  """Compute the union of the current variables and albert checkpoint variables.

  albert contains groups of layers which share variables. When
  `num_of_group > 0`, the return value's `assignment_map` is a LIST of
  per-group OrderedDicts (index = group id); otherwise it is a single
  OrderedDict mapping checkpoint variable name -> graph variable name.
  """
  # NOTE: this initial dict is overwritten below before first use; kept as-is.
  assignment_map = {}
  initialized_variable_names = {}
  # Map graph variable names (with the ':0' suffix stripped) to variables.
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  init_vars_name = [name for (name, _) in init_vars]
  # One assignment map per parameter-sharing group, or a single flat map.
  if num_of_group > 0:
    assignment_map = []
    for gid in range(num_of_group):
      assignment_map.append(collections.OrderedDict())
  else:
    assignment_map = collections.OrderedDict()
  for name in name_to_variable:
    # Resolve each graph variable to a checkpoint name. Order matters:
    # prefer an exact match, then fall back to remapping the group/ffn/
    # attention index to the canonical shared copy in the checkpoint.
    if name in init_vars_name:
      tvar_name = name
    elif (re.sub(r"/group_\d+/", "/group_0/",
                 six.ensure_str(name)) in init_vars_name and
          num_of_group > 1):
      tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name))
    elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
    elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/attention_\d+/", "/attention_1/",
                         six.ensure_str(name))
    else:
      # Variable exists in the graph but not (even remapped) in the checkpoint.
      tf.logging.info("name %s does not get matched", name)
      continue
    tf.logging.info("name %s match to %s", name, tvar_name)
    if num_of_group > 0:
      # Route the variable into the assignment map of its group; anything
      # without an explicit group/ffn/attention index falls into group 0.
      group_matched = False
      for gid in range(1, num_of_group):
        if (("/group_" + str(gid) + "/" in name) or
            ("/ffn_" + str(gid) + "/" in name) or
            ("/attention_" + str(gid) + "/" in name)):
          group_matched = True
          tf.logging.info("%s belongs to %dth", name, gid)
          assignment_map[gid][tvar_name] = name
      if not group_matched:
        assignment_map[0][tvar_name] = name
    else:
      assignment_map[tvar_name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[six.ensure_str(name) + ":0"] = 1
  return (assignment_map, initialized_variable_names)
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Map word ids to embedding vectors.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] (or
      [batch_size, seq_length, num_inputs]) containing word ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, multiply one-hot id vectors by the
      table instead of calling `tf.nn.embedding_lookup()`.

  Returns:
    A tuple `(embeddings, embedding_table)` where `embeddings` has shape
    [batch_size, seq_length, num_inputs * embedding_size].
  """
  # Canonicalize a rank-2 [batch_size, seq_length] input to rank 3 with a
  # singleton num_inputs axis.
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])
  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))
  if use_one_hot_embeddings:
    one_hot_ids = tf.one_hot(tf.reshape(input_ids, [-1]), depth=vocab_size)
    looked_up = tf.matmul(one_hot_ids, embedding_table)
  else:
    looked_up = tf.nn.embedding_lookup(embedding_table, input_ids)
  # Fold the per-position num_inputs axis into the feature axis.
  ids_shape = get_shape_list(input_ids)
  looked_up = tf.reshape(
      looked_up, ids_shape[0:-1] + [ids_shape[-1] * embedding_size])
  return (looked_up, embedding_table)
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.
  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.
  Returns:
    float tensor with same shape as `input_tensor`.
  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]
  # `output` accumulates the word embeddings plus each enabled embedding type.
  output = input_tensor
  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings
  if use_position_embeddings:
    # Fail at run time if the sequence is longer than the learned position
    # table; the variable creation below is gated on this assertion.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())
      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings
  # Normalize the summed embeddings, then apply dropout.
  output = layer_norm_and_dropout(output, dropout_prob)
  return output
def stack_rnn(inputs, rnn_size, dropout_rate, sequence_length, initial_state=None, num_layers=1, rnn_type='lstm'):
    """Stack multiple layers of RNN/LSTM/GRU with rnn.MultiRNNCell API.

    Args:
        inputs: float Tensor [batch_size, seq_length, input_size].
        rnn_size: number of units per cell.
        dropout_rate: probability of dropping each cell's output units.
        sequence_length: int Tensor [batch_size] of true sequence lengths.
        initial_state: optional initial state for the stacked cell; when None
            a zero state is built from the dynamic batch size.
        num_layers: number of stacked cells.
        rnn_type: 'lstm', 'gru', or anything else for a BasicRNNCell.

    Returns:
        (outputs, states): outputs is [batch_size, seq_length, rnn_size]; for
        LSTM, states is an N-tuple of LSTMStateTuple (N = num_layers).
    """
    def get_cell():
        # Pick the cell implementation; vanilla RNN is the fallback.
        if rnn_type.lower() == "lstm":
            # call return: new_h, LSTMStateTuple(new_c, new_h)
            rnn_cell = tf.nn.rnn_cell.LSTMCell(num_units=rnn_size)
        elif rnn_type.lower() == "gru":
            # call return: new_h, new_h
            rnn_cell = tf.nn.rnn_cell.GRUCell(num_units=rnn_size)
        else:
            # call return: output, output, output = new_state
            rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=rnn_size)
        rnn_cell = tf.nn.rnn_cell.DropoutWrapper(
            cell=rnn_cell,
            output_keep_prob=1 - dropout_rate
        )
        return rnn_cell
    rnn_cells = [get_cell() for _ in range(num_layers)]
    rnn_cells = tf.contrib.rnn.MultiRNNCell(rnn_cells)
    # Bug fix: use an identity check. `if not initial_state:` truth-tests the
    # caller's state object, which raises for Tensor-valued states and is
    # ambiguous for state tuples; only a missing (None) state should trigger
    # zero-state construction.
    if initial_state is None:
        input_shape = get_shape_list(inputs, expected_rank=3)
        batch_size = input_shape[0]
        initial_state = rnn_cells.zero_state(batch_size, tf.float32)
    outputs, states = tf.nn.dynamic_rnn(
        cell=rnn_cells,
        inputs=inputs,
        sequence_length=sequence_length,
        initial_state=initial_state,
        dtype=tf.float32
    )
    # outputs shape [batch_size, seq, rnn_size]
    # For LSTM, 'state' is a N-tuple where N is the number of Cells containing a
    # tf.nn.rnn_cell.LSTMStateTuple for each cell
    return outputs, states
def stack_rnn_fused(inputs, sequence_length, rnn_size, dropout_rate,
                    is_training, num_layers=1, rnn_type='lstm'):
    """ FusedRNNCell with LSTM or GRU
    Args:
      inputs: `3-D` tensor with shape `[batch_size, time_len, input_size]`
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)` or None.
    Returns:
      Cell state (cs): A `3-D` tensor of shape `[batch_size, time_len,
        output_size]`
    Raises:
      ValueError: if `rnn_type` is not 'lstm' or 'gru'.
    """
    def get_rnn_fused(inputs, sequence_length, rnn_size, is_training, dropout_rate=0.5,
                      scope='{}-fused'.format(rnn_type)):
        # One fused RNN layer: transpose to time-major, run the cell,
        # transpose back, then apply dropout (active only in training).
        with tf.variable_scope(scope):
            inputs = tf.transpose(inputs, perm=[1, 0, 2])  # Need time-major
            if rnn_type.lower() == "lstm":
                rnn_cell = tf.contrib.rnn.LSTMBlockFusedCell(rnn_size)
            elif rnn_type.lower() == "gru":
                # NOTE(review): GRUBlockCellV2 is a per-step RNNCell, not a
                # fused whole-sequence cell like LSTMBlockFusedCell; confirm
                # the GRU path is actually exercised with this call signature.
                rnn_cell = tf.contrib.rnn.GRUBlockCellV2(rnn_size)
            else:
                # Bug fix: previously an unsupported type fell through and
                # crashed later with an opaque UnboundLocalError.
                raise ValueError(
                    "Unsupported rnn_type: %r (expected 'lstm' or 'gru')" % rnn_type)
            outputs, _ = rnn_cell(inputs, dtype=tf.float32,
                                  sequence_length=sequence_length)
            outputs = tf.transpose(outputs, perm=[1, 0, 2])
            outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=is_training)
            return outputs
    rnn_output = tf.identity(inputs)
    for i in range(num_layers):
        scope = 'rnn-fused-%s' % i
        rnn_output = get_rnn_fused(
            rnn_output,
            sequence_length=sequence_length,
            rnn_size=rnn_size,
            is_training=is_training,
            dropout_rate=dropout_rate,
            scope=scope
        )  # (batch_size, seq_length, rnn_size)
    return rnn_output
def lstm_fused(inputs, sequence_length, lstm_size, dropout_rate,
               is_training, num_layers=1):
    """ FusedRNNCell with LSTM
    Args:
      inputs: `3-D` tensor with shape `[batch_size, time_len, input_size]`
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)` or None.
    Returns:
      Cell state (cs): A `3-D` tensor of shape `[batch_size, time_len,
        output_size]`
    """
    def _one_layer(layer_input, scope):
        # Single fused-LSTM layer: time-major in/out, dropout on top
        # (dropout is active only while training).
        with tf.variable_scope(scope):
            time_major = tf.transpose(layer_input, perm=[1, 0, 2])
            cell = tf.contrib.rnn.LSTMBlockFusedCell(lstm_size)
            cell_out, _ = cell(time_major, dtype=tf.float32,
                               sequence_length=sequence_length)
            batch_major = tf.transpose(cell_out, perm=[1, 0, 2])
            return tf.layers.dropout(batch_major, rate=dropout_rate,
                                     training=is_training)
    layer_output = tf.identity(inputs)
    for layer_idx in range(num_layers):
        layer_output = _one_layer(layer_output, 'lstm-fused-%s' % layer_idx)
    # (batch_size, seq_length, lstm_size)
    return layer_output
def bilstm_fused(inputs, sequence_lengths, lstm_size, bilstm_dropout_rate,
                 is_training, num_layers=1):
    """ FusedRNNCell uses a single TF op for the entire LSTM. """
    def _bi_layer(layer_input, scope):
        # One bidirectional fused-LSTM layer: run forward and (time-reversed)
        # backward passes over the time-major input, concat the features,
        # then apply dropout (active only while training).
        with tf.variable_scope(scope):
            time_major = tf.transpose(layer_input, perm=[1, 0, 2])
            fw_cell = tf.contrib.rnn.LSTMBlockFusedCell(lstm_size)
            bw_cell = tf.contrib.rnn.TimeReversedFusedRNN(
                tf.contrib.rnn.LSTMBlockFusedCell(lstm_size))
            fw_out, _ = fw_cell(time_major, dtype=tf.float32,
                                sequence_length=sequence_lengths)
            bw_out, _ = bw_cell(time_major, dtype=tf.float32,
                                sequence_length=sequence_lengths)
            merged = tf.concat([fw_out, bw_out], axis=-1)
            merged = tf.transpose(merged, perm=[1, 0, 2])
            return tf.layers.dropout(merged, rate=bilstm_dropout_rate,
                                     training=is_training)
    layer_output = tf.identity(inputs)
    for layer_idx in range(num_layers):
        layer_output = _bi_layer(layer_output, 'bi-lstm-fused-%s' % layer_idx)
    # (batch_size, seq_length, 2*lstm_size)
    return layer_output
def idcnn_layer(config, model_inputs, name=None):
    """
    Iterated dilated CNN (ID-CNN) encoder over a token sequence.

    config params:
        is_train: bool, whether the graph is built for training
        filter_height: for text, 1
        filter_width: convolution filter width
        embedding_dim: number of input channels
        num_filter: number of output channels per conv layer
        repeat_times: how many times the dilation block is repeated
        layers: list of dicts, each with a 'dilation' entry
        initializer: initializer for the initial conv filter

    Side effect: sets config.cnn_output_width to the concatenated output width.

    :param model_inputs: [batch_size, seq_len, emb_size]
    :return: [batch_size * seq_len, cnn_output_width] (tokens flattened into
        the first axis; the original docstring's 3-D shape was inaccurate)
    """
    # shape of input = [batch, in_height=1, in_width=seq_len, in_channels=emb_size]
    model_inputs = tf.expand_dims(model_inputs, 1)
    with tf.variable_scope("idcnn" if not name else name):
        # filter [filter_height, filter_width, in_channels, out_channels]
        filter_shape = [1, config.filter_width, config.embedding_dim,
                        config.num_filter]
        filter_weights = tf.get_variable(
            "idcnn_filter",
            shape=filter_shape,
            initializer=config.initializer)
        layerInput = tf.nn.conv2d(model_inputs,
                                  filter_weights,
                                  strides=[1, 1, 1, 1],
                                  padding="SAME",
                                  name="init_layer")
        finalOutFromLayers = []
        totalWidthForLastDim = 0
        for j in range(config.repeat_times):
            for i in range(len(config.layers)):
                dilation = config.layers[i]['dilation']
                isLast = True if i == (len(config.layers) - 1) else False
                # AUTO_REUSE makes each repeat share the same atrous filters.
                with tf.variable_scope("atrous-conv-layer-%d" % i,
                                       reuse=tf.AUTO_REUSE):
                    w = tf.get_variable(
                        "filter_w",
                        shape=[1, config.filter_width, config.num_filter,
                               config.num_filter],
                        initializer=tf.contrib.layers.xavier_initializer())
                    b = tf.get_variable("filter_b", shape=[config.num_filter])
                    conv = tf.nn.atrous_conv2d(layerInput,
                                               w,
                                               rate=dilation,
                                               padding="SAME")
                    conv = tf.nn.bias_add(conv, b)
                    conv = tf.nn.relu(conv)
                    if isLast:
                        # Only the last dilation layer of each repeat feeds
                        # the concatenated output.
                        finalOutFromLayers.append(conv)
                        totalWidthForLastDim += config.num_filter
                    layerInput = conv
        finalOut = tf.concat(axis=3, values=finalOutFromLayers)
        # Bug fix: dropout must be active during training (keep_prob < 1) and
        # disabled at inference. The previous expression
        # `1.0 if config.is_train else 0.5` was inverted.
        keepProb = 0.5 if config.is_train else 1.0
        finalOut = tf.nn.dropout(finalOut, keepProb)
        finalOut = tf.squeeze(finalOut, [1])
        finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim])
        # Record the concatenated feature width for the projection layer.
        config.cnn_output_width = totalWidthForLastDim
        return finalOut
def cudnn_rnn(inputs, sequence_lengths, time_major=False,
              num_layers=1, dropout=0.0, rnn_size=128, is_training=True,
              cell_type='lstm', direction='unidirectional'):
  """Run a cuDNN-backed RNN (LSTM/GRU/vanilla) over an id or feature tensor.

  Args:
    inputs: int32 Tensor of shape [batch_size, seq_length] containing word
      ids (promoted to rank 3), or an already rank-3 feature tensor.
    sequence_lengths: an int32 array of per-example sequence lengths; size
      must equal the batch size. If not provided, a uniform length is assumed.
    time_major: layout of `inputs`/outputs. True means
      [max_time, batch_size, depth]; False means [batch_size, max_time,
      depth]. Only effective when `sequence_lengths` is used.
    num_layers: number of stacked RNN layers.
    dropout: dropout probability between layers.
    rnn_size: number of units per layer.
    is_training: whether the op runs in training or inference mode.
    cell_type: one of 'lstm', 'gru', 'rnn_relu', 'rnn_tanh'.
    direction: 'unidirectional' or 'bidirectional'.

  Returns:
    A tuple (output, output_states): output has shape
    [time_len, batch_size, num_dirs * num_units] when time_major, else
    [batch_size, time_len, num_dirs * num_units]; it is
    concat([fwd_output, bak_output], axis=2) for bidirectional runs.
    output_states matches the structure of the cell's initial state.
  """
  # Promote a 2-D [batch_size, seq_length] input to rank 3 with a trailing
  # singleton feature axis.
  if inputs.shape.ndims == 2:
    inputs = tf.expand_dims(inputs, axis=[-1])
  # Dispatch table over the supported cuDNN cell implementations.
  cudnn_classes = {
      'lstm': tf.contrib.cudnn_rnn.CudnnLSTM,
      'gru': tf.contrib.cudnn_rnn.CudnnGRU,
      'rnn_relu': tf.contrib.cudnn_rnn.CudnnRNNRelu,
      'rnn_tanh': tf.contrib.cudnn_rnn.CudnnRNNTanh,
  }
  rnn = cudnn_classes[cell_type](
      num_layers=num_layers,
      num_units=rnn_size,
      direction=direction,
      dropout=dropout,
  )
  outputs, output_states = rnn(
      inputs=inputs,
      sequence_lengths=sequence_lengths,
      time_major=time_major,
      training=is_training,
  )
  return outputs, output_states
def create_initializer(initializer_range=0.02):
  """Build the truncated-normal weight initializer used across this module."""
  stddev = initializer_range
  return tf.truncated_normal_initializer(stddev=stddev)
def einsum_via_matmul(input_tensor, w, num_inner_dims):
  """Implements einsum via matmul and reshape ops.

  Equivalences:
    tf.einsum("BFH,HO->BFO", x, w)    == einsum_via_matmul(x, w, 1)
                                         (same as tf.matmul(x, w))
    tf.einsum("BFH,HND->BFND", x, w)  == einsum_via_matmul(x, w, 1)
    tf.einsum("BFND,NDH->BFH", x, w)  == einsum_via_matmul(x, w, 2)

  Args:
    input_tensor: float Tensor of shape [<batch_dims>, <inner_dims>].
    w: float Tensor of shape [<inner_dims>, <outer_dims>].
    num_inner_dims: int. number of dimensions to use for inner products.

  Returns:
    float Tensor of shape [<batch_dims>, <outer_dims>].
  """
  in_shape = get_shape_list(input_tensor)
  w_shape = get_shape_list(w)
  batch_dims = in_shape[:-num_inner_dims]
  inner_dims = in_shape[-num_inner_dims:]
  outer_dims = w_shape[num_inner_dims:]
  flat_inner = np.prod(inner_dims)
  flat_outer = np.prod(outer_dims)
  # Collapse the contracted dimensions so a single 2-D matmul applies.
  if num_inner_dims > 1:
    input_tensor = tf.reshape(input_tensor, batch_dims + [flat_inner])
  if len(w_shape) > 2:
    w = tf.reshape(w, [flat_inner, flat_outer])
  result = tf.matmul(input_tensor, w)
  # Restore the outer dimensions if the weight contributed more than one.
  if len(outer_dims) > 1:
    result = tf.reshape(result, batch_dims + outer_dims)
  return result
def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT
      of *keeping* a dimension as in `tf.nn.dropout`). None or 0.0 disables
      dropout entirely.

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  # None and 0.0 are both falsy: skip the op entirely in those cases.
  if dropout_prob:
    return tf.nn.dropout(input_tensor, rate=dropout_prob)
  return input_tensor
def layer_norm(input_tensor, name=None):
  """Apply layer normalization over the last dimension of `input_tensor`."""
  normalized = tf.contrib.layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
  return normalized
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Layer-normalize `input_tensor`, then apply dropout to the result."""
  return dropout(layer_norm(input_tensor, name), dropout_prob)
def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   head_size,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 3D kernel: projects [B, F, H] to [B, F, N, D].

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads (N).
    head_size: The size per attention head (D).
    initializer: Kernel initializer.
    activation: Activation function, or None for a linear projection.
    name: The name scope of this layer.

  Returns:
    float Tensor of shape [batch, seq_length, num_attention_heads, head_size].
  """
  hidden_size = get_shape_list(input_tensor)[2]
  with tf.variable_scope(name):
    # Kernel/bias are created flat and reshaped into per-head form.
    kernel = tf.get_variable(
        name="kernel",
        shape=[hidden_size, num_attention_heads * head_size],
        initializer=initializer)
    kernel = tf.reshape(kernel, [hidden_size, num_attention_heads, head_size])
    bias = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * head_size],
        initializer=tf.zeros_initializer)
    bias = tf.reshape(bias, [num_attention_heads, head_size])
    projected = tf.einsum("BFH,HND->BFND", input_tensor, kernel) + bias
    return projected if activation is None else activation(projected)
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel projecting per-head outputs back to hidden.

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    head_size: The size of head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float Tensor of shape [batch, from_seq_length, hidden_size].
  """
  input_shape = get_shape_list(input_tensor)
  # The head count is inferred from the input shape, not passed as an argument.
  num_attention_heads = input_shape[2]
  with tf.variable_scope(name):
    # Stored 2D ([N*H, hidden]) for checkpoint compatibility, used 3D below.
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    # B=batch, F=seq, N=heads, D=head size, H=hidden: merge the heads back.
    ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   num_attention_heads=1,
                   name=None):
  """A dense layer with 2D kernel (plain per-position projection).

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    num_attention_heads: number of attention head in attention layer; kept
      only for signature compatibility and ignored here.
    name: The name scope of this layer.

  Returns:
    float Tensor of shape [batch, seq_length, output_size].
  """
  del num_attention_heads  # unused
  input_shape = get_shape_list(input_tensor)
  hidden_size = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, output_size],
        initializer=initializer)
    b = tf.get_variable(
        name="bias", shape=[output_size], initializer=tf.zeros_initializer)
    # B=batch, F=seq, H=hidden, O=output.
    ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
  """Dot-product attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
      match with q.
    bias: bias Tensor (see attention_bias())
    dropout_rate: a float.

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
  # Scale by 1/sqrt(depth_k) so the softmax operates in a range with useful
  # gradients ("scaled dot-product attention").
  logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
  if bias is not None:
    # `attention_mask` = [B, T]
    from_shape = get_shape_list(q)
    if len(from_shape) == 4:
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
    elif len(from_shape) == 5:
      # from_shape = [B, N, Block_num, block_size, depth]#
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
                                1], tf.float32)
    # NOTE(review): if `q` is neither rank 4 nor rank 5, `broadcast_ones` is
    # never bound and the next line raises NameError -- confirm callers only
    # pass rank-4/5 tensors here.
    bias = tf.matmul(broadcast_ones,
                     tf.cast(bias, tf.float32), transpose_b=True)
    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - bias) * -10000.0
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    logits += adder
  else:
    adder = 0.0  # unused in this branch; kept for symmetry with the original
  attention_probs = tf.nn.softmax(logits, name="attention_probs")
  attention_probs = dropout(attention_probs, dropout_rate)
  return tf.matmul(attention_probs, v)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions
      in the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    batch_size: (Optional) int. If the input is 2D, this might be the batch
      size of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq
      length of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
      size_per_head].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
  # Head size derived from the input width; assumes from_width is evenly
  # divisible by num_attention_heads (validated upstream in transformer_model).
  size_per_head = int(from_shape[2] / num_attention_heads)
  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")
  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")
  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`
  # `query_layer` = [B, F, N, H]
  q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), query_act, "query")
  # `key_layer` = [B, T, N, H]
  k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), key_act, "key")
  # `value_layer` = [B, T, N, H]
  v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), value_act, "value")
  # Move the head dimension ahead of the sequence dimension: [B, N, F/T, H].
  q = tf.transpose(q, [0, 2, 1, 3])
  k = tf.transpose(k, [0, 2, 1, 3])
  v = tf.transpose(v, [0, 2, 1, 3])
  if attention_mask is not None:
    # Reshape the mask to [B, 1, T, 1]; dot_product_attention re-broadcasts it
    # across heads and query positions.
    attention_mask = tf.reshape(
        attention_mask, [batch_size, 1, to_seq_length, 1])
  # 'new_embeddings = [B, N, F, H]'
  new_embeddings = dot_product_attention(q, k, v, attention_mask,
                                         attention_probs_dropout_prob)
  # Back to [B, F, N, H] for the output projection.
  return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
                        hidden_size=768,
                        attention_mask=None,
                        num_attention_heads=1,
                        attention_head_size=64,
                        attention_probs_dropout_prob=0.0,
                        intermediate_size=3072,
                        intermediate_act_fn=None,
                        initializer_range=0.02,
                        hidden_dropout_prob=0.0):
  """A network with attention-ffn as sub-block (one Transformer layer).

  Args:
    layer_input: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    hidden_size: (optional) int, size of hidden layer.
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions
      in the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    attention_head_size: int. Size of attention head.
    attention_probs_dropout_prob: float. dropout probability for
      attention_layer
    intermediate_size: int. Size of intermediate hidden layer.
    intermediate_act_fn: (optional) Activation function for the intermediate
      layer.
    initializer_range: float. Range of the weight initializer.
    hidden_dropout_prob: (optional) float. Dropout probability of the hidden
      layer.

  Returns:
    layer output
  """
  with tf.variable_scope("attention_1"):
    with tf.variable_scope("self"):
      attention_output = attention_layer(
          from_tensor=layer_input,
          to_tensor=layer_input,
          attention_mask=attention_mask,
          num_attention_heads=num_attention_heads,
          attention_probs_dropout_prob=attention_probs_dropout_prob,
          initializer_range=initializer_range)
    # Run a linear projection of `hidden_size` then add a residual
    # with `layer_input`.
    with tf.variable_scope("output"):
      attention_output = dense_layer_3d_proj(
          attention_output,
          hidden_size,
          attention_head_size,
          create_initializer(initializer_range),
          None,
          name="dense")
      attention_output = dropout(attention_output, hidden_dropout_prob)
  # Post-LN residual connection around the attention sub-layer.
  attention_output = layer_norm(attention_output + layer_input)
  with tf.variable_scope("ffn_1"):
    with tf.variable_scope("intermediate"):
      intermediate_output = dense_layer_2d(
          attention_output,
          intermediate_size,
          create_initializer(initializer_range),
          intermediate_act_fn,
          num_attention_heads=num_attention_heads,
          name="dense")
      with tf.variable_scope("output"):
        ffn_output = dense_layer_2d(
            intermediate_output,
            hidden_size,
            create_initializer(initializer_range),
            None,
            num_attention_heads=num_attention_heads,
            name="dense")
      ffn_output = dropout(ffn_output, hidden_dropout_prob)
  # Post-LN residual connection around the feed-forward sub-layer.
  ffn_output = layer_norm(ffn_output + attention_output)
  return ffn_output
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_hidden_groups=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      inner_group_num=1,
                      intermediate_act_fn="gelu",
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_hidden_groups: int. Number of group for the hidden layers, parameters
      in the same group are shared.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    inner_group_num: int, number of inner repetition of attention and ffn.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))
  attention_head_size = hidden_size // num_attention_heads
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  input_width = input_shape[2]
  all_layer_outputs = []
  # Project the embeddings up to `hidden_size` if they are narrower
  # (ALBERT-style factorized embedding parameterization).
  if input_width != hidden_size:
    prev_output = dense_layer_2d(
        input_tensor, hidden_size, create_initializer(initializer_range),
        None, name="embedding_hidden_mapping_in")
  else:
    prev_output = input_tensor
  # AUTO_REUSE + group-scoped variables implement cross-layer parameter
  # sharing: every layer mapped to the same group reuses the same weights.
  with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
    for layer_idx in range(num_hidden_layers):
      group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
      with tf.variable_scope("group_%d" % group_idx):
        with tf.name_scope("layer_%d" % layer_idx):
          layer_output = prev_output
          for inner_group_idx in range(inner_group_num):
            with tf.variable_scope("inner_group_%d" % inner_group_idx):
              layer_output = attention_ffn_block(
                  layer_output, hidden_size, attention_mask,
                  num_attention_heads, attention_head_size,
                  attention_probs_dropout_prob, intermediate_size,
                  intermediate_act_fn, initializer_range, hidden_dropout_prob)
              prev_output = layer_output
              all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    return all_layer_outputs
  else:
    return all_layer_outputs[-1]
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, and exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  static_shape = tensor.shape.as_list()
  # Dimensions unknown at graph-construction time show up as None.
  dynamic_indexes = [i for i, dim in enumerate(static_shape) if dim is None]
  if not dynamic_indexes:
    return static_shape
  # Patch the unknown entries with runtime scalars from tf.shape().
  dynamic_shape = tf.shape(tensor)
  for i in dynamic_indexes:
    static_shape[i] = dynamic_shape[i]
  return static_shape
def reshape_to_matrix(input_tensor):
  """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    # Already a matrix: avoid an unnecessary reshape op.
    return input_tensor
  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
  if len(orig_shape_list) == 2:
    # The original was already a matrix: nothing to undo.
    return output_tensor
  # Keep the (possibly transformed) trailing width; restore leading dims.
  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  if name is None:
    name = tensor.name
  # Normalize to a set of acceptable ranks.
  if isinstance(expected_rank, six.integer_types):
    allowed_ranks = {expected_rank}
  else:
    allowed_ranks = set(expected_rank)
  actual_rank = tensor.shape.ndims
  if actual_rank not in allowed_ranks:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| StarcoderdataPython |
3382390 | import numpy as np
from perceptron import Perceptron
class MLP:
    """A toy multi-layer perceptron assembled from individual `Perceptron` units.

    NOTE(review): every layer's perceptrons are sized on X.shape[1] (the raw
    input width) even though the hidden/output layers consume the previous
    layer's activations -- presumably this only works when the widths happen
    to line up; confirm against the Perceptron implementation.
    """
    def __init__(self, X, i = 2, j = 2, k = 1, alpha = 0.1):
        # X = input data
        # i = number of neurons for the input layer
        # j = number of neurons for the hidden layer
        # k = number of neurons for the output layer
        # alpha = learning rate forwarded to every Perceptron
        self.p = Perceptron(X.shape[1], alpha)  # standalone reference perceptron
        self.input_layer = []
        for index in np.arange(0, i):
            self.input_layer.append(Perceptron(X.shape[1], alpha))
        self.hidden_layer = []
        for index in np.arange(0,j):
            self.hidden_layer.append(Perceptron(X.shape[1], alpha))
        self.output_layer = []
        for index in np.arange(0,k):
            self.output_layer.append(Perceptron(X.shape[1], alpha))
        print("MLP Created.")
        print("Nodes on input layer: {}".format(len(self.input_layer)))
        print("Nodes on hidden layer: {}".format(len(self.hidden_layer)))
        print("Nodes on output layer: {}".format(len(self.output_layer)))
    def derivate(self, x):
        # Sigmoid derivate: assumes x is already sigmoid(z), so s'(z) = x * (1 - x).
        y = x * (1 - x)
        return y
    def fit_single(self, X, target_prediction, epochs = 100):
        # Fit single perceptron
        self.p.fit(X, target_prediction, epochs)
    def fit(self, X, target_prediction, epochs = 100):
        # Fit MLP
        # Add bias column
        X = np.c_[X, np.ones((X.shape[0]))]
        # Forward Propagation
        for epoch in np.arange(0, epochs):
            input_layer_Y = []
            hidden_layer_Y = []
            output_layer_Y = []
            error_output_layer = []
            # Input layer
            for index in np.arange(0, len(self.input_layer)):
                # Loop each data input
                for (x, target) in zip(X, target_prediction):
                    weighted_x = np.dot(x, self.input_layer[index].weight)
                    input_layer_Y.append(self.input_layer[index].activation(weighted_x))
            # Convert the output of the layer to match the initial input data type
            # NOTE(review): outputs are collected as one flat list (all samples
            # of neuron 0, then neuron 1, ...), not a (samples, neurons) array;
            # verify downstream indexing expects this layout.
            self.input_layer_Y = np.array(input_layer_Y)
            # Hidden layer
            for index in np.arange(0, len(self.hidden_layer)):
                # Loop each data input
                for (x, target) in zip(self.input_layer_Y, target_prediction):
                    weighted_x = np.dot(x, self.hidden_layer[index].weight)
                    hidden_layer_Y.append(self.hidden_layer[index].activation(weighted_x))
            # Convert the output of the layer to match the initial input data type
            self.hidden_layer_Y = np.array(hidden_layer_Y)
            # Output layer
            for index in np.arange(0, len(self.output_layer)):
                # Loop each data input
                for (x, target) in zip(self.hidden_layer_Y, target_prediction):
                    weighted_x = np.dot(x, self.output_layer[index].weight)
                    p = self.output_layer[index].activation(weighted_x)
                    output_layer_Y.append(p)
                    error = p - target
                    error_output_layer.append(error[0])
            # Convert the output of the layer to match the initial input data type
            self.output_layer_Y = np.array(output_layer_Y)
            self.error_output_layer = np.array(error_output_layer)
            # Backpropagation
            #self.update_weights(X, target_prediction)
            #print("Output from input layer: {}".format(self.input_layer_Y))
            #print("Output from hidden layer: {}".format(self.hidden_layer_Y))
            #print("Output from output layer: {}".format(self.output_layer_Y))
            #print("Error in the output layer: {}".format(self.error_output_layer))
    def update_weights(self, X, target_prediction):
        # Calculate deltas for error gradient and update weights
        # delta_k = y_k(1 - y_k) * e_k
        delta_output_layer = []
        for k in np.arange(len(self.output_layer)):
            delta_output_layer.append(self.derivate(self.output_layer_Y[k]) * self.error_output_layer[k])
            # w_k = w_k - alpha * y_k * e_k
            self.output_layer[k].weight += -self.output_layer[k].alpha * self.output_layer_Y[k] * self.error_output_layer[k]
        # delta_j = y_j(1 - y_j) * sum (w_jk) * delta_k
        delta_hidden_layer = []
        for j in np.arange(0, len(self.hidden_layer)):
            for k in np.arange(0, len(delta_output_layer)):
                # NOTE(review): `sum` shadows the builtin, and the hidden-layer
                # deltas collected here are never applied to any weights -- the
                # backprop step looks unfinished; confirm before relying on it.
                sum = self.hidden_layer[j].weight * delta_output_layer[k]
                delta_hidden_layer.append(self.derivate(self.hidden_layer_Y[j]) * sum)
    def predict_single(self, X):
        # Single perceptron predict
        return self.p.predict(X)
    def predict(self, X, add_bias = True):
        # MLP predict
        X = np.atleast_2d(X)
        if add_bias:
            X = np.c_[X, np.ones((X.shape[0]))]
        # Input layer
        # NOTE(review): this writes into self.input_layer_Y (populated by fit
        # with a flat per-sample list) indexed by neuron -- the indexing
        # conventions of fit() and predict() do not obviously agree; verify.
        for index in np.arange(0, len(self.input_layer)):
            # Loop each data input
            for (x) in zip(X):
                weighted_x = np.dot(x, self.input_layer[index].weight)
                self.input_layer_Y[index] = self.input_layer[index].activation(weighted_x)
        # Hidden layer
        for index in np.arange(0, len(self.hidden_layer)):
            # Loop each data input
            for (x) in zip(X):
                weighted_x = np.dot(x, self.hidden_layer[index].weight)
                self.hidden_layer_Y = self.hidden_layer[index].activation(weighted_x)
        # Output layer
        for index in np.arange(0, len(self.output_layer)):
            # Loop each data input
            for (x) in zip(X):
                weighted_x = np.dot(x, self.output_layer[index].weight)
                prediction = self.output_layer[index].activation(weighted_x)
        return prediction
3326974 | for _ in range(int(input())):
full_str = input()
full_list = list(full_str.split(" "))
#print(full_list)
lenght = len(full_list)
print("Count =", lenght) | StarcoderdataPython |
3278452 | from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.urls import path
from rest_framework.generics import DestroyAPIView
from loja.api.views import LojasView, LojasDetailView, LojaCreateAPIView, LojaUpdateAPIView, LojaDeleteAPIView
# URL namespace for the `loja` (store) app, used with Django URL namespacing.
app_name = 'loja'
# CRUD endpoints for Loja resources. Mutating endpoints and the detail view
# require an authenticated session via `login_required`.
# NOTE(review): the detail pattern must stay last -- it matches any slug and
# would otherwise shadow the more specific routes above it.
urlpatterns = [
    url(r'^listar/$', LojasView.as_view()),  # list all stores
    url(r'^adicionar/$', login_required(LojaCreateAPIView.as_view())),  # create
    url(r'^editar/(?P<pk>[-\w]+)/$', login_required(LojaUpdateAPIView.as_view())),  # update by pk
    url(r'^excluir/(?P<pk>[-\w]+)/$', login_required(LojaDeleteAPIView.as_view())),  # delete by pk
    url(r'^(?P<pk>[-\w]+)/$', login_required(LojasDetailView.as_view())),  # detail by pk
]
1626064 | <filename>src/hrflow_connectors/utils/logger.py
import logging
import sys
from typing import Union
# Name of the logger shared by the whole hrflow_connectors package.
LOGGER_NAME = "hrflow_connectors"
def get_logger() -> logging.Logger:
    """
    Return the package logger, attaching a `NullHandler` when no handler
    (own or inherited) is configured yet.

    Returns:
        logging.Logger: logger
    """
    pkg_logger = logging.getLogger(LOGGER_NAME)
    if pkg_logger.hasHandlers():
        return pkg_logger
    # No handler anywhere up the hierarchy: silence "no handler" warnings.
    pkg_logger.addHandler(logging.NullHandler())
    return pkg_logger
def get_logger_with_basic_config(
    level: Union[int, str] = logging.DEBUG
) -> logging.Logger:
    """
    Get logger with basic configuration :
        * `StreamHandler` on `stdout`
        * Level = `DEBUG`
        * Formatter = `[%(levelname)s][%(asctime)s][%(module)s:%(funcName)s:%(lineno)d] %(message)s`

    Unlike the previous implementation, calling this function several times
    no longer stacks duplicate stdout handlers (which caused every record to
    be printed once per call).

    Args:
        level (Union[int, str], optional): Log level. Defaults to `logging.DEBUG`.

    Returns:
        logging.Logger: logger with basic configuration
    """
    logger = get_logger()
    logger.setLevel(level)
    # Reuse an stdout handler attached by a previous call instead of adding
    # a duplicate one.
    stdout_handler = next(
        (
            h
            for h in logger.handlers
            if isinstance(h, logging.StreamHandler) and getattr(h, "stream", None) is sys.stdout
        ),
        None,
    )
    if stdout_handler is None:
        stdout_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stdout_handler)
    stdout_handler.setLevel(level)
    formatter = logging.Formatter(
        "[%(levelname)s][%(asctime)s][%(module)s:%(funcName)s:%(lineno)d] %(message)s"
    )
    stdout_handler.setFormatter(formatter)
    return logger
3250297 | <reponame>itsayusharya/passwordmanger
import random
import secrets
import sqlite3
class Password:
    """Generate strong random passwords and persist them in a SQLite database."""

    def __init__(self):
        """Build the pool of candidate characters passwords are drawn from."""
        lower = "abcdefghijklmnopqrstuvwxyz"
        upper = lower.upper()
        numbers = "1234567890"
        symbols = "!@#$%^&*()_+<>?"
        # Creates one single pool of characters
        self.char_pool = lower + upper + numbers + symbols

    def generate(self):
        """Generate a strong password of random length (8 to 32 characters).

        Uses the `secrets` module (a CSPRNG) instead of `random`, whose
        Mersenne Twister is predictable and unsuitable for secrets.

        Returns:
            str: the generated password (also stored on `self.password`).
        """
        pass_len = 8 + secrets.randbelow(25)  # uniform length in [8, 32]
        self.password = "".join(
            secrets.choice(self.char_pool) for _ in range(pass_len)
        )
        return self.password

    def passdb(self):
        """Prompt for a username, generate a password, and persist the pair.

        Side effects: reads from stdin, writes to stdout and to `pass.db`.
        """
        self.con = sqlite3.connect("pass.db")
        self.cur = self.con.cursor()
        self.cur.execute(
            """CREATE TABLE IF NOT EXISTS passwords(username, password)""")
        print("Enter Username: ")
        username = input()
        password = self.generate()
        print(f"Password generated for username '{username}': {password}")
        with self.con:
            # Parameterized query: safe against SQL injection; the `with`
            # block commits on success (the old explicit commit was redundant).
            self.cur.execute("INSERT INTO passwords VALUES(:user,:pass)",
                             {'user': username, 'pass': password})
        # Fetch from the cursor that actually ran the SELECT. The old code
        # executed the SELECT on the connection but called fetchall() on a
        # stale cursor, so it always printed the previous (empty) result set.
        rows = self.cur.execute("SELECT * FROM passwords").fetchall()
        print(rows)
        self.con.close()  # don't leak the database connection
if __name__ == "__main__":
    # Run the interactive flow only when executed as a script; the old
    # unconditional call prompted for input as a side effect of importing
    # this module.
    Password().passdb()
| StarcoderdataPython |
3360287 | #!/usr/bin/env python
# ~/sandboxes/PORTAGEshared/src/nn/test_portage_nnjm -native -s 2 -n 2 delme.bin <(echo '1 1 / 1 / 4')
from unpickle import Layer
from unpickle import Embed
from unpickle import writeModelToFile
import numpy as np
from numpy.random import random
from numpy import ones
from numpy import zeros
# Tiny NNJM (neural network joint model) dimensions for a smoke test.
embedding_size = 3
source_voc_size = 5
target_voc_size = 4
layer_size = 6
output_size = 5
# Embedding tables filled with constant values so the dumped model is easy
# to eyeball. NOTE(review): the third Embed argument (2 vs 1) is presumably
# the context window size -- confirm against unpickle.Embed.
semb = Embed(source_voc_size, embedding_size, 2, 1*ones((source_voc_size, embedding_size)))
temb = Embed(target_voc_size, embedding_size, 1, 2*ones((target_voc_size, embedding_size)))
# Hidden layer maps 9 inputs (3 embeddings of size 3, concatenated) to
# layer_size units with tanh; the output layer has no activation ("none").
layer = Layer(3*ones((9, layer_size)), zeros((layer_size, 1)), 'Elemwise{tanh,no_inplace}.0')
out = Layer(4*ones((layer_size, output_size)), zeros((output_size, 1)), "none")
model_name = 'nnjm'
# Dump the same model twice: human-readable text and binary format.
# NOTE(review): the ".bin" file is opened in text mode ('w'); if
# writeModelToFile emits raw bytes this should be 'wb' -- confirm.
with open(model_name + '.txt', 'w') as f:
    writeModelToFile(f, True, semb, temb, [layer], out.w.shape[1], out)
with open(model_name + '.bin', 'w') as f:
    writeModelToFile(f, False, semb, temb, [layer], out.w.shape[1], out)
| StarcoderdataPython |
3284507 | <gh_stars>0
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import division, unicode_literals
import re
import ssl
import threading
import time
import traceback
from collections import defaultdict
from datetime import timedelta
from pyVim import connect
from pyVmomi import vim # pylint: disable=E0611
from pyVmomi import vmodl # pylint: disable=E0611
from six import iteritems
from six.moves import range
from datadog_checks.base import ensure_unicode, to_string
from datadog_checks.base.checks import AgentCheck
from datadog_checks.base.checks.libs.thread_pool import SENTINEL, Pool
from datadog_checks.base.checks.libs.timer import Timer
from datadog_checks.base.checks.libs.vmware.all_metrics import ALL_METRICS
from datadog_checks.base.checks.libs.vmware.basic_metrics import BASIC_METRICS
from datadog_checks.base.config import is_affirmative
from .cache_config import CacheConfig
from .common import SOURCE_TYPE
from .errors import BadConfigError, ConnectionError
from .event import VSphereEvent
from .metadata_cache import MetadataCache, MetadataNotFoundError
from .mor_cache import MorCache, MorNotFoundError
from .objects_queue import ObjectsQueue
# Default vCenter sampling interval
REAL_TIME_INTERVAL = 20
# Metrics are only collected on vSphere VMs marked by custom field value
VM_MONITORING_FLAG = 'DatadogMonitored'
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of objects batched at the same time in the QueryPerf method to query available metrics
BATCH_MORLIST_SIZE = 50
# Maximum number of objects to collect at once by the propertyCollector. The size of the response returned by the query
# is significantly lower than the size of the queryPerf response, so allow specifying a different value.
BATCH_COLLECTOR_SIZE = 500
# Resource types for which vSphere exposes "real-time" samples.
REALTIME_RESOURCES = {'vm', 'host'}
# Managed-object types that carry performance metrics vs. pure containers.
RESOURCE_TYPE_METRICS = (vim.VirtualMachine, vim.Datacenter, vim.HostSystem, vim.Datastore, vim.ClusterComputeResource)
RESOURCE_TYPE_NO_METRIC = (vim.ComputeResource, vim.Folder)
# Map of vSphere rollup type -> short suffix used when building metric names.
SHORT_ROLLUP = {
    "average": "avg",
    "summation": "sum",
    "maximum": "max",
    "minimum": "min",
    "latest": "latest",
    "none": "raw",
}
def trace_method(method):
    """
    Decorator to catch and print the exceptions that happen within async tasks.
    Note: this should be applied to methods of VSphereCheck only!
    """
    def _traced(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            # args[0] is the VSphereCheck instance (`self` of the wrapped method).
            owner = args[0]
            owner.print_exception("A worker thread crashed:\n" + traceback.format_exc())
    return _traced
class VSphereCheck(AgentCheck):
""" Get performance metrics from a vCenter server and upload them to Datadog
References:
http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
*_atomic jobs perform one single task asynchronously in the ThreadPool, we
don't know exactly when they will finish, but we reap them if they're stuck.
The other calls are performed synchronously.
"""
SERVICE_CHECK_NAME = 'vcenter.can_connect'
pool = None
    def __init__(self, name, init_config, agentConfig, instances):
        """Set up per-check state: batching knobs, caches, queues and locks."""
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        self.time_started = time.time()
        # Batch sizes are clamped to >= 0 (0 meaning "no batching limit").
        self.batch_morlist_size = max(init_config.get("batch_morlist_size", BATCH_MORLIST_SIZE), 0)
        self.batch_collector_size = max(init_config.get("batch_property_collector_size", BATCH_COLLECTOR_SIZE), 0)
        self.refresh_morlist_interval = init_config.get('refresh_morlist_interval', REFRESH_MORLIST_INTERVAL)
        # Cleaning must never happen more often than refreshing, hence the max().
        self.clean_morlist_interval = max(
            init_config.get('clean_morlist_interval', 2 * self.refresh_morlist_interval), self.refresh_morlist_interval
        )
        self.refresh_metrics_metadata_interval = init_config.get(
            'refresh_metrics_metadata_interval', REFRESH_METRICS_METADATA_INTERVAL
        )
        # Connections open to vCenter instances
        self.server_instances = {}
        # Guards server_instances: connections are shared with worker threads.
        self.server_instances_lock = threading.RLock()
        # Event configuration
        self.event_config = {}
        # Caching configuration
        self.cache_config = CacheConfig()
        # build up configurations
        for instance in instances:
            i_key = self._instance_key(instance)
            # caches
            self.cache_config.set_interval(CacheConfig.Morlist, i_key, self.refresh_morlist_interval)
            self.cache_config.set_interval(CacheConfig.Metadata, i_key, self.refresh_metrics_metadata_interval)
            # events
            self.event_config[i_key] = instance.get('event_config')
        # Queue of raw Mor objects to process
        self.mor_objects_queue = ObjectsQueue()
        # Cache of processed Mor objects
        self.mor_cache = MorCache()
        # managed entity raw view
        self.registry = {}
        # Metrics metadata, for each instance keeps the mapping: perfCounterKey -> {name, group, description}
        self.metadata_cache = MetadataCache()
        # Per-instance watermark of the last vCenter event query time.
        self.latest_event_query = {}
        # Counter used by print_exception() to rate-limit worker-crash logs.
        self.exception_printed = 0
def print_exception(self, msg):
""" Print exceptions happening in separate threads
Prevent from logging a ton of them if a potentially big number of them fail the same way
"""
if self.exception_printed < 10:
self.log.error(msg)
self.exception_printed += 1
def start_pool(self):
self.log.info("Starting Thread Pool")
pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
self.pool = Pool(pool_size)
    def terminate_pool(self):
        """Forcefully stop the thread pool without waiting for queued jobs."""
        self.log.info("Terminating Thread Pool")
        self.pool.terminate()
        self.pool.join()
        # Debug-only sanity check (stripped under `python -O`): no worker
        # threads should survive termination.
        assert self.pool.get_nworkers() == 0
    def stop_pool(self):
        """Gracefully shut the pool down: drain queued jobs, then join workers."""
        self.log.info("Stopping Thread Pool, waiting for queued jobs to finish")
        # Push one SENTINEL per worker so each thread exits once the queue drains.
        for _ in self.pool._workers:
            self.pool._workq.put(SENTINEL)
        self.pool.close()
        self.pool.join()
        # Debug-only sanity check (stripped under `python -O`).
        assert self.pool.get_nworkers() == 0
    def _query_event(self, instance):
        """Fetch new vCenter events since the last query and forward them to Datadog.

        Side effects: emits Datadog events and advances the per-instance
        `latest_event_query` watermark.
        """
        i_key = self._instance_key(instance)
        last_time = self.latest_event_query.get(i_key)
        tags = instance.get('tags', [])
        server_instance = self._get_server_instance(instance)
        event_manager = server_instance.content.eventManager
        # Be sure we don't duplicate any event, never query the "past"
        if not last_time:
            last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
            self.latest_event_query[i_key] = last_time
        query_filter = vim.event.EventFilterSpec()
        time_filter = vim.event.EventFilterSpec.ByTime(beginTime=last_time)
        query_filter.time = time_filter
        try:
            new_events = event_manager.QueryEvents(query_filter)
            self.log.debug("Got %s events from vCenter event manager", len(new_events))
            for event in new_events:
                normalized_event = VSphereEvent(event, self.event_config[i_key], tags)
                # Can return None if the event if filtered out
                event_payload = normalized_event.get_datadog_payload()
                if event_payload is not None:
                    self.event(event_payload)
                # Advance the watermark just past each processed event.
                last_time = event.createdTime + timedelta(seconds=1)
        except Exception as e:
            # Don't get stuck on a failure to fetch an event
            # Ignore them for next pass
            self.log.warning("Unable to fetch Events %s", e)
            last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
        self.latest_event_query[i_key] = last_time
@staticmethod
def _instance_key(instance):
i_key = ensure_unicode(instance.get('name'))
if i_key is None:
raise BadConfigError("Must define a unique 'name' per vCenter instance")
return i_key
def _should_cache(self, instance, entity):
    """Return True when the cache for *entity* is older than its refresh interval."""
    i_key = self._instance_key(instance)
    last_refresh = self.cache_config.get_last(entity, i_key)
    refresh_interval = self.cache_config.get_interval(entity, i_key)
    return time.time() - last_refresh > refresh_interval
def _smart_connect(self, instance, service_check_tags):
    """Open a new connection to the vCenter server described by *instance*.

    Builds an SSL context from `ssl_verify`/`ssl_capath`, connects with
    pyVmomi's SmartConnect, and verifies that operations are actually
    permitted. On any failure a CRITICAL service check is emitted and
    ConnectionError is raised.
    """
    # Check for ssl configs and generate an appropriate ssl context object
    ssl_verify = instance.get('ssl_verify', True)
    ssl_capath = instance.get('ssl_capath', None)
    # NOTE: when both options are set, the `not ssl_verify` branch wins,
    # i.e. verification is disabled (see the debug message below).
    if not ssl_verify:
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
    elif ssl_capath:
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(capath=ssl_capath)
    # If both configs are used, log a message explaining the default
    if not ssl_verify and ssl_capath:
        self.log.debug(
            "Your configuration is incorrectly attempting to "
            "specify both a CA path, and to disable SSL "
            "verification. You cannot do both. Proceeding with "
            "disabling ssl verification."
        )
    try:
        # Object returned by SmartConnect is a ServerInstance
        # https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.ServiceInstance.html
        server_instance = connect.SmartConnect(
            host=instance.get('host'),
            user=instance.get('username'),
            pwd=instance.get('password'),
            # `context` is only defined when one of the branches above ran;
            # this condition matches exactly those cases.
            sslContext=context if not ssl_verify or ssl_capath else None,
        )
    except Exception as e:
        err_msg = "Connection to {} failed: {}".format(ensure_unicode(instance.get('host')), e)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=err_msg)
        raise ConnectionError(err_msg)
    # Check that we have sufficient permission for the calls we need to make
    try:
        server_instance.CurrentTime()
    except Exception as e:
        err_msg = (
            "A connection to {} can be established, but performing operations on the server fails: {}"
        ).format(ensure_unicode(instance.get('host')), e)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=err_msg)
        raise ConnectionError(err_msg)
    return server_instance
def _get_server_instance(self, instance):
    """Return a cached, healthy connection to the vCenter of *instance*.

    Connects on first use, probes the cached connection with CurrentTime(),
    and reconnects once on failure (a second failure inside _smart_connect
    sends CRITICAL and raises). Access is serialized by
    self.server_instances_lock since checks run from a thread pool.
    """
    i_key = self._instance_key(instance)
    tags = instance.get('tags', [])
    service_check_tags = [
        'vcenter_server:{}'.format(i_key),
        'vcenter_host:{}'.format(ensure_unicode(instance.get('host'))),
    ]
    service_check_tags.extend(tags)
    # Deduplicate tags; ordering is irrelevant for service checks.
    service_check_tags = list(set(service_check_tags))
    with self.server_instances_lock:
        if i_key not in self.server_instances:
            self.server_instances[i_key] = self._smart_connect(instance, service_check_tags)
        # Test if the connection is working
        try:
            self.server_instances[i_key].CurrentTime()
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
        except Exception:
            # Try to reconnect. If the connection is definitely broken,
            # this will send CRITICAL service check and raise
            self.server_instances[i_key] = self._smart_connect(instance, service_check_tags)
        return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
    """ Compare the available metrics for one MOR we have computed and intersect them
    with the set of metrics we want to report
    """
    i_key = self._instance_key(instance)
    if self.in_compatibility_mode(instance):
        # Legacy `all_metrics` behaviour: either everything, or only the
        # hard-coded BASIC_METRICS set.
        if instance.get('all_metrics', False):
            return available_metrics
        wanted_metrics = []
        # Get only the basic metrics
        for counter_id in available_metrics:
            # No cache yet, skip it for now
            if not self.metadata_cache.contains(i_key, counter_id):
                self.log.debug("No metadata found for counter %s, will not collect it", ensure_unicode(counter_id))
                continue
            metadata = self.metadata_cache.get_metadata(i_key, counter_id)
            if metadata.get('name') in BASIC_METRICS:
                wanted_metrics.append(vim.PerformanceManager.MetricId(counterId=counter_id, instance="*"))
        return wanted_metrics
    else:
        # The metadata cache contains only metrics of the desired level, so use it to filter the metrics to keep
        return [
            vim.PerformanceManager.MetricId(counterId=counter_id, instance="*")
            for counter_id in available_metrics
            if self.metadata_cache.contains(i_key, counter_id)
        ]
def get_external_host_tags(self):
    """
    Returns a list of tags for every host that is detected by the vSphere
    integration.
    Returns a list of pairs (hostname, {'SOURCE_TYPE: list_of_tags},)
    """
    self.log.debug("Sending external_host_tags now")
    host_tags = []
    for instance in self.instances:
        i_key = self._instance_key(instance)
        if not self.mor_cache.contains(i_key):
            self.log.warning("Unable to extract host tags for vSphere instance: %s", i_key)
            continue
        for _, mor in self.mor_cache.mors(i_key):
            # Note: some mors have a None hostname
            name = mor.get('hostname')
            if not name:
                continue
            host_tags.append((name, {SOURCE_TYPE: mor.get('tags')}))
    return host_tags
def _get_parent_tags(self, mor, all_objects):
    """Recursively collect vsphere_* tags from a MOR's ancestry.

    Walks up the `parent` chain found in *all_objects* and returns tags
    ordered from the root ancestor down to the direct parent.
    """
    properties = all_objects.get(mor, {})
    parent = properties.get('parent')
    if parent:
        tags = []
        parent_name = ensure_unicode(all_objects.get(parent, {}).get('name', 'unknown'))
        if isinstance(parent, vim.HostSystem):
            tags.append('vsphere_host:{}'.format(parent_name))
        elif isinstance(parent, vim.Folder):
            tags.append('vsphere_folder:{}'.format(parent_name))
        elif isinstance(parent, vim.ComputeResource):
            # ClusterComputeResource is a subclass of ComputeResource, so
            # the cluster case is handled inside this branch; a cluster
            # gets both the cluster and the compute tag.
            if isinstance(parent, vim.ClusterComputeResource):
                tags.append('vsphere_cluster:{}'.format(parent_name))
            tags.append('vsphere_compute:{}'.format(parent_name))
        elif isinstance(parent, vim.Datacenter):
            tags.append('vsphere_datacenter:{}'.format(parent_name))
        # Ancestors first, then this parent's own tags.
        parent_tags = self._get_parent_tags(parent, all_objects)
        parent_tags.extend(tags)
        return parent_tags
    return []
def _collect_mors_and_attributes(self, server_instance):
    """Retrieve all relevant MORs and their attributes in a minimal number of API calls.

    Uses a ContainerView plus the PropertyCollector to fetch, for every
    resource type we care about, the "name", "parent" and "customValue"
    properties (plus VM-specific runtime/guest properties). Returns a dict
    mapping each MOR to a dict of its collected properties.
    """
    resources = list(RESOURCE_TYPE_METRICS)
    resources.extend(RESOURCE_TYPE_NO_METRIC)
    content = server_instance.content
    # Recursive (True) view over the whole inventory rooted at rootFolder.
    view_ref = content.viewManager.CreateContainerView(content.rootFolder, resources, True)
    # Object used to query MORs as well as the attributes we require in one API call
    # See https://code.vmware.com/apis/358/vsphere#/doc/vmodl.query.PropertyCollector.html
    collector = content.propertyCollector
    # Specify the root object from where we collect the rest of the objects
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
    obj_spec.obj = view_ref
    obj_spec.skip = True
    # Specify the attribute of the root object to traverse to obtain all the attributes
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
    traversal_spec.path = "view"
    traversal_spec.skip = False
    traversal_spec.type = view_ref.__class__
    obj_spec.selectSet = [traversal_spec]
    property_specs = []
    # Specify which attributes we want to retrieve per object
    for resource in resources:
        property_spec = vmodl.query.PropertyCollector.PropertySpec()
        property_spec.type = resource
        property_spec.pathSet = ["name", "parent", "customValue"]
        if resource == vim.VirtualMachine:
            # Extra VM-only properties used later for filtering and tagging.
            property_spec.pathSet.append("runtime.powerState")
            property_spec.pathSet.append("runtime.host")
            property_spec.pathSet.append("guest.hostName")
        property_specs.append(property_spec)
    # Create our filter spec from the above specs
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    filter_spec.objectSet = [obj_spec]
    filter_spec.propSet = property_specs
    retr_opts = vmodl.query.PropertyCollector.RetrieveOptions()
    # To limit the number of objects retrieved per call.
    # If batch_collector_size is 0, collect maximum number of objects.
    retr_opts.maxObjects = self.batch_collector_size or None
    # Collect the objects and their properties
    res = collector.RetrievePropertiesEx([filter_spec], retr_opts)
    objects = res.objects
    # Results can be paginated
    while res.token is not None:
        res = collector.ContinueRetrievePropertiesEx(res.token)
        objects.extend(res.objects)
    mor_attrs = {}
    error_counter = 0
    for obj in objects:
        # Log at most 10 property-retrieval errors per collection to avoid
        # flooding the logs on large, partially-broken inventories.
        if obj.missingSet and error_counter < 10:
            for prop in obj.missingSet:
                error_counter += 1
                self.log.error(
                    "Unable to retrieve property %s for object %s: %s",
                    ensure_unicode(prop.path),
                    ensure_unicode(obj.obj),
                    ensure_unicode(prop.fault),
                )
                if error_counter == 10:
                    self.log.error("Too many errors during object collection, stop logging")
                    break
        mor_attrs[obj.obj] = {prop.name: prop.val for prop in obj.propSet} if obj.propSet else {}
    return mor_attrs
def _get_all_objs(self, server_instance, regexes, include_only_marked, tags, use_guest_hostname=False):
    """
    Explore vCenter infrastructure to discover hosts, virtual machines, etc.
    and compute their associated tags.
    Start at the vCenter `rootFolder`, so as to collect every object.
    Example topology:
    ```
    rootFolder
    - datacenter1
    - compute_resource1 == cluster
    - host1
    - host2
    - host3
    - compute_resource2
    - host5
    - vm1
    - vm2
    ```
    If it's a node we want to query metric for, it will be enqueued at the
    instance level and will be processed by a subsequent job.
    """
    start = time.time()
    obj_list = defaultdict(list)
    # Collect objects and their attributes
    all_objects = self._collect_mors_and_attributes(server_instance)
    # Add rootFolder since it is not explored by the propertyCollector
    rootFolder = server_instance.content.rootFolder
    all_objects[rootFolder] = {"name": rootFolder.name, "parent": None}
    for obj, properties in all_objects.items():
        instance_tags = []
        # Only keep non-excluded objects of a resource type we collect metrics for.
        if not self._is_excluded(obj, properties, regexes, include_only_marked) and isinstance(
            obj, RESOURCE_TYPE_METRICS
        ):
            if use_guest_hostname:
                # Prefer the in-guest hostname when configured, falling back
                # to the inventory name.
                hostname = properties.get("guest.hostName", properties.get("name", "unknown"))
            else:
                hostname = properties.get("name", "unknown")
            if properties.get("parent"):
                instance_tags.extend(self._get_parent_tags(obj, all_objects))
            if isinstance(obj, vim.VirtualMachine):
                vsphere_type = 'vsphere_type:vm'
                vimtype = vim.VirtualMachine
                mor_type = "vm"
                # Skip VMs that are not powered on.
                power_state = properties.get("runtime.powerState")
                if power_state != vim.VirtualMachinePowerState.poweredOn:
                    self.log.debug("Skipping VM in state %s", ensure_unicode(power_state))
                    continue
                host_mor = properties.get("runtime.host")
                host_props = all_objects.get(host_mor, {})
                host = "unknown"
                if host_mor and host_props:
                    host = ensure_unicode(host_props.get("name", "unknown"))
                    # A VM inherits the exclusion of its host.
                    if self._is_excluded(host_mor, host_props, regexes, include_only_marked):
                        self.log.debug(
                            "Skipping VM because host %s is excluded by rule %s.", host, regexes.get('host_include')
                        )
                        continue
                instance_tags.append('vsphere_host:{}'.format(host))
            elif isinstance(obj, vim.HostSystem):
                vsphere_type = 'vsphere_type:host'
                vimtype = vim.HostSystem
                mor_type = "host"
            elif isinstance(obj, vim.Datastore):
                vsphere_type = 'vsphere_type:datastore'
                instance_tags.append(
                    'vsphere_datastore:{}'.format(ensure_unicode(properties.get("name", "unknown")))
                )
                # Non host-bound resources are submitted without a hostname.
                hostname = None
                vimtype = vim.Datastore
                mor_type = "datastore"
            elif isinstance(obj, vim.Datacenter):
                vsphere_type = 'vsphere_type:datacenter'
                instance_tags.append(
                    "vsphere_datacenter:{}".format(ensure_unicode(properties.get("name", "unknown")))
                )
                hostname = None
                vimtype = vim.Datacenter
                mor_type = "datacenter"
            elif isinstance(obj, vim.ClusterComputeResource):
                vsphere_type = 'vsphere_type:cluster'
                instance_tags.append("vsphere_cluster:{}".format(ensure_unicode(properties.get("name", "unknown"))))
                hostname = None
                vimtype = vim.ClusterComputeResource
                mor_type = "cluster"
            else:
                vsphere_type = None
            if vsphere_type:
                instance_tags.append(vsphere_type)
                obj_list[vimtype].append(
                    {"mor_type": mor_type, "mor": obj, "hostname": hostname, "tags": tags + instance_tags}
                )
    self.log.debug("All objects with attributes cached in %s seconds.", time.time() - start)
    return obj_list
@staticmethod
def _is_excluded(obj, properties, regexes, include_only_marked):
    """
    Return `True` if the given host or virtual machine is excluded by the user configuration,
    i.e. violates any of the following rules:
    * Do not match the corresponding `*_include_only` regular expressions
    * Is "non-labeled" while `include_only_marked` is enabled (virtual machine only)
    """
    # Host
    if isinstance(obj, vim.HostSystem):
        # Based on `host_include_only_regex`
        if regexes and regexes.get('host_include') is not None:
            # re.search: the pattern may match anywhere in the name, case-insensitively.
            match = re.search(regexes['host_include'], properties.get("name", ""), re.IGNORECASE)
            if not match:
                return True
    # VirtualMachine
    elif isinstance(obj, vim.VirtualMachine):
        # Based on `vm_include_only_regex`
        if regexes and regexes.get('vm_include') is not None:
            match = re.search(regexes['vm_include'], properties.get("name", ""), re.IGNORECASE)
            if not match:
                return True
        # Based on `include_only_marked`
        if include_only_marked:
            monitored = False
            # customValue fields set by the user on the VM; the monitoring
            # flag must be present for the VM to be kept.
            for field in properties.get("customValue", ""):
                if field.value == VM_MONITORING_FLAG:
                    monitored = True
                    break  # we shall monitor
            if not monitored:
                return True
    # Any other resource type (datastore, datacenter, cluster, ...) is never excluded here.
    return False
def _cache_morlist_raw(self, instance):
    """
    Fill the Mor objects queue that will be asynchronously processed later.
    Resolve the vCenter `rootFolder` and initiate hosts and virtual machines
    discovery.
    """
    i_key = self._instance_key(instance)
    self.log.debug("Caching the morlist for vcenter instance %s", i_key)
    # If the queue is not completely empty, don't do anything
    for resource_type in RESOURCE_TYPE_METRICS:
        if self.mor_objects_queue.contains(i_key) and self.mor_objects_queue.size(i_key, resource_type):
            last = self.cache_config.get_last(CacheConfig.Morlist, i_key)
            self.log.debug(
                "Skipping morlist collection: the objects queue for the "
                "resource type '%s' is still being processed "
                "(latest refresh was %ss ago)",
                ensure_unicode(resource_type),
                time.time() - last,
            )
            return
    tags = ["vcenter_server:{}".format(ensure_unicode(instance.get('name')))]
    # Include-only regexes; a None value means "no filtering" for that kind.
    regexes = {
        'host_include': instance.get('host_include_only_regex'),
        'vm_include': instance.get('vm_include_only_regex'),
    }
    include_only_marked = is_affirmative(instance.get('include_only_marked', False))
    # Discover hosts and virtual machines
    server_instance = self._get_server_instance(instance)
    use_guest_hostname = is_affirmative(instance.get("use_guest_hostname", False))
    all_objs = self._get_all_objs(
        server_instance, regexes, include_only_marked, tags, use_guest_hostname=use_guest_hostname
    )
    self.mor_objects_queue.fill(i_key, dict(all_objs))
    self.cache_config.set_last(CacheConfig.Morlist, i_key, time.time())
@trace_method
def _process_mor_objects_queue_async(self, instance, mors):
    """
    Process a batch of items popped from the objects queue by querying the available
    metrics for these MORs and then putting them in the Mor cache
    """
    t = time.time()
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    # For non realtime metrics, we need to specifically ask which counters are available for which entity,
    # so we call perfManager.QueryAvailablePerfMetric for each cluster, datacenter, datastore
    # This should be okay since the number of such entities shouldn't be excessively large
    for mor in mors:
        mor_name = str(mor['mor'])
        available_metrics = {m.counterId for m in perfManager.QueryAvailablePerfMetric(entity=mor["mor"])}
        try:
            self.mor_cache.set_metrics(i_key, mor_name, self._compute_needed_metrics(instance, available_metrics))
        except MorNotFoundError:
            # The Mor may have been purged from the cache since this job was scheduled.
            self.log.error("Object '%s' is missing from the cache, skipping. ", ensure_unicode(mor_name))
            continue
    # TEST-INSTRUMENTATION
    self.histogram(
        'datadog.agent.vsphere.morlist_process_atomic.time', time.time() - t, tags=instance.get('tags', [])
    )
def _process_mor_objects_queue(self, instance):
    """
    Pops `batch_morlist_size` items from the mor objects queue and run asynchronously
    the _process_mor_objects_queue_async method to fill the Mor cache.
    """
    i_key = self._instance_key(instance)
    self.mor_cache.init_instance(i_key)
    if not self.mor_objects_queue.contains(i_key):
        self.log.debug("Objects queue is not initialized yet for instance %s, skipping processing", i_key)
        return
    for resource_type in RESOURCE_TYPE_METRICS:
        # Batch size can prevent querying large payloads at once if the environment is too large
        # If batch size is set to 0, process everything at once
        batch_size = self.batch_morlist_size or self.mor_objects_queue.size(i_key, resource_type)
        while self.mor_objects_queue.size(i_key, resource_type):
            mors = []
            for _ in range(batch_size):
                mor = self.mor_objects_queue.pop(i_key, resource_type)
                if mor is None:
                    self.log.debug("No more objects of type '%s' left in the queue", ensure_unicode(resource_type))
                    break
                mor_name = str(mor['mor'])
                # Realtime resources (hosts/VMs) are queried at the fixed realtime interval.
                mor['interval'] = REAL_TIME_INTERVAL if mor['mor_type'] in REALTIME_RESOURCES else None
                # Always update the cache to account for Mors that might have changed parent
                # in the meantime (e.g. a migrated VM).
                self.mor_cache.set_mor(i_key, mor_name, mor)
                # Only do this for non real-time resources i.e. datacenter, datastore and cluster
                # For hosts and VMs, we can rely on a precomputed list of metrics
                realtime_only = is_affirmative(instance.get("collect_realtime_only", True))
                if mor["mor_type"] not in REALTIME_RESOURCES and not realtime_only:
                    mors.append(mor)
            # We will actually schedule jobs for non realtime resources only.
            if mors:
                self.pool.apply_async(self._process_mor_objects_queue_async, args=(instance, mors))
def _cache_metrics_metadata(self, instance):
    """
    Get all the performance counters metadata meaning name/group/description...
    from the server instance, attached with the corresponding ID
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    self.metadata_cache.init_instance(i_key)
    self.log.info("Warming metrics metadata cache for instance %s", i_key)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])
    new_metadata = {}
    metric_ids = []
    # Use old behaviour with metrics to collect defined by our constants
    if self.in_compatibility_mode(instance, log_warning=True):
        # Legacy mode: walk every counter and keep all of them (or only the
        # BASIC_METRICS subset when `all_metrics` is falsy).
        for counter in perfManager.perfCounter:
            metric_name = self.format_metric_name(counter, compatibility=True)
            new_metadata[counter.key] = {'name': metric_name, 'unit': counter.unitInfo.key}
            # Build the list of metrics we will want to collect
            if instance.get("all_metrics") or metric_name in BASIC_METRICS:
                metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
    else:
        # Modern mode: let vCenter filter counters by collection level.
        collection_level = instance.get("collection_level", 1)
        for counter in perfManager.QueryPerfCounterByLevel(collection_level):
            new_metadata[counter.key] = {"name": self.format_metric_name(counter), "unit": counter.unitInfo.key}
            # Build the list of metrics we will want to collect
            metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
    self.log.info("Finished metadata collection for instance %s", i_key)
    # Reset metadata
    self.metadata_cache.set_metadata(i_key, new_metadata)
    self.metadata_cache.set_metric_ids(i_key, metric_ids)
    self.cache_config.set_last(CacheConfig.Metadata, i_key, time.time())
    # ## <TEST-INSTRUMENTATION>
    self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total(), tags=custom_tags)
    # ## </TEST-INSTRUMENTATION>
@staticmethod
def format_metric_name(counter, compatibility=False):
    """Build the Datadog metric name for a vSphere performance counter."""
    parts = [ensure_unicode(counter.groupInfo.key), ensure_unicode(counter.nameInfo.key)]
    if not compatibility:
        # The modern naming scheme also encodes the rollup type suffix.
        parts.append(ensure_unicode(SHORT_ROLLUP[str(counter.rollupType)]))
    return ".".join(parts)
def in_compatibility_mode(self, instance, log_warning=False):
    """Return True when the legacy `all_metrics` flag drives metric selection.

    `collection_level` always wins when both flags are configured.
    """
    has_all_metrics = instance.get("all_metrics") is not None
    has_collection_level = instance.get("collection_level") is not None
    if has_all_metrics and has_collection_level:
        if log_warning:
            self.log.warning(
                "Using both `all_metrics` and `collection_level` configuration flag."
                " `all_metrics` will be ignored."
            )
        return False
    if has_all_metrics:
        if log_warning:
            self.warning(
                "The configuration flag `all_metrics` will soon be deprecated. "
                "Consider using `collection_level` instead."
            )
        return True
    return False
def _transform_value(self, instance, counter_id, value):
    """Apply unit-based normalization to a raw counter value.

    Counters whose cached unit is "percent" are divided by 100 before
    being reported; any counter without cached metadata is passed through
    untouched.
    """
    i_key = self._instance_key(instance)
    try:
        unit = self.metadata_cache.get_metadata(i_key, counter_id)["unit"]
    except MetadataNotFoundError:
        # No metadata yet: report the raw value as-is.
        return value
    if unit == "percent":
        return float(value) / 100
    return value
@trace_method
def _collect_metrics_async(self, instance, query_specs):
    """ Task that collects the metrics listed in the morlist for one MOR
    """
    # ## <TEST-INSTRUMENTATION>
    t = Timer()
    # ## </TEST-INSTRUMENTATION>
    i_key = self._instance_key(instance)
    server_instance = self._get_server_instance(instance)
    perfManager = server_instance.content.perfManager
    custom_tags = instance.get('tags', [])
    # One QueryPerf call for the whole batch of query specs.
    results = perfManager.QueryPerf(query_specs)
    if results:
        for mor_perfs in results:
            mor_name = str(mor_perfs.entity)
            try:
                mor = self.mor_cache.get_mor(i_key, mor_name)
            except MorNotFoundError:
                self.log.error(
                    "Trying to get metrics from object %s deleted from the cache, skipping. "
                    "Consider increasing the parameter `clean_morlist_interval` to avoid that",
                    mor_name,
                )
                continue
            for result in mor_perfs.value:
                counter_id = result.id.counterId
                if not self.metadata_cache.contains(i_key, counter_id):
                    self.log.debug(
                        "Skipping value for counter %s, because there is no metadata about it",
                        ensure_unicode(counter_id),
                    )
                    continue
                # Metric types are absolute, delta, and rate
                metric_name = self.metadata_cache.get_metadata(i_key, result.id.counterId).get('name')
                if self.in_compatibility_mode(instance):
                    if metric_name not in ALL_METRICS:
                        self.log.debug("Skipping unknown `%s` metric.", ensure_unicode(metric_name))
                        continue
                if not result.value:
                    self.log.debug("Skipping `%s` metric because the value is empty", ensure_unicode(metric_name))
                    continue
                instance_name = result.id.instance or "none"
                # maxSample=1 in the query spec, so value[0] is the single sample.
                value = self._transform_value(instance, result.id.counterId, result.value[0])
                hostname = mor['hostname']
                tags = ['instance:{}'.format(ensure_unicode(instance_name))]
                if not hostname:  # no host tags available
                    tags.extend(mor['tags'])
                else:
                    hostname = to_string(hostname)
                tags.extend(custom_tags)
                # vsphere "rates" should be submitted as gauges (rate is
                # precomputed).
                self.gauge("vsphere.{}".format(ensure_unicode(metric_name)), value, hostname=hostname, tags=tags)
    # ## <TEST-INSTRUMENTATION>
    # NOTE(review): 'metric_colection' is a typo, but it is the shipped
    # metric name; renaming it would break existing dashboards/monitors.
    self.histogram('datadog.agent.vsphere.metric_colection.time', t.total(), tags=custom_tags)
    # ## </TEST-INSTRUMENTATION>
def collect_metrics(self, instance):
    """
    Calls asynchronously _collect_metrics_async on all MORs, as the
    job queue is processed the Aggregator will receive the metrics.
    """
    i_key = self._instance_key(instance)
    if not self.mor_cache.contains(i_key):
        self.log.debug("Not collecting metrics for instance '%s', nothing to do yet.", i_key)
        return
    vm_count = 0
    custom_tags = instance.get('tags', [])
    tags = ["vcenter_server:{}".format(ensure_unicode(instance.get('name')))] + custom_tags
    n_mors = self.mor_cache.instance_size(i_key)
    if not n_mors:
        # Still report a zero VM count so the metric does not go stale.
        self.gauge('vsphere.vm.count', vm_count, tags=tags)
        self.log.debug("No Mor objects to process for instance '%s', skip...", i_key)
        return
    self.log.debug("Collecting metrics for %s mors", ensure_unicode(n_mors))
    # Request metrics for several objects at once. We can limit the number of objects with batch_size
    # If batch_size is 0, process everything at once
    batch_size = self.batch_morlist_size or n_mors
    for batch in self.mor_cache.mors_batch(i_key, batch_size):
        query_specs = []
        for _, mor in iteritems(batch):
            if mor['mor_type'] == 'vm':
                vm_count += 1
            # Non-realtime Mors without a precomputed metric list cannot be queried yet.
            if mor['mor_type'] not in REALTIME_RESOURCES and ('metrics' not in mor or not mor['metrics']):
                continue
            query_spec = vim.PerformanceManager.QuerySpec()
            query_spec.entity = mor["mor"]
            query_spec.intervalId = mor["interval"]
            query_spec.maxSample = 1
            if mor['mor_type'] in REALTIME_RESOURCES:
                # Realtime resources share the instance-wide metric id list.
                query_spec.metricId = self.metadata_cache.get_metric_ids(i_key)
            else:
                query_spec.metricId = mor["metrics"]
            query_specs.append(query_spec)
        if query_specs:
            self.pool.apply_async(self._collect_metrics_async, args=(instance, query_specs))
    self.gauge('vsphere.vm.count', vm_count, tags=tags)
def check(self, instance):
    """Main entry point: refresh caches, schedule metric collection, process events.

    All heavy work runs on the thread pool; the pool is drained gracefully
    on success and terminated (discarding queued jobs) on any error.
    """
    try:
        self.start_pool()
        self.exception_printed = 0
        # First part: make sure our object repository is neat & clean
        if self._should_cache(instance, CacheConfig.Metadata):
            self._cache_metrics_metadata(instance)
        if self._should_cache(instance, CacheConfig.Morlist):
            self._cache_morlist_raw(instance)
        self._process_mor_objects_queue(instance)
        # Remove old objects that might be gone from the Mor cache
        self.mor_cache.purge(self._instance_key(instance), self.clean_morlist_interval)
        # Second part: do the job
        self.collect_metrics(instance)
        self._query_event(instance)
        self.set_external_tags(self.get_external_host_tags())
        # Wait for all queued jobs to finish before returning.
        self.stop_pool()
        if self.exception_printed > 0:
            self.log.error("One thread in the pool crashed, check the logs")
    except Exception:
        # On any failure, kill remaining workers so the agent is not left
        # with a wedged pool, then surface the error to the collector.
        self.terminate_pool()
        raise
| StarcoderdataPython |
3308244 | from itertools import count
# Training script setup for the (optionally variational) image autoencoder.
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import os
from collections import deque
from autoencoder import Autoencoder
# NOTE(review): presumably provides USE_VARIATIONAL_AUTOENCODER and
# AUTOENCODER_FILENAME used below — confirm against config.py.
from config import *
# Prefer the GPU when available; batches are moved to this device in train().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from image_loader import ImageDataset
dataset = ImageDataset(quality=2)
BATCH_SIZE = 32
data_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
autoencoder = Autoencoder(is_variational=USE_VARIATIONAL_AUTOENCODER)
# Resume from an existing checkpoint when one is present on disk.
if os.path.exists(AUTOENCODER_FILENAME):
    print("Found autoencoder model, resuming training on existing model.")
    autoencoder.load_state_dict(torch.load(AUTOENCODER_FILENAME), strict=False)
else:
    print("Found no autoencoder model, training a new one.")
autoencoder.train()
optimizer = optim.Adam(autoencoder.parameters(), lr=0.00005)
# L1 (mean absolute error) reconstruction loss.
criterion = lambda a, b: torch.mean(torch.abs(a - b))
# Running windows over the last LOG_STEPS batches for progress reporting.
LOG_STEPS = 1000
error_history = deque(maxlen=LOG_STEPS)
kld_history = deque(maxlen=LOG_STEPS)
def kld_loss(mean, log_variance):
    """KL divergence between N(mean, exp(log_variance)) and N(0, 1), averaged per element."""
    per_element = mean.pow(2) + log_variance.exp() - log_variance - 1
    return 0.5 * torch.sum(per_element) / mean.nelement()
def train():
    """Run the training loop indefinitely, checkpointing every LOG_STEPS batches and each epoch.

    Bug fix: the non-variational path previously set ``kld = 0`` (a plain
    int) and then called ``kld.item()``, raising AttributeError on the very
    first batch. A scalar zero tensor is used instead so both the history
    logging and the loss term work in either mode.
    """
    for epoch in count():
        batch_index = 0
        for sample in tqdm(data_loader):
            sample = sample.to(device)
            autoencoder.zero_grad()
            if USE_VARIATIONAL_AUTOENCODER:
                output, mean, log_variance = autoencoder.forward(sample)
                kld = kld_loss(mean, log_variance)
            else:
                output = autoencoder.decode(autoencoder.encode(sample))
                # Zero *tensor* (not int 0) so kld.item() below is valid.
                kld = torch.zeros((), device=sample.device)
            reconstruction_loss = criterion(output, sample)
            error_history.append(reconstruction_loss.item())
            kld_history.append(kld.item())
            # KLD term is down-weighted; it is exactly zero in non-VAE mode.
            loss = reconstruction_loss + kld * 0.01
            loss.backward()
            optimizer.step()
            batch_index += 1
            if batch_index % LOG_STEPS == 0:
                # Periodic checkpoint + rolling-window progress report.
                torch.save(autoencoder.state_dict(), AUTOENCODER_FILENAME)
                tqdm.write("Epoch {:d}, batch {:d}".format(epoch, batch_index) \
                    + ': reconstruction loss: {0:.5f}'.format(np.mean(error_history)) \
                    + ', KLD loss: {0:.4f} (Saved model.)'.format(np.mean(kld_history)))
        print("Epoch " + str(epoch) \
            + ': reconstruction loss: {0:.5f}'.format(np.mean(error_history)) \
            + ', KLD loss: {0:.4f}'.format(np.mean(kld_history)))
        # End-of-epoch checkpoint plus a per-epoch snapshot.
        torch.save(autoencoder.state_dict(), AUTOENCODER_FILENAME)
        torch.save(autoencoder.state_dict(), 'trained_models/checkpoints/autoencoder_{:04d}.to'.format(epoch))
train() | StarcoderdataPython |
3266369 | <filename>movie/movie.py
class Movie(object):
    """Simple record describing a movie scraped from a listing page."""

    def __init__(self):
        self.name = ''          # movie title
        self.src = ''           # download/detail link
        self.update_time = ''   # last update timestamp of the listing
        self.score = ''         # rating, kept as text
        self.style = ''         # genre
        self.desc = ''          # summary (intentionally not shown by __str__)
        self.size = ''          # file size
        self.area = ''          # country/region
        self.actors = []        # leading actors
        self.directors = []     # directors
        self.scenarists = []    # screenwriters

    def __str__(self):
        """Return the human-readable, CRLF-separated summary of the movie.

        Fixes of the original implementation: no longer shadows the builtin
        ``str`` and replaces repeated string concatenation with a single
        join. The output is byte-identical to before, including the
        trailing '\\r\\n'.
        """
        lines = [
            '名称:' + self.name,
            '链接:' + self.src,
            '评分:' + self.score,
            '类型:' + self.style,
            '导演:' + ', '.join(self.directors),
            '编剧:' + ', '.join(self.scenarists),
            '主演:' + ', '.join(self.actors),
            '地区:' + self.area,
            '更新时间:' + self.update_time,
            '大小:' + self.size,
        ]
        return '\r\n'.join(lines) + '\r\n'
| StarcoderdataPython |
3374937 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
# URL routes for the survey app.
# NOTE(review): the two `room/...` patterns lack a leading '^' anchor,
# unlike '^survey/$' above; with old-style url() regexes they can match
# anywhere in the path — confirm this is intended.
urlpatterns = [
    url(r'^survey/$', views.index, name='index'),
    url(r'room/(?P<room_slug>[-\w]+)/survey$', views.survey, name='survey'),
    url(r'room/(?P<room_slug>[-\w]+)/thanks$', views.thanks, name='thanks'),
] | StarcoderdataPython |
3200637 | <reponame>harupy/nyaggle
import os
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import nyaggle.feature_store as fs
from nyaggle.testing import get_temp_directory
def test_save_feature():
    """Saving a feature with id 0 creates `0.f` in the target directory."""
    feature = pd.DataFrame({'a': np.arange(100)})
    with get_temp_directory() as tmp:
        fs.save_feature(feature, 0, tmp)
        assert os.path.exists(os.path.join(tmp, '0.f'))
def test_load_feature():
    """A saved feature round-trips through load_feature unchanged."""
    feature = pd.DataFrame({'a': np.arange(100)})
    with get_temp_directory() as tmp:
        fs.save_feature(feature, 0, tmp)
        assert_frame_equal(feature, fs.load_feature(0, tmp))
def test_multi_columns():
    """Frames with several columns (including an all-None one) round-trip intact."""
    feature = pd.DataFrame({'a': np.arange(100)})
    feature['b'] = None
    with get_temp_directory() as tmp:
        fs.save_feature(feature, 0, tmp)
        assert_frame_equal(feature, fs.load_feature(0, tmp))
def test_various_dtypes():
    """Every common float/integer dtype survives the save/load round trip."""
    dtypes = [float, int, np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32, np.int64]
    feature = pd.DataFrame({
        column: np.arange(100).astype(dtype)
        for column, dtype in zip('abcdefghi', dtypes)
    })
    with get_temp_directory() as tmp:
        fs.save_feature(feature, 0, tmp)
        assert_frame_equal(feature, fs.load_feature(0, tmp))
def test_load_features():
    """load_features concatenates the base frame with stored features in order."""
    df = pd.DataFrame({
        'a': np.arange(100).astype(float),
        'b': np.arange(100).astype(int),
        'c': np.arange(100).astype(int),
    })
    with get_temp_directory() as tmp:
        fs.save_feature(df[['b']], 0, tmp)
        fs.save_feature(df[['c']], 1, tmp)
        assert_frame_equal(df, fs.load_features(df[['a']], [0, 1], tmp))
def test_load_features_no_base():
    """Without a base frame, columns come back in feature-id order."""
    df = pd.DataFrame({
        'a': np.arange(100).astype(float),
        'b': np.arange(100).astype(int),
        'c': np.arange(100).astype(int),
    })
    with get_temp_directory() as tmp:
        fs.save_feature(df[['b']], 0, tmp)
        fs.save_feature(df[['c']], 1, tmp)
        fs.save_feature(df[['a']], '2', tmp)
        loaded = fs.load_features(None, [0, 1, '2'], tmp)
        assert list(loaded.columns) == ['b', 'c', 'a']
def test_load_feature_ignore_columns():
    """ignore_columns drops matching columns and silently skips unknown names."""
    df = pd.DataFrame({
        'a': np.arange(100).astype(float),
        'b': np.arange(100).astype(int),
        'c': np.arange(100).astype(int),
    })
    with get_temp_directory() as tmp:
        fs.save_feature(df, 0, tmp)
        # 'X' is not a stored column and must be ignored without error.
        loaded = fs.load_feature(0, tmp, ignore_columns=['b', 'X'])
        assert_frame_equal(loaded, df.drop('b', axis=1))
def test_load_feature_ignore_all_columns():
    """Ignoring every stored column yields an empty (column-less) frame."""
    df = pd.DataFrame({
        'a': np.arange(100).astype(float),
        'b': np.arange(100).astype(int),
        'c': np.arange(100).astype(int),
    })
    with get_temp_directory() as tmp:
        fs.save_feature(df, 0, tmp)
        loaded = fs.load_feature(0, tmp, ignore_columns=['a', 'b', 'c', 'X'])
        assert_frame_equal(loaded, df.drop(['a', 'b', 'c'], axis=1))
def test_load_features_duplicate_col_name():
    """rename_duplicate suffixes repeated column names with the feature id."""
    df = pd.DataFrame({
        'a': np.arange(100).astype(float),
        'b': np.arange(100).astype(int),
        'c': np.arange(100).astype(int),
    })
    with get_temp_directory() as tmp:
        fs.save_feature(df[['a', 'b']], 0, tmp)
        fs.save_feature(df[['b', 'c']], 1, tmp)
        fs.save_feature(df[['b', 'a']], 'X', tmp)
        renamed = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=True)
        assert list(renamed.columns) == ['a', 'b', 'b_1', 'c', 'b_X', 'a_X']
        raw = fs.load_features(None, [0, 1, 'X'], tmp, rename_duplicate=False)
        assert list(raw.columns) == ['a', 'b', 'b', 'c', 'b', 'a']
def test_invalid_feature():
    """save_feature rejects frames misaligned with the reference target variable."""
    df = pd.DataFrame({
        'a': [1, 2, 3, 4, 5] + [None] * 5,
        'b': np.random.randint(0, 10, size=10),
    })
    y = pd.Series([1, 0, 1, 0, 1])
    with get_temp_directory() as tmp:
        # Both the NaN-bearing column alone and the full frame must be rejected.
        for bad in (df[['a']], df):
            with pytest.raises(RuntimeError):
                fs.save_feature(bad, 0, reference_target_variable=y, directory=tmp)
        # A clean column is accepted.
        fs.save_feature(df[['b']], 0, reference_target_variable=y, directory=tmp)
def test_feature_exists():
    """Saving over an existing feature id without overwrite=True must fail."""
    frame = pd.DataFrame({'a': [1, 2, 3, 4, 5] + [None] * 5})
    with get_temp_directory() as tmp:
        fs.save_feature(frame[['a']], 0, directory=tmp)
        with pytest.raises(RuntimeError):
            fs.save_feature(frame, 0, overwrite=False, directory=tmp)
def test_decorator():
    """cached_feature memoises results on disk and preserves function metadata."""
    with get_temp_directory() as tmp:
        @fs.cached_feature('x', tmp)
        def make_feature_x():
            return pd.DataFrame({'a': [1, 2, 3, 4, 5]})

        @fs.cached_feature('y', tmp)
        def make_feature_y(n: int):
            return pd.DataFrame({'b': np.arange(n)})

        first = make_feature_x()
        # functools.wraps-style metadata must survive the decorator
        assert make_feature_x.__name__ == "make_feature_x"
        assert os.path.exists(os.path.join(tmp, "x.f"))
        # second call is served from the on-disk cache and must be identical
        assert_frame_equal(first, make_feature_x())

        produced = make_feature_y(100)
        assert len(produced) == 100
        assert os.path.exists(os.path.join(tmp, "y.f"))
        assert_frame_equal(produced, make_feature_y(100))
| StarcoderdataPython |
3263127 | <filename>lab3project/street/apps/main/admin.py
from django.contrib.gis import admin
from django.db import models
from .models import *
class SegmentStreetInline(admin.TabularInline):
    """Inline editor for the Segment<->Street join table (used by street and segment admins)."""
    model = SegmentStreet
    autocomplete_fields = ['street', 'segment']
    extra = 1  # one blank extra row in the admin form
    ordering = ('id',)
    # def get_queryset(self, request):
    # return Segment.free_segments()
class OperationStreetInline(admin.TabularInline):
    """Inline editor linking a document to street rename/merge operations (old -> new)."""
    model = OperationStreet
    autocomplete_fields = ['new', 'old']
    extra = 1  # one blank extra row in the admin form
    ordering = ('id',)
class OperationSegmentStreetInline(admin.TabularInline):
    """Inline editor for operations on segment-street links (old -> new)."""
    model = OperationSegmentStreet
    autocomplete_fields = ['new', 'old']
    extra = 1  # one blank extra row in the admin form
    ordering = ('id',)
class OperationSegmentInline(admin.TabularInline):
    """Inline editor for segment split/merge operations (old -> new)."""
    model = OperationSegment
    autocomplete_fields = ['new', 'old']
    extra = 1  # one blank extra row in the admin form
    ordering = ('id',)
class StreetAlternativeNameInline(admin.TabularInline):
    """Inline editor for a street's alternative (historical/translated) names."""
    model = StreetAlternativeName
    autocomplete_fields = ['street']
    extra = 1  # one blank extra row in the admin form
    ordering = ('id',)
class AdminStreet(admin.ModelAdmin):
    """Admin for Street: searchable by name (incl. alternative names), with segment inlines."""
    list_display = ['id', 'name', 'type', 'description']
    fieldsets = [
        (None, {'fields': ['name', 'description', 'significance']}),
        ('Дані з довідників', {'fields': ['type']}),
    ]
    # alternative names are searched through the reverse relation
    search_fields = ['name', 'streetalternativename__name']
    list_filter = ['type', 'segmentstreet__segment__district']
    # CAREFUL (was: ОСТОРОЖНО)
    #inlines = (Street.free_segments(), StreetAlternativeNameInline,)
    inlines = (SegmentStreetInline, StreetAlternativeNameInline,)
    ordering = ('id',)
class AdminSegmentStreet(admin.ModelAdmin):
    """Minimal admin for the Segment<->Street join table; search_fields enables autocomplete."""
    search_fields = ['id']
# class AdminOperationStreet(admin.ModelAdmin):
# list_display = ['date']
# autocomplete_fields = ['new', 'old']
# ordering = ('id',)
#
# class AdminOperationSegmentStreet(admin.ModelAdmin):
# list_display = ['date']
# autocomplete_fields = ['new', 'old']
# ordering = ('id',)
#
# class AdminOperationSegment(admin.ModelAdmin):
# list_display = ['date']
# search_fields = ['new', 'old']
# ordering = ('id',)
class AdminSegment(admin.OSMGeoAdmin):
    """Admin for Segment with an OpenStreetMap geometry widget (OSMGeoAdmin)."""
    list_display = ['id', 'district', 'description']
    fieldsets = [
        (None, {'fields': ['description']}),
        ('Дані з довідників', {'fields': ['district', 'geom_type', 'tract_mtz']}),
        ('Геометрія', {'fields': ['geom']}),
    ]
    list_filter = ['district']
    search_fields = ['id']  # required for autocomplete_fields referencing Segment
    inlines = (SegmentStreetInline,)
    ordering = ('id',)
class AdminDocumentsStreet(admin.ModelAdmin):
    """Admin for legal documents; all operation types are edited inline on the document."""
    list_display = ['id', 'name']
    fieldsets = [
        (None, {'fields': ['document', 'date']}),
        ('Дані з довідників', {'fields': ['name']}),
    ]
    list_filter = ['name']
    search_fields = ['id', 'name']
    inlines = ( OperationStreetInline, OperationSegmentInline, OperationSegmentStreetInline, )
    ordering = ('id',)
# Register models with their customised admin classes; models registered
# without a second argument get Django's default ModelAdmin.
admin.site.register(Street, AdminStreet)
admin.site.register(SegmentStreet, AdminSegmentStreet)
# admin.site.register(OperationStreet, AdminOperationStreet)
# admin.site.register(OperationSegmentStreet, AdminOperationSegmentStreet)
# admin.site.register(OperationSegment, AdminOperationSegment)
admin.site.register(DocumentsStreet, AdminDocumentsStreet)
admin.site.register(StreetAlternativeName)
admin.site.register(Segment, AdminSegment)
| StarcoderdataPython |
3351851 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.sql_v1.types import cloud_sql_resources
# proto-plus module manifest: registers every message declared in this file
# under the google.cloud.sql.v1 proto package.
__protobuf__ = proto.module(
    package='google.cloud.sql.v1',
    manifest={
        'SqlSslCertsDeleteRequest',
        'SqlSslCertsGetRequest',
        'SqlSslCertsInsertRequest',
        'SqlSslCertsListRequest',
        'SslCertsInsertRequest',
        'SslCertsInsertResponse',
        'SslCertsListResponse',
    },
)
class SqlSslCertsDeleteRequest(proto.Message):
    r"""Request message for the SqlSslCerts.Delete RPC.

    Attributes:
        instance (str):
            Cloud SQL instance ID. This does not include
            the project ID.
        project (str):
            Project ID of the project that contains the
            instance.
        sha1_fingerprint (str):
            Sha1 FingerPrint.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    instance = proto.Field(
        proto.STRING,
        number=1,
    )
    project = proto.Field(
        proto.STRING,
        number=2,
    )
    sha1_fingerprint = proto.Field(
        proto.STRING,
        number=3,
    )
class SqlSslCertsGetRequest(proto.Message):
    r"""Request message for the SqlSslCerts.Get RPC.

    Attributes:
        instance (str):
            Cloud SQL instance ID. This does not include
            the project ID.
        project (str):
            Project ID of the project that contains the
            instance.
        sha1_fingerprint (str):
            Sha1 FingerPrint.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    instance = proto.Field(
        proto.STRING,
        number=1,
    )
    project = proto.Field(
        proto.STRING,
        number=2,
    )
    sha1_fingerprint = proto.Field(
        proto.STRING,
        number=3,
    )
class SqlSslCertsInsertRequest(proto.Message):
    r"""Request message for the SqlSslCerts.Insert RPC.

    Attributes:
        instance (str):
            Cloud SQL instance ID. This does not include
            the project ID.
        project (str):
            Project ID of the project that contains the
            instance.
        body (google.cloud.sql_v1.types.SslCertsInsertRequest):
    """
    # Field numbers are part of the wire contract -- do not renumber.
    instance = proto.Field(
        proto.STRING,
        number=1,
    )
    project = proto.Field(
        proto.STRING,
        number=2,
    )
    body = proto.Field(
        proto.MESSAGE,
        number=100,
        message='SslCertsInsertRequest',
    )
class SqlSslCertsListRequest(proto.Message):
    r"""Request message for the SqlSslCerts.List RPC.

    Attributes:
        instance (str):
            Cloud SQL instance ID. This does not include
            the project ID.
        project (str):
            Project ID of the project that contains the
            instance.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    instance = proto.Field(
        proto.STRING,
        number=1,
    )
    project = proto.Field(
        proto.STRING,
        number=2,
    )
class SslCertsInsertRequest(proto.Message):
    r"""SslCerts insert request.

    Attributes:
        common_name (str):
            User supplied name. Must be a distinct name
            from the other certificates for this instance.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    common_name = proto.Field(
        proto.STRING,
        number=1,
    )
class SslCertsInsertResponse(proto.Message):
    r"""SslCert insert response.

    Attributes:
        kind (str):
            This is always **sql#sslCertsInsert**.
        operation (google.cloud.sql_v1.types.Operation):
            The operation to track the ssl certs insert
            request.
        server_ca_cert (google.cloud.sql_v1.types.SslCert):
            The server Certificate Authority's
            certificate. If this is missing you can force a
            new one to be generated by calling
            resetSslConfig method on instances resource.
        client_cert (google.cloud.sql_v1.types.SslCertDetail):
            The new client certificate and private key.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    kind = proto.Field(
        proto.STRING,
        number=1,
    )
    operation = proto.Field(
        proto.MESSAGE,
        number=2,
        message=cloud_sql_resources.Operation,
    )
    server_ca_cert = proto.Field(
        proto.MESSAGE,
        number=3,
        message=cloud_sql_resources.SslCert,
    )
    client_cert = proto.Field(
        proto.MESSAGE,
        number=4,
        message=cloud_sql_resources.SslCertDetail,
    )
class SslCertsListResponse(proto.Message):
    r"""SslCerts list response.

    Attributes:
        kind (str):
            This is always **sql#sslCertsList**.
        items (Sequence[google.cloud.sql_v1.types.SslCert]):
            List of client certificates for the instance.
    """
    # Field numbers are part of the wire contract -- do not renumber.
    kind = proto.Field(
        proto.STRING,
        number=1,
    )
    items = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=cloud_sql_resources.SslCert,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| StarcoderdataPython |
3280805 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# --------------------------------------------------------------------------
"""Common functions shared by both the sync and the async decorators."""
from contextlib import contextmanager
from azure.core.tracing.abstract_span import AbstractSpan
from azure.core.settings import settings
# typing.TYPE_CHECKING is missing on very old Pythons; fall back to False so
# the annotation-only imports below are skipped at runtime.
try:
    from typing import TYPE_CHECKING
except ImportError:
    TYPE_CHECKING = False

if TYPE_CHECKING:
    from typing import Any, Optional, Union, Callable, List, Type, Generator
def get_function_and_class_name(func, *args):
    # type: (Callable, List[Any]) -> str
    """Return a ``class_name.function_name`` label for *func*.

    ``__qualname__`` is preferred when available. Otherwise, if positional
    arguments were supplied, the first one is assumed to be ``self`` and its
    class name is used as the prefix; with no arguments only the bare
    function name is returned.

    :param func: the function passed in
    :type func: `collections.abc.Callable`
    :param args: List of arguments passed into the function
    :type args: List[Any]
    """
    _missing = object()
    qualname = getattr(func, "__qualname__", _missing)
    if qualname is not _missing:
        return qualname
    if not args:
        return func.__name__
    return "{}.{}".format(args[0].__class__.__name__, func.__name__)  # pylint: disable=protected-access
@contextmanager
def change_context(span):
    # type: (Optional[AbstractSpan]) -> Generator
    """Run the enclosed block with *span* installed as the current span.

    The span is neither started nor ended here; it is only made current for
    the duration of the block and the previous span is restored afterwards.
    When no tracing implementation is configured, or *span* is None, this is
    a no-op.

    :param span: A span
    :type span: AbstractSpan
    """
    impl = settings.tracing_implementation()  # type: Type[AbstractSpan]
    if impl is None or span is None:
        yield
        return
    previous = impl.get_current_span()
    try:
        impl.set_current_span(span)
        yield
    finally:
        impl.set_current_span(previous)
def with_current_context(func):
    # type: (Callable) -> Any
    """Bind the current tracing context to *func*.

    :param func: The function that will be run in the new context
    :return: The callable to use in place of *func* (or *func* itself when
        no tracing implementation is configured)
    """
    impl = settings.tracing_implementation()  # type: Type[AbstractSpan]
    return func if impl is None else impl.with_current_context(func)
| StarcoderdataPython |
3285163 | import json
def readJsonFile(fileName):
    """Read and parse a JSON file.

    :param fileName: path of the JSON file to read
    :return: the deserialized Python object (dict, list, ...)
    """
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original leaked the handle returned by open()).
    with open(fileName) as json_file:
        return json.load(json_file)
def writeJsonFile(fileName, data):
    """Serialize *data* to *fileName* as pretty-printed JSON (4-space indent).

    :param fileName: destination path; the file is created or truncated
    :param data: any JSON-serializable Python object
    """
    # Repairs the original last line, which had non-Python residue fused onto
    # it (a syntax error in the file as stored).
    with open(fileName, 'w+') as file:
        json.dump(data, file, indent=4)
152394 | <reponame>baszalmstra/rosty<gh_stars>1-10
#!/usr/bin/python2
from __future__ import print_function
import sys
from rosgraph_msgs.msg import Log
def read_message():
    """Read the serialized ROS Log message produced by the Rust side and
    compare its fields against the expected values.

    :return: empty string when every field matches, otherwise a newline-
        separated description of each mismatch
    """
    name = "Test"
    msg = "This is a test"
    topics = ["Topic1", "Topic2"]
    errors = ""
    # Context manager so the handle is closed (the original leaked it and
    # shadowed the Python 2 builtin `file`).
    with open("tests/log_rust.bytes", mode="r") as blob:
        log = Log()
        log.deserialize(blob.read())
    if log.name != name:
        errors += "\nname error: Expected '{}' Got '{}'".format(name, log.name)
    if log.msg != msg:
        errors += "\nmsg error: Expected '{}' Got '{}'".format(msg, log.msg)
    if log.topics != topics:
        # BUG FIX: the "Expected" value printed here was `msg`, not `topics`.
        errors += "\ntopics error: Expected '{}' Got '{}'".format(topics, log.topics)
    return errors
if __name__ == "__main__":
    errors = read_message()
    if len(errors) == 0:
        sys.exit(0)
    else:
        # BUG FIX: `print(sys.stderr, "Got errors:")` printed the stream
        # object to stdout -- a botched py2 `print >>sys.stderr` conversion.
        print("Got errors:", file=sys.stderr)
        print(errors, file=sys.stderr)
        # Report failure to the caller (previously exited 0 even on errors).
        sys.exit(1)
| StarcoderdataPython |
1726444 | <gh_stars>0
# -*- coding: utf-8 -*-
import uqra, unittest,warnings,os, sys
from tqdm import tqdm
import numpy as np, scipy as sp
from uqra.solver.PowerSpectrum import PowerSpectrum
from uqra.environment import Kvitebjorn as Kvitebjorn
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
import pickle
# Silence the known scipy "internal gelsd" LAPACK warning that clutters test output.
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Route stdout through uqra's Logger (presumably tees prints to a log file -- confirm).
sys.stdout = uqra.utilities.classes.Logger()
# NOTE(review): machine-specific absolute path; fixtures live outside the repo.
data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/examples/JupyterNotebook'
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
    def test_mPCE(self):
        """Smoke-test uqra.PCE vs uqra.mPCE fit/predict on a noisy cubic with 2 outputs.

        NOTE(review): `cp` (chaospy?) is never imported in this module, so this
        test raises NameError as written -- confirm the missing import.
        """
        foo = lambda x: x**3 + 0.5*x + np.random.randn(*x.shape)
        dist = cp.Normal()
        x = dist.sample(1000).reshape(1,-1)
        print(x.shape)
        # two identical targets stacked to exercise the multi-output path
        y = np.squeeze(np.array([foo(x), foo(x)]).T)
        print(y.shape)
        # basis = cp.orth_ttr(5, dist)
        foo_hat = uqra.PCE(5, dist)
        foo_hat.fit(x, y, method='OLS')
        y_pred = foo_hat.predict(x)
        print(y_pred.shape)
        foo_hat = uqra.mPCE(5, dist)
        foo_hat.fit(x, y, method='OLS')
        y_pred = foo_hat.predict(x)
        print(y_pred.shape)
    def test_moments(self):
        """Print mean/std of the first four moments of stored MCS results and of
        three surrogate predictions (GLK, OLS, OLSLARS, LASSOLARS).

        NOTE(review): depends on .npy fixtures under a machine-specific path;
        makes no assertions -- output is inspected manually.
        """
        # np.set_printoptions(precision=3)
        data_dir = '/Volumes/External/MUSE_UQ_DATA/Ishigami/Data'
        y = []
        for r in range(10):
            filename = 'DoE_McsE6R{:d}.npy'.format(r)
            data_set = np.load(os.path.join(data_dir, filename))
            y.append(data_set[-1,:])
        moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
        print(np.mean(moments, axis=1))
        print(np.std(moments, axis=1))
        print('--> GLK')
        y = []
        for r in range(10):
            filename = 'DoE_McsE6R{:d}_PCE6_GLK.npy'.format(r)
            data_set = np.load(os.path.join(data_dir, filename))
            y.append(data_set)
        moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
        print(np.mean(moments, axis=1))
        print(np.std(moments, axis=1))
        print('--> OLS')
        y = []
        for r in range(10):
            filename = 'DoE_McsE6R{:d}_PCE9_OLS.npy'.format(r)
            data_set = np.load(os.path.join(data_dir, filename))
            y.append(data_set)
        moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
        print(np.mean(moments, axis=1))
        print(np.std(moments, axis=1))
        print('--> OLSLARS')
        y = []
        for r in range(10):
            filename = 'DoE_McsE6R{:d}_PCE9_OLSLARS.npy'.format(r)
            data_set = np.load(os.path.join(data_dir, filename))
            y.append(data_set)
        moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
        print(np.mean(moments, axis=1))
        print(np.std(moments, axis=1))
        print('--> LASSOLARS')
        y = []
        for r in range(10):
            filename = 'DoE_McsE6R{:d}_PCE9_LASSOLARS.npy'.format(r)
            data_set = np.load(os.path.join(data_dir, filename))
            y.append(data_set)
        moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
        print(np.mean(moments, axis=1))
        print(np.std(moments, axis=1))
    def test_loo(self):
        """Compare brute-force leave-one-out residuals (sklearn KFold with
        n_splits == n_samples) against the closed-form hat-matrix formula
        e_i = (y_i - yhat_i) / (1 - h_ii) computed via QR.

        NOTE(review): prints the two mean-squared LOO errors for eyeball
        comparison; no assertion is made.
        """
        # Loading some example data
        X, y = datasets.load_boston(return_X_y=True)
        # X = X[:100,:2]
        # y = y[:100]
        # X = np.array([[0, 0], [1, 1], [2, 2]])
        # y = np.array([0, 1, 2])
        # print(X.shape)
        print(y[:5])
        # Training classifiers
        reg1 = LinearRegression()
        reg1.fit(X,y)
        y1 = reg1.predict(X)
        # print(reg1.coef_)
        print(y1[:5])
        # b = np.linalg.lstsq(X,y)[0]
        # # print(b)
        # y2 = np.dot(X, np.array(b))
        # print(y2[:5])
        mse = []
        kf = KFold(n_splits=X.shape[0])
        residual = []
        for train_index, test_index in kf.split(X):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # H1 = np.linalg.inv(np.dot(X_train.T, X_train))
            # H2 = np.dot(H1, X_train.T)
            # H3 = np.dot(H2, y_train)
            # y_hat = np.dot(X_test, H3)
            # residual.append(y_test[0]- y_hat[0])
            reg1.fit(X_train, y_train)
            y_pred = reg1.predict(X_test)
            residual.append(y_test[0] - y_pred[0])
            # mse.append(uqra.metrics.mean_squared_error(y_test, y_pred))
        # closed-form LOO via the hat matrix H = Q Q^T from a QR of X
        Q, R = np.linalg.qr(X)
        H = np.dot(Q, Q.T)
        h = np.diagonal(H)
        y_hat = np.dot(H, y)
        e = (y-y_hat)/(1-h)
        print(y_hat[:5])
        print('e:')
        print(np.mean(np.array(residual)**2))
        print(np.mean(np.array(e)**2))
        # print(uqra.metrics.leave_one_out_error(X,y,is_adjusted=False))
        # print(np.mean(mse))
    def test_QuadratureDesign(self):
        """Print 1D and 2D Gauss quadrature abscissae/weights from
        uqra.QuadratureDesign (Legendre for uniform, Hermite for normal),
        including an interval change both by post-scaling and via dist_theta.

        NOTE(review): output is inspected manually; no assertions.
        """
        print('>>> 1D quadrature design:')
        p = 4
        doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
        doe.samples()
        print('    Legendre:')
        print('    {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
        print('    {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
        doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['normal',])
        doe.samples()
        print('    Hermite:')
        print('    {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
        print('    {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
        print('>>> 1D quadrature design: Changing interval ')
        a = -np.pi
        b = np.pi
        loc = a
        scale = b - loc
        print('    Legendre ({},{})'.format(np.around(a,2), np.around(b,2)))
        doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
        doe.samples()
        print('    From chaning inverval after uqra.doe:')
        print('    {:<15s} : {}'.format('Abscissa', np.around((b-a)/2*doe.u + (a+b)/2, 2)))
        print('    {:<15s} : {}'.format('Weights' , np.around((b-a)/2*doe.w, 2)))
        doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',], dist_theta=[(loc, scale)])
        doe.samples()
        print('    Directly from uqra.doe:')
        print('    {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
        print('    {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
        print('>>> 2D quadrature design:')
        p = 4
        doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['uniform',])
        doe.samples()
        print('    Legendre:')
        print('    {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
        print('    {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
        doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['normal',])
        doe.samples()
        print('    Hermite:')
        print('    {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
        print('    {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
    def test_RandomDesign(self):
        """Smoke-test a 3D Monte-Carlo random design on uniform(-pi, pi) margins."""
        doe = uqra.RandomDesign('MCS', n_samples=1e6, ndim=3, dist_names='uniform', dist_theta=[(-np.pi, 2*np.pi),]*3)
        doe.samples()
    def test_LatinHyperCube(self):
        """Draw 2000 LHS samples from two standard normals, print their sample
        mean/std, and dump them to a (machine-specific) .npy file.

        NOTE(review): writes to an absolute path outside the repo.
        """
        doe = uqra.LHS(distributions=[sp.stats.norm,]*2)
        doe_u, doe_x = doe.samples(2000)
        print(doe_x.shape)
        print(np.mean(doe_x, axis=1))
        print(np.std(doe_x, axis=1))
        np.save('/Users/jinsongliu/BoxSync/PhD_UT/Working_Papers/AdaptiveSparsePCE_OED/Data/LHS_Normal_2000', doe_x)
        # doe = uqra.LHS(n_samples=1e3,dist_names=['uniform', 'norm'],ndim=2,dist_theta=[(-1, 2*2), (2,1)])
        # doe.samples()
        # print(np.mean(doe.x, axis=1))
        # print(np.std(doe.x, axis=1))
    def test_OptimalDesign(self):
        """D-optimal design selection plus two adaptive refinements on a
        degree-10 Hermite basis over 100 candidate 2D normal samples.

        NOTE(review): `cp` (chaospy?) is never imported in this module, so the
        active code raises NameError -- confirm the missing import. The large
        commented block below is the earlier batch-processing variant kept for
        reference.
        """
        ### Ishigami function
        # data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/Ishigami/Data'
        ### SDOF system
        data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
        # data_dir = 'E:\Run_MUSEUQ'
        np.random.seed(100)
        # dist_x = cp.Normal()
        dist_u= cp.Iid(cp.Normal(),2)
        u_samples = dist_u.sample(100)
        basis = cp.orth_ttr(10,dist_u)
        X = basis(*u_samples).T
        doe = uqra.OptimalDesign('D', n_samples=10)
        doe_index = doe.samples(X, is_orth=True)
        doe_index = doe.adaptive(X, n_samples=10)
        print(doe_index)
        doe_index = doe.adaptive(X, n_samples=10)
        print(doe_index)

        ### 2D
        # quad_orders = range(4,11)
        # alpha = [1.0, 1.1, 1.3, 1.5, 2.0,2.5, 3.0,3.5, 5]
        # dist_u= cp.Iid(cp.Normal(),2)
        # for iquad_orders in quad_orders:
        #     basis = cp.orth_ttr(iquad_orders-1,dist_u)
        #     for r in range(10):
        #         filename = 'DoE_McsE6R{:d}_stats.npy'.format(r)
        #         data_set = np.load(os.path.join(data_dir, filename))
        #         samples_y = np.squeeze(data_set[:,4,:]).T
        #         filename = 'DoE_McsE6R{:d}.npy'.format(r)
        #         data_set = np.load(os.path.join(data_dir, filename))
        #         samples_u = data_set[0:2, :]
        #         samples_x = data_set[2:4, :]
        #         # samples_y = data_set[6 , :].reshape(1,-1)
        #         print('Quadrature Order: {:d}'.format(iquad_orders))
        #         print('Candidate samples filename: {:s}'.format(filename))
        #         print('   >> Candidate sample set shape: {}'.format(samples_u.shape))
        #         design_matrix = basis(*samples_u).T
        #         print('   >> Candidate Design matrix shape: {}'.format(design_matrix.shape))
        #         for ia in alpha:
        #             print('   >> Oversampling rate : {:.2f}'.format(ia))
        #             doe_size = min(int(len(basis)*ia), 10000)
        #             doe = uqra.OptimalDesign('S', n_samples = doe_size )
        #             doe.samples(design_matrix, u=samples_u, is_orth=True)
        #             data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
        #             filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptS{:d}'.format(r,iquad_orders,doe_size))
        #             np.save(filename, data)
        #         for ia in alpha:
        #             print('   >> Oversampling rate : {:.2f}'.format(ia))
        #             doe_size = min(int(len(basis)*ia), 10000)
        #             doe = uqra.OptimalDesign('D', n_samples = doe_size )
        #             doe.samples(design_matrix, u=samples_u, is_orth=True)
        #             data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
        #             filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptD{:d}'.format(r,iquad_orders,doe_size))
        #             np.save(filename, data)
    def test_gauss_quadrature(self):
        """Print uqra 1D Gauss quadrature nodes against numpy's Hermite,
        Legendre and Laguerre reference implementations.

        Reference values: https://keisan.casio.com/exec/system/1329114617

        NOTE(review): `cp` and `const` are never imported in this module --
        this test raises NameError as written; confirm the missing imports.
        """
        print('========================TESTING: 1D GAUSS QUADRATURE=======================')
        dists2test = [cp.Uniform(-1,1), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
        rules2test = ['leg', 'hem', 'lag', 'jacobi']
        order2test = [2,3,4,5,6,7,8]
        for idist2test, irule2test in zip(dists2test, rules2test):
            print('-'*50)
            print('>>> Gauss Quadrature with polynominal: {}'.format(const.DOE_RULE_FULL_NAMES[irule2test.lower()]))
            uqra.blockPrint()
            quad_doe = uqra.DoE('QUAD', irule2test, order2test, idist2test)
            uqra_samples = quad_doe.get_samples()
            # quad_doe.disp()
            uqra.enablePrint()
            if irule2test == 'hem':
                for i, iorder in enumerate(order2test):
                    print('>>> order : {}'.format(iorder))
                    coord1d_e, weight1d_e = np.polynomial.hermite_e.hermegauss(iorder)
                    print('{:<15s}: {}'.format('probabilist', np.around(coord1d_e,2)))
                    coord1d, weight1d = np.polynomial.hermite.hermgauss(iorder)
                    print('{:<15s}: {}'.format('physicist', np.around(coord1d,2)))
                    print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
            elif irule2test == 'leg':
                for i, iorder in enumerate(order2test):
                    print('>>> order : {}'.format(iorder))
                    coord1d, weight1d = np.polynomial.legendre.leggauss(iorder)
                    print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
                    print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
            elif irule2test == 'lag':
                for i, iorder in enumerate(order2test):
                    print('>>> order : {}'.format(iorder))
                    coord1d, weight1d = np.polynomial.laguerre.laggauss(iorder)
                    print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
                    print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
            elif irule2test == 'jacobi':
                print('NOT TESTED YET')
        print('Compared results here: https://keisan.casio.com/exec/system/1329114617')
    def test_gpce(self):
        """Fit generalized PCE coefficients via quadrature, once with Hermite
        chaos and once with each distribution's 'optimal' chaos, and print the
        coefficients for comparison.

        NOTE(review): `cp` (chaospy?) is never imported in this module -- this
        test raises NameError as written; confirm the missing import.
        """
        print('==================TESTING: Generalized PCE (Not using SurrogateModel) ===================')
        gpce_dist_to_test   = [cp.Normal(), cp.Normal(2,3), cp.Gamma(1,1), cp.Beta(1,1)]
        gpce_opt_dist       = [cp.Normal(), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
        gpce_opt_rule       = ['hem', 'hem', 'lag', 'jacobi']
        npoly_orders        = range(2,5)
        dist_zeta0          = cp.Normal()
        for i, igpce_dist in enumerate(gpce_dist_to_test):
            dist_zeta1 = gpce_opt_dist[i]
            print('>>> Testing # {:d}: gpce: {}, zeta0: {} , zeta1: {}'.format(i, igpce_dist, dist_zeta0, dist_zeta1 ))
            for ipoly_order in npoly_orders:
                print('    Polynomial order: {:d}'.format(ipoly_order))
                ## gPCE with hermite chaos
                uqra.blockPrint()
                quad_doe = uqra.DoE('QUAD', 'hem', [ipoly_order+1], dist_zeta0)
                samples_zeta= quad_doe.get_samples()
                zeta_cor, zeta_weight = samples_zeta[0]
                zeta_cor = zeta_cor.reshape((len(dist_zeta0),-1))
                x_cor = igpce_dist.inv(dist_zeta0.cdf(zeta_cor))
                zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta0, retall=True)
                x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight,np.squeeze(x_cor),retall=True)
                uqra.enablePrint()
                print('\t Hermite: {}'.format( np.around(coeffs,4)))
                ## gPCE with optimal chaos
                uqra.blockPrint()
                quad_doe = uqra.DoE('QUAD', gpce_opt_rule[i], [ipoly_order+1], dist_zeta1)
                samples_zeta= quad_doe.get_samples()
                zeta_cor, zeta_weight = samples_zeta[0]
                zeta_cor = zeta_cor.reshape((len(dist_zeta1),-1))
                x_cor = igpce_dist.inv(dist_zeta1.cdf(zeta_cor))
                zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta1, retall=True)
                x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
                uqra.enablePrint()
                print('\t Optimal: {}'.format( np.around(coeffs,4)))
    def test_PowerSpectrum(self):
        """Evaluate a JONSWAP power spectrum, derive its ACF and a realization
        of the process, dump everything to .npy files under the module-level
        data_dir, and print the spectral area against 4*std of the realization.

        NOTE(review): writes to a machine-specific path; no assertions.
        """
        print('========================TESTING: Power Spectrum =======================')
        powerspecturms2test = ['jonswap']
        powerspecturms_args = [(8, 10)]
        df  = 0.00001
        f   = np.arange(0, 10, df)
        for psd_name, psd_args in zip(powerspecturms2test, powerspecturms_args):
            psd = PowerSpectrum(psd_name, *psd_args)
            psd_f, psd_pxx = psd.get_pxx(f)
            psd_area = np.sum(psd_pxx * df)
            np.save(os.path.join(data_dir,psd_name+'_psd_f'), psd_f)
            np.save(os.path.join(data_dir,psd_name+'_psd_pxx'), psd_pxx)
            tau, acf = psd.get_acf()
            np.save(os.path.join(data_dir,psd_name+'_tau'), tau)
            np.save(os.path.join(data_dir,psd_name+'_acf'), acf)
            t, eta = psd.gen_process()
            np.save(os.path.join(data_dir,psd_name+'_t'), t)
            np.save(os.path.join(data_dir,psd_name+'_eta'), eta)
            print(t, eta)
            # t, eta = psd._gen_process_sum()
            print('PSD name: {:s}, args: {}, Area: {:.2f}, 4*std:{}'.format(psd_name, psd_args, psd_area, 4*np.std(eta)))
    def test_weighted_exceedance(self):
        """Build weighted ECDFs (via cumulative frequency) for stored surrogate
        predictions and save them next to the inputs.

        NOTE(review): `stats` is never imported in this module (only
        `scipy as sp`), so the active `stats.cumfreq` call raises NameError --
        confirm whether `from scipy import stats` is missing. Also depends on
        machine-specific .npy fixtures.
        """
        print('========================TESTING: Weighted Exceedance =======================')

        # x = np.random.normal(size=1000).reshape(1,-1)
        # res1 = stats.cumfreq(x)
        # cdf_x = res1.lowerlimit + np.linspace(0, res1.binsize*res1.cumcount.size, res1.cumcount.size)
        # cdf_y = res1.cumcount/x.size
        # ecdf_y = 1- cdf_y
        # ecdf_x = cdf_x
        # print(np.around(ecdf_x,2))
        # print(np.around(ecdf_y,2))
        # res2 = uqhelpers.get_weighted_exceedance(x)
        # print(res2.shape)
        # print(np.around(res2[0],2))
        # print(np.around(res2[1],2))
        # orders = [4] ## mcs
        orders = range(3,10) ## quad
        repeat = range(10)
        data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
        data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
        for iorder in orders:
            for r in repeat:
                filename = 'DoE_IS_McRE6R{:d}_weight.npy'.format(r)
                weights = np.load(os.path.join(data_dir_out, filename))
                ##>>> MCS results from true model
                # filename = 'DoE_IS_McRE{:d}R{:d}_stats.npy'.format(iorder,r)
                # data_out = np.load(os.path.join(data_dir_out, filename))
                # y = np.squeeze(data_out[:,4,:]).T
                filename = 'DoE_IS_QuadHem{:d}_PCE_pred_E6R{:d}.npy'.format(iorder, r)
                data_out = np.load(os.path.join(data_dir_out, filename))
                y = data_out
                print(y.shape)
                # filename = 'DoE_McRE{:d}R{:d}_stats.npy'.format(iorder, r)
                # data_out = np.load(os.path.join(data_dir, filename))
                # y = np.squeeze(data_out[:,4,:]).T
                print(r'    - exceedance for y: {:s}'.format(filename))
                for i, iy in enumerate(y):
                    print('iy.shape = {}'.format(iy.shape))
                    print('weights.shape = {}'.format(weights.shape))
                    res = stats.cumfreq(iy,numbins=iy.size, weights=weights)
                    cdf_x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, res.cumcount.size)
                    cdf_y = res.cumcount/res.cumcount[-1]
                    excd = np.array([cdf_x, cdf_y])
                    np.save(os.path.join(data_dir_out,filename[:-4]+'_y{:d}_ecdf'.format(i)), excd)
    def test_exceedance(self):
        """Compute ECDFs at a target exceedance probability (1e-6) for three
        stored surrogate prediction sets (OLS / LASSOLARS / OLSLARS) and pickle
        the results.

        NOTE(review): `uqhelpers` is never imported in this module, so the
        active `uqhelpers.ECDF` calls raise NameError -- confirm the missing
        import. The large commented block is an older variant kept for
        reference; depends on machine-specific .npy fixtures.
        """
        print('========================TESTING: Lienar Oscillator =======================')

        # print('Testing: 1D')
        # a = np.random.randint(0,10,size=10)
        # print(a)
        # a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3)
        # print('1D: return_index=False')
        # print(a_excd)
        # a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, return_index=True)
        # print('1D: return_index=True')
        # print(a_excd)
        # print('Testing: 2D')
        # a = np.random.randint(0,10,size=(2,10))
        # print(a)
        # a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3)
        # print('2D: isExpand=False, return_index=False')
        # print(a_excd)
        # a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, return_index=True)
        # print('2D: isExpand=False, return_index=True')
        # print(a_excd)
        # a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, isExpand=True, return_index=True)
        # print('2D: isExpand=True, return_index=True')
        # print(a_excd)
        # # return_period= [1,5,10]
        # # prob_fails   = [1/(p *365.25*24*3600/1000) for p in return_period]
        # return_period= [1]
        # prob_fails   = [1e-5]
        # quad_orders  = range(3,10)
        # mcs_orders   = [6]
        # repeat       = range(10)
        # orders       = mcs_orders
        # # orders     = quad_orders
        # return_all   = False
        # data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
        # data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
        # # data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
        # # data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
        # # data_dir_in = '/Users/jinsongliu/Google Drive File Stream/My Drive/MUSE_UQ_DATA/linear_oscillator'
        # for ipf, ip in zip(prob_fails, return_period):
        #     print('Target exceedance prob : {:.1e}'.format(ipf))
        #     for iorder in orders:
        #         for r in repeat:
        #             ## input
        #             filename = 'DoE_McRE6R{:d}.npy'.format(r)
        #             # filename = 'DoE_McRE7R{:d}.npy'.format(r)
        #             data_in = np.load(os.path.join(data_dir_in, filename)) # [u1, u2,..., x1, x2...]
        #             ##>>> MCS results from surrogate model
        #             filename = 'DoE_QuadHem{:d}_GPR_pred_E6R{:d}.npy'.format(iorder, r)
        #             filename = 'DoE_QuadHem{:d}R24_mPCE_Normal_pred_E7R{:d}.npy'.format(iorder, r)
        #             filename = 'DoE_QuadHem{:d}_PCE_Normal_pred_E7R{:d}.npy'.format(iorder, r)
        #             data_out = np.load(os.path.join(data_dir_out, filename))
        #             y = data_out
        #             ##>>> MCS results from true model
        #             ## bench 4
        #             # filename = 'DoE_McRE{:d}R{:d}_y_Normal.npy'.format(iorder,r)
        #             # data_out = np.load(os.path.join(data_dir_out, filename))
        #             # y = data_out.reshape(1,-1)
        #             filename = 'DoE_McRE6R{:d}_stats.npy'.format(r)
        #             data_out = np.load(os.path.join(data_dir_out, filename))
        #             y = np.squeeze(data_out[:,4,:]).T
        #             print(y.shape)
        #             # filename = 'DoE_McRE{:d}R{:d}_stats.npy'.format(iorder, r)
        #             # data_out = np.load(os.path.join(data_dir, filename))
        #             # y = np.squeeze(data_out[:,4,:]).T
        #             print(r'    - exceedance for y: {:s}'.format(filename))
        #             for i, iy in enumerate(y):
        #                 data_ = np.vstack((iy.reshape(1,-1), data_in))
        #                 iexcd = uqhelpers.get_exceedance_data(data_, ipf, isExpand=True, return_all=return_all)
        #                 return_all_str = '_all' if return_all else ''
        #                 np.save(os.path.join(data_dir_out,filename[:-4]+'_y{:d}_ecdf_P{:d}{}'.format(i, ip, return_all_str )), iexcd)

        # data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
        # p = 1e-5
        # print('Target exceedance prob : {:.1e}'.format(p))
        # # error_name = 'None'
        # # error_name = 'Normal'
        # error_name = 'Gumbel'
        # for r in range(10):
        #     # filename = 'DoE_McRE7R{:d}_y_{:s}.npy'.format(r, error_name.capitalize())
        #     # filename = 'DoE_QuadHem5_PCE_{:s}_pred_r{:d}.npy'.format(error_name.capitalize(), r)
        #     filename = 'DoE_QuadHem5R24_mPCE_{:s}_pred_r{:d}.npy'.format(error_name.capitalize(), r)
        #     data_set = np.load(os.path.join(data_dir, filename))
        #     y = np.squeeze(data_set)
        #     print(r'    - exceedance for y: {:s}'.format(filename))
        #     y_excd=uqhelpers.get_exceedance_data(y, p)
        #     np.save(os.path.join(data_dir, filename[:-4]+'_ecdf_pf5.npy'), y_excd)

        data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/Ishigami/Data'
        excd_prob= [1e-6]
        print('Target exceedance prob : {}'.format(excd_prob))
        y_excd = []
        for iexcd_prob in excd_prob:
            y = []
            for r in range(10):
                filename = 'DoE_McsE6R{:d}_PCE9_OLS.npy'.format(r)
                print(r'    - exceedance for y: {:s}'.format(filename))
                data_set = np.load(os.path.join(data_dir, filename))
                y.append(data_set)
            y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
            filename = os.path.join(data_dir,'DoE_McsE6_PCE9_OLS_pf6_ecdf.pickle')
            with open(filename, 'wb') as handle:
                pickle.dump(y_ecdf, handle)

        y_excd = []
        for iexcd_prob in excd_prob:
            y = []
            for r in range(10):
                filename = 'DoE_McsE6R{:d}_PCE9_LASSOLARS.npy'.format(r)
                print(r'    - exceedance for y: {:s}'.format(filename))
                data_set = np.load(os.path.join(data_dir, filename))
                y.append(data_set)
            y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
            filename = os.path.join(data_dir,'DoE_McsE6_PCE9_LASSOLARS_pf6_ecdf.pickle')
            with open(filename, 'wb') as handle:
                pickle.dump(y_ecdf, handle)

        y_excd = []
        for iexcd_prob in excd_prob:
            y = []
            for r in range(10):
                filename = 'DoE_McsE6R{:d}_PCE9_OLSLARS.npy'.format(r)
                print(r'    - exceedance for y: {:s}'.format(filename))
                data_set = np.load(os.path.join(data_dir, filename))
                y.append(data_set)
            y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
            filename = os.path.join(data_dir,'DoE_McsE6_PCE9_OLSLARS_pf6_ecdf.pickle')
            with open(filename, 'wb') as handle:
                pickle.dump(y_ecdf, handle)
def test_bench4(self):
    """Evaluate the BENCH4 model on pre-sampled MCS designs and save outputs.

    For repetitions r = 0..9, loads DoE_McRE6R{r}.npy from ``data_dir``
    (row 0: standardized samples ``zeta``, row 1: physical samples ``x``),
    runs the BENCH4 solver at ``x`` and writes the responses to
    DoE_McRE6R{r}_y_None.npy in the same directory.  Requires the local
    MUSE_UQ_DATA files; no assertions are made.
    """
    print('========================TESTING: BENCH 4 =======================')
    data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
    model_name = 'BENCH4'
    # ### grid points
    # x = np.linspace(-10,20,600).reshape((1,-1))
    # solver = uqra.Solver(model_name, x)
    # y = solver.run()
    # res = np.concatenate((x,y), axis=0)
    # np.save(os.path.join(data_dir,model_name.lower()), res)
    ### data from files
    for r in range(10):
        filename = 'DoE_McRE6R{:d}.npy'.format(r)
        data_set = np.load(os.path.join(data_dir, filename))
        zeta = data_set[0,:].reshape(1,-1)  # standardized-space samples (not used below)
        x = data_set[1,:].reshape(1,-1)
        solver = uqra.Solver(model_name, x)
        y = solver.run()
        np.save(os.path.join(data_dir,'DoE_McRE6R{:d}_y_None.npy'.format(r)), y)
def test_Solver(self):
    """Smoke-test uqra.linear_oscillator() on a small (Hs, Tp) input array.

    Builds a 2 x 6 array, runs the solver and unpacks the raw output and
    the QoI output.  No assertions; the large comment block below
    preserves earlier experiments with other solvers and data sets.
    """
    x = np.arange(12).reshape(2,-1)  # immediately overwritten after seeding below
    np.random.seed(100)
    # x = (Hs,Tp) = np.array((4, 12)).reshape(2,1)
    x = (Hs,Tp) = np.arange(12).reshape(2,-1)
    solver = uqra.linear_oscillator()
    print(solver)
    y_raw, y_QoI = solver.run(x)
    # print(y_raw.shape)
    # print(y_QoI.shape)
    # x = np.arange(30).reshape(3,10)
    # solver = uqra.Ishigami()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    # x = np.arange(30)
    # solver = uqra.xsinx()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    # x = np.arange(30)
    # solver = uqra.poly4th()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    # x = np.arange(30).reshape(2,15)
    # solver = uqra.polynomial_square_root_function()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    # x = np.arange(30).reshape(2,15)
    # solver = uqra.four_branch_system()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    # x = np.arange(30).reshape(2,15)
    # solver = uqra.polynomial_product_function()
    # solver.run(x)
    # print(solver)
    # print(solver.y.shape)
    ### General Solver run testing
    # print('========================TESTING: Solver =======================')
    # model_name = 'linear_oscillator'
    # kwargs = {
    # 'time_max' : 100,
    # 'dt' : 0.2,
    # }
    # tmax,dt = 1000, 0.1
    # t = np.arange(0,tmax, dt)
    # zeta = 0.01
    # omega_n = 2 # rad/s
    # m = 1
    # k = (omega_n/2/np.pi) **2 * m
    # c = zeta * 2 * np.sqrt(m * k)
    # mck = (m,c,k)
    # solver = uqra.Solver(model_name, x)
    # y = solver.run(**kwargs)
    # data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
    # np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
    # ## run solver for EC cases
    # P, nsim = 10, 25
    # data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/environment'
    # data_set = np.load(os.path.join(data_dir, 'Kvitebjørn_EC_P{:d}.npy'.format(P)))
    # EC_x = data_set[2:,:]
    # model_name = 'linear_oscillator'
    # solver = uqra.Solver(model_name, EC_x)
    # EC_y = np.array([solver.run(doe_method = 'EC') for _ in range(nsim)])
    # data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
    # np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
    # ## run solver for Hs Tp grid points
    # nsim = 30
    # data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
    # filename = 'HsTp_grid118.npy'
    # data_set = np.load(os.path.join(data_dir, filename))
    # model_name = 'linear_oscillator'
    # solver = uqra.Solver(model_name, data_set)
    # grid_out = np.array([solver.run(doe_method = 'GRID') for _ in range(nsim)])
    # np.save(os.path.join(data_dir,'HsTp_grid118_out'), grid_out)
    # data_set = np.load('DoE_McRE3R0.npy')
    # x_samples = data_set[2:,:]
    # model_name = 'linear_oscillator'
    # solver = uqra.Solver(model_name, x_samples)
    # kwargs = {'doe_method': 'MCS'}
    # samples_y = solver.run(**kwargs )
    # np.save('test_linear_oscillator_y', samples_y)
    # # filename_tags = ['R0']
    # # filename_tags = [itag+'_y' for itag in filename_tags]
    # # uqra_dataio.save_data(samples_y, 'test_linear_oscillator', os.getcwd(), filename_tags)
    # samples_y_stats = solver.get_stats()
    # np.save('test_linear_oscillator_y_stats', samples_y_stats)
    # # filename_tags = [itag+'_y_stats' for itag in filename_tags]
    # # uqra_dataio.save_data(samples_y_stats, 'test_linear_oscillator', os.getcwd(), filename_tags)
def test_surrogate_model(self):
    """Fit PCE surrogates of orders 2-4 to five analytic solvers.

    For each (solver, order) pair, builds a Gauss quadrature design in
    standard-normal space, fits uqra.PCE with fit_method='GLK' and
    evaluates prediction metrics.  Results are printed only; nothing is
    asserted.
    """
    print('========================TESTING: SurrogateModel.fit(), ~Normal =======================')
    solver1 = lambda x: x
    solver2 = lambda x: x**2 + 1
    solver3 = lambda x: x**3 + x**2 + x + 3
    solver4 = lambda x: cp.Gamma(1,1).inv(cp.Normal(0,1).cdf(x))
    solver5 = lambda x: cp.Gamma(1,1).inv(cp.Gamma(1,1).cdf(x))
    upper_tail_probs= [0.999,0.9999,0.99999]
    moment2cal = [1,2,3,4]
    metrics2cal = [ 'explained_variance_score', 'mean_absolute_error', 'mean_squared_error',
            'median_absolute_error', 'r2_score', 'r2_score_adj', 'moment', 'mquantiles']
    sample_weight = None
    multioutput = 'uniform_average'
    # squared = True
    solvers2test= [solver1,solver2,solver3, solver4, solver5]
    solver_strs = ['x', '1 + x**2', '3 + x + x**2 + x**3', 'Gamma(1,1), Hermite', 'Gamma(1,1), Optimal']
    poly_orders = range(2,5)
    dist_zeta = cp.Normal()
    dist_x = cp.Normal()
    fit_method = 'GLK'
    for isolver , isolver_str in zip(solvers2test, solver_strs):
        for ipoly_order in poly_orders:
            # uqra.blockPrint()
            doe = uqra.QuadratureDesign(ipoly_order+1, ndim = 1, dist_names=['normal'])
            doe.samples()
            doe.x = doe.u  # physical space equals standard space here (both ~Normal)
            train_y = np.squeeze(isolver(doe.x))
            train_y = np.array([train_y,train_y]).T  # duplicated column exercises multi-output fitting
            pce_model = uqra.PCE(ipoly_order, dist_zeta)
            print(len(pce_model.basis[0]))
            pce_model.fit(doe.u, train_y, w=doe.w, fit_method=fit_method)
            pce_model.predict(doe.u, train_y, metrics=metrics2cal, prob=upper_tail_probs, moment=moment2cal, sample_weight=sample_weight, multioutput=multioutput)
            # pce_model.fit(x_train, y_train, weight=x_weight)
            # uqra.enablePrint()
            # pce_model_scores = pce_model.score(x_train, y_train, metrics=metrics, moment=np.arange(1,5))
            # print('Target: {}'.format(isolver_str))
            # for i, ipoly_coeffs in enumerate(pce_model.poly_coeffs):
            #     print('{:<6s}: {}'.format('uqra'*(i==0), np.around(ipoly_coeffs,4)))
def test_LassoLars(self):
    """Fit a degree-10 PCE to a quartic with LassoLars and OlsLars.

    Samples u ~ N(0, 1), evaluates y = u**4 + u**2 + 3, fits
    uqra.PCE(10, N(0,1)) with both sparse-regression methods and prints
    the first few predictions for a quick visual comparison.  Nothing is
    asserted.

    Note: the original body imported sklearn.linear_model, LassoCV,
    LassoLarsCV, LassoLarsIC, LassoLars and sklearn.datasets without ever
    using them; those dead local imports have been removed (the method
    names are passed to uqra.PCE.fit as strings).
    """
    solver3 = lambda x: x**4 + x**2 + 3  # quartic test response
    np.random.seed(100)  # reproducible sample set
    dist_u = cp.Normal()
    u_samples = dist_u.sample(1000)
    y_samples = solver3(u_samples)
    print('y mean: {}'.format(np.mean(y_samples)))
    pce_model = uqra.PCE(10,dist_u)
    pce_model.fit(u_samples, y_samples, method='LassoLars')
    # print(pce_model.active_)
    # print(pce_model.metamodels)
    y_pred = pce_model.predict(u_samples.reshape(1,-1))
    print(y_pred[:4])
    pce_model.fit(u_samples, y_samples, method='OlsLars')
    # print(pce_model.active_)
    # print(pce_model.metamodels)
    y_pred = pce_model.predict(u_samples.reshape(1,-1))
    print(y_pred[:4])
def test_Kvitebjorn(self):
    """Generate environmental-contour samples from the Kvitebjorn model.

    Computes the P = 10-year environmental contour via Kvitebjorn.EC and
    saves it to ``data_dir``.  The commented-out sections preserve earlier
    experiments (Hs pdf tabulation, MCS sampling, cdf round-trip and
    generalized-PCE fitting).  No assertions are made.
    """
    print('========================TESTING: Kvitebjorn =======================')
    data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/environment'
    # hs1 = np.linspace(0,2.9,291)
    # hs2 = np.linspace(2.90,20, 1711)
    # hs = np.hstack((hs1, hs2))
    # hs_pdf = Kvitebjorn.hs_pdf(hs)
    # np.save(os.path.join(data_dir, 'Kvitebjorn_hs'), np.vstack((hs, hs_pdf)))
    # n = 1e6
    # samples_x = Kvitebjorn.samples(n)
    # np.save(os.path.join(data_dir, 'Kvitebjorn_samples_n'), samples_x)
    # return EC from Kvitebjorn
    P = 10
    EC_samples = Kvitebjorn.EC(P)
    np.save(os.path.join(data_dir, 'Kvitebjorn_EC_P{:d}'.format(P)), EC_samples)
    # ## test cdf method for Kvitebjørn
    # u = np.array([np.linspace(0,0.99999,11), np.linspace(0,0.99999,11)])
    # x = Kvitebjorn.samples(u)
    # u_= Kvitebjorn.cdf(x)
    # print(np.around(u,2))
    # print(np.around(x,2))
    # print(np.around(u_,2))
    # print('========================TESTING: SurrogateModel.fit(), Generalized ====================')
    # gpce_dist_to_test = [cp.Normal(), cp.Normal(2,3), cp.Gamma(1,1), cp.Beta(1,1)]
    # gpce_opt_dist = [cp.Normal(), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
    # gpce_opt_rule = ['hem', 'hem', 'lag', 'jacobi']
    # npoly_orders = range(2,5)
    # dist_zeta0 = cp.Normal()
    # for i, igpce_dist in enumerate(gpce_dist_to_test):
    # dist_zeta1 = gpce_opt_dist[i]
    # print('>> Testing # {:d}: gpce: {}, zeta0: {} , zeta1: {}'.format(i, igpce_dist, dist_zeta0, dist_zeta1 ))
    # for ipoly_order in npoly_orders:
    # print(' Polynomial order: {:d}'.format(ipoly_order))
    # ## gPCE with hermite chaos
    # uqra.blockPrint()
    # quad_doe = uqra.DoE('QUAD', 'hem', [ipoly_order+1], dist_zeta0)
    # samples_zeta= quad_doe.get_samples()
    # zeta_cor, zeta_weight = samples_zeta[0]
    # x_cor = igpce_dist.inv(dist_zeta0.cdf(zeta_cor))
    # zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta0, retall=True)
    # x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
    # uqra.enablePrint()
    # print('\t Hermite: {}'.format( np.around(coeffs,4)))
    # ## gPCE with optimal chaos
    # uqra.blockPrint()
    # quad_doe = uqra.DoE('QUAD', gpce_opt_rule[i], [ipoly_order+1], dist_zeta1)
    # samples_zeta= quad_doe.get_samples()
    # zeta_cor, zeta_weight = samples_zeta[0]
    # x_cor = igpce_dist.inv(dist_zeta1.cdf(zeta_cor))
    # zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta1, retall=True)
    # x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
    # uqra.enablePrint()
    # print('\t Optimal: {}'.format( np.around(coeffs,4)))
def test_surrogate_model_scores(self):
print('========================TESTING: SurrogateModel.scores() =======================')
def test_absolute_truth_and_meaning(self):
assert True
def test_acfPsd(self):
## refer to file test_acfPsd.py
pass
def test_gen_gauss_time_series(self):
## refer to file test_gen_gauss_time_series
pass
def test_sdof_var(self):
## refer to file: test_sdof_var
pass
def test_poly5(self):
## refer to file: test_poly5
pass
def test_solver(self):
## refer to file: test_solver
pass
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| StarcoderdataPython |
34191 | <reponame>shuklaayush/badger-system
from brownie import interface
from rich.console import Console
from helpers.utils import snapBalancesMatchForToken
from .StrategyBaseSushiResolver import StrategyBaseSushiResolver
# Module-level rich console; not referenced elsewhere in this module
# (kept for parity with sibling resolver modules — TODO confirm).
console = Console()
class StrategySushiDiggWbtcLpOptimizerResolver(StrategyBaseSushiResolver):
    """Snapshot resolver for the Sushi DIGG/wBTC LP optimizer strategy.

    Extends the base Sushi resolver with DIGG-specific balance tracking and
    rebase invariants.
    """

    def confirm_rebase(self, before, after, value):
        """
        Lp token balance should stay the same.
        Sushi balances stay the same.
        xSushi balances stay the same.
        """
        super().confirm_rebase(before, after, value)
        assert snapBalancesMatchForToken(before, after, "want")
        assert snapBalancesMatchForToken(before, after, "sushi")
        assert snapBalancesMatchForToken(before, after, "xsushi")

    def add_balances_snap(self, calls, entities):
        # Track DIGG balances and shares on top of the base Sushi snapshot.
        calls = super().add_balances_snap(calls, entities)
        strategy = self.manager.strategy
        digg = interface.IERC20(strategy.digg())
        calls = self.add_entity_balances_for_tokens(calls, "digg", digg, entities)
        calls = self.add_entity_shares_for_tokens(calls, "digg", digg, entities)
        return calls
| StarcoderdataPython |
1666685 | # Copyright 2014
| StarcoderdataPython |
3238585 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import random
import string
from nose import tools
from ..core import compat
from ..core import driver
from ..core import exceptions
logger = logging.getLogger(__name__)
class Driver(object):
    """Generic conformance test-suite for docker-registry storage drivers.

    A concrete test class supplies the driver ``scheme`` plus its ``path``
    and ``config``; ``setUp`` instantiates the driver through the
    ``driver.fetch`` registry.  The ``test_*`` methods exercise the common
    storage contract: content round-trips (bytes/unicode/json), existence,
    removal, streaming, directory listing and the registry path helpers.

    Fix vs. original: ``test_remove_folder`` asserted on the bare file
    names (``filename1``/``filename2``) which were never created — the
    files were stored under ``dirname/…`` — so all post-removal checks
    were vacuously true.  The checks now use the full paths.
    """

    def __init__(self, scheme=None, path=None, config=None):
        self.scheme = scheme
        self.path = path
        self.config = config

    # Load the requested driver
    def setUp(self):
        storage = driver.fetch(self.scheme)
        self._storage = storage(self.path, self.config)

    def tearDown(self):
        pass

    def gen_random_string(self, length=16):
        """Return a random lowercase alphanumeric string of *length* chars."""
        return ''.join([random.choice(string.ascii_uppercase + string.digits)
                        for _ in range(length)]).lower()

    def simplehelp(self, path, content, expected, size=0):
        """Round-trip *content* through put/get_content and compare bytes.

        Reads twice so any caching layer is exercised; optionally checks
        get_size.
        """
        self._storage.put_content(path, content)
        assert self._storage.get_content(path) == expected
        assert self._storage.get_content(path) == expected
        if size:
            assert self._storage.get_size(path) == size

    def unicodehelp(self, path, content, expected):
        """Round-trip *content* through put/get_unicode and compare text."""
        self._storage.put_unicode(path, content)
        assert self._storage.get_unicode(path) == expected
        assert self._storage.get_unicode(path) == expected

    def jsonhelp(self, path, content, expected):
        """Round-trip *content* through put/get_json and compare objects."""
        self._storage.put_json(path, content)
        assert self._storage.get_json(path) == expected
        assert self._storage.get_json(path) == expected

    def test_exists_non_existent(self):
        filename = self.gen_random_string()
        assert not self._storage.exists(filename)

    def test_exists_existent(self):
        filename = self.gen_random_string()
        self._storage.put_content(filename, b'')
        assert self._storage.exists(filename)

    # get / put
    def test_write_read_1(self):
        filename = self.gen_random_string()
        content = b'a'
        expected = b'a'
        self.simplehelp(filename, content, expected, len(expected))

    def test_write_read_2(self):
        filename = self.gen_random_string()
        content = b'\xc3\x9f'  # UTF-8 encoding of 'ß'
        expected = b'\xc3\x9f'
        self.simplehelp(filename, content, expected, len(expected))

    def test_write_read_3(self):
        filename = self.gen_random_string()
        content = u'ß'.encode('utf8')
        expected = b'\xc3\x9f'
        self.simplehelp(filename, content, expected, len(expected))

    def test_write_read_4(self):
        filename = self.gen_random_string()
        content = 'ß'
        if compat.is_py2:
            content = content.decode('utf8')
        content = content.encode('utf8')
        expected = b'\xc3\x9f'
        self.simplehelp(filename, content, expected, len(expected))

    def test_write_read_5(self):
        filename = self.gen_random_string()
        content = self.gen_random_string().encode('utf8')
        expected = content
        self.simplehelp(filename, content, expected, len(expected))

    def test_write_read_6(self):
        filename = self.gen_random_string()
        content = self.gen_random_string(1024 * 1024).encode('utf8')  # 1MB payload
        expected = content
        self.simplehelp(filename, content, expected, len(expected))

    # get / put unicode
    def test_unicode_1(self):
        filename = self.gen_random_string()
        content = 'a'
        expected = u'a'
        self.unicodehelp(filename, content, expected)

    def test_unicode_2(self):
        filename = self.gen_random_string()
        content = b'\xc3\x9f'.decode('utf8')
        expected = u'ß'
        self.unicodehelp(filename, content, expected)

    def test_unicode_3(self):
        filename = self.gen_random_string()
        content = u'ß'
        expected = u'ß'
        self.unicodehelp(filename, content, expected)

    def test_unicode_4(self):
        filename = self.gen_random_string()
        content = 'ß'
        if compat.is_py2:
            content = content.decode('utf8')
        expected = u'ß'
        self.unicodehelp(filename, content, expected)

    def test_unicode_5(self):
        filename = self.gen_random_string()
        content = self.gen_random_string()
        expected = content
        self.unicodehelp(filename, content, expected)

    def test_unicode_6(self):
        filename = self.gen_random_string()
        content = self.gen_random_string(1024 * 1024)
        expected = content
        self.unicodehelp(filename, content, expected)

    # JSON
    def test_json(self):
        filename = self.gen_random_string()
        content = {u"ß": u"ß"}
        expected = {u"ß": u"ß"}
        self.jsonhelp(filename, content, expected)

    # Removes
    def test_remove_existent(self):
        filename = self.gen_random_string()
        content = self.gen_random_string().encode('utf8')
        self._storage.put_content(filename, content)
        self._storage.remove(filename)
        assert not self._storage.exists(filename)

    def test_remove_folder(self):
        dirname = self.gen_random_string()
        filename1 = self.gen_random_string()
        filename2 = self.gen_random_string()
        path1 = '%s/%s' % (dirname, filename1)
        path2 = '%s/%s' % (dirname, filename2)
        content = self.gen_random_string().encode('utf8')
        self._storage.put_content(path1, content)
        self._storage.put_content(path2, content)
        self._storage.remove(dirname)
        # FIX: assert on the full paths that were actually written; the
        # original checked the bare file names, which never existed, so
        # these assertions could not fail.
        assert not self._storage.exists(path1)
        assert not self._storage.exists(path2)
        assert not self._storage.exists(dirname)
        # Check the lru is ok
        try:
            self._storage.get_content(path1)
            assert False
        except Exception:
            pass
        try:
            self._storage.get_content(path2)
            assert False
        except Exception:
            pass

    @tools.raises(exceptions.FileNotFoundError)
    def test_remove_inexistent(self):
        filename = self.gen_random_string()
        self._storage.remove(filename)

    @tools.raises(exceptions.FileNotFoundError)
    def test_read_inexistent(self):
        filename = self.gen_random_string()
        self._storage.get_content(filename)

    @tools.raises(exceptions.FileNotFoundError)
    def test_get_size_inexistent(self):
        filename = self.gen_random_string()
        self._storage.get_size(filename)

    def test_stream(self):
        filename = self.gen_random_string()
        # 7 random characters; multiply by 1024 * 1024 for a real 7MB stress test
        content = self.gen_random_string(7).encode('utf8')  # * 1024 * 1024
        # test exists
        io = compat.StringIO(content)
        logger.debug("%s should NOT exists still" % filename)
        assert not self._storage.exists(filename)
        self._storage.stream_write(filename, io)
        io.close()
        logger.debug("%s should exist now" % filename)
        assert self._storage.exists(filename)
        # test read / write
        data = compat.bytes()
        for buf in self._storage.stream_read(filename):
            data += buf
        assert content == data
        # test bytes_range only if the storage backend suppports it
        if self._storage.supports_bytes_range:
            b = random.randint(0, math.floor(len(content) / 2))
            bytes_range = (b, random.randint(b + 1, len(content) - 1))
            data = compat.bytes()
            for buf in self._storage.stream_read(filename, bytes_range):
                data += buf
            expected_content = content[bytes_range[0]:bytes_range[1] + 1]
            assert data == expected_content
        # logger.debug("Content length is %s" % len(content))
        # logger.debug("And retrieved content length should equal it: %s" %
        #              len(data))
        # logger.debug("got content %s" % content)
        # logger.debug("got data %s" % data)
        # test remove
        self._storage.remove(filename)
        assert not self._storage.exists(filename)

    @tools.raises(exceptions.FileNotFoundError)
    def test_stream_read_inexistent(self):
        filename = self.gen_random_string()
        data = compat.bytes()
        for buf in self._storage.stream_read(filename):
            data += buf

    @tools.raises(exceptions.FileNotFoundError)
    def test_inexistent_list_directory(self):
        notexist = self.gen_random_string()
        iterator = self._storage.list_directory(notexist)
        next(iterator)

    # XXX only elliptics return StopIteration for now - though we should
    # return probably that for all
    @tools.raises(exceptions.FileNotFoundError, StopIteration)
    def test_empty_list_directory(self):
        path = self.gen_random_string()
        content = self.gen_random_string().encode('utf8')
        self._storage.put_content(path, content)
        iterator = self._storage.list_directory(path)
        next(iterator)

    def test_list_directory(self):
        base = self.gen_random_string()
        filename1 = self.gen_random_string()
        filename2 = self.gen_random_string()
        fb1 = '%s/%s' % (base, filename1)
        fb2 = '%s/%s' % (base, filename2)
        content = self.gen_random_string().encode('utf8')
        self._storage.put_content(fb1, content)
        self._storage.put_content(fb2, content)
        assert sorted([fb1, fb2]
                      ) == sorted(list(self._storage.list_directory(base)))

    # def test_root_list_directory(self):
    #     fb1 = self.gen_random_string()
    #     fb2 = self.gen_random_string()
    #     content = self.gen_random_string()
    #     self._storage.put_content(fb1, content)
    #     self._storage.put_content(fb2, content)
    #     print(list(self._storage.list_directory()))
    #     assert sorted([fb1, fb2]
    #                   ) == sorted(list(self._storage.list_directory()))

    @tools.raises(exceptions.FileNotFoundError, StopIteration)
    def test_empty_after_remove_list_directory(self):
        base = self.gen_random_string()
        filename1 = self.gen_random_string()
        filename2 = self.gen_random_string()
        fb1 = '%s/%s' % (base, filename1)
        fb2 = '%s/%s' % (base, filename2)
        content = self.gen_random_string().encode('utf8')
        self._storage.put_content(fb1, content)
        self._storage.put_content(fb2, content)
        self._storage.remove(fb1)
        self._storage.remove(fb2)
        iterator = self._storage.list_directory(base)
        next(iterator)

    def test_paths(self):
        """All registry path helpers must point at not-yet-existing paths."""
        namespace = 'namespace'
        repository = 'repository'
        tag = 'sometag'
        image_id = 'imageid'
        p = self._storage.images_list_path(namespace, repository)
        assert not self._storage.exists(p)
        p = self._storage.image_json_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_mark_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_checksum_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_layer_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_ancestry_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_files_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.image_diff_path(image_id)
        assert not self._storage.exists(p)
        p = self._storage.repository_path(namespace, repository)
        assert not self._storage.exists(p)
        p = self._storage.tag_path(namespace, repository)
        assert not self._storage.exists(p)
        p = self._storage.tag_path(namespace, repository, tag)
        assert not self._storage.exists(p)
        p = self._storage.repository_json_path(namespace, repository)
        assert not self._storage.exists(p)
        p = self._storage.repository_tag_json_path(namespace, repository, tag)
        assert not self._storage.exists(p)
        p = self._storage.index_images_path(namespace, repository)
        assert not self._storage.exists(p)
        p = self._storage.private_flag_path(namespace, repository)
        assert not self._storage.exists(p)
| StarcoderdataPython |
1662439 | <reponame>eragasa/pypospack
import numpy as np
from pypospack.crystal import SimulationCell
class Diamond(SimulationCell):
    """Diamond (A4) crystal structure simulation cell (e.g. Si, Ge, C).

    Args:
        symbols: one-element list with the chemical symbol, e.g. ['Si'].
        a0: cubic lattice constant.
        cell_type: 'cubic' for the 8-atom conventional cell, or
            'primitive' for the 2-atom rhombohedral cell.
    """

    def __init__(self, symbols=['Si'], a0=5.431, cell_type='cubic'):
        SimulationCell.__init__(self)
        # Dispatch table; an unknown cell_type raises KeyError.
        cell_initializers = {}
        cell_initializers['cubic'] = self.initialize_cubic_cell
        cell_initializers['primitive'] = self.initialize_primitive_cell
        cell_initializers[cell_type](symbol=symbols[0], a0=a0)

    def initialize_cubic_cell(self, symbol='Si', a0=5.431):
        """Build the conventional cubic diamond cell (8 atoms).

        Diamond is an FCC lattice with a two-atom basis: the four FCC
        sites plus the same four sites shifted by (1/4, 1/4, 1/4).
        """
        self.a0 = a0
        a1 = np.array([1, 0, 0])
        a2 = np.array([0, 1, 0])
        a3 = np.array([0, 0, 1])
        self.H = np.stack((a1, a2, a3))
        self.add_atom(symbol=symbol, position=[0.000000, 0.000000, 0.000000])
        self.add_atom(symbol=symbol, position=[0.250000, 0.750000, 0.750000])
        self.add_atom(symbol=symbol, position=[0.500000, 0.000000, 0.500000])
        self.add_atom(symbol=symbol, position=[0.000000, 0.500000, 0.500000])
        self.add_atom(symbol=symbol, position=[0.500000, 0.500000, 0.000000])
        self.add_atom(symbol=symbol, position=[0.750000, 0.250000, 0.750000])
        self.add_atom(symbol=symbol, position=[0.750000, 0.750000, 0.250000])
        # BUG FIX: the (1/4, 1/4, 1/4) basis site was missing, leaving only
        # 7 of the 8 atoms of the diamond conventional cell.
        self.add_atom(symbol=symbol, position=[0.250000, 0.250000, 0.250000])

    def initialize_primitive_cell(self, symbol='Si', a0=5.431):
        """Build the primitive (rhombohedral FCC) diamond cell (2 atoms).

        The two basis atoms differ by (1/4, 1/4, 1/4) in primitive
        fractional coordinates, i.e. one quarter of the cube body
        diagonal; the origin choice here places them at 0.5 and 0.75.
        """
        self.a0 = a0
        a1 = 1/2 * np.array([0, 1, 1])
        a2 = 1/2 * np.array([1, 0, 1])
        a3 = 1/2 * np.array([1, 1, 0])
        self.H = np.stack((a1, a2, a3))
        self.add_atom(symbol=symbol, position=[0.7500, 0.75000, 0.75000])
        self.add_atom(symbol=symbol, position=[0.5000, 0.50000, 0.50000])
if __name__ == "__main__":
from pypospack.io.vasp import Poscar
o = Poscar(obj_cell=Diamond(symbols=['Si'],a0=5.431,cell_type='cubic'))
o.write(filename="Si_dia_unit.vasp")
o = Poscar(obj_cell=Diamond(symbols=['Si'],a0=5.431,cell_type='primitive'))
o.write(filename='Si_dia_prim.vasp')
o = Diamond(symbols=['Si'],a0=5.431,cell_type='primitive')
print(o.b1)
print(o.b2)
print(o.b3)
| StarcoderdataPython |
1671493 | <reponame>liaomars/douban_login
import requests
from PIL import Image
from pyquery import PyQuery as pq
# Browser-like request headers sent with every call to the douban login
# endpoints (Referer/Host/Origin all point at accounts.douban.com).
HEADER = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36',
    'Referer': 'https://accounts.douban.com/login',
    'Host': 'accounts.douban.com',
    'Origin': 'https://accounts.douban.com',
}
class DoubanLogin(object):
    """Interactive douban.com login helper built on a requests session.

    Fetches the login page, shows the image captcha to the user when one
    is present, then POSTs the credentials with the captcha solution.
    """

    def __init__(self, email, password):
        # Login page URL (used for the initial GET)
        self.login_url = 'https://www.douban.com/accounts/login'
        # session object so cookies persist across requests
        self._session = requests.session()
        # form data for the login POST
        self.form_data = {
            'redir': 'https://www.douban.com',
            'form_email': email,
            'form_password': password,
            'login': '登录'
        }

    def login(self):
        """Run the login flow, prompting the user for a captcha if shown."""
        # fetch the login page
        res = self._session.get(self.login_url, headers=HEADER)
        # check whether a captcha is required
        jq = pq(res.text)
        captcha_url = jq('#captcha_image').attr('src')
        # if there is a captcha, form_data must also carry
        # captcha-solution and captcha-id
        if captcha_url:
            captcha_id = self.get_captcha(jq)
            if captcha_id:
                # download and display the captcha image, ask the user to solve it
                self.get_captcha_img(captcha_id)
                img = Image.open('.captcha.png')
                img.show()
                captcha = input('请输入图片验证码')
                self.form_data.update({'captcha-solution': captcha, 'captcha-id': captcha_id})
            else:
                print('没有获取到captcha_id参数')
        # submit the login request
        # NOTE(review): this POSTs to accounts.douban.com/login while
        # self.login_url points at www.douban.com/accounts/login -- confirm
        # whether the two endpoints are interchangeable.
        html = self._session.post('https://accounts.douban.com/login', data=self.form_data, headers=HEADER)
        print(html.text)

    def get_captcha(self, jq):
        """
        Extract the captcha-id hidden field; it is needed both for the form
        submission and for downloading the captcha image.
        :param jq: PyQuery document of the login page
        :return: captcha id string, or None when absent
        """
        captcha_id = jq('input[name="captcha-id"]').val()
        return captcha_id

    def get_captcha_img(self, captcha_id):
        """
        Download the captcha image into the local file .captcha.png.
        :param captcha_id: id returned by get_captcha()
        :return: None
        """
        url = 'https://www.douban.com/misc/captcha?id={}&size=s'
        res = self._session.get(url.format(captcha_id), headers=HEADER)
        with open('.captcha.png', 'wb') as f:
            f.write(res.content)
if __name__ == '__main__':
    # Interactive entry point: prompt for credentials and attempt the login.
    email = input('请输入注册时的邮箱/用户名/手机:')
    password = input('请输入密码:')
    douban = DoubanLogin(email, password)
    douban.login()
| StarcoderdataPython |
2489 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from core.models import Post, Category, Tag
from backend.forms import PostForm, CategoryForm, TagForm
# Create your views here.
@login_required()
def index(request):
    """Render the backend dashboard landing page."""
    return render(request, 'backend/index.html', {'nav_active': 'index'})
@login_required()
def posts(request):
    """List all posts, newest first, paginated 10 per page."""
    def _page(paginator, number):
        # Fall back to the first page on a bad page number and to the
        # last page when the requested one is past the end.
        try:
            return paginator.page(number)
        except PageNotAnInteger:
            return paginator.page(1)
        except EmptyPage:
            return paginator.page(paginator.num_pages)

    paginator = Paginator(list(reversed(Post.objects.all())), 10)
    context = {
        'nav_active': 'posts',
        'posts': _page(paginator, request.GET.get('page')),
    }
    return render(request, 'backend/posts.html', context)
@login_required()
def add_post(request):
    """Create a new post; redirect to the post list on success."""
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, 'Post created.')
            return HttpResponseRedirect(reverse('user_panel_posts'))
    else:
        form = PostForm()
    # On GET render an empty form; on invalid POST re-render the bound form.
    return render(request, 'backend/edit_post.html',
                  {'nav_active': 'posts', 'form': form})
@login_required()
def edit_post(request, post_id):
    """Edit an existing post; redirect to the post list on success."""
    post = Post.objects.get(pk=post_id)
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            form.save()
            messages.success(request, 'Post updated.')
            return HttpResponseRedirect(reverse('user_panel_posts'))
    else:
        form = PostForm(instance=post)
    return render(request, 'backend/edit_post.html',
                  {'nav_active': 'posts', 'post': post, 'form': form})
@login_required()
def delete_post(request, post_id):
    """Delete the given post and return to the post list."""
    Post.objects.get(pk=post_id).delete()
    messages.success(request, 'Post deleted.')
    return HttpResponseRedirect(reverse('user_panel_posts'))
@login_required()
def categories(request):
    """List all categories, newest first, paginated 10 per page."""
    def _page(paginator, number):
        try:
            return paginator.page(number)
        except PageNotAnInteger:
            return paginator.page(1)
        except EmptyPage:
            return paginator.page(paginator.num_pages)

    paginator = Paginator(list(reversed(Category.objects.all())), 10)
    context = {
        'nav_active': 'categories',
        'categories': _page(paginator, request.GET.get('page')),
    }
    return render(request, 'backend/categories.html', context)
@login_required()
def add_category(request):
    """Create a new category; redirect to the category list on success."""
    if request.method == 'POST':
        form = CategoryForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, 'Category created.')
            return HttpResponseRedirect(reverse('user_panel_categories'))
    else:
        form = CategoryForm()
    return render(request, 'backend/edit_category.html',
                  {'nav_active': 'categories', 'form': form})
@login_required()
def edit_category(request, category_id):
    """Edit an existing category; redirect to the category list on success."""
    category = Category.objects.get(pk=category_id)
    if request.method == 'POST':
        form = CategoryForm(request.POST, request.FILES, instance=category)
        if form.is_valid():
            form.save()
            messages.success(request, 'Category updated.')
            return HttpResponseRedirect(reverse('user_panel_categories'))
    else:
        form = CategoryForm(instance=category)
    return render(request, 'backend/edit_category.html',
                  {'nav_active': 'categories', 'category': category, 'form': form})
@login_required()
def delete_category(request, category_id):
    """Delete the given category and return to the category list."""
    Category.objects.get(pk=category_id).delete()
    messages.success(request, 'Category deleted.')
    return HttpResponseRedirect(reverse('user_panel_categories'))
@login_required()
def tags(request):
    """List all tags, newest first, paginated 10 per page."""
    def _page(paginator, number):
        try:
            return paginator.page(number)
        except PageNotAnInteger:
            return paginator.page(1)
        except EmptyPage:
            return paginator.page(paginator.num_pages)

    paginator = Paginator(list(reversed(Tag.objects.all())), 10)
    context = {
        'nav_active': 'tags',
        'tags': _page(paginator, request.GET.get('page')),
    }
    return render(request, 'backend/tags.html', context)
@login_required()
def add_tag(request):
    """Create a new tag; redirect to the tag list on success."""
    if request.method == 'POST':
        form = TagForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, 'Tag created.')
            return HttpResponseRedirect(reverse('user_panel_tags'))
    else:
        form = TagForm()
    return render(request, 'backend/edit_tag.html',
                  {'nav_active': 'tags', 'form': form})
@login_required()
def edit_tag(request, tag_id):
    """Edit an existing tag; redirect to the tag list on success."""
    tag = Tag.objects.get(pk=tag_id)
    if request.method == 'POST':
        form = TagForm(request.POST, request.FILES, instance=tag)
        if form.is_valid():
            form.save()
            messages.success(request, 'Tag updated.')
            return HttpResponseRedirect(reverse('user_panel_tags'))
    else:
        form = TagForm(instance=tag)
    return render(request, 'backend/edit_tag.html',
                  {'nav_active': 'tags', 'tag': tag, 'form': form})
@login_required()
def delete_tag(request, tag_id):
    """Delete the given tag and return to the tag list."""
    Tag.objects.get(pk=tag_id).delete()
    messages.success(request, 'Tag deleted.')
    return HttpResponseRedirect(reverse('user_panel_tags'))
3275811 | import os
import numpy as np
import tensorflow as tf
from keras.preprocessing.image import Iterator, img_to_array, array_to_img
from keras import backend as K
import logging
from utils.reporting.logging import log_message
from utils.data_and_files.file_utils import get_file_path
import lmdb
import pickle
from utils.data_and_files.data_utils import convert_img
class LMDB_ImageIterator(Iterator):
def __init__(self,
             num_images,
             category,
             lmdb_dir,
             batch_size,
             episode_len=20,
             episode_shift=10,
             shuffle=True,
             seed=None,
             save_to_dir=None,
             class_mode='categorical',
             save_prefix='',
             save_format='jpeg'
             ):
    """Keras-style iterator over frames stored in an LMDB database.

    Args:
        num_images: total number of records available in the database.
        category: human-readable name used only for the startup log line.
        lmdb_dir: path of the LMDB environment to read from.
        batch_size: number of samples per batch.
        episode_len: number of consecutive frames per episode.
        episode_shift: frame offset between the input and ground-truth
            episodes.
        shuffle, seed: forwarded to keras' Iterator base class.
        save_to_dir, save_prefix, save_format: stored but not referenced
            in the visible batch-generation code (kept for keras Iterator
            API parity -- TODO confirm).
        class_mode: batch layout selector (e.g. 'episode', 'episode_flat').
    """
    self.category = category
    self.batch_size = batch_size
    self.lmdb_dir = lmdb_dir
    self.episode_len = episode_len
    self.episode_shift = episode_shift
    self.class_mode = class_mode
    self.save_to_dir = save_to_dir
    self.save_prefix = save_prefix
    self.save_format = save_format
    print("Initializing Iterator " + category + " Number of images " + str(num_images))
    #print(category, lmdb_dir, batch_size, shuffle, seed)
    # Read-only LMDB environment; kept open for the iterator's lifetime
    # and closed in __del__.
    self.env = lmdb.open(lmdb_dir, readonly=True)
    Iterator.__init__(self, num_images, batch_size, shuffle, seed)
def __del__(self):
    # Close the LMDB environment when the iterator is garbage-collected.
    self.env.close()
def _get_batches_of_transformed_samples(self, index_array):
#print(index_array)
images, labels = [], {}
if len(index_array) < self.batch_size:
diff = self.batch_size // len(index_array) + 1
index_array = np.repeat(index_array, diff, axis=0)[:self.batch_size]
with self.env.begin() as txn:
if self.class_mode == 'episode':
batch_x = np.zeros((len(index_array), self.episode_len) + (53, 70, 3), dtype=np.float32) #self.image_shape
batch_gt = np.zeros((len(index_array), self.episode_len) + (53, 70, 3), dtype=np.float32)
# build batch of image data
for i, j in enumerate(index_array):
data = txn.get(f"{j:08}".encode("ascii"))
start_frame = pickle.loads(data)
last_frame_idx =j + self.episode_len + self.episode_shift
try:
last_frame_data = txn.get(f"{last_frame_idx:08}".encode("ascii"))
except:
j = j - (self.episode_len + self.episode_shift)
last_frame = pickle.loads(last_frame_data)
if last_frame.dir != start_frame.dir:
j = j - (self.episode_len + self.episode_shift)
imgs = []
for ix in range(j, j + self.episode_len):
img_data = txn.get(f"{ix:08}".encode("ascii"))
frame = pickle.loads(img_data)
imgs += [frame.get_image()]
imgs = np.array(imgs)
batch_x[i] = imgs
imgs = []
for ix in range(j + self.episode_shift, j + self.episode_len + self.episode_shift):
img_data = txn.get(f"{ix:08}".encode("ascii"))
frame = pickle.loads(img_data)
imgs += [frame.get_image()]
imgs = np.array(imgs)
batch_gt[i] = imgs
return {"xt0": batch_x, "xt1": batch_gt}
elif self.class_mode == 'episode_flat':
if self.class_mode == 'episode':
batch_x = np.zeros((len(index_array), self.episode_len) + self.image_shape, dtype=self.dtype)
batch_gt = np.zeros((len(index_array), self.episode_len) + self.image_shape, dtype=self.dtype)
# build batch of image data
for i, j in enumerate(index_array):
data = txn.get(f"{j:08}".encode("ascii"))
start_frame = pickle.loads(data)
last_frame_idx = j + self.episode_len + self.episode_shift
try:
last_frame_data = txn.get(f"{last_frame_idx:08}".encode("ascii"))
except:
j = j - (self.episode_len + self.episode_shift)
last_frame = pickle.loads(last_frame_data)
if last_frame.dir != start_frame.dir:
j = j - (self.episode_len + self.episode_shift)
imgs = []
for ix in range(j, j + self.episode_len):
img_data = txn.get(f"{ix:08}".encode("ascii"))
frame = pickle.loads(img_data)
imgs += [frame.get_image()]
imgs = np.array(imgs)
batch_x[i] = imgs
imgs = []
for ix in range(j + self.episode_shift, j + self.episode_len + self.episode_shift):
img_data = txn.get(f"{ix:08}".encode("ascii"))
frame = pickle.loads(img_data)
imgs += [frame.get_image()]
imgs = np.array(imgs)
batch_gt[i] = imgs
return {"xt0": np.reshape(batch_x, (-1,) + self.image_shape), "xt1": np.reshape(batch_gt, (-1,) + self.image_shape)}
elif self.class_mode == 'categorical':
for image_id in index_array:
data = txn.get(f"{image_id:08}".encode("ascii"))
dataset = pickle.loads(data)
images.append(dataset.get_image())
labels_list = [attr for attr in dir(dataset) if
not callable(getattr(dataset, attr)) and (not attr.startswith("__")) and
(not attr in ['image', 'channels', 'size'])]
for label in labels_list:
if label in labels.keys():
labels[label].append(eval(f'dataset.{label}'))
else:
labels.update({label: [eval(f'dataset.{label}')]})
return {'images': images, **labels} | StarcoderdataPython |
3295996 | <filename>tests/conftest.py
import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import edi_835_parser
current_path = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def blue_cross_nc_sample():
    """Parsed 835 transaction set for the Blue Cross NC sample file."""
    sample_path = '/'.join([current_path, 'test_edi_835_files', 'blue_cross_nc_sample.txt'])
    return edi_835_parser.parse(sample_path)
@pytest.fixture
def emedny_sample():
    """Parsed 835 transaction set for the eMedNY sample file."""
    sample_path = '/'.join([current_path, 'test_edi_835_files', 'emedny_sample.txt'])
    return edi_835_parser.parse(sample_path)
@pytest.fixture
def sample_835():
    """Parsed 835 transaction set for the generic sample file."""
    sample_path = '/'.join([current_path, 'test_edi_835_files', 'sample_835.txt'])
    return edi_835_parser.parse(sample_path)
# @pytest.fixture
# def sample2_835():
# path = current_path + '/test_edi_835_files/sample2_835.txt'
# return edi_835_parser.parse(path)
#
#
# @pytest.fixture
# def sample3_835():
# path = current_path + '/test_edi_835_files/sample3_835.txt'
# return edi_835_parser.parse(path)
| StarcoderdataPython |
3366551 | <filename>multidim_image_augmentation/python/kernel_tests/cubic_interpolation3d_op_test.py
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class CubicInterpolationTest(tf.test.TestCase):
  """Checks cubic_interpolation3d reproduces control-grid values exactly."""

  def test_3DInterpolation(self):
    with self.session():
      # Control grid filled with a running counter so every
      # (position, channel) cell holds a distinct value.
      grid = np.ndarray([5, 5, 5, 2], dtype=np.float32)
      c = 0
      for x0 in range(grid.shape[0]):
        for x1 in range(grid.shape[1]):
          for x2 in range(grid.shape[2]):
            for channel in range(grid.shape[3]):
              grid[x0, x1, x2, channel] = c
              c += 1
      # Upsample by a factor of 10 along every spatial axis.
      dense = augmentation_ops.cubic_interpolation3d(
          input=grid,
          factors=[10, 10, 10],
          output_spatial_shape=[21, 21, 21]).eval()
      precision = 4
      # Control points must be reproduced exactly: grid index 1 maps to
      # dense index 0 and one grid step equals 10 dense steps.
      self.assertAlmostEqual(grid[1, 1, 1, 0], dense[0, 0, 0, 0], precision)
      self.assertAlmostEqual(grid[1, 1, 3, 0], dense[0, 0, 20, 0], precision)
      self.assertAlmostEqual(grid[1, 3, 1, 0], dense[0, 20, 0, 0], precision)
      self.assertAlmostEqual(grid[3, 1, 1, 0], dense[20, 0, 0, 0], precision)
      self.assertAlmostEqual(grid[2, 2, 2, 0], dense[10, 10, 10, 0], precision)
      self.assertAlmostEqual(grid[3, 3, 3, 0], dense[20, 20, 20, 0], precision)
      self.assertAlmostEqual(grid[1, 1, 1, 1], dense[0, 0, 0, 1], precision)
      self.assertAlmostEqual(grid[1, 1, 3, 1], dense[0, 0, 20, 1], precision)
      self.assertAlmostEqual(grid[1, 3, 1, 1], dense[0, 20, 0, 1], precision)
      self.assertAlmostEqual(grid[3, 1, 1, 1], dense[20, 0, 0, 1], precision)
      self.assertAlmostEqual(grid[2, 2, 2, 1], dense[10, 10, 10, 1], precision)
      self.assertAlmostEqual(grid[3, 3, 3, 1], dense[20, 20, 20, 1], precision)

  def test_3DInterpolationSingleSlice(self):
    with self.session():
      # Same counter fill, but only 3 control planes along axis 0 and a
      # factor of 1 there, producing a single output slice.
      grid = np.ndarray([3, 5, 5, 2], dtype=np.float32)
      c = 0
      for x0 in range(grid.shape[0]):
        for x1 in range(grid.shape[1]):
          for x2 in range(grid.shape[2]):
            for channel in range(grid.shape[3]):
              grid[x0, x1, x2, channel] = c
              c += 1
      dense = augmentation_ops.cubic_interpolation3d(
          input=grid,
          factors=[1, 10, 10],
          output_spatial_shape=[1, 21, 21],
      ).eval()
      precision = 4
      self.assertAlmostEqual(grid[1, 1, 1, 0], dense[0, 0, 0, 0], precision)
      self.assertAlmostEqual(grid[1, 1, 3, 0], dense[0, 0, 20, 0], precision)
      self.assertAlmostEqual(grid[1, 3, 1, 0], dense[0, 20, 0, 0], precision)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| StarcoderdataPython |
3231776 | <gh_stars>0
# Advent of Code day 2: validate password-policy entries of the form
# "<low>-<high> <letter>: <password>".
with open("advent2.txt", "r") as file:
    input_ = file.read().split('\n')

# Parse each entry into [low, high, letter, password].
modified_input = []
for entry in input_:
    dash_parts = entry.split('-')
    space_parts = dash_parts[1].split(' ', 1)
    colon_parts = space_parts[1].split(': ')
    modified_input.append([int(dash_parts[0]), int(space_parts[0]),
                           colon_parts[0], colon_parts[1]])

# Part 1: letter must occur between low and high times (inclusive).
correct_passwords = [e for e in modified_input
                     if e[0] <= e[3].count(e[2]) <= e[1]]
print(len(correct_passwords))

# Part 2: exactly one of the (1-based) positions low/high holds the letter.
correct_passwords2 = [e for e in modified_input
                      if e[3][e[0] - 1].count(e[2]) + e[3][e[1] - 1].count(e[2]) == 1]
print(len(correct_passwords2))
| StarcoderdataPython |
1741908 | # As we've called out earlier writing loops allows us to get our computer
# to do repetitive work for us. So this is one of the main benefit of writing scripts
# in IT is to save time by automating repetitive tasks, loops are super useful. So
# let's make sure you avoid some of the most common mistakes people make
# when writing loops. One of the most common errors is forgetting to initialize
# variables with the right value. We've all made this mistake when starting to
# code. Remember how in the earlier examples we initialized the variable
# two different things can happen the first possible outcome and the easiest to
# catch is that Python might raise an error telling us that we're using a variable
# we haven't defined, which looks like this. As we've done with other errors we've
# come across, we can look at the last line to understand what's going on. This
# error type is a name error and the message that comes after it says we're using
# an undefined variable. It's straightforward to fix, we just need to initialize the
# variable before using it like this,
# Broken example for the lesson: ``my_variable`` is read before it is
# defined, so Python raises NameError here.
# NOTE(review): intentionally left broken for teaching; running this file
# as-is stops at this loop.
while my_variable < 10:
    print("Hello")
    my_variable += 1
# Fixed version: the counter is initialized before the loop condition
# reads it, so "Hello" is printed five times.
my_variable = 5
while my_variable < 10:
    print("Hello")
    my_variable += 1
# Fixed. Now, there's second issue we might face if we forget to initialize
# variables with the right value. We might have already used the variable in our
# program. In this case, if we reuse the variable without setting the correct value
# from the start, it will still have the value from before. This can lead to some
# pretty unexpected behavior.
# First block: correctly initialized — sums the integers 1 through 9.
x = 1
sum = 0  # NOTE(review): shadows the built-in ``sum`` (kept for the lesson)
while x < 10:
    sum += x
    x += 1
# Second block: ``x`` is still 10 from the loop above, so this loop never
# runs and ``product`` stays 1 — the intentional re-initialization bug the
# surrounding lesson discusses.
product = 1
while x < 10:
    product = product * x
    x += 1
print(sum, product)
# Check out the script, can you spot the problem? In
# the first block, we correctly initialize x to 1 and sum to 0 and then iterate until x
# equals 10 summing up all the values in between. So by the end of that block,
# sum equals the result of adding all the numbers from 1 up to (but not including) 10 and x is 10. In the
# second part of the code, the original intention was to get that we're initializing
# product but forgetting to initialize x. So x is still 10, this means that when the
# while condition gets checked, x is already 10 at the start of the iteration. The
# while condition is false before it even starts and the body never executes. Let's
# see how this problem would look.
# In this case, it might be harder to catch the problem because python doesn't
# raise an error. The problem here is that our product variable has the wrong
# value. If you have a loop that's gone rogue and not behaving as expected, it's a
# good idea to check if all the variables are correctly initialized. In this example,
# we need to set x back to 1 before starting the second loop. As always ,the best
# way to learn is to practice it yourself.
# Makes sense? Remember, if you ever feel stuck or a little unsure about
# something you can always ask for help in the discussion forums. These forums
# are there to let you get the help you need when you need it, so don't forget to
# use them. So, to recap, whenever you're writing a loop check that you're
# initializing all the variables you want to use before you use them. And don't
# worry if you don't get it right the first time, we've all been there when learning
# how to code. As we've called out before, the way to master programming is to
# practice, practice, practice. Keep practicing until you're comfortable and even
# then it's still okay to make mistakes. So don't feel like you can't or loop back
# around to review and practice everything we've covered so far.
# In this code, there's an initialization problem that's causing our function to
# behave incorrectly. Can you find the problem and fix it?
def count_down(start_number):
    """Print start_number, start_number - 1, ..., 1 and finally "Zero!"."""
    remaining = start_number
    while remaining > 0:
        print(remaining)
        remaining -= 1
    print("Zero!")


count_down(3)
| StarcoderdataPython |
4550 | <reponame>shane-breeze/AlphaTwirl
# <NAME> <<EMAIL>>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import TaskPackageDropbox
##__________________________________________________________________||
@pytest.fixture()
def workingarea():
    """A MagicMock standing in for a WorkingArea."""
    area = mock.MagicMock()
    return area
@pytest.fixture()
def dispatcher():
    """A MagicMock standing in for a dispatcher."""
    runner = mock.MagicMock()
    return runner
@pytest.fixture()
def obj(workingarea, dispatcher):
    """An opened TaskPackageDropbox, closed again when the test finishes."""
    dropbox = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
    dropbox.open()
    yield dropbox
    dropbox.close()
##__________________________________________________________________||
def test_repr(obj):
    """Smoke test: building the repr must not raise."""
    _ = repr(obj)
def test_open_terminate_close(workingarea, dispatcher):
    # Build the dropbox by hand (not via the ``obj`` fixture) so each
    # lifecycle transition can be observed in isolation.
    obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
    # Nothing has been touched before open().
    assert 0 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count
    # open() opens the working area only.
    obj.open()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count
    # terminate() terminates the dispatcher but keeps the area open.
    obj.terminate()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count
    # close() finally closes the working area.
    obj.close()
    assert 1 == workingarea.open.call_count
    assert 1 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count
def test_put(obj, workingarea, dispatcher):
    # The working area hands out package indices; the dispatcher returns
    # run ids (which put() does not expose).
    workingarea.put_package.side_effect = [0, 1] # pkgidx
    dispatcher.run.side_effect = [1001, 1002] # runid
    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')
    # put() returns the package index assigned by the working area.
    assert 0 == obj.put(package0)
    assert 1 == obj.put(package1)
    # Each package was stored once and dispatched once, in order.
    assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list
def test_put_multiple(obj, workingarea, dispatcher):
    # Same as test_put, but the whole list is dispatched in one
    # run_multiple() call instead of one run() per package.
    workingarea.put_package.side_effect = [0, 1] # pkgidx
    dispatcher.run_multiple.return_value = [1001, 1002] # runid
    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')
    # put_multiple() returns all assigned package indices.
    assert [0, 1] == obj.put_multiple([package0, package1])
    assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list
##__________________________________________________________________||
| StarcoderdataPython |
1790697 | import sqlite3
DB_FILEPATH = ("northwind_small.sqlite3")
conn = sqlite3.connect(DB_FILEPATH)
curs = conn.cursor()
# Begin Part 2 queries
most_expensive_prod = """
SELECT *
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10;
"""
curs.execute(most_expensive_prod)
avg_emp_age = """
SELECT AVG(
CAST((julianday(HireDate) - julianday(BirthDate)) / 365 AS INTEGER))
FROM Employee
"""
curs.execute(avg_emp_age)
avg_emp_age_stretch = """
SELECT AVG(
CAST((julianday(HireDate) - julianday(BirthDate)) / 365 AS INTEGER))
FROM Employee
GROUP BY City
"""
curs.execute(avg_emp_age_stretch)
# Begin Part 3 queries
most_expensive_plus_suppliers = """
SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
FROM Product
JOIN Supplier on Supplier.Id = Product.SupplierId
ORDER BY Product.UnitPrice DESC
LIMIT 10
"""
curs.execute(most_expensive_plus_suppliers)
largest_category = """
SELECT MAX(ProductCount), CategoryName
FROM(SELECT COUNT(DISTINCT ProductName) as ProductCount,
Category.CategoryName as CategoryName
FROM Product
LEFT JOIN Category on Category.Id = Product.CategoryId
GROUP BY CategoryId)
"""
curs.execute(largest_category)
emp_w_most_territories = """
SELECT MAX(MostTerritories) as TerritoryCount, FirstName, LastName
FROM(SELECT COUNT(EmployeeID) as MostTerritories,
Employee.FirstName as FirstName, Employee.LastName as LastName
FROM EmployeeTerritory
LEFT JOIN Employee on Employee.Id = EmployeeTerritory.EmployeeId
)
"""
| StarcoderdataPython |
3391042 | # -*- coding: utf-8 -*-
from shortcuts import *
from utils import default, bv, testValue, equalExcept1BV, verboseIt, evalVec, strList2Str, getIt, testValueVec, ifthenelse, ifthenelseFct, extract1BV
######################################
## UTILS for GRN inference problem ##
######################################
#' Build regulator/interaction dictionaries
#' keys: indices of regulators r of input gene g in the GRN
#' values: indices of associated interactions r->g if optional
#' else a default value is set
#'
#' @param Iset interaction set
#' @param gene gene which regulators shsould be considered
#' @param C set of genes/nodes
#' @param regIntActivators optionally, the partially built
#' activator/interaction dictionary for gene
#' @param regIntRepressors optionally, the partially built
#' repressor/interaction dictionary for gene
#' @param opt logical if set to TRUE the index of the optional
#' interaction corresponding to the regulator identifier (key) is set as value in
#' the dictionary, else a default value is set
#' @return res a list containing the activator/interaction
#' and repressor/interaction dictionaries
def buildInteractionDict(Iset, gene, C, regIntActivators=None, regIntRepressors=None, opt=False):
    """Build activator/repressor -> interaction dictionaries for one gene.

    Scans ``Iset`` for interactions of type ``regulator -> gene`` and records
    the regulator's index in ``C`` as the dictionary key.  The stored value is
    the interaction's index in ``Iset`` when ``opt`` is True (optional
    interactions), otherwise the module-level ``default`` marker.

    @param Iset interaction set: list of [regulator, target, sign] triples,
        with sign "+" (activation) or "-" (repression)
    @param gene gene whose regulators should be considered
    @param C set of genes/nodes (index space for dictionary keys)
    @param regIntActivators optionally, a partially built activator dict to extend
    @param regIntRepressors optionally, a partially built repressor dict to extend
    @param opt logical; if True store the interaction index, else ``default``
    @return [regIntActivators, regIntRepressors]
    """
    ## Initialize each dictionary independently (and with "is None" instead
    ## of "== None"): previously both were created only when *both* were
    ## None, so passing just one of them crashed with AttributeError on the
    ## other.
    if regIntActivators is None:
        regIntActivators = dict()
    if regIntRepressors is None:
        regIntRepressors = dict()
    for i, (regulator, out, sign) in enumerate(Iset):
        ## Only interactions of type r -> gene are relevant ##
        if out == gene:
            idx = C.index(regulator)
            ## value is the index of the interaction if optional ##
            value = i if opt else default
            if sign == "+":
                regIntActivators.setdefault(idx, value)
            else:
                regIntRepressors.setdefault(idx, value)
    return [regIntActivators, regIntRepressors]
######################################
## SMT-CONDITIONS ##
######################################
## Condition: is Rs[c] == i for a given node c ##
testRS = lambda rs, i : rs == BitVecVal(i, 5)
## Returns the string associated with a state variable ##
## for step n in experiment exp ##
getState = lambda n, exp : (("q_step%s_" % n) + "%s") % exp
## Returns bit-vector selecting value x[idx] ##
## for x of size ngenes ##
getCState = lambda x, idx, ngenes : x & bv(idx, ngenes)
#_______________________________#
# CONDITIONS ON uniqueness #
#_______________________________#
#' Build condition for interaction selection uniqueness
#' for model solutions
#'
#' @param s solver
#' @param intVar bit-vector variable associated with
#' selected optional interactions
#' @param model a solution model found be the solver
#' @return cond condition (BoolRef object) for interaction
#' uniqueness
def diff_interactions(s, intVar, model):
    """Constraint forcing the interaction-selection variable to differ from
    its value in ``model`` (used to enumerate distinct solutions).

    NOTE(review): only ``intVar[0]`` is inspected, and when the model left it
    unassigned the fallback compares the variable with itself, yielding an
    unsatisfiable constraint; ``s`` is unused — TODO confirm intended.
    """
    m = intVar[0] if (model[intVar[0]]==None) else model[intVar[0]]
    return(intVar[0] != m)
#' Build condition of uniqueness for other cases than interaction
#' selection (i.e. difference in regulation conditions or in
#' experiment paths)
#'
#' @param s solver
#' @param var variable list on which uniqueness should be applied
#' (i.e. list of selected regulation conditions, list of instancied states)
#' @param model a model solution returned by the solver
#' @return cond uniqueness condition on variables in @var
def different(s, var, model):
    """Constraint that at least one variable in ``var`` takes a value
    different from its assignment in ``model`` (solution-uniqueness for
    regulation conditions or experiment paths).

    NOTE(review): parameter ``s`` is unused here.
    """
    cond = False
    for v in var:
        # Z3 Or() of the per-variable disequalities.
        cond = Or(cond, v != model[v])
    return(cond)
#_______________________________#
# CONDITIONS ON experiments #
#_______________________________#
#' Build the constraint on the known values of states
#' according to the experiment file
#'
#' @param s solver
#' @param q state variable list associated with one given experiment
#' @param n step in which a given state value is known
#' @param C set of genes/nodes
#' @param gene gene which state value is known
#' @param value value that appears in the experimental constraints
#' @return s updated solver with condition q[n](gene) == value
def experiment_condition(s, q, n, C, gene, value):
    """Pin an experimentally observed value: assert that in state ``q[n]``
    the bit of ``gene`` (at index C.index(gene)) equals ``value`` (0/1)."""
    s.add(testValue(q[n], C.index(gene), BitVecVal(value, 1)))
    return(s)
#____________________________#
# CONDITIONS ON regulation #
#____________________________#
#' Build condition on allowed regulation conditions
#'
#' @param s solver
#' @param Rs regulation condition variable list
#' @param R list of allowed regulation condition list for each gene/node
#' @param ngenes number of nodes in the abstract model
#' @return s updated solver with conditions that strictly restrict the values
#' of a regulation condition variable to ONE of the allowed values for each gene/node
def regulation_condition(s, Rs, R, ngenes):
    """Restrict each gene's regulation-condition variable ``Rs[c]`` to one of
    the allowed rule numbers listed in ``R[c]``."""
    for c in range(ngenes):
        # Disjunction over the allowed rule numbers for gene c.
        cond = False
        for r in R[c]:
            cond = Or(cond, testRS(Rs[c], r))
        s.add(cond)
    return(s)
#_______________________________#
# CONDITIONS ON perturbations #
#_______________________________#
#' Build perturbation condition
#'
#' @param s solver
#' @param SETP list of (associated experiment, known perturbations) pairs in the experiment file
#' @param setp perturbation on which the condition should be generated
#' @param exp_names list of all experiment names in the experiment file
#' @param SETEXPR list of (gene, value) pairs in the experiment
#' @param typeP type of perturbation for the gene/node of interest
#' @return s updated solver with conditions that definitely set the known values of all perturbed gene
#' variables in a given experiment
def perturbation_condition(s, SETP, setp, exp_names, SETEXPR, typeP):
    """Pin the known perturbation values from the experiment file.

    For every (experiment name ``e``, perturbation list ``p``) in ``SETP``,
    look up the experiment's perturbation bit-vector in ``setp`` and force
    the bit of each perturbed gene to its recorded value.
    """
    for [e, p] in SETP:
        p_e = setp[exp_names.index(e)]
        for pp in p:
            # Gene index is recovered from the "typeP(gene)" annotation.
            g = SETEXPR.index(getIt(pp[1], typeP + "(", ")"))
            x = pp[2]
            s.add(testValue(p_e, g, BitVecVal(x, 1)))
    return(s)
#' Creates an equivalent Bit Vector of size #nodes
#' from a bit-vector of size #perturbed genes
#'
#' @param s solver
#' @param pert_e bit-vector of size #perturbed genes
#' @param chi dictionary of keys: perturbed gene indices,
#' values: known value of gene states
#' @param name name to give to the new bit-vector perturbation variable
#' @param ngenes number of genes/nodes in the abstract model
#' @return res pair of bit-vector variable and updated solver
def pert2full(s, pert_e, chi, name, ngenes):
    """Expand a perturbation bit-vector over perturbed genes only into an
    equivalent full-width (``ngenes``) bit-vector named ``name``.

    ``chi`` maps perturbed-gene indices to their positions/values in
    ``pert_e``; all non-perturbed genes are forced to 0.
    Returns [new bit-vector, updated solver].
    """
    pert = BitVec(name, ngenes)
    for i in range(ngenes):
        ## Non-perturbed genes ##
        if (not (i in chi.keys())):
            pert = pert  # (no-op comment anchor removed)
    return([pert, s])
#_______________________________#
# CONDITIONS ON regulators #
#_______________________________#
#' Build condition on the regulators in the model
#' depending on the selected optional interactions
#'
#' @param s solver
#' @param Is bit-vector of selected interactions
#' @param regulators list of present regulator variables (activators/repressors) for each node
#' @param regInt list of regulator/interaction dictionaries for each node
#' @param default default value present in @regInt
#' @return cond condition Is[regInt[i]] == regulators[i] for all i regInt[i]!=default
def regulators_condition(s, Is, regulators, regInt, default):
    """Tie present-regulator bit-vectors to the interaction-selection vector:
    Is[regInt[i]] == regulators[i] for every entry where regInt[i] != default
    (delegated to utils.evalVec)."""
    return(evalVec(s, Is, regulators, regInt, default))
#_______________________________#
# CONDITIONS ON transitions #
#_______________________________#
#' Build transition condition
#'
#' @param i regulation condition number
#' @param g gene on which the transition condition should be built
#' @param q1 output state variable of the transition
#' @param res wrapper for old state updated by input rule
#' @param ngenes number of genes
#' @param pcg pre-computation associated with gene g, respectively terms
#' allActivators, allRepressors, noRepressors, notAllActivators, notnoActivators
#' notnoRepressors, threshold regulation condition
#' @return cond transition condition q1[g] == res(rule_i)[g] \in 0, 1
def aux_transition(i, g, q1, res, ngenes, pcg):
    """Transition condition for gene ``g`` under regulation rule ``i``:
    bit g of the next state ``q1`` equals bit g of ``res(rule_i)``.

    ``pcg`` is the precomputation tuple for gene g: allActivators,
    allRepressors, noActivators, noRepressors, notAllActivators,
    notAllRepressors, notNoActivators, notNoRepressors and the threshold
    regulation condition.
    """
    [aA, aR, nA, nR, naA, naR, nnA, nnR, gc] = pcg
    # rule_i evaluates to the single-gene mask when the rule fires, else 0.
    rulei = If(diCompute.get(i)(aA, aR, nA, nR, naA, naR, nnA, nnR, gc), bv(g, ngenes), buildZERO(ngenes))
    return(getCState(q1, g, ngenes) == getCState(res(rulei), g, ngenes))
#' Build synchronous transition condition
#'
#' @param Rs regulation condition variable list
#' @param g gene on which the transition condition should be built
#' @param i regulation condition number
#' @param auxtr function that builds the transition condition part on gene values
#' @return cond condition Rs[g] == i => gene value condition
def transition_condition_sync(Rs, g, i, auxtr):
    """Synchronous transition for gene ``g``: if rule ``i`` is the selected
    regulation condition (Rs[g] == i), the gene-value condition holds."""
    return(Implies(testRS(Rs[g], i), auxtr(i)))
#' Build asynchronous transition condition
#'
#' @param Rs regulation condition variable list
#' @param g gene on which the transition condition should be built
#' @param i regulation condition number
#' @param q0 input state variable for transition
#' @param q1 output state variable for transition
#' @param auxtr function that builds the transition condition part on gene values
#' @return cond condition q0[g'] == q1[g'] for g'!=g => (Rs[g] == i => gene value condition)
def transition_condition_async(Rs, g, i, q0, q1, auxtr):
    """Asynchronous transition for gene ``g``: when q0 and q1 agree on every
    gene except g, the synchronous condition for g applies."""
    return(Implies(equalExcept1BV(q0, q1, g), transition_condition_sync(Rs, g, i, auxtr)))
#' Build the full transition condition for all genes at a given step of the experiment
#'
#' @param s solver
#' @param n step of the experiment where q0 -> q1
#' @param length maximal length of experiment
#' @param q0 input state variable for transition
#' @param q1 output state variable for transition
#' @param preComputation list of precomputed terms for each node
#' @param ngenes number of genes/nodes
#' @param R list of allowed regulation conditions for each node
#' @param Rs list of regulation condition variables for each node
#' @param res wrapper for input state variable according to selected regulation condition
#' @param typeT type of transition, either "asynchronous" or "synchronous"
#' @param regulators list of bit-vectors variables associated with present regulators for each node
#' @return s updated solver with conditions q0 -> q1
def transition_condition(s, n, length, q0, q1, preComputation, ngenes, R, Rs, res, typeT, regulators):
    """Add the full transition condition q0 -> q1 for all genes at one step.

    ``typeT`` selects "asynchronous" (exactly one gene may change, enforced
    by the final condAsync disjunction) or "synchronous" (all genes update).
    Returns the updated solver, or None on an unknown transition type.
    """
    condAsync = False
    for g in range(ngenes):
        pcg = preComputation[g]
        # Rules 0..17 use the precomputed terms; rules 18/19 are the
        # threshold rules evaluated directly on q0 and gene g's regulators.
        auxtrr = lambda i : aux_transition(i, g, q1, res, ngenes, pcg)
        [a, r] = regulators[g]
        auxtr17 = lambda i : ifthenelse(i==18, rule18(q0, a, r, g), rule19(q0, a, r, g))
        for i in R[g]:
            ## Different auxiliary functions are used to generate transition condition ##
            ## when threshold rules are selected ##
            auxtr = ifthenelse(i<18, auxtrr, auxtr17)
            if (typeT == "asynchronous"):
                cond = transition_condition_async(Rs, g, i, q0, q1, auxtr)
            elif (typeT == "synchronous"):
                cond = transition_condition_sync(Rs, g, i, auxtr)
            else:
                print("ERROR: Wrong transition type.")
                return(None)
            s.add(cond)
        if (typeT == "asynchronous"):
            # At least one gene position must satisfy "all others equal".
            condAsync = Or(condAsync, equalExcept1BV(q0, q1, g))
    if (typeT == "asynchronous"):
        s.add(condAsync)
    return(s)
#' Build the full transition condition for all genes for several steps of the experiment
#'
#' @param s solver
#' @param expname name of the current experiment (only for printing purposes)
#' @param prepreComputation list of pre-precomputed terms for each node
#' @param startstep starting step of transition
#' @param endstep ending step of transition
#' @param q list of state variables associated with current experiment
#' @param typeT type of transition, either "asynchronous" or "synchronous"
#' @param length maximal length of experiment
#' @param regulators list of bit-vectors variables associated with present regulators for each node
#' @param ngenes number of genes/nodes
#' @param R list of allowed regulation conditions for each node
#' @param Rs list of regulation condition variables for each node
#' @param res wrapper for input state variable according to selected regulation condition
#' @param verbose logical if set to TRUE prints status messages
#' @return s updated solver with conditions q_startstep -> q_(startstep+1) -> ... -> q_endstep
def to_next_state(s, expname, prepreComputation, startstep, endstep, q, typeT, length, regulators, ngenes, R, Rs, res, verbose):
    """Chain transition conditions q[startstep] -> ... -> q[endstep].

    For typeT == "fixpoint", additionally force q[i] == q[i+1] on every step
    and then fall back to synchronous transition semantics.  When
    startstep == endstep a self-transition q -> q is added instead.
    """
    if (typeT == "fixpoint"):
        for i in range(startstep, endstep):
            verboseIt("+++++++ FIXPOINT STEP #" + str(i) + " TO #" + str(i+1), verbose)
            verboseIt("@ " + getState(i, expname) + " = " + getState(i+1, expname), verbose)
            s.add(q[i] == q[i+1])
    # Fixpoints are expressed as synchronous transitions plus the equalities
    # added above.
    typeT = ifthenelse(typeT == "fixpoint", "synchronous", typeT)
    for i in range(startstep, endstep):
        verboseIt("******* TRAJECTORY STEP #" + str(i) + " TO #" + str(i+1), verbose)
        verboseIt("@ T(" + getState(i, expname) + ", " + getState(i+1, expname)
            + ") <=> " + typeT + " transition", verbose)
        # Per-step precomputation depends on the current state q[i].
        preComputation = [preCompute(q[i], prepreComputation[ci]) for ci in range(ngenes)]
        s = transition_condition(s, i, length, q[i], q[i+1], preComputation, ngenes, R, Rs, res, typeT, regulators)
    if (startstep == endstep):
        preComputation = [preCompute(q[endstep], prepreComputation[ci]) for ci in range(ngenes)]
        s = transition_condition(s, endstep, length, q[endstep], q[endstep],
            preComputation, ngenes, R, Rs, res, typeT, regulators)
    return(s)
#_______________________________#
# CONDITIONS ON interactions #
#_______________________________#
#' Build optional condition on the number of selected interactions
#'
#' @param s solver
#' @param interaction_limit integer of the maximum number of selected
#' interactions
#' @param Iopt list of optional interactions
#' @param Is bit-vector of selected interactions
#' @return s updated solver with condition on the maximum number of
#' selected interactions
def interaction_condition(s, interaction_limit, Iopt, Is):
    """Bound the number of selected optional interactions: unsigned
    sub(Is) < interaction_limit + 1, i.e. at most ``interaction_limit``.

    NOTE(review): ``sub`` comes from shortcuts and presumably counts the set
    bits of ``Is`` — confirm against its definition.
    """
    s.add(UGT(BitVecVal(interaction_limit+1, len(Iopt)), sub(Is)))
    return(s)
#' Implements optional condition "this gene must have at least one activator"
#'
#' @param s solver
#' @param activators bit-vector associated with present activators for a given gene
#' @param ngenes number of genes
#' @return s updated solver with condition
def mustHaveActivator_condition(s, activators, ngenes):
    """Require at least one present activator for a gene: its activator
    bit-vector must be unsigned-greater-than zero."""
    s.add(UGT(activators, buildZERO(ngenes)))
    return(s)
#' Implements conditions on regulatory modules:
#' a TF->RM interaction is selected <-> corresponding RM->gene interaction is selected
#'
#' @param s solver
#' @param Is bit-vector variable associated with selected optional interactions
#' @param Idef set of definite interactions
#' @param Iopt set of optional interactions
#' @param CRM set of regulatory modules
#' @param C set of genes/nodes
#' @return s updated solver with conditions on regulatory modules
def crmInteractions_condition(s, Is, Idef, Iopt, CRM, C):
    """Couple TF->RM and RM->gene interaction selection: a TF->RM interaction
    is selected iff the corresponding RM->gene interaction is selected.

    NOTE(review): ``Iopt[i][1] == CRM[C.index(e[1])]`` compares an interaction
    target against the whole CRM entry for that gene — this only matches if
    CRM stores the module identifier itself; confirm the CRM data shape.
    """
    for e in Idef:
        ## Interaction TF->RM is definite ##
        if (len(CRM[C.index(e[1])]) > 0):
            idx = ifthenelseFct(lambda i : Iopt[i][0]==e[1] and Iopt[i][1]==CRM[C.index(e[1])],
                                lambda i : i, Iopt)
            if (len(idx) > 0):
                idx = idx[0]
                ## Selects automatically the associated RM->gene ##
                s.add(extract1BV(Is, idx) == 1)
    for i in range(len(Iopt)):
        e = Iopt[i]
        ## Interaction RM->gene is optional ##
        if (len(CRM[C.index(e[0])]) > 0):
            ## Finds all associated TF->RM interactions ##
            idx = ifthenelseFct(lambda i : Iopt[i][1]==e[1], lambda i : i, Iopt)
            # RM->gene is selected iff at least one feeding TF->RM is.
            condAllNot = True
            for ii in idx:
                condAllNot = And(condAllNot, extract1BV(Is, ii) == 0)
                s.add(Implies(extract1BV(Is, ii) == 1, extract1BV(Is, i) == 1))
            s.add(Implies(condAllNot, extract1BV(Is, i) == 0))
    return(s)
#_______________________________#
# CONDITIONS ON fix points #
#_______________________________#
#' Build conditions on fix points
#'
#' @param s solver
#' @param expname current experiment name (only for printing purposes)
#' @param prepreComputation list of pre-precomputed terms for each node
#' @param sstep step at which starts the fix point condition
#' @param q list of state variables associated with current experiment
#' @param typeT type of transition, either "asynchronous" or "synchronous"
#' @param length maximum length of experiment
#' @param regulators list of present regulator bit-vector for each gene
#' @param ngenes number of genes/nodes
#' @param R list of allowed regulation conditions for each node
#' @param Rs list of selected regulation condition bit-vector for each node
#' @param res wrapper for input state transition
#' @param verbose logical for printing messages
#' @return s updated solver with conditions on fix points from step sstep to
#' the end of the experiment
def fixpoint_condition(s, expname, prepreComputation, sstep, q, typeT, length, regulators, ngenes, R, Rs, res, verbose):
    """Impose fix-point behavior from step ``sstep`` to the end of the
    experiment by delegating to to_next_state with the "fixpoint" transition
    type (no-op when sstep is already past the last step)."""
    if (sstep < length+1):
        s = to_next_state(s, expname, prepreComputation, sstep, length, q,
                          "fixpoint", length, regulators, ngenes, R, Rs, res, verbose)
    return(s)
| StarcoderdataPython |
182199 | <gh_stars>0
#!/usr/bin/env python
import os
import sys
import argparse
from math import log,pow
from data_tools.lib.files import findNumber,ParameterParser
from data_tools.lib.group import Group,run_grouping
class MeanGroup(Group):
    """Accumulates a per-column arithmetic mean, optionally weighted by bins."""
    def __init__(self, tup):
        super(MeanGroup, self).__init__(tup)
        n = len(args.columns)
        self.sums = [0] * n
        self.count = [0] * n
        # Bind the right accumulator once instead of branching per row.
        self.add = self.addBin if args.bins is not None else self.addVal
    def addVal(self, chunks):
        # Unweighted: each row contributes its value with weight 1.
        for i, col in enumerate(args.columns):
            self.sums[i] += findNumber(chunks[col])
            self.count[i] += 1
    def addBin(self, chunks):
        # Weighted: the bin column supplies the weight for each value.
        for i, col in enumerate(args.columns):
            weight = findNumber(chunks[args.bins[i]])
            self.sums[i] += weight * findNumber(chunks[col])
            self.count[i] += weight
    def done(self):
        means = [total / cnt for total, cnt in zip(self.sums, self.count)]
        args.outfile.write(self.tup + means)
class GeometricGroup(Group):
    """Accumulates a per-column geometric mean, optionally weighted by bins."""
    def __init__(self, tup):
        super(GeometricGroup, self).__init__(tup)
        self.sums = [1]*len(args.columns)   # running products per column
        self.count = [0]*len(args.columns)  # running counts (or bin weights)
        self.add = self.addVal if args.bins is None else self.addBin
    def addVal(self, chunks):
        # Unweighted: multiply the value in, count one observation.
        for i,c in enumerate(args.columns):
            self.sums[i] *= findNumber(chunks[c])
            self.count[i] += 1
    def addBin(self, chunks):
        # Weighted: raise the value to its bin weight and accumulate the weight.
        for i,c in enumerate(args.columns):
            b = findNumber(chunks[args.bins[i]])
            self.sums[i] *= pow(findNumber(chunks[c]), b)
            self.count[i] += b
    def done(self):
        # BUGFIX: use 1.0 / c, not 1 / c.  Under Python 2 integer division,
        # 1 / c is 0 for any count > 1, which made every geometric mean
        # collapse to pow(s, 0) == 1.  On Python 3 this change is a no-op.
        args.outfile.write(self.tup + [pow(s, 1.0 / c) for s,c in zip(self.sums, self.count)])
class InvertedMeanGroup(Group):
    """Weighted mean where each value's weight is inversely proportional to its bin count."""
    def __init__(self, tup):
        super(InvertedMeanGroup, self).__init__(tup)
        # NOTE: removed an unused `from collections import defaultdict`
        # that was previously imported here and never referenced.
        # One list of (value, weight) pairs per requested column.
        self.vals = [[] for _ in range(len(args.columns))]
        self.add = self.addVal if args.bins is None else self.addBin
    def addVal(self, chunks):
        # Without bins the value doubles as its own weight.
        for i,c in enumerate(args.columns):
            v = findNumber(chunks[c])
            self.vals[i].append((v, v))
    def addBin(self, chunks):
        for i,c in enumerate(args.columns):
            self.vals[i].append((findNumber(chunks[c]), findNumber(chunks[args.bins[i]])))
    def done(self):
        import numpy as np
        m = []
        for val in self.vals:
            s = [v[0] for v in val]
            c = np.array([v[1] for v in val])
            total = np.sum(c)       # hoisted: was recomputed three times per column
            inv = total / c         # raw weights, inversely proportional to the bin
            w = inv / np.sum(inv)   # normalized so the weights sum to 1
            m.append(np.dot(s, w))
        args.outfile.write(self.tup + m)
if __name__ == "__main__":
    # CLI entry point: parse arguments, derive output labels, and stream the
    # grouped means through run_grouping.
    pp = ParameterParser('Compute mean of columns', columns = '*', labels = [None])
    pp.parser.add_argument('-b', '--bins', default = None, nargs='+')
    pp.parser.add_argument('-e', '--geometric', action='store_true', default=False, help='compute geometric mean')
    pp.parser.add_argument('-i', '--invert', action='store_true', default=False, help='invert the bins per value in the mean')
    args = pp.parseArgs()
    if not any(args.labels):
        # Derive output column labels from the input column names.
        args.labels = [cn + ('_gmean' if args.geometric else '_mean') for cn in args.columns_names]
    args = pp.getArgs(args)
    if args.bins is not None:
        # Translate bin column names into column indexes of the input header.
        args.bins = args.infile.header.indexes(args.bins)
    if args.geometric and args.invert:
        raise ValueError('Cannot specify both --geometric and --invert')
    # Pick the accumulator class matching the requested mean flavor.
    if args.geometric:
        cls = GeometricGroup
    elif args.invert:
        cls = InvertedMeanGroup
    else:
        cls = MeanGroup
    run_grouping(args.infile, cls, args.group, args.ordered)
| StarcoderdataPython |
1729 | from server import roles
def hasRole(member, roleID):
    """Return True when *member* currently carries the guild role with id *roleID*."""
    return member.guild.get_role(roleID) in member.roles
def gainedRole(before, after, roleID):
    """Return True when role *roleID* is absent on *before* but present on *after*."""
    role = before.guild.get_role(roleID)
    return role not in before.roles and role in after.roles
def isExplorer(ctx):
    # Command author holds the configured "explorer" role (ids from server.roles).
    return hasRole(ctx.author, roles["explorer"])
def isNetwork(ctx):
    # Command author holds the configured "network" role.
    return hasRole(ctx.author, roles["network"])
def isLeader(ctx):
    # Command author holds the configured "leader" role.
    return hasRole(ctx.author, roles["leader"])
def isAdmin(ctx):
    # Command author holds the configured "admin" role.
    return hasRole(ctx.author, roles["admin"])
def isBot(ctx):
    # Command author holds the configured "bot" role.
    return hasRole(ctx.author, roles["bot"])
class Colours:
    # Named embed/message colors as decimal RGB integers
    # (e.g. AQUA = 1752220 = 0x1ABC9C).  Appears to mirror the standard
    # Discord color palette — confirm against the library in use.
    DEFAULT = 0
    AQUA = 1752220
    GREEN = 3066993
    BLUE = 3447003
    PURPLE = 10181046
    GOLD = 15844367
    ORANGE = 15105570
    RED = 15158332
    GREY = 9807270
    DARKER_GREY = 8359053
    NAVY = 3426654
    DARK_AQUA = 1146986
    DARK_GREEN = 2067276
    DARK_BLUE = 2123412
    DARK_PURPLE = 7419530
    DARK_GOLD = 12745742
    DARK_ORANGE = 11027200
    DARK_RED = 10038562
    DARK_GREY = 9936031
    LIGHT_GREY = 12370112
    DARK_NAVY = 2899536
    LUMINOUS_VIVID_PINK = 16580705
    DARK_VIVID_PINK = 12320855
| StarcoderdataPython |
3322509 | # https://leetcode.com/problems/largest-rectangle-in-histogram/
# Given an array of integers heights representing the histogram's bar height where
# the width of each bar is 1, return the area of the largest rectangle in the
# histogram.
################################################################################
# for each ele, find the frist ele on left and right that is smaller
# use stack to find numbers faster
class Solution:
    def largestRectangleArea(self, heights: List[int]) -> int:
        """Return the area of the largest rectangle that fits in the histogram.

        Monotonic-stack solution, O(n) time / O(n) space: the stack keeps
        indices of bars with non-decreasing heights; when a lower bar arrives,
        every taller bar on the stack has found its right boundary.

        BUGFIX: the previous version did `heights.append(0)`, permanently
        mutating the caller's list.  A virtual zero-height sentinel is used
        instead, so the input is left untouched.
        """
        n = len(heights)
        best = 0
        stack = []  # indices of bars with non-decreasing heights
        for i in range(n + 1):
            cur = heights[i] if i < n else 0  # virtual 0-height bar flushes the stack
            while stack and heights[stack[-1]] > cur:
                height = heights[stack.pop()]
                left = stack[-1] if stack else -1  # nearest shorter bar on the left
                best = max(best, height * (i - left - 1))
            stack.append(i)
        return best
| StarcoderdataPython |
4840286 | from .shapenet import *
from .modelnet import *
from .scan2cad import *
from .shrec import *
from .scannet import *
| StarcoderdataPython |
3353808 | from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
import olympia.core.logger
from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile
class Command(BaseCommand):
    # Django management command: `manage.py <cmd> <user> <group_id>`.
    help = 'Add a new user to a group.'
    log = olympia.core.logger.getLogger('z.users')
    def add_arguments(self, parser):
        # Python 2 code base: `unicode` is the builtin text type here.
        parser.add_argument('user', type=unicode, help='User id or email')
        parser.add_argument('group_id', type=int, help='Group id')
    def handle(self, *args, **options):
        # Perform the change first; log/echo only after it succeeded.
        do_adduser(options['user'], options['group_id'])
        msg = 'Adding {user} to {group}\n'.format(
            user=options['user'], group=options['group_id'])
        self.log.info(msg)
        self.stdout.write(msg)
def do_adduser(user, group):
    # Resolve *user* (email if it contains '@', otherwise numeric pk) and add
    # it to the group with pk *group*.  Raises CommandError on unknown input,
    # missing user/group, or duplicate membership.
    try:
        if '@' in user:
            user = UserProfile.objects.get(email=user)
        elif user.isdigit():
            user = UserProfile.objects.get(pk=user)
        else:
            raise CommandError('Unknown input for user.')
        group = Group.objects.get(pk=group)
        GroupUser.objects.create(user=user, group=group)
    except IntegrityError, e:  # Python 2 except syntax: duplicate membership
        raise CommandError('User is already in that group? %s' % e)
    except UserProfile.DoesNotExist:
        raise CommandError('User ({user}) does not exist.'.format(user=user))
    except Group.DoesNotExist:
        raise CommandError('Group ({group}) does not exist.'
                           .format(group=group))
| StarcoderdataPython |
192244 | """Crie um programa que leia um número inteiro e mostre na tela se ele é PAR ou ÍMPAR."""
num = int(input('Digite um número: '))
resultado = num % 2
if resultado == 0:
print('O NUMERO {} É PAR'.format(num))
else:
print('O NUMERO {} É IMPAR'.format(num))
| StarcoderdataPython |
1625087 | <filename>skills/echo.py
from typing import Text
from linebot.models import TextSendMessage
from models.message_request import MessageRequest
from skills import add_skill
@add_skill('{not_match}')
def get(message_request: MessageRequest):
    # Registered under '{not_match}' — presumably the fallback slot when no
    # other skill matches (TODO confirm in skills registry).  Echoes the
    # incoming text back as a single LINE text reply.
    return [
        TextSendMessage(text=f'You said: {message_request.message}')
    ]
| StarcoderdataPython |
1700634 | from datetime import date
# Military-enlistment helper (pt-BR): asks sex and birth year, then reports
# whether enlistment (at age 18) is overdue, upcoming, or due this year.
print('\033[:33m=====\033[1:34mBem vindo ao serviço militar faça o seu registro\033[:33m=====\033[m')
print('\033[1mRegistro')
sexo = str(input('Qual é o seu sexo? ')).strip()
if sexo.lower() == 'masculino':
    ano = int(input('\033[1:32mQual é o seu ano de nascimento?\033[m '))
    atual = date.today().year
    idade = atual - ano  # age reached in the current calendar year
    print('Você tem {} anos em {}. '.format(idade, atual))
    if idade > 18:
        # Enlistment year already passed.
        print('Já se passaram {} ano(s) do tempo que devia se alistar.'.format(idade - 18))
        print('O seu alistamento foi em {}.'.format(atual - (idade - 18)))
    elif idade < 18:
        # Enlistment year still ahead.
        print('Ainda falta {} ano(s) para o seu alistamento.'.format(18 - idade))
        print('O seu alistamento será em {}.'.format(atual + (18 - idade)))
    else:
        print('Está na alutura certa de alistar, aliste-se \033[1:31mAGORA.')
else:
    # Any answer other than 'masculino' skips the enlistment computation.
    print('Você não é obrigada a realizar o alistamento.')
| StarcoderdataPython |
3294308 | <reponame>Abluceli/ConnectSix
import random
from .bot_base import Bot
class RandomBot(Bot):
    """Example bot that picks a uniformly random board cell each turn."""

    def __init__(self, dim, name='random_bot'):
        super().__init__(dim, name)

    def choose_action(self, state):
        """Ignore *state* and return a random (x, y) inside the dim x dim board."""
        col = random.randrange(self.dim)
        row = random.randrange(self.dim)
        return col, row
1636527 | <filename>utils/config.py
class Config(object):
    # Dataset path layout (relative to the source tree) and target image shape.
    DATA_BASE_PATH = '../data'
    # Raw labeling-tool exports.
    RAW_PATH = '../data/LabelingTool/'
    DATA_IMAGE_PATH = DATA_BASE_PATH + '/Images'
    DATA_MASK_PATH = DATA_BASE_PATH + '/Masks'
    # (height, width) — TODO confirm axis order with consumers.
    SHAPE = (512,768)
1603998 | <filename>chapter4.py
# Condition
# --- Tutorial script: conditionals and loops.  Several input() calls make it
# --- interactive; each section is independent of the previous one.
num = 100
if num % 2 == 0:
    print("Even Number")
print("Thank You")
num = input("Please enter a number : ")
num = int(num)
if num % 2 == 0:
    print("Even Number")
    print("Thank You")
else:
    print("Odd Number")
    print("Come Again")
num = input("Please enter a number : ")
num = int(num)
if num == 50:
    print("Half Century")
elif num == 100:
    print("Century")
elif num > 100:
    print("Century +")
else:
    print("Unknown number")
# Logical operators AND OR
num = 6
if num >= 3 and num < 5:
    print('3 to 5')
else :
    print('5 +')
num = -2
if num >= 3 or num == -2:
    print('3 + or -2')
#Compare String
name1 = input("Enter name 1 : ")
name2 = input("Enter name 2 : ")
if name1.lower() == name2.lower():
    print("Same Name")
else:
    print("Name doesn't match")
# Not equals to
name ="Unknown Person"
if name != '<NAME>':
    print(name)
# Nested condition
x= 5
if x < 2:
    print('less than 2')
else:
    if x == 3:
        print('x is 3')
    else :
        if x == 5:
            print('x is 5')
# Iteration
# while loop
'''
while condition:
    body
'''
x =1
while x <= 500:
    print(x)
    x += 10
# "Infinite" loop — actually bounded by the break below once x > 1000
x = 1
while True:
    print(x)
    x += 10
    if x > 1000:
        break
# Omit even number 1 to 20
x= 0
while x < 20:
    x += 1
    if x % 2 == 0:
        continue
    print(x)
# for loop
'''
for element in iterable:
    body
'''
# sum 1 to 10 (NOTE: range(1, 22) actually sums 1..21 — header is stale)
sum = 0
for num in range(1, 22):
    print(num)
    sum += num
print("Sum is {sum}".format(sum=sum))
# string characters by for loop
title = "<NAME>"
for char in title:
    print(char)
| StarcoderdataPython |
192359 | <reponame>rafaelbarretomg/Curso-Python-3<filename>Exercicios/mundo2-exercicios-36-71/ex066.py<gh_stars>0
# Exercise: read several integers from the keyboard.  The program only stops
# when the user types 999, which is the stop condition (flag).  At the end,
# show how many numbers were typed and their sum (not counting the 999 flag).
n = s = c = 0  # n: current input, s: running sum, c: count of accepted inputs
while True:
    n = int(input('Digite um numero [999 para parar]: '))
    if n == 999:
        break
    s += n
    c += 1
print(f'A soma dos numeros eh: {s}')
print(f'A quantidade de numero digitada foi: {c}')
| StarcoderdataPython |
1796755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema migration for the `assessments` app.  Auto-generated by
    # Django; do not hand-edit operations once this has been applied anywhere.
    dependencies = [
    ]
    operations = [
        # Assessment: a named, versioned instrument, later linked to a
        # Cognitive Atlas task (FK added at the bottom of this migration).
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=250)),
                ('pub_date', models.DateTimeField(verbose_name=b'date published')),
                ('abbreviation', models.CharField(help_text=b'Assessment abbreviation', max_length=250)),
                ('version', models.CharField(help_text=b'version', max_length=10)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Cognitive Atlas concept/task lookups keyed by the external Atlas id.
        migrations.CreateModel(
            name='CognitiveAtlasConcept',
            fields=[
                ('name', models.CharField(max_length=200)),
                ('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
                ('definition', models.CharField(default=None, max_length=200)),
            ],
            options={
                'ordering': ['name'],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CognitiveAtlasTask',
            fields=[
                ('name', models.CharField(max_length=200)),
                ('cog_atlas_id', models.CharField(max_length=200, serialize=False, primary_key=True)),
            ],
            options={
                'ordering': ['name'],
            },
            bases=(models.Model,),
        ),
        # Question: belongs to an Assessment; optionally tagged with a concept.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.CharField(max_length=500)),
                ('label', models.CharField(help_text=b'question unique label', unique=True, max_length=250)),
                ('required', models.BooleanField(default=True, verbose_name=b'Required', choices=[(False, b'Not required'), (True, b'Required')])),
                ('data_type', models.CharField(help_text=b'Data type of the question answer', max_length=200, verbose_name=b'Data Type', choices=[(b'LONGINT', b'Long Integer'), (b'DATETIME', b'Date/Time'), (b'TEXT', b'Text'), (b'INT', b'Integer'), (b'DOUBLE', b'Double')])),
                ('options', models.CharField(default=None, max_length=500)),
                ('assessment', models.ForeignKey(to='assessments.Assessment')),
                ('cognitive_atlas_concept', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Concept', to='assessments.CognitiveAtlasConcept', help_text=b"Concept defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # QuestionOption: scored answer choices shared across questions (M2M).
        migrations.CreateModel(
            name='QuestionOption',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numerical_score', models.IntegerField()),
                ('text', models.CharField(max_length=250)),
                ('questions', models.ManyToManyField(to='assessments.Question')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='assessment',
            name='cognitive_atlas_task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, verbose_name=b'Cognitive Atlas Task', to='assessments.CognitiveAtlasTask', help_text=b"Assessment defined in the <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a>", null=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
4810083 | # Title: 사분면 고르기
# Link: https://www.acmicpc.net/problem/14681
import sys
sys.setrecursionlimit(10 ** 6)  # judge boilerplate; this solution is iterative and does not need it
read_single_int = lambda: int(sys.stdin.readline().strip())  # fast one-integer-per-line reader
def solution(x: int, y: int):
    """Return the quadrant number (1-4) containing the point (x, y).

    The problem guarantees x and y are non-zero, so axes need no handling.
    """
    if x > 0:
        return 1 if y > 0 else 4
    return 2 if y > 0 else 3
def main():
    # Read the point's coordinates (one integer per line) and print its quadrant.
    x = read_single_int()
    y = read_single_int()
    print(solution(x, y))
if __name__ == '__main__':
    main()
1750454 | <reponame>teknogeek/pyRedditWatch
#!/usr/bin/python
import json
from urllib import FancyURLopener
import urllib
import socket
import time
import ssl
import random
import threading
import os
import unidecode
import ConfigParser
genNick = "pyRedditChecker" + str(random.randint(0, 1000))
confParser = ConfigParser.ConfigParser({
'server': 'irc.freenode.net',
'port': 6667,
'useSSL': False,
'isZNC': False,
'nickname': genNick,
'username': genNick,
'realName': genNick,
'password': "",
'zncPass': ""
})
confParser.read(r'./settings.conf')
#Load options from config file#
server = str(confParser.get('irc', 'server'))
port = int(confParser.get('irc', 'port'))
useSSL = bool(confParser.get('irc', 'ssl'))
isZNC = bool(confParser.get('irc', 'isZNC'))
nickname = str(confParser.get('irc', 'nickname'))
username = str(confParser.get('irc', 'username'))
realName = str(confParser.get('irc', 'realName'))
identPass = str(confParser.get('irc', 'identPass'))
zncPass = str(confParser.get('irc', 'zncPass'))
channels = str(confParser.get('irc', 'channels'))
if server == "":
print "====No server set. Defaulting to freenode.===="
server = "irc.freenode.net"
if port == "" or not isinstance(port, int):
print "====No or invalid port set. Defaulting to 6667.===="
port = 6667
if useSSL == "":
print "====SSL not specified. True and False are case sensitive! Defaulting to no.===="
useSSL = False
if isZNC == "":
print "====ZNC not specified. True and False are case sensitive! Defaulting to no.===="
isZNC = False
if nickname == "":
print "====Nickname not specified. Defaulting to pyRedditChecker followed by a random number.===="
nickname = "pyRedditChecker" + str(random.randint(0, 1000))
if username == "":
print "====Username not specified. Defaulting to nickname.===="
username = nickname
if realName == "":
print "====Real name not specified. Defaulting to nickname.===="
realName = nickname
if zncPass == "" and isZNC == True:
print "WARNING: ====ZNC password not specified!!===="
if useSSL == True:
ircSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc = ssl.wrap_socket(ircSocket)
else:
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
irc.connect((server, port))
except Exception as e:
print "Bad hostname or port: " + server + ":" + str(port)
if isZNC == True:
irc.write('PASS %s\r\n' % zncPass)
irc.write('NICK %s\r\n' % nickname)
irc.write('USER %s %s %s %s:%s\r\n' % (username, nickname, nickname, nickname, realName))
else:
time.sleep(1)
irc.send('NICK %s\r\n' % nickname)
irc.send('USER %s %s %s %s:%s\r\n' % (username, nickname, nickname, nickname, realName))
time.sleep(1)
irc.recv(8192)
identString = "PRIVMSG NickServ :IDENTIFY %s %s\r\n" % (username, identPass)
irc.send(identString)
time.sleep(1)
chanlist = channels.split(", ")
for chan in chanlist:
joinString = "JOIN %s \r\n" % str(chan)
irc.send(joinString)
irc.write(joinString)
time.sleep(1)
noColor = "\x03"
checkFile = open('checks.txt', 'r+')
runningChecks = []
colorDict = {
'white': 0,
'black': 1,
'blue': 2,
'green': 3,
'red': 4,
'brown': 5,
'purple': 6,
'orange': 7,
'yellow': 8,
'light-green': 9,
'teal': 10,
'light-cyan': 11,
'light-blue': 12,
'pink': 13,
'gray': 14,
'light-gray': 15
}
def getTiny(url):
    # Shorten *url* via the TinyURL create API (Python 2 urllib; blocking
    # network call, no error handling — failures propagate to the caller).
    reqUrl = urllib.urlopen('http://tinyurl.com/api-create.php?' + urllib.urlencode({'url': url}))
    tinyUrl = reqUrl.read()
    return tinyUrl
def listColors():
    """Build an IRC-colored, comma-separated listing of every known color name."""
    swatches = [randomColor(name) + name + noColor for name in colorDict.keys()]
    return "Available Colors: " + ", ".join(swatches)
class checkThread(threading.Thread):
    # One polling thread per watched subreddit; runs checkNewLoop until the
    # subreddit is removed from runningChecks.
    def __init__(self, subreddit, color, delay, channel):
        threading.Thread.__init__(self)
        self.subreddit = subreddit
        self.color = color
        self.delay = delay
        self.channel = channel
        # NOTE(review): the name `_stop` can shadow an internal Thread
        # attribute on some Python versions — confirm before upgrading.
        self._stop = threading.Event()
    def run(self):
        checkNewLoop(self.subreddit, self.color, self.delay, self.channel)
    def stop(self):
        self._stop.set()
    def stopped(self):
        return self._stop.isSet()
class customOpener(FancyURLopener, object):
    # Custom User-Agent for reddit requests (presumably to avoid the
    # throttling applied to the default Python UA — confirm).
    version = "Swagbot/9000.0"
def checkNewLoop(subreddit, color, delay, channel):
    # Poll until the subreddit is removed from runningChecks (by !del / !color).
    while subreddit in runningChecks:
        checkNew(subreddit, color, delay, channel)
        time.sleep(0.1)
def checkNew(subreddit, color, delay, channel):
    # One polling pass: fetch /new.json for *subreddit*, announce every post
    # not yet recorded in doneid.txt, and record it.  Seen-post ids persist
    # across restarts via doneid.txt.
    time.sleep(delay)
    if subreddit in runningChecks:
        urlopen = customOpener().open
        subJson = urlopen("http://www.reddit.com/r/" + subreddit + "/new.json?sort=new")
        newPosts = json.loads(subJson.read())
        postList = newPosts['data']['children']
        # 'a+' so the same handle is read (history) then appended (new ids).
        # NOTE(review): the file handle is never explicitly closed.
        doneIDs = open('doneid.txt', 'a+')
        readPosts = []
        for line in doneIDs:
            if line != "":
                readPosts.append(line.rstrip("\n"))
        for key, post in enumerate(postList):
            id = post['data']['name']
            title = post['data']['title']
            url = post['data']['url']
            domain = post['data']['domain']
            domain = domain.lower()
            if id not in readPosts:
                textColor = randomColor(color)
                doneIDs.write(id + "\n")
                shortID = post['data']['id']
                # Only non-imgur, non-self links go through the URL shortener.
                if domain != "imgur.com" and domain != "i.imgur.com" and domain != "self." + subreddit:
                    shortUrl = " [" + getTiny(url) + "]"
                elif domain == "imgur.com" or domain == "i.imgur.com":
                    shortUrl = " [" + url +"]"
                elif domain == "self." + subreddit:
                    shortUrl = " [self." + subreddit + "]"
                else:
                    shortUrl = "[Tek: url bork]"
                # Strip non-ASCII from the title before sending over IRC.
                title = unidecode.unidecode(title)
                message = textColor + "/r/" + subreddit + noColor + " - " + title + " (http://redd.it/" + shortID + ")" + shortUrl
                sendMessage(channel, message)
                time.sleep(1)
def randomColor(color):
    """Return the mIRC color-code prefix for *color* (a known name or a raw code)."""
    code = colorDict[color] if color in colorDict else color
    return "\x03" + str(code)
def addCheck(subreddit, color, delay, channel):
    # Persist the check to checks.txt and start its daemon polling thread.
    checkFile.write(subreddit + " | " + str(color) + " | " + str(delay) + " | " + channel + "\n")
    thread = checkThread(subreddit, color, delay, channel)
    thread.daemon = True
    thread.name = "Thread-" + subreddit
    runningChecks.append(subreddit)
    thread.start()
def loadChecks():
    # Restore persisted checks ("subreddit | color | delay | channel" lines).
    for line in checkFile:
        if line != "":
            checkInfo = line.split(" | ")
            addCheck(checkInfo[0], checkInfo[1], float(checkInfo[2]), checkInfo[3].rstrip("\n"))
def sendMessage(channel, message):
    # Send one PRIVMSG line to *channel* over the global IRC socket.
    irc.send(("PRIVMSG " + channel + " :" + str(message) + "\r\n"))
def processMessage(chanMessage, userMessage, channel):
    # Dispatch chat commands: !add, !del, !list, !color, !listcolors.
    # NOTE(review): the !del / !color rewrites use a substring test
    # (`subreddit in line`), so one subreddit name that is a substring of
    # another could match the wrong line — confirm acceptable.
    if chanMessage == "!add":
        try:
            subreddit = userMessage.split(' ')[1]
        except:  # bare except: any parse failure falls back to the usage path
            subreddit = "error"
        try:
            color = userMessage.split(' ')[2]
        except:  # color is optional; pick a random code when absent
            color = str(random.randint(1, 15))
        if subreddit == "error":
            sendMessage(channel, "Usage: !add [subreddit] (optional: color)")
        else:
            if subreddit not in runningChecks:
                addCheck(subreddit, color, 10, channel)
            else:
                sendMessage(channel, "That subreddit is already being checked!")
    if chanMessage == "!del":
        try:
            subreddit = userMessage.split(' ')[1]
        except:
            subreddit = "error"
        if subreddit == "error":
            sendMessage(channel, "Usage: !del [subreddit]")
        else:
            if subreddit not in runningChecks:
                sendMessage(channel, "Check for that channel not found!")
            else:
                # Rewrite checks.txt without the deleted subreddit's line.
                with open('checks.txt') as oldfile, open('checkstemp.txt', 'w') as newfile:
                    for line in oldfile:
                        if not subreddit in line:
                            newfile.write(line)
                os.remove("checks.txt")
                os.rename("checkstemp.txt", "checks.txt")
                runningChecks.remove(subreddit)
    if chanMessage == "!list":
        sendMessage(channel, "Currently Checking:")
        for check in runningChecks:
            sendMessage(channel, "/r/" + check)
    if chanMessage == "!color":
        try:
            subreddit = userMessage.split(' ')[1]
        except:
            subreddit = "error"
        try:
            color = userMessage.split(' ')[2]
        except:
            color = str(random.randint(1, 15))
        if subreddit == "error":
            sendMessage(channel, "Usage: !color [subreddit] [new color]")
        else:
            if subreddit not in runningChecks:
                sendMessage(channel, "Check not found.")
            else:
                # Rewrite the matching line with the new color and restart
                # the check so the running thread picks the color up.
                with open('checks.txt') as oldfile, open('checkstemp.txt', 'w') as newfile:
                    for line in oldfile:
                        if subreddit in line:
                            changeColor = line.split(" | ")
                            newfile.write(changeColor[0] + " | " + color + " | " + changeColor[2] + " | " + changeColor[3])
                            runningChecks.remove(subreddit)
                            addCheck(changeColor[0], color, float(changeColor[2]), changeColor[3].rstrip("\n"))
                        else:
                            newfile.write(line)
                os.remove("checks.txt")
                os.rename("checkstemp.txt", "checks.txt")
    if chanMessage == "!listcolors":
        sendMessage(channel, listColors())
loadChecks()
# Main receive loop: answer PINGs and route chat lines to processMessage.
while True:
    data = irc.recv(8192)
    print data.rstrip("\n")
    if data.find('PING') != -1:
        irc.send('PONG ' +data.split()[1]+'\r\n')
    # Last ':'-separated field is the message text; first word is the command.
    userMessage = data.split(':')[-1].strip()
    chanMessage = userMessage.split(' ')[0].lower()
    channel = data.split(' ')
    if len(channel) > 2:
        channel = channel[2]  # PRIVMSG target (channel or nick)
    else:
        channel = ""
    processMessage(chanMessage, userMessage, channel)
| StarcoderdataPython |
150159 | <reponame>kaiden8/depthai-ros-examples<gh_stars>0
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription, launch_description_sources
from launch.actions import IncludeLaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
import launch_ros.actions
import launch_ros.descriptions
def generate_launch_description():
    """Build the ROS 2 launch description for the yolov4 spatial-detection demo.

    Declares camera/TF/NN launch arguments, includes the depthai_bridge URDF
    launch file, and starts the depthai_examples yolov4_spatial_node.
    """
    default_rviz = os.path.join(get_package_share_directory('depthai_examples'),
                            'rviz', 'pointCloud.rviz')
    urdf_launch_dir = os.path.join(get_package_share_directory('depthai_bridge'), 'launch')
    # Launch-configuration handles with their default values.
    camera_model = LaunchConfiguration('camera_model',  default = 'OAK-D')
    tf_prefix    = LaunchConfiguration('tf_prefix',     default = 'oak')
    base_frame   = LaunchConfiguration('base_frame',    default = 'oak-d_frame')
    parent_frame = LaunchConfiguration('parent_frame',  default = 'oak-d-base-frame')
    cam_pos_x    = LaunchConfiguration('cam_pos_x',     default = '0.0')
    cam_pos_y    = LaunchConfiguration('cam_pos_y',     default = '0.0')
    cam_pos_z    = LaunchConfiguration('cam_pos_z',     default = '0.0')
    cam_roll     = LaunchConfiguration('cam_roll',      default = '0.0')
    cam_pitch    = LaunchConfiguration('cam_pitch',     default = '0.0')
    cam_yaw      = LaunchConfiguration('cam_yaw',       default = '0.0')
    camera_param_uri = LaunchConfiguration('camera_param_uri', default = 'package://depthai_examples/params/camera')
    sync_nn          = LaunchConfiguration('sync_nn', default = True)
    subpixel         = LaunchConfiguration('subpixel', default = True)
    nn_path          = LaunchConfiguration('nn_path', default = "")
    confidence       = LaunchConfiguration('confidence', default = 200)
    lrCheckTresh     = LaunchConfiguration('lrCheckTresh', default = 5)
    monoResolution   = LaunchConfiguration('monoResolution', default = '400p')
    # Argument declarations (what `ros2 launch ... --show-args` reports).
    declare_camera_model_cmd = DeclareLaunchArgument(
        'camera_model',
        default_value=camera_model,
        description='The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.')
    declare_tf_prefix_cmd = DeclareLaunchArgument(
        'tf_prefix',
        default_value=tf_prefix,
        description='The name of the camera. It can be different from the camera model and it will be used in naming TF.')
    declare_base_frame_cmd = DeclareLaunchArgument(
        'base_frame',
        default_value=base_frame,
        description='Name of the base link.')
    declare_parent_frame_cmd = DeclareLaunchArgument(
        'parent_frame',
        default_value=parent_frame,
        description='Name of the parent link from other a robot TF for example that can be connected to the base of the OAK.')
    declare_pos_x_cmd = DeclareLaunchArgument(
        'cam_pos_x',
        default_value=cam_pos_x,
        description='Position X of the camera with respect to the base frame.')
    declare_pos_y_cmd = DeclareLaunchArgument(
        'cam_pos_y',
        default_value=cam_pos_y,
        description='Position Y of the camera with respect to the base frame.')
    declare_pos_z_cmd = DeclareLaunchArgument(
        'cam_pos_z',
        default_value=cam_pos_z,
        description='Position Z of the camera with respect to the base frame.')
    declare_roll_cmd = DeclareLaunchArgument(
        'cam_roll',
        default_value=cam_roll,
        description='Roll orientation of the camera with respect to the base frame.')
    declare_pitch_cmd = DeclareLaunchArgument(
        'cam_pitch',
        default_value=cam_pitch,
        description='Pitch orientation of the camera with respect to the base frame.')
    declare_yaw_cmd = DeclareLaunchArgument(
        'cam_yaw',
        default_value=cam_yaw,
        description='Yaw orientation of the camera with respect to the base frame.')
    declare_camera_param_uri_cmd = DeclareLaunchArgument(
        'camera_param_uri',
        default_value=camera_param_uri,
        description='Sending camera yaml path')
    declare_sync_nn_cmd = DeclareLaunchArgument(
        'sync_nn',
        default_value=sync_nn,
        description='Syncs the image output with the Detection.')
    declare_subpixel_cmd = DeclareLaunchArgument(
        'subpixel',
        default_value=subpixel,
        description='Enables subpixel stereo detection.')
    declare_nn_path_cmd = DeclareLaunchArgument(
        'nn_path',
        default_value=nn_path,
        description='Path to the object detection blob needed for detection')
    declare_confidence_cmd = DeclareLaunchArgument(
        'confidence',
        default_value=confidence,
        description='Confidence that the disparity from the feature matching was good. 0-255. 255 being the lowest confidence.')
    declare_lrCheckTresh_cmd = DeclareLaunchArgument(
        'lrCheckTresh',
        default_value=lrCheckTresh,
        description='LR Threshold is the threshod of how much off the disparity on the l->r and r->l ')
    declare_monoResolution_cmd = DeclareLaunchArgument(
        'monoResolution',
        default_value=monoResolution,
        description='Contains the resolution of the Mono Cameras. Available resolutions are 800p, 720p & 400p for OAK-D & 480p for OAK-D-Lite.')
    # Camera URDF / TF tree from depthai_bridge.
    urdf_launch = IncludeLaunchDescription(
                            launch_description_sources.PythonLaunchDescriptionSource(
                                    os.path.join(urdf_launch_dir, 'urdf_launch.py')),
                            launch_arguments={'tf_prefix'   : tf_prefix,
                                              'camera_model': camera_model,
                                              'base_frame'  : base_frame,
                                              'parent_frame': parent_frame,
                                              'cam_pos_x'   : cam_pos_x,
                                              'cam_pos_y'   : cam_pos_y,
                                              'cam_pos_z'   : cam_pos_z,
                                              'cam_roll'    : cam_roll,
                                              'cam_pitch'   : cam_pitch,
                                              'cam_yaw'     : cam_yaw}.items())
    # NOTE(review): subpixel/confidence/lrCheckTresh are declared above but
    # not passed to the node's parameters — confirm whether intentional.
    yolov4_spatial_node = launch_ros.actions.Node(
            package='depthai_examples', executable='yolov4_spatial_node',
            output='screen',
            parameters=[{'tf_prefix': tf_prefix},
                        {'camera_param_uri': camera_param_uri},
                        {'sync_nn': sync_nn},
                        {'nn_path': nn_path},
                        {'monoResolution': monoResolution}])
    # NOTE(review): rviz_node is built but never added to the launch
    # description below, so RViz does not start from this file.
    rviz_node = launch_ros.actions.Node(
            package='rviz2', executable='rviz2', output='screen',
            arguments=['--display-config', default_rviz])
    ld = LaunchDescription()
    ld.add_action(declare_tf_prefix_cmd)
    ld.add_action(declare_camera_model_cmd)
    ld.add_action(declare_base_frame_cmd)
    ld.add_action(declare_parent_frame_cmd)
    ld.add_action(declare_pos_x_cmd)
    ld.add_action(declare_pos_y_cmd)
    ld.add_action(declare_pos_z_cmd)
    ld.add_action(declare_roll_cmd)
    ld.add_action(declare_pitch_cmd)
    ld.add_action(declare_yaw_cmd)
    ld.add_action(declare_camera_param_uri_cmd)
    ld.add_action(declare_sync_nn_cmd)
    ld.add_action(declare_subpixel_cmd)
    ld.add_action(declare_nn_path_cmd)
    ld.add_action(declare_confidence_cmd)
    ld.add_action(declare_lrCheckTresh_cmd)
    ld.add_action(declare_monoResolution_cmd)
    ld.add_action(yolov4_spatial_node)
    ld.add_action(urdf_launch)
    return ld
| StarcoderdataPython |
180175 | <filename>pyminer/network/regressors.py
__author__ = 'Ralph'
import pandas as pd
from base import Node
from base import InputPort
from base import OutputPort
class Regressor(Node):
    # Abstract workflow node for regression models: one DataFrame input port
    # plus two string output ports.
    # NOTE(review): the output port named 'mode' may be a typo for 'model' —
    # confirm against downstream consumers before renaming.
    def __init__(self, name):
        super(Regressor, self).__init__(name)
        self.add_input_port(
            InputPort(name='input', data_type=pd.DataFrame))
        self.add_output_port(
            OutputPort(name='mode', data_type=str))
        self.add_output_port(
            OutputPort(name='performance', data_type=str))
class LinearRegression(Regressor):
    def __init__(self):
        super(LinearRegression, self).__init__('LinearRegression')
    def execute(self):
        # TODO: not implemented yet.
        pass
class SupportVectorRegression(Regressor):
    def __init__(self):
        super(SupportVectorRegression, self).__init__('SupportVectorRegression')
    def execute(self):
        # TODO: not implemented yet.
        pass
class GaussianProcesses(Regressor):
    def __init__(self):
        super(GaussianProcesses, self).__init__('GaussianProcesses')
    def execute(self):
        # TODO: not implemented yet.
        pass
1648013 | #########################################################################
#-*- coding:utf-8 -*-
# File Name: hello.py
# Author: wayne
# mail: <EMAIL>
# Created Time: 2015年08月17日 星期一 16时40分53秒
#########################################################################
#!/bin/python
print "hello"
| StarcoderdataPython |
3265245 | <filename>image-classification.py<gh_stars>0
"""
A TensorFlow Exercise That based on this URL:
Copyright By TensorFlow, under Apache and MIT License
Modified By <NAME> (bl6)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
# Import TensorFlow Datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# Helper libraries
import math
import numpy as np
import matplotlib.pyplot as plt
# Load Fashion-MNIST, build/train a small dense classifier, and predict on
# one test batch.  (TensorFlow tutorial script with side effects at import.)
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
# Take a single image, and remove the color dimension by reshaping
for image, label in test_dataset.take(1):
    break
image = image.numpy().reshape((28,28))
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(100, activation=tf.nn.relu), # New Dense Layer
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
BATCH_SIZE = 32
train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
model.fit(train_dataset, epochs=1, steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE))
test_loss, test_accuracy = model.evaluate(test_dataset, steps=math.ceil(num_test_examples/32))
print('Accuracy on test dataset:', test_accuracy)
# Pull one batch of test images/labels and predict on it.
for test_images, test_labels in test_dataset.take(1):
    test_images = test_images.numpy()
    test_labels = test_labels.numpy()
predictions = model.predict(test_images)
predictions.shape
print("Computer:", np.argmax(predictions[0]), "Actual:", test_labels[0])
# Fashion-MNIST class index -> human-readable name.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def plot_image(i, predictions_array, true_labels, images):
    """Render test image *i*, captioned with predicted vs. true label.

    The caption is blue when the prediction matches the true label,
    red otherwise.
    """
    probs = predictions_array[i]
    actual = true_labels[i]
    img = images[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img[..., 0], cmap=plt.cm.binary)
    guess = np.argmax(probs)
    color = 'blue' if guess == actual else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[guess],
                                         100 * np.max(probs),
                                         class_names[actual]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-chart the 10 class probabilities for test sample *i*.

    The predicted class bar is red and the true class bar blue, so a
    correct prediction shows a single blue bar.
    """
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
    # NOTE(review): showing the figure inside the helper displays/blocks on
    # every call; the upstream tutorial calls plt.show() once after plotting.
    plt.show()
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
# Each sample gets two side-by-side subplots: the image and its probability bars.
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
# Predict a single image: Keras models expect a batch, so wrap it in a
# batch of size one first.
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
print(np.argmax(predictions_single[0]))
from trueskill import Rating, quality_1vs1, rate_1vs1, rate, TrueSkill
import pandas as pd
import numpy as np
import csv
import time
# Date stamp (DD/MM/YYYY) written into the 'LastGame' column.
# NOTE(review): computed once at import time — a long-running process will
# keep stamping games with its start date; confirm that is acceptable.
todaytime = time.strftime("%d/%m/%Y")
class MarioBoard(object):
    """TrueSkill-based game leaderboard backed by a mongo 'table' document."""
    def __init__(self,db):
        """Load the player table from the db and reset per-game state.

        db -- object exposing .mariodb (mongo collection) and .log (logger).
        """
        self.mariodb = db.mariodb
        self.db = db
        self.playerdf = self.load_db_table()
        # re-index rows 1..N so player ids map directly onto .loc lookups
        self.playerdf.index = np.arange(1,len(self.playerdf)+1)
        # NOTE(review): sort_values returns a sorted copy; the result is
        # discarded here, so this line has no effect.
        self.playerdf.sort_values(by='Name', ascending=0)
        #print(self.playerdf)
        # print(self.playerdf.loc[1,'Name'])
        # per-game scratch state, filled in by RankPlayers()/BuildCurrentRating()
        self.active = []      # row indices of the players in finishing order
        self.nowinners = []   # number of joint winners (1 or 2)
        self.norunners = []   # number of joint runners-up
        self.nothird = []
        self.response = []    # number of players in the current game
        self.ratings = []     # trueskill Rating objects, one per active player
        self.newsigmas = []   # post-game sigmas (as strings)
        self.newmeans = []    # post-game means (as strings)
def load_db_table(self):
dbdoc = self.mariodb.find_one({'docType':'table'})
df = pd.DataFrame.from_records(dbdoc['table'])
return df
def save_db_table(self,df):
dfDict = list(df.T.to_dict().values())
self.mariodb.update_one({'docType':'table'},{'$set':{'table':dfDict}})
return
def Leaderboard(self):
self.playerdf.sort_values('Name', ascending=0)
self.db.log.info(self.playerdf.to_html()) #.sort('mu', ascending=False))
    def RankPlayers(self,nump,winner,loser,mid1=False,mid2=False):
        """Record who placed where for a 2-4 player game.

        nump   -- number of players ("2"-"4"; compared via int()).
        winner/mid1/mid2/loser -- player row indices as strings.  A tie for
        a placing is passed as one string such as "1 3" (two ids separated
        by a space) and detected below via len() > 1.

        Fills self.active (row indices in finishing order) and the
        nowinners/norunners/nothird tallies consumed by Play() and
        UpdateFrame().

        NOTE(review): the len()-based tie detection assumes single-digit
        player ids — a two-digit id would be misread as a tie.
        NOTE(review): a 3-player joint win (len(winner) > 1) is not handled
        here even though Play() has a branch for it — confirm intended.
        """
        self.db.log.info(self.playerdf.to_html())
        #nump = input("Pick number of players (2-4): ")
        self.response = nump
        # Two players
        if int(nump) == 2:
            rank1 = winner
            if len(rank1) > 1:
                # "a b": joint winners (a draw)
                self.active.append(rank1[0])
                self.active.append(rank1[2])
                self.nowinners.append(2)
            if len(rank1) == 1:
                rank2 = loser
                self.active.append(rank1)
                self.active.append(rank2)
                self.nowinners.append(1)
                self.norunners.append(1)
            #print(self.active)
        elif int(nump) == 3:
            rank1 = winner
            # All Three rank differently
            if len(rank1) == 1:
                rank2 = mid1
                if len(rank2) > 1:
                    # "a b": joint runners-up behind the winner
                    self.active.append(rank1)
                    self.active.append(rank2[0])
                    self.active.append(rank2[2])
                    self.nowinners.append(1)
                    self.norunners.append(2)
                if len(rank2) == 1:
                    rank3 = loser
                    self.active.append(rank1)
                    self.active.append(rank2)
                    self.active.append(rank3)
                    self.nowinners.append(1)
                    self.norunners.append(1)
                    self.nothird.append(1)
        elif int(nump) == 4:
            # four players: strict finishing order, no ties supported
            rank1 = winner
            rank2 = mid1
            rank3 = mid2
            rank4 = loser
            self.active.append(rank1)
            self.active.append(rank2)
            self.active.append(rank3)
            self.active.append(rank4)
        # To Congratulate the players
# To Congratulate the players
def BuildCurrentRating(self):
# 2p
if int(self.response) == 2:
Winmu=float(self.playerdf.loc[int(self.active[0]),'mu'])
Winsig=float(self.playerdf.loc[int(self.active[0]),'sigma'])
Runmu=float(self.playerdf.loc[int(self.active[1]),'mu'])
Runsig=float(self.playerdf.loc[int(self.active[1]),'sigma'])
WinEnv = TrueSkill(mu=Winmu, sigma=Winsig, draw_probability=0.05, backend=None)
WinEnv.make_as_global()
r1=Rating()
RunEnv = TrueSkill(mu=Runmu, sigma=Runsig, draw_probability=0.05, backend=None)
RunEnv.make_as_global()
r2=Rating()
self.ratings.append(r1)
self.ratings.append(r2)
# 3p
elif int(self.response) == 3:
oneMu=float(self.playerdf.loc[int(self.active[0]),'mu'])
oneSig=float(self.playerdf.loc[int(self.active[0]),'sigma'])
twoMu=float(self.playerdf.loc[int(self.active[1]),'mu'])
twoSig=float(self.playerdf.loc[int(self.active[1]),'sigma'])
thrMu=float(self.playerdf.loc[int(self.active[2]),'mu'])
thrSig=float(self.playerdf.loc[int(self.active[2]),'sigma'])
WinEnv = TrueSkill(mu=oneMu, sigma=oneSig, draw_probability=0.05, backend=None)
WinEnv.make_as_global()
r1=Rating()
RunEnv = TrueSkill(mu=twoMu, sigma=twoSig, draw_probability=0.05, backend=None)
RunEnv.make_as_global()
# print(Rating())
r2=Rating()
ThrEnv = TrueSkill(mu=thrMu, sigma=thrSig, draw_probability=0.05, backend=None)
ThrEnv.make_as_global()
# print(Rating())
r3=Rating()
self.ratings.append(r1)
self.ratings.append(r2)
self.ratings.append(r3)
elif int(self.response) == 4:
oneMu=float(self.playerdf.loc[int(self.active[0]),'mu'])
oneSig=float(self.playerdf.loc[int(self.active[0]),'sigma'])
twoMu=float(self.playerdf.loc[int(self.active[1]),'mu'])
twoSig=float(self.playerdf.loc[int(self.active[1]),'sigma'])
thrMu=float(self.playerdf.loc[int(self.active[2]),'mu'])
thrSig=float(self.playerdf.loc[int(self.active[2]),'sigma'])
frMu=float(self.playerdf.loc[int(self.active[3]),'mu'])
frSig=float(self.playerdf.loc[int(self.active[3]),'sigma'])
WinEnv = TrueSkill(mu=oneMu, sigma=oneSig, draw_probability=0.05, backend=None)
WinEnv.make_as_global()
# print(Rating())
r1=Rating()
RunEnv = TrueSkill(mu=twoMu, sigma=twoSig, draw_probability=0.05, backend=None)
RunEnv.make_as_global()
# print(Rating())
r2=Rating()
ThrEnv = TrueSkill(mu=thrMu, sigma=thrSig, draw_probability=0.05, backend=None)
ThrEnv.make_as_global()
# print(Rating())
r3=Rating()
FrEnv = TrueSkill(mu=frMu, sigma=frSig, draw_probability=0.05, backend=None)
FrEnv.make_as_global()
# print(Rating())
r4=Rating()
self.ratings.append(r1)
self.ratings.append(r2)
self.ratings.append(r3)
self.ratings.append(r4)
def Play(self):
# 2p: game between 2 players and only one winner
if int(self.response) == 2 and int(self.nowinners[0]) == 1:
new_r1, new_r2 = rate_1vs1(self.ratings[0],self.ratings[1])
print(new_r1)
print(new_r2)
# make rating object a string
k1a = str(new_r1)
k2a = str(new_r2)
# New sigmas
newr1sig = k1a[k1a.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2a[k2a.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
# New Means truncated to support those below 100
newr1mean = k1a[20:26]
newr2mean = k2a[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
print(newr1mean)
print(newr2mean)
# 2p: game between 2 players and two winners (draw)
if int(self.response) == 2 and int(self.nowinners[0]) == 2:
new_r1, new_r2 = rate_1vs1(self.ratings[0],self.ratings[1], drawn=True)
print(new_r1)
print(new_r2)
# leaderboard = sorted(self.ratings, key=env.expose, reverse=True)
# make rating object a string
k1a = str(new_r1)
k2a = str(new_r2)
# New sigmas
newr1sig = k1a[k1a.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2a[k2a.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
# New Means truncated to support those below 100
newr1mean = k1a[20:26]
newr2mean = k2a[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
# 3p: game between 3 players with two winners (draws)
if int(self.response) == 3 and int(self.nowinners[0]) == 2:
(new_r1,), (new_r2,), (new_r3,) = rate([(self.ratings[0],),(self.ratings[1],),(self.ratings[2],)], ranks=(0,0,1))
print(new_r1)
print(new_r2)
print(new_r3)
# make rating object a string
k1b = str(new_r1)
k2b = str(new_r2)
k3b = str(new_r3)
# New sigmas using an index to get the number after sigma in the string of the rating.
newr1sig = k1b[k1b.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2b[k2b.index("sigma=") + len("sigma="):][0:-1]
newr3sig = k3b[k2b.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
self.newsigmas.append(newr3sig)
# New Means truncated to support those below 100
newr1mean = k1b[20:26]
newr2mean = k2b[20:26]
newr3mean = k3b[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
self.newmeans.append(newr3mean)
# 3p: game between 3 players no draws
if int(self.response) == 3 and int(self.nowinners[0]) == 1 and int(self.norunners[0]) == 1:
(new_r1,), (new_r2,), (new_r3,) = rate([(self.ratings[0],),(self.ratings[1],),(self.ratings[2],)], ranks=(0,1,2))
print(new_r1)
print(new_r2)
print(new_r3)
# make rating object a string
k1b = str(new_r1)
k2b = str(new_r2)
k3b = str(new_r3)
# New sigmas
newr1sig = k1b[k1b.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2b[k2b.index("sigma=") + len("sigma="):][0:-1]
newr3sig = k3b[k3b.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
self.newsigmas.append(newr3sig)
# New Means truncated to support those below 100
newr1mean = k1b[20:26]
newr2mean = k2b[20:26]
newr3mean = k3b[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
self.newmeans.append(newr3mean)
# 3p: one winner two runners up in 3p
if int(self.response) == 3 and int(self.nowinners[0]) == 1 and int(self.norunners[0]) == 2:
(new_r1,), (new_r2,), (new_r3,) = rate([(self.ratings[0],),(self.ratings[1],),(self.ratings[2],)], ranks=(0,1,1))
print(new_r1)
print(new_r2)
print(new_r3)
# make rating object a string
k1b = str(new_r1)
k2b = str(new_r2)
k3b = str(new_r3)
# New sigmas
newr1sig = k1b[k1b.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2b[k2b.index("sigma=") + len("sigma="):][0:-1]
newr3sig = k3b[k3b.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
self.newsigmas.append(newr3sig)
# New Means truncated to support those below 100
newr1mean = k1b[20:26]
newr2mean = k2b[20:26]
newr3mean = k3b[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
self.newmeans.append(newr3mean)
# 4p:
if int(self.response) == 4:
(new_r1,), (new_r2,), (new_r3,), (new_r4,) = rate([(self.ratings[0],),(self.ratings[1],),(self.ratings[2],),(self.ratings[3],)], ranks=(0,1,2,3))
print(new_r1)
print(new_r2)
print(new_r3)
print(new_r4)
# make rating object a string
k1c = str(new_r1)
k2c = str(new_r2)
k3c = str(new_r3)
k4c = str(new_r4)
# New sigmas
newr1sig = k1c[k1c.index("sigma=") + len("sigma="):][0:-1]
newr2sig = k2c[k2c.index("sigma=") + len("sigma="):][0:-1]
newr3sig = k3c[k3c.index("sigma=") + len("sigma="):][0:-1]
newr4sig = k4c[k4c.index("sigma=") + len("sigma="):][0:-1]
self.newsigmas.append(newr1sig)
self.newsigmas.append(newr2sig)
self.newsigmas.append(newr3sig)
self.newsigmas.append(newr4sig)
# New Means truncated to support those below 100
newr1mean = k1c[20:26]
newr2mean = k2c[20:26]
newr3mean = k3c[20:26]
newr4mean = k4c[20:26]
self.newmeans.append(newr1mean)
self.newmeans.append(newr2mean)
self.newmeans.append(newr3mean)
self.newmeans.append(newr4mean)
def UpdateFrame(self):
# Three player Scenario: Counting Plays, Draws, Losses and Wins. SOMETHING WRONG WITH WIN COUNTING three player
if int(self.response) == 3:
# print(playerdf)
print(self.active[0])
# everyone gets a played count
self.playerdf.loc[int(self.active[0]), 'played'] = int(self.playerdf.loc[int(self.active[0]), 'played']) + 1
self.playerdf.loc[int(self.active[1]), 'played'] = int(self.playerdf.loc[int(self.active[1]), 'played']) + 1
self.playerdf.loc[int(self.active[2]), 'played'] = int(self.playerdf.loc[int(self.active[2]), 'played']) + 1
self.playerdf.loc[int(self.active[0]), 'sigma'] = self.newsigmas[0]
self.playerdf.loc[int(self.active[1]), 'sigma'] = self.newsigmas[1]
self.playerdf.loc[int(self.active[2]), 'sigma'] = self.newsigmas[2]
self.playerdf.loc[int(self.active[0]), 'mu'] = self.newmeans[0]
self.playerdf.loc[int(self.active[1]), 'mu'] = self.newmeans[1]
self.playerdf.loc[int(self.active[2]), 'mu'] = self.newmeans[2]
self.playerdf.loc[int(self.active[0]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[1]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[2]), 'LastGame'] = todaytime
# p1/p2 joint win (draw and a win for p1 and p2, loss for p3)
if int(self.response) == 3 and int(self.nowinners[0]) == 2:
self.playerdf.loc[int(self.active[0]), 'draws'] = int(self.playerdf.loc[int(self.active[0]), 'draws']) + 1
self.playerdf.loc[int(self.active[1]), 'draws'] = int(self.playerdf.loc[int(self.active[1]), 'draws']) + 1
self.playerdf.loc[int(self.active[0]), 'wins'] = int(self.playerdf.loc[int(self.active[0]), 'wins']) + 1
self.playerdf.loc[int(self.active[1]), 'wins'] = int(self.playerdf.loc[int(self.active[1]), 'wins']) + 1
self.playerdf.loc[int(self.active[2]), 'losses'] = int(self.playerdf.loc[int(self.active[2]), 'losses']) + 1
# p2/p3 joint loss (draw and a loss counted for p2/p3, win for p1)
if int(self.norunners[0]) == 2 and int(self.nowinners[0]) == 1:
self.playerdf.loc[int(self.active[0]), 'wins'] = int(self.playerdf.loc[int(self.active[0]), 'wins']) + 1
self.playerdf.loc[int(self.active[1]), 'draws'] = int(self.playerdf.loc[int(self.active[1]), 'draws']) + 1
self.playerdf.loc[int(self.active[2]), 'draws'] = int(self.playerdf.loc[int(self.active[2]), 'draws']) + 1
self.playerdf.loc[int(self.active[1]), 'losses'] = int(self.playerdf.loc[int(self.active[1]), 'losses']) + 1
self.playerdf.loc[int(self.active[2]), 'losses'] = int(self.playerdf.loc[int(self.active[2]), 'losses']) + 1
if int(self.norunners[0]) == 1 and int(self.nowinners[0]) == 1:
self.playerdf.loc[int(self.active[0]), 'wins'] = int(self.playerdf.loc[int(self.active[0]), 'wins']) + 1
self.playerdf.loc[int(self.active[2]), 'losses'] = int(self.playerdf.loc[int(self.active[2]), 'losses']) + 1
self.db.log.info(self.playerdf.to_html())
self.save_db_table(self.playerdf)
# Two player scenario: Counting Plays, Draws, Losses and Wins.
if int(self.response) == 2:
# print(self.playerdf)
print(self.active[0])
self.playerdf.loc[int(self.active[0]), 'played'] = int(self.playerdf.loc[int(self.active[0]), 'played']) + 1
self.playerdf.loc[int(self.active[1]), 'played'] = int(self.playerdf.loc[int(self.active[1]), 'played']) + 1
self.playerdf.loc[int(self.active[0]), 'sigma'] = self.newsigmas[0]
self.playerdf.loc[int(self.active[1]), 'sigma'] = self.newsigmas[1]
self.playerdf.loc[int(self.active[0]), 'mu'] = self.newmeans[0]
self.playerdf.loc[int(self.active[1]), 'mu'] = self.newmeans[1]
self.playerdf.loc[int(self.active[0]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[1]), 'LastGame'] = todaytime
if int(self.response) == 2 and int(self.nowinners[0]) == 2:
self.playerdf.loc[int(self.active[0]), 'draws'] = int(self.playerdf.loc[int(self.active[0]), 'draws']) + 1
self.playerdf.loc[int(self.active[1]), 'draws'] = int(self.playerdf.loc[int(self.active[1]), 'draws']) + 1
elif int(self.nowinners[0]) == 1:
self.playerdf.loc[int(self.active[0]), 'wins'] = int(self.playerdf.loc[int(self.active[0]), 'wins']) + 1
self.playerdf.loc[int(self.active[1]), 'losses'] = int(self.playerdf.loc[int(self.active[1]), 'losses']) + 1
# 2/3 draw
self.db.log.info(self.playerdf.to_html())
self.save_db_table(self.playerdf)
if int(self.response) == 4:
# self.db.log.info(self.playerdf.to_html())
print(self.active[0])
self.playerdf.loc[int(self.active[0]), 'played'] = int(self.playerdf.loc[int(self.active[0]), 'played']) + 1
self.playerdf.loc[int(self.active[1]), 'played'] = int(self.playerdf.loc[int(self.active[1]), 'played']) + 1
self.playerdf.loc[int(self.active[2]), 'played'] = int(self.playerdf.loc[int(self.active[2]), 'played']) + 1
self.playerdf.loc[int(self.active[3]), 'played'] = int(self.playerdf.loc[int(self.active[3]), 'played']) + 1
self.playerdf.loc[int(self.active[0]), 'sigma'] = self.newsigmas[0]
self.playerdf.loc[int(self.active[1]), 'sigma'] = self.newsigmas[1]
self.playerdf.loc[int(self.active[2]), 'sigma'] = self.newsigmas[2]
self.playerdf.loc[int(self.active[3]), 'sigma'] = self.newsigmas[3]
self.playerdf.loc[int(self.active[0]), 'mu'] = self.newmeans[0]
self.playerdf.loc[int(self.active[1]), 'mu'] = self.newmeans[1]
self.playerdf.loc[int(self.active[2]), 'mu'] = self.newmeans[2]
self.playerdf.loc[int(self.active[3]), 'mu'] = self.newmeans[3]
self.playerdf.loc[int(self.active[0]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[1]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[2]), 'LastGame'] = todaytime
self.playerdf.loc[int(self.active[3]), 'LastGame'] = todaytime
self.db.log.info(self.playerdf.to_html())
self.save_db_table(self.playerdf)
return self.playerdf
# print('{:.1%} chance to draw'.format(quality_1vs1(r1, r2)))
| StarcoderdataPython |
import threadsafe_tkinter as tk
import tkinter.ttk as ttk
from copy import deepcopy
from traceback import format_exc
from binilla import editor_constants as e_c
from binilla.widgets.scroll_menu import ScrollMenu
from binilla.widgets.field_widgets import field_widget, container_frame,\
data_frame
class ArrayFrame(container_frame.ContainerFrame):
    '''Used for array nodes. Displays a single element in
    the ArrayBlock represented by it, and contains a combobox
    for selecting which array element is displayed.'''
    sel_index = -1        # index of the array entry currently displayed (-1 = none)
    sel_menu = None       # ScrollMenu used to pick which entry is shown
    populated = False     # whether child widgets have been built yet
    option_cache = None   # memoized {index: label} mapping for sel_menu
    options_sane = False  # False forces option_cache to be regenerated
    def __init__(self, *args, **kwargs):
        """Build the array frame: title bar, entry selector and edit buttons.

        Accepts the usual FieldWidget/tk.Frame keyword arguments; forces a
        flat, borderless look before delegating to the base initializers.
        """
        kwargs.update(relief='flat', bd=0, highlightthickness=0,
                      bg=self.default_bg_color)
        field_widget.FieldWidget.__init__(self, *args, **kwargs)
        tk.Frame.__init__(self, *args, **e_c.fix_kwargs(**kwargs))
        # start collapsed if configured to, or if there is nothing to show
        show_frame = bool(kwargs.pop('show_frame', not self.blocks_start_hidden))
        if self.is_empty and self.hide_if_blank:
            show_frame = False
        self.show = tk.BooleanVar()
        self.show.set(show_frame)
        self.options_sane = False
        node_len = 0
        try: node_len = len(self.node)
        except Exception: pass
        # select entry 0 when the array is non-empty, else -1
        self.sel_index = (node_len > 0) - 1
        # make the title, element menu, and all the buttons
        self.controls = tk.Frame(self, relief='raised', bd=self.frame_depth)
        self.title = title = tk.Frame(self.controls, relief='flat', bd=0)
        self.buttons = buttons = tk.Frame(self.controls, relief='flat', bd=0)
        toggle_text = '-' if show_frame else '+'
        self.title_label = tk.Label(
            title, text=self.gui_name, justify='left', anchor='w',
            width=self.title_size, font=self.get_font("frame_title"),
            disabledforeground=self.text_disabled_color)
        self.title_label.font_type = "frame_title"
        self.show_btn = ttk.Checkbutton(
            title, width=3, text=toggle_text, command=self.toggle_visible,
            style='ShowButton.TButton')
        self.sel_menu = ScrollMenu(
            title, f_widget_parent=self,
            sel_index=self.sel_index, max_index=node_len-1,
            option_getter=self.get_options, callback=self.select_option)
        self.shift_up_btn = ttk.Button(
            title, width=7, text='Shift ▲',
            command=self.shift_entry_up)
        self.shift_down_btn = ttk.Button(
            buttons, width=7, text='Shift ▼',
            command=self.shift_entry_down)
        self.add_btn = ttk.Button(
            buttons, width=4, text='Add',
            command=self.add_entry)
        self.insert_btn = ttk.Button(
            buttons, width=6, text='Insert',
            command=self.insert_entry)
        self.duplicate_btn = ttk.Button(
            buttons, width=9, text='Duplicate',
            command=self.duplicate_entry)
        self.delete_btn = ttk.Button(
            buttons, width=6, text='Delete',
            command=self.delete_entry)
        self.delete_all_btn = ttk.Button(
            buttons, width=10, text='Delete all',
            command=self.delete_all_entries)
        self.import_btn = ttk.Button(
            buttons, width=6, text='Import',
            command=self.import_node)
        self.export_btn = ttk.Button(
            buttons, width=6, text='Export',
            command=self.export_node)
        # pack the title, menu, and all the buttons
        for w in (self.shift_down_btn, self.export_btn, self.import_btn,
                  self.delete_all_btn, self.delete_btn, self.duplicate_btn,
                  self.insert_btn, self.add_btn):
            w.pack(side="right", padx=(0, 4), pady=(2, 2))
        self.show_btn.pack(side="left")
        if self.gui_name != '':
            self.title_label.pack(side="left", fill="x", expand=True)
        self.sel_menu.pack(side="left", fill="x", expand=True, padx=(0, 4))
        self.shift_up_btn.pack(side="right", padx=(0, 1), pady=(2, 2))
        self.title.pack(fill="x", expand=True, padx=0)
        self.buttons.pack(fill="x", expand=True, padx=0)
        self.controls.pack(fill="x", expand=True, padx=0)
        self.populate()
        self._initialized = True
@property
def is_empty(self):
if getattr(self, "node", None) is None:
return True
return len(self.node) == 0
    def load_node_data(self, parent, node, attr_index, desc=None):
        """Point this widget at new node data and reload the child widget.

        Returns True when the (single) child widget reports a failed load,
        which callers treat as "needs a rebuild"; False otherwise.
        """
        field_widget.FieldWidget.load_node_data(
            self, parent, node, attr_index, desc)
        sub_node = attr_index = None
        if self.node:
            # keep the previous selection if still in range, otherwise
            # clamp to the last entry (None when the array is empty)
            attr_index = self.sel_index
            if attr_index in range(len(self.node)):
                sub_node = self.node[attr_index]
            else:
                attr_index = len(self.node) - 1
                if attr_index < 0:
                    attr_index = None
        if self.sel_menu is not None:
            # the option labels are no longer trustworthy
            self.options_sane = self.sel_menu.options_menu_sane = False
        for wid in self.f_widgets:
            # there must be only one entry in self.f_widgets
            w = self.f_widgets[wid]
            if w.load_node_data(self.node, sub_node, attr_index):
                return True
        return False
    def unload_node_data(self):
        """Blank the selector label and unload the child widget's node data."""
        self.sel_menu.update_label(" ")
        container_frame.ContainerFrame.unload_node_data(self)
    def set_disabled(self, disable=True):
        """Enable/disable this frame, its selector menu and edit buttons."""
        # non-editable widgets can never be enabled
        disable = disable or not self.editable
        if self.node is None and not disable:
            # nothing loaded: refuse to enable
            return
        if getattr(self, "sel_menu", None):
            self.sel_menu.set_disabled(disable)
        if bool(disable) == self.disabled:
            # no state change; just fall through to the base class
            pass
        elif not disable:
            # enabling: turn everything on, then re-disable what is unusable
            self.set_all_buttons_disabled(False)
            self.disable_unusable_buttons()
        else:
            # disabling: force every edit button off
            # (new_state is always tk.DISABLED on this branch)
            new_state = tk.DISABLED if disable else tk.NORMAL
            for w in (self.shift_up_btn, self.shift_down_btn,
                      self.add_btn, self.insert_btn, self.duplicate_btn,
                      self.delete_btn, self.delete_all_btn):
                if w:
                    w.config(state=new_state)
        container_frame.ContainerFrame.set_disabled(self, disable)
def apply_style(self, seen=None):
container_frame.ContainerFrame.apply_style(self, seen)
self.controls.config(bd=self.frame_depth, bg=self.frame_bg_color)
self.title.config(bg=self.frame_bg_color)
self.title_label.config(bg=self.frame_bg_color)
self.buttons.config(bg=self.frame_bg_color)
#if self.show.get():
# self.pose_fields()
    def destroy(self):
        """Destroy the widget, dropping cached data so it can be collected."""
        # These will linger and take up RAM, even if the widget is destroyed.
        # Need to remove the references manually
        self.option_cache = None
        container_frame.ContainerFrame.destroy(self)
def export_node(self):
try:
# pass call to the export_node method of the array entry's widget
w = self.f_widgets[self.f_widget_ids[0]]
except Exception:
return
w.export_node()
def import_node(self):
try:
# pass call to the import_node method of the array entry's widget
w = self.f_widgets[self.f_widget_ids[0]]
except Exception:
return
w.import_node()
    def get_options(self, opt_index=None):
        '''
        Return the cached {index: name} option mapping for the selector
        menu, or a single option's name when opt_index is given.
        Regenerates the cache when it is missing or marked stale.
        '''
        if (self.option_cache is None or not self.options_sane or
            opt_index is not None):
            # generate_options refreshes self.option_cache when
            # opt_index is None, or returns the single name otherwise
            result = self.generate_options(opt_index)
            if opt_index is not None:
                return result
        if opt_index is None:
            return self.option_cache
        elif opt_index == e_c.ACTIVE_ENUM_NAME:
            # sentinel meaning "the currently selected option"
            opt_index = self.sel_index
        if opt_index < 0: opt_index = -1
        return self.option_cache.get(opt_index)
    def generate_options(self, opt_index=None):
        """Build the {index: name} option mapping for the entry selector.

        With opt_index given, returns only that option's name (or None);
        otherwise caches the full mapping on self.option_cache and
        returns it.
        """
        # sort the options by value(values are integers)
        options = {i: n for n, i in self.desc.get('NAME_MAP', {}).items()}
        if self.node:
            node, desc = self.node, self.desc
            sub_desc = desc['SUB_STRUCT']
            def_struct_name = sub_desc['NAME']
            if self.use_gui_names and 'GUI_NAME' in sub_desc:
                def_struct_name = sub_desc['GUI_NAME']
            options_to_generate = range(len(node))
            if opt_index is not None:
                options_to_generate = (
                    (opt_index, ) if opt_index in options_to_generate else ())
            for i in options_to_generate:
                if i in options:
                    # NAME_MAP entries take precedence
                    continue
                sub_node = node[i]
                if not hasattr(sub_node, 'desc'):
                    continue
                sub_desc = sub_node.desc
                sub_struct_name = sub_desc.get('GUI_NAME', sub_desc['NAME'])
                # only label entries whose struct differs from the default
                if sub_struct_name == def_struct_name:
                    continue
                options[i] = sub_struct_name
            if opt_index is None:
                # cache the full mapping and resync the menu bounds
                self.options_sane = True
                self.option_cache = options
                if self.sel_menu is not None:
                    self.sel_menu.options_menu_sane = False
                    self.sel_menu.max_index = len(node) - 1
                return options
        return options.get(opt_index, None)
def set_shift_up_disabled(self, disable=True):
'''
Disables the move up button if disable is True. Enables it if not.
'''
if disable: self.shift_up_btn.config(state="disabled")
else: self.shift_up_btn.config(state="normal")
def set_shift_down_disabled(self, disable=True):
'''
Disables the move down button if disable is True. Enables it if not.
'''
if disable: self.shift_down_btn.config(state="disabled")
else: self.shift_down_btn.config(state="normal")
def set_add_disabled(self, disable=True):
'''Disables the add button if disable is True. Enables it if not.'''
if disable: self.add_btn.config(state="disabled")
else: self.add_btn.config(state="normal")
def set_insert_disabled(self, disable=True):
'''Disables the insert button if disable is True. Enables it if not.'''
if disable: self.insert_btn.config(state="disabled")
else: self.insert_btn.config(state="normal")
def set_duplicate_disabled(self, disable=True):
'''
Disables the duplicate button if disable is True. Enables it if not.
'''
if disable: self.duplicate_btn.config(state="disabled")
else: self.duplicate_btn.config(state="normal")
def set_delete_disabled(self, disable=True):
'''Disables the delete button if disable is True. Enables it if not.'''
if disable: self.delete_btn.config(state="disabled")
else: self.delete_btn.config(state="normal")
def set_delete_all_disabled(self, disable=True):
'''
Disables the delete_all button if disable is True. Enables it if not.
'''
if disable: self.delete_all_btn.config(state="disabled")
else: self.delete_all_btn.config(state="normal")
    def edit_apply(self=None, *, edit_state, undo=True):
        """Apply (or, when undo=True, revert) a recorded array edit.

        Static-style handler: ``self`` is unused; the target widget and
        node are located from edit_state's nodepath/tag_window.  Supported
        edit types: shift_up/shift_down, add/insert/duplicate, delete,
        delete_all.  Raises TypeError for anything else.
        """
        state = edit_state
        edit_type = state.edit_type
        i = state.attr_index
        undo_node = state.undo_node
        redo_node = state.redo_node
        edit_info = state.edit_info
        sel_index = edit_info.get('sel_index', 0)
        w, node = field_widget.FieldWidget.get_widget_and_node(
            nodepath=state.nodepath, tag_window=state.tag_window)
        if edit_type == 'shift_up':
            node[i], node[i - 1] = node[i - 1], node[i]
        elif edit_type == 'shift_down':
            node[i], node[i + 1] = node[i + 1], node[i]
        elif edit_type in ('add', 'insert', 'duplicate'):
            # undo removes the added node; redo re-inserts it
            if undo:
                sel_index = None
                node.pop(i)
            else:
                node.insert(i, redo_node)
        elif edit_type == 'delete':
            # undo restores the removed node; redo removes it again
            if undo:
                node.insert(i, undo_node)
            else:
                sel_index = None
                node.pop(i)
        elif edit_type == 'delete_all':
            if undo:
                node[:] = undo_node
            else:
                del node[:]
                sel_index = None
        else:
            raise TypeError('Unknown edit_state type')
        if w is not None:
            # resync the widget's selection and menu if it still shows
            # the same descriptor this edit was recorded against
            try:
                if w.desc is not state.desc:
                    return
                if sel_index is None:
                    pass
                elif edit_type in ('add', 'insert', 'duplicate', 'delete'):
                    w.sel_index = sel_index
                elif edit_type in ('shift_up', 'shift_down'):
                    # redo moves the selection with the shifted entry
                    w.sel_index = sel_index
                    if undo:
                        pass
                    elif 'down' in edit_type:
                        w.sel_index += 1
                    else:
                        w.sel_index -= 1
                max_index = len(node) - 1
                w.sel_menu.max_index = max_index
                w.options_sane = w.sel_menu.options_menu_sane = False
                # clamp the selection into the valid range and refresh
                if w.sel_index < 0:
                    w.select_option(0, force=True)
                elif w.sel_index > max_index:
                    w.select_option(max_index, force=True)
                else:
                    w.select_option(w.sel_index, force=True)
                w.needs_flushing = False
                w.set_edited()
            except Exception:
                print(format_exc())
def edit_create(self, **kwargs):
# add own stuff
kwargs.setdefault("sel_index", self.sel_index)
field_widget.FieldWidget.edit_create(self, **kwargs)
def shift_entry_up(self):
if not hasattr(self.node, '__len__') or len(self.node) < 2:
return
node = self.node
index = self.sel_index
if index <= 0:
return
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.edit_create(edit_type='shift_up', attr_index=index)
node[index], node[index - 1] = node[index - 1], node[index]
self.sel_index = self.sel_menu.sel_index = index - 1
self.options_sane = self.sel_menu.options_menu_sane = False
self.sel_menu.update_label()
def shift_entry_down(self):
if not hasattr(self.node, '__len__') or len(self.node) < 2:
return
node = self.node
index = self.sel_index
if index >= len(node) - 1:
return
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.edit_create(edit_type='shift_down', attr_index=index)
node[index], node[index + 1] = node[index + 1], node[index]
self.sel_index = self.sel_menu.sel_index = index + 1
self.options_sane = self.sel_menu.options_menu_sane = False
self.sel_menu.update_label()
def add_entry(self):
if not hasattr(self.node, '__len__'):
return
field_max = self.field_max
if field_max is not None and len(self.node) >= field_max:
if self.enforce_max:
return
attr_index = len(self.node)
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.node.append()
self.edit_create(edit_type='add', attr_index=attr_index,
redo_node=self.node[attr_index], sel_index=attr_index)
self.options_sane = self.sel_menu.options_menu_sane = False
self.set_all_buttons_disabled(self.disabled)
self.disable_unusable_buttons()
self.select_option(len(self.node) - 1, True)
def insert_entry(self):
if not hasattr(self.node, '__len__'):
return
field_max = self.field_max
if field_max is not None and len(self.node) >= field_max:
if self.enforce_max:
return
attr_index = self.sel_index = max(self.sel_index, 0)
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.node.insert(attr_index)
self.edit_create(edit_type='insert', attr_index=attr_index,
redo_node=self.node[attr_index], sel_index=attr_index)
self.options_sane = self.sel_menu.options_menu_sane = False
self.set_all_buttons_disabled(self.disabled)
self.disable_unusable_buttons()
self.select_option(attr_index, True) # select the new entry
def duplicate_entry(self):
if not hasattr(self.node, '__len__') or len(self.node) < 1:
return
field_max = self.field_max
if field_max is not None and len(self.node) >= field_max:
if self.enforce_max:
return
self.sel_index = self.sel_menu.sel_index = max(self.sel_index, 0)
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
new_subnode = deepcopy(self.node[self.sel_index])
attr_index = len(self.node)
self.edit_create(edit_type='duplicate', attr_index=attr_index,
redo_node=new_subnode, sel_index=attr_index)
self.node.append(new_subnode)
self.options_sane = self.sel_menu.options_menu_sane = False
self.set_all_buttons_disabled(self.disabled)
self.disable_unusable_buttons()
self.select_option(attr_index, True)
def delete_entry(self):
if not hasattr(self.node, '__len__') or len(self.node) == 0:
return
field_min = self.field_min
if field_min is None:
field_min = 0
if len(self.node) <= field_min:
if self.enforce_min:
return
if not len(self.node):
self.sel_menu.disable()
return
attr_index = max(self.sel_index, 0)
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.edit_create(edit_type='delete', undo_node=self.node[attr_index],
attr_index=attr_index, sel_index=attr_index)
del self.node[attr_index]
attr_index = max(-1, min(len(self.node) - 1, attr_index))
self.options_sane = self.sel_menu.options_menu_sane = False
self.select_option(attr_index, True)
self.set_all_buttons_disabled(self.disabled)
self.disable_unusable_buttons()
def delete_all_entries(self):
if not hasattr(self.node, '__len__') or len(self.node) == 0:
return
field_min = self.field_min
if field_min is None:
field_min = 0
if len(self.node) <= field_min:
if self.enforce_min:
return
if not len(self.node):
self.sel_menu.disable()
return
self.set_edited() # do this first so the TagWindow detects that
# the title needs to be updated with an asterisk
self.edit_create(edit_type='delete_all', undo_node=tuple(self.node[:]))
del self.node[:]
self.options_sane = self.sel_menu.options_menu_sane = False
self.set_all_buttons_disabled(self.disabled)
self.disable_unusable_buttons()
self.select_option(self.sel_index, True)
def set_all_buttons_disabled(self, disable=False):
    """Set every array-manipulation button to tk.DISABLED when
    *disable* is True, otherwise to tk.NORMAL."""
    new_state = tk.DISABLED if disable else tk.NORMAL
    all_buttons = (self.add_btn, self.insert_btn, self.duplicate_btn,
                   self.delete_btn, self.delete_all_btn,
                   self.import_btn, self.export_btn,
                   self.shift_up_btn, self.shift_down_btn)
    for button in all_buttons:
        button.config(state=new_state)
def disable_unusable_buttons(self):
    """Disable any buttons that can't do anything in the current state
    (no node, fixed array size, at the min/max entry count, empty array)."""
    no_node = not hasattr(self.node, '__len__')
    if no_node or len(self.node) < 2:
        # fewer than two entries means nothing can be shifted
        self.set_shift_up_disabled()
        self.set_shift_down_disabled()

    if no_node or (isinstance(self.desc.get('SIZE'), int)
                   and self.enforce_min):
        # an integer SIZE in the descriptor means the entry count is
        # fixed, so no add/insert/duplicate/delete is possible at all
        self.set_add_disabled()
        self.set_insert_disabled()
        self.set_duplicate_disabled()
        self.set_delete_disabled()
        self.set_delete_all_disabled()
        return

    field_max = self.field_max
    field_min = self.field_min
    if field_min is None or field_min < 0: field_min = 0
    enforce_min = self.enforce_min or field_min == 0
    enforce_max = self.enforce_max

    if field_max is not None and len(self.node) >= field_max and enforce_max:
        # at the maximum entry count; nothing more may be added
        self.set_add_disabled()
        self.set_insert_disabled()
        self.set_duplicate_disabled()

    if len(self.node) <= field_min and (enforce_min or not self.node):
        # at the minimum entry count; nothing may be removed
        self.set_delete_disabled()
        self.set_delete_all_disabled()

    if not self.node:
        # an empty array has nothing to export or duplicate
        self.set_export_disabled()
        self.set_import_disabled()
        self.set_duplicate_disabled()
def populate(self):
    """(Re)build the single child widget that displays the currently
    selected array entry, destroying stale widgets if a rebuild is needed."""
    node = self.node
    desc = self.desc
    sub_node = None
    sub_desc = desc['SUB_STRUCT']

    if node and self.sel_index in range(len(node)):
        sub_node = node[self.sel_index]
        sub_desc = desc['SUB_STRUCT']
        if hasattr(sub_node, 'desc'):
            # the sub-node carries its own descriptor; prefer it
            sub_desc = sub_node.desc

    if self.content in (None, self):
        self.content = tk.Frame(self, relief="sunken", bd=self.frame_depth,
                                bg=self.default_bg_color)

    self.sel_menu.default_text = sub_desc.get(
        'GUI_NAME', sub_desc.get('NAME', ""))
    self.sel_menu.update_label()
    self.disable_unusable_buttons()

    rebuild = not bool(self.f_widgets)
    if hasattr(node, '__len__') and len(node) == 0:
        # disabling existing widgets
        self.sel_index = -1
        self.sel_menu.max_index = -1
        if self.f_widgets:
            self.unload_child_node_data()
    else:
        # NOTE(review): iterating self.f_widgets yields its keys (widget
        # ids), so getattr(w, "desc", None) is always None here — confirm
        # whether .values() was intended.
        for w in self.f_widgets:
            if getattr(w, "desc", None) != sub_desc:
                rebuild = True
                break

    if rebuild:
        # destroy existing widgets and make new ones
        self.populated = False
        self.f_widget_ids = []
        self.f_widget_ids_map = {}
        self.f_widget_ids_map_inv = {}

        # destroy any child widgets of the content
        for c in list(self.f_widgets.values()):
            c.destroy()

        for w in (self, self.content, self.title, self.title_label,
                  self.controls, self.buttons):
            w.tooltip_string = self.desc.get('TOOLTIP')

        self.display_comment(self.content)

        # pick a widget class for the sub-descriptor and construct it,
        # falling back to a NullFrame if construction fails
        widget_cls = self.widget_picker.get_widget(sub_desc)
        try:
            widget = widget_cls(
                self.content, node=sub_node, parent=node,
                show_title=False, dont_padx_fields=True,
                attr_index=self.sel_index, tag_window=self.tag_window,
                f_widget_parent=self, disabled=self.disabled)
        except Exception:
            print(format_exc())
            widget = data_frame.NullFrame(
                self.content, node=sub_node, parent=node,
                show_title=False, dont_padx_fields=True,
                attr_index=self.sel_index, tag_window=self.tag_window,
                f_widget_parent=self, disabled=self.disabled)

        # track the new widget by its id in both directions
        wid = id(widget)
        self.f_widget_ids.append(wid)
        self.f_widget_ids_map[self.sel_index] = wid
        self.f_widget_ids_map_inv[wid] = self.sel_index

        self.populated = True
        self.build_f_widget_cache()

        # now that the field widgets are created, position them
        if self.show.get():
            self.pose_fields()

    if self.node is None:
        self.set_disabled(True)
    else:
        self.set_children_disabled(not self.node)
def reload(self):
    '''Resupplies the nodes to the widgets which display them.

    Falls back to a full populate() if any child widget reports that
    its descriptor no longer matches the data handed to it.'''
    try:
        node = self.node if self.node else ()
        desc = self.desc
        is_empty = len(node) == 0

        field_max = self.field_max
        field_min = self.field_min
        if field_min is None: field_min = 0

        self.set_all_buttons_disabled(self.disabled)
        self.disable_unusable_buttons()

        if is_empty:
            self.sel_menu.sel_index = -1
            self.sel_menu.max_index = -1
            # if there is no index to select, destroy the content
            if self.sel_index != -1:
                self.sel_index = -1
                self.unload_child_node_data()
        else:
            # rebuild the id<->index maps from scratch below
            self.f_widget_ids_map = {}
            self.f_widget_ids_map_inv = {}
            self.sel_menu.sel_index = self.sel_index
            self.sel_menu.max_index = len(node) - 1

        sub_node = None
        sub_desc = desc['SUB_STRUCT']
        if node and self.sel_index in range(len(node)):
            sub_node = node[self.sel_index]
            sub_desc = desc['SUB_STRUCT']
            if hasattr(sub_node, 'desc'):
                sub_desc = sub_node.desc

        for wid in self.f_widget_ids:
            w = self.f_widgets[wid]
            wid = id(w)

            if node and self.sel_index not in range(len(node)):
                # current selection index is invalid. call select_option
                # to reset it to some valid option. Don't reload though,
                # as we will be either reloading or repopulating below.
                self.select_option(force=True, reload=False)

            self.f_widget_ids_map[self.sel_index] = wid
            self.f_widget_ids_map_inv[wid] = self.sel_index

            # if the descriptors are different, gotta repopulate!
            if w.load_node_data(node, sub_node, self.sel_index, sub_desc):
                self.populate()
                self.apply_style()
                return

            w.reload()
            # import/export only make sense for portable, non-empty data
            if w.desc.get("PORTABLE", True) and self.node:
                self.set_import_disabled(False)
                self.set_export_disabled(False)
            else:
                self.set_import_disabled()
                self.set_export_disabled()

        self.sel_menu.update_label()
        if self.node is None:
            self.set_disabled(True)
        else:
            self.set_children_disabled(not self.node)
    except Exception:
        print(format_exc())
def pose_fields(self):
    """Pack the child widget(s) into the content frame and make sure
    the content frame resizes even when there are no children."""
    # there should only be one wid in here, but for
    # the sake of consistancy we'll loop over them.
    for wid in self.f_widget_ids:
        w = self.f_widgets[wid]

        # by adding a fixed amount of padding, we fix a problem
        # with difficult to predict padding based on nesting
        w.pack(fill='x', side='top', expand=True,
               padx=self.vertical_padx, pady=self.vertical_pady)

    # if there are no children in the content, we need to
    # pack in SOMETHING, update the idletasks, and then
    # destroy that something to resize the content frame
    if not self.f_widgets:
        f = tk.Frame(self.content, width=0, height=0, bd=0)
        f.pack()
        self.content.update_idletasks()
        f.destroy()

    self.content.pack(fill='both', side='top', anchor='nw', expand=True)
def select_option(self, opt_index=None, force=False, reload=True):
    """Make *opt_index* the active array entry.

    Defaults to the current selection when opt_index is None, clamps
    out-of-range indices, flushes pending edits before switching, and
    optionally reloads the displayed widget."""
    node = self.node if self.node else ()
    previous = self.sel_index

    # default to the current selection, clamped to a non-negative index
    chosen = previous if opt_index is None else opt_index
    if chosen < 0:
        chosen = 0

    if chosen == previous and not force:
        return
    if not node:
        chosen = -1
    elif chosen >= len(node):
        chosen = len(node) - 1

    # flush any lingering changes before switching entries
    self.flush()

    self.sel_index = chosen
    self.sel_menu.sel_index = chosen
    self.sel_menu.max_index = len(node) - 1
    if reload:
        self.reload()

    self.sel_menu.update_label()
@property
def visible_field_count(self):
    # array frames only display one item at a time, so exactly one
    # field widget is ever visible regardless of the array's length
    return 1
class DynamicArrayFrame(ArrayFrame):
    """ArrayFrame whose option labels are generated dynamically, either
    through a DYN_NAME_PATH lookup on each sub-node or from the
    sub-nodes' own descriptor names."""

    def __init__(self, *args, **kwargs):
        ArrayFrame.__init__(self, *args, **kwargs)

        self.sel_menu.bind('<FocusIn>', self.flag_sanity_change)
        self.sel_menu.arrow_button.bind('<FocusIn>', self.flag_sanity_change)
        # names resolved through DYN_NAME_PATH can change at any time,
        # so the option list must be treated as volatile
        self.sel_menu.options_volatile = 'DYN_NAME_PATH' in self.desc

    def generate_dynamic_options(self, options, options_to_generate):
        """Fill *options* (index -> name dict) with names resolved
        through the descriptor's DYN_NAME_PATH for every index in
        *options_to_generate*. Lookup failures are silently ignored."""
        node, desc = self.node, self.desc
        dyn_name_path = desc.get('DYN_NAME_PATH')
        if not dyn_name_path:
            return
        try:
            for i in options_to_generate:
                name = str(node[i].get_neighbor(dyn_name_path))
                if name:
                    # only the first line of the resolved name is shown
                    options[i] = name.split('\n')[0]
        except Exception:
            pass

    def generate_options(self, opt_index=None):
        """Build the index -> display name mapping for the array.

        Returns the whole dict when opt_index is None, otherwise the
        single name for that index (None when it has no special name).
        """
        node, desc = self.node, self.desc
        if node is None:
            # BUGFIX: this previously did `return options` before the
            # local `options` dict was defined, raising a NameError.
            if opt_index is None:
                return {}
            return ""

        options = {}
        options_to_generate = range(len(node))
        if opt_index is not None:
            options_to_generate = (
                (opt_index, ) if opt_index in options_to_generate else ())

        if desc.get('DYN_NAME_PATH'):
            try:
                self.generate_dynamic_options(options, options_to_generate)
            except Exception:
                print(format_exc())
        else:
            # sort the options by value(values are integers)
            options.update({i: n for n, i in
                            self.desc.get('NAME_MAP', {}).items()
                            if i not in options})

        # fall back to the sub-node's own descriptor name whenever it
        # differs from the default SUB_STRUCT name
        sub_desc = desc['SUB_STRUCT']
        def_struct_name = sub_desc['NAME']
        if self.use_gui_names and 'GUI_NAME' in sub_desc:
            def_struct_name = sub_desc['GUI_NAME']

        for i in options_to_generate:
            if i in options:
                continue
            sub_node = node[i]
            if not hasattr(sub_node, 'desc'):
                continue
            sub_struct_name = sub_node.desc['NAME']
            if self.use_gui_names and 'GUI_NAME' in sub_node.desc:
                sub_struct_name = sub_node.desc['GUI_NAME']

            if sub_struct_name != def_struct_name:
                options[i] = sub_struct_name

        # prefix every displayed name with its index
        for i, v in options.items():
            options[i] = '%s. %s' % (i, v)

        if opt_index is None:
            self.option_cache = options
            self.options_sane = True
            if self.sel_menu is not None:
                self.sel_menu.options_menu_sane = False
                self.sel_menu.max_index = len(node) - 1
            return options

        return options.get(opt_index, None)

    def flag_sanity_change(self, e=None):
        # mark the cached options stale when the menu takes focus,
        # unless the option list is known to be volatile already
        self.options_sane = self.sel_menu.options_menu_sane = (
            not self.sel_menu.options_volatile)
| StarcoderdataPython |
3317350 | """
Codemonk link: https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/practice-problems/algorithm/monk-and-champions-league/
Monk's favourite game is Football and his favourite club is "Manchester United". Manchester United has qualified for the
Champions League Final which is to be held at the Wembley Stadium in London. So, he decided to go there and watch his
favourite team play. After reaching the stadium, he saw that many people have lined up for the match tickets. He knows
that there are M rows in the stadium with different seating capacities. They may or may not be equal. The price of the
ticket depends on the row. If the row has K(always greater than 0) vacant seats, then the price of the ticket will be K
pounds(units of British Currency). Now, every football fan standing in the line will get a ticket one by one. Given the
seating capacities of different rows, find the maximum possible pounds that the club will gain with the help of the
ticket sales.
Input - Output:
The first line consists of M and N. M denotes the number of seating rows in the stadium
and N denotes the number of football fans waiting in the line to get a ticket for the match.
Next line consists of M space separated integers X[1],X[2],X[3].... X[M] where X[i] denotes
the number of empty seats initially in the ith row.
Print in a single line the maximum pounds the club will gain.
Sample input:
3 4
1 2 4

Sample Output:
11
"""
"""
Simple use of priority queues with heap implementation. Think about it.
Final complexity: O(N*logN)
"""
def add_to_heap(array, i):
    """Sift the element at index *i* up toward the root so the max-heap
    property holds along its ancestor chain (in place).

    The parent of an even index i is i//2 - 1 and of an odd index i//2,
    matching children stored at 2p + 1 and 2p + 2.
    """
    while True:
        parent = (i // 2 - 1) if i % 2 == 0 else (i // 2)
        if parent < 0:
            return
        if array[parent] < array[i]:
            array[parent], array[i] = array[i], array[parent]
        if parent == 0:
            return
        # the recursive original climbed to the parent whether or not a
        # swap happened, so keep climbing unconditionally here too
        i = parent
def create_heap(array):
    """Arrange *array* into a max-heap in place by sifting each element
    up in index order, then return the same list."""
    for index, _unused in enumerate(array):
        add_to_heap(array, index)
    return array
def maintain_heap(array):
    """Restore the max-heap property by sifting the root element down
    until neither child is larger than it (modifies *array* in place)."""
    size = len(array)
    node = 0
    while True:
        largest = node
        left, right = 2 * node + 1, 2 * node + 2
        if left < size and array[left] > array[largest]:
            largest = left
        if right < size and array[right] > array[largest]:
            largest = right
        if largest == node:
            break
        array[node], array[largest] = array[largest], array[node]
        node = largest
# Read the number of rows (m) and fans (n), then the vacant seats per row.
m, n = map(int, input().split())
seats = list(map(int, input().split()))

seats = create_heap(seats)

# Each fan buys a ticket in the row with the most vacant seats (the heap
# root), paying that seat count; the root is decremented and the heap
# property restored before the next fan.
total = 0
for _ in range(n):
    total += seats[0]
    seats[0] -= 1
    maintain_heap(seats)

print(total)
"""
Used the built-in heapq module to achieve the exact same goal with the same implementation. Note that in
the version above, add_to_heap was used instead of heapify to build the priority queue, simply because
implementing heapify was skipped (not that it is harder).
"""
import heapq

m, n = map(int, input().split())
seats = list(map(int, input().split()))
# heapq implements a min-heap, so negate the counts to simulate a max-heap
for i in range(len(seats)):
    seats[i] = -1 * seats[i]
heapq.heapify(seats)

total = 0
for _ in range(n):
    # seats[0] is the negated largest seat count; selling one seat moves
    # the negated count one step toward zero before re-heapifying
    total += seats[0]
    seats[0] += 1
    heapq.heapreplace(seats, seats[0])

# total accumulated negated values, so negate once more for the answer
print(-total)
| StarcoderdataPython |
3358398 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Tensorflow implementation of a LennardJones Potential for a single element.
This is a standalone module by design.
"""
import numpy as np
import tensorflow as tf
from dap.tf.neighborlist import get_neighbors_oneway
from ase.calculators.calculator import Calculator, all_changes
import ase.db
def get_Rij(positions, cell, mask, cutoff_radius):
    """Get distances to neighboring atoms with periodic boundary conditions.

    The way this function works is it tiles space with unit cells to at least fill
    a sphere with a radius of cutoff_radius. That means some atoms will be outside
    the cutoff radius. Those are included in the results. Then we get distances to
    all atoms in the tiled space. This is always the same number for every atom,
    so we have consistent sized arrays.

    This function is specific to the Lennard Jones potential as noted in the
    comments below.

    Args:
      positions: array-like shape=(numatoms, 3)
        Array of cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Array of unit cell vectors in cartesian basis. Each row is a unit cell
        vector.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions
      cutoff_radius: float
        The cutoff_radius we want atoms within.

    Returns:
      A flattened array of distances to all the neighbors.

    Notes:
      One of the distances is equal to 0.0, which corresponds to Rii. This distance
      is problematic for the gradients, which are undefined for these points. I have
      not found a masking strategy to eliminate these points while keeping the
      gradients besides the one used here. This is not an issue with other
      potentials that don't have a 1/r form like this one does.

      This code was adapted from:
      Related: pydoc:pymatgen.core.lattice.Lattice.get_points_in_sphere
    """
    with tf.name_scope("get_Rij"):
        positions = tf.convert_to_tensor(positions)
        cell = tf.convert_to_tensor(cell)
        # cast the mask to the cell dtype so it can multiply distances below
        mask = tf.convert_to_tensor(mask, dtype=cell.dtype)

        with tf.name_scope("get_offsets"):
            # Next we get the reciprocal unit cell, which will be used to compute the
            # unit cell offsets required to tile space inside the sphere.
            inverse_cell = tf.matrix_inverse(cell)
            # fractional coordinates wrapped into the [0, 1) home cell
            fcoords = tf.mod(
                tf.matmul(positions, inverse_cell), tf.ones_like(positions))
            recp_len = tf.norm(inverse_cell, axis=0)
            nmax = cutoff_radius * recp_len
            mins = tf.reduce_min(tf.floor(fcoords - nmax), axis=0)
            maxs = tf.reduce_max(tf.ceil(fcoords + nmax), axis=0)

            # Now we generate a set of cell offsets. We start with the repeats in each
            # unit cell direction.
            arange = tf.range(mins[0], maxs[0])
            brange = tf.range(mins[1], maxs[1])
            crange = tf.range(mins[2], maxs[2])

            # Then we expand them in each dimension
            xhat = tf.constant([1.0, 0.0, 0.0], dtype=inverse_cell.dtype)
            yhat = tf.constant([0.0, 1.0, 0.0], dtype=inverse_cell.dtype)
            zhat = tf.constant([0.0, 0.0, 1.0], dtype=inverse_cell.dtype)

            arange = arange[:, None] * xhat[None, :]
            brange = brange[:, None] * yhat[None, :]
            crange = crange[:, None] * zhat[None, :]

            # And combine them to get an offset vector for each cell
            offsets = (
                arange[:, None, None] + brange[None, :, None] + crange[None, None, :])

            offsets = tf.reshape(offsets, (-1, 3))

            # Now we have a vector of unit cell offsets (offset_index, 3) in the inverse
            # unit cell basis. We convert that to cartesian coordinate offsets here.
            cart_offsets = tf.matmul(offsets, cell)

        # we need to offset each atom coordinate by each offset.
        # This array is (atom_index, offset, 3)
        shifted_cart_coords = positions[:, None] + cart_offsets[None, :]

        # Next, we subtract each position from the array of positions.
        # This leads to (atom_i, atom_j, positionvector, xhat)
        relative_positions = shifted_cart_coords - positions[:, None, None]

        # This is the distance squared. This leads to (atom_i, atom_j, distance2)
        Rij2 = tf.reduce_sum(relative_positions**2, axis=3)

        # We zero out masked distances. This is subtle. We have to zero out parts of
        # two dimensions. First, all the entries in the first dimension which are
        # not atoms must be zeroed, and then, all the entries in the second
        # dimension which aren't atoms have to be zeroed.
        Rij2 *= mask[:, None] * mask[:, None, None]

        # Since we assume the atoms are all the same we can flatten it. It turns out
        # that the array will get flattened anyway because of the boolean mask in
        # the return. This effectively removes elements in some of the subarrays so
        # the shape is no longer constant, causing the array to be flattened.
        Rij2 = tf.reshape(Rij2, [-1])

        # We exclude the self-interaction by only considering atoms with a distance
        # greater than 0. For this potential, it is necessary to do this here to
        # avoid nan's in the gradients.
        #
        # It is not necessary to take the square root here, since we later compute
        # 1/Rij^6. But, this function was originally intended to be used for other
        # potentials where Rij is used directly, so we do that here.
        #
        # We do not mask out the values greater than cutoff_radius here. That is
        # done later in the energy function.
        return tf.sqrt(tf.boolean_mask(Rij2, Rij2 > 0.0))
def energy(positions, cell, mask=None, strain=None):
    """Compute the energy of a Lennard-Jones system.

    Args:
      positions: array-like shape=(numatoms, 3)
        Array of cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Array of unit cell vectors in cartesian basis. Each row is a unit cell
        vector.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions.
      strain: array-like shape=(3, 3)
        Array of strains to compute the energy at.

    Returns: float
      The total energy from the Lennard Jones potential.
    """
    with tf.name_scope("LennardJones"):
        with tf.name_scope("setup"):
            positions = tf.convert_to_tensor(positions)
            cell = tf.convert_to_tensor(cell)

            # default mask: every position is a real atom
            if mask is None:
                mask = tf.ones_like(positions[:, 0])
            mask = tf.convert_to_tensor(mask)

            if strain is None:
                strain = tf.zeros_like(cell)
            strain = tf.convert_to_tensor(strain)

            # apply the strain to both the cell and the positions
            strained_cell = tf.matmul(cell, tf.eye(3, dtype=cell.dtype) + strain)
            strained_positions = tf.matmul(positions,
                                           tf.eye(3, dtype=cell.dtype) + strain)

            # sigma and epsilon are trainable graph variables shared by name
            with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
                sigma = tf.get_variable(
                    "sigma",
                    dtype=cell.dtype,
                    initializer=tf.constant(1.0, dtype=cell.dtype))

            with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
                epsilon = tf.get_variable(
                    "epsilon",
                    dtype=cell.dtype,
                    initializer=tf.constant(1.0, dtype=cell.dtype))

            rc = 3 * sigma

        with tf.name_scope("calculate_energy"):
            # e0 shifts the potential so it is zero at the cutoff rc
            e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

            energy = 0.0
            d = get_Rij(strained_positions, strained_cell, mask, rc)

            neighbor_mask = tf.less_equal(d, tf.ones_like(d) * rc)
            energy -= e0 * tf.reduce_sum(tf.cast(neighbor_mask, e0.dtype))

            c6 = (sigma**2 / tf.boolean_mask(d, neighbor_mask)**2)**3
            c12 = c6**2

            energy += tf.reduce_sum(4 * epsilon * (c12 - c6))

        # get_Rij yields both the i->j and j->i distance for every pair,
        # so each pair was counted twice; halve the total
        return energy / 2.0
def forces(positions, cell, mask=None, strain=None):
    """Return the forces on the atoms, i.e. the negative gradient of the
    Lennard-Jones energy with respect to the positions.

    Args:
      positions: array-like shape=(numatoms, 3)
        Cartesian coordinates of the atoms in the unit cell.
      cell: array-like shape=(3, 3)
        Unit cell vectors in the cartesian basis, one vector per row.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions.
      strain: array-like shape=(3, 3)
        Strains to compute the forces at.

    Returns:
      array: shape=(natoms, 3)
    """
    with tf.name_scope("forces"):
        positions = tf.convert_to_tensor(positions)
        cell = tf.convert_to_tensor(cell)
        # default mask marks every position as a real atom
        mask = tf.convert_to_tensor(
            tf.ones_like(positions[:, 0]) if mask is None else mask)
        if strain is None:
            strain = tf.zeros_like(cell)
        negative_energy = -energy(positions, cell, mask, strain)
        return tf.gradients(negative_energy, positions)[0]
def stress(positions, cell, mask=None, strain=None):
    """Compute the stress.

    Args:
      positions: array-like shape=(numatoms, 3)
        Array of cartesian coordinates of atoms in a unit cell.
      cell: array-like shape=(3, 3)
        Array of unit cell vectors in cartesian basis. Each row is a unit cell
        vector.
      mask: array-like (numatoms,)
        ones for atoms, zero for padded positions
      strain: array-like shape=(3, 3)
        Array of strains to compute the stress at.

    Returns:
      The stress components [sxx, syy, szz, syz, sxz, sxy]
      array: shape=(6,)
    """
    with tf.name_scope("stress"):
        with tf.name_scope("setup"):
            positions = tf.convert_to_tensor(positions)
            cell = tf.convert_to_tensor(cell)

            if mask is None:
                mask = tf.ones_like(positions[:, 0])
            mask = tf.convert_to_tensor(mask)

            if strain is None:
                strain = tf.zeros_like(cell)

        with tf.name_scope("get_stress"):
            volume = tf.abs(tf.matrix_determinant(cell))
            # stress is dE/dstrain normalized by the cell volume
            stress = tf.gradients(energy(positions, cell, mask, strain), strain)[0]
            stress /= volume
            # pick the six components [xx, yy, zz, yz, xz, xy] out of the
            # flattened 3x3 tensor
            return tf.gather(tf.reshape(stress, (9,)), [0, 4, 8, 5, 2, 1])
def energy_batch(POSITIONS,
                 CELLS,
                 MASKS,
                 strain=np.zeros((3, 3), dtype=np.float64)):
    """A batched version of `energy'.

    Args:
      POSITIONS: array-like shape=(batch, maxnumatoms, 3)
        batched array of position arrays. Each position array should be padded
        if there are fewer atoms than maxnatoms.
      CELLS: array-like shape=(batch, 3, 3)
      MASKS: array-like shape=(batch, maxnatoms)
      strain: array-like shape=(3, 3)
        Array of strains to compute the energy at.

    Returns:
      energies: array-like shape=(batch,)
    """
    # evaluate each system in the batch independently and stack the results
    energies = []
    for positions, cell, mask in zip(POSITIONS, CELLS, MASKS):
        energies.append(energy(positions, cell, mask, strain))
    return tf.convert_to_tensor(energies)
def forces_batch(POSITIONS,
                 CELLS,
                 MASKS,
                 strain=np.zeros((3, 3), dtype=np.float64)):
    """A batched version of `forces'.

    Args:
      POSITIONS: array-like shape=(batch, maxnumatoms, 3)
        batched array of position arrays. Each position array should be padded
        if there are fewer atoms than maxnatoms.
      CELLS: array-like shape=(batch, 3, 3)
      MASKS: array-like shape=(batch, maxnatoms)
      strain: array-like shape=(3, 3)
        Array of strains to compute the stress at.

    Returns:
      forces: array-like shape=(batch, maxnatoms, 3)
    """
    # evaluate each system in the batch independently and stack the results
    all_forces = []
    for positions, cell, mask in zip(POSITIONS, CELLS, MASKS):
        all_forces.append(forces(positions, cell, mask, strain))
    return tf.convert_to_tensor(all_forces)
def stress_batch(POSITIONS,
                 CELLS,
                 MASKS,
                 strain=np.zeros((3, 3), dtype=np.float64)):
    """A batched version of `stress'.

    Args:
      POSITIONS: array-like shape=(batch, maxnumatoms, 3)
        batched array of position arrays. Each position array should be padded
        if there are fewer atoms than maxnatoms.
      CELLS: array-like shape=(batch, 3, 3)
      MASKS: array-like shape=(batch, maxnatoms)
      strain: array-like shape=(3, 3)
        Array of strains to compute the stress at.

    Returns:
      stresses: array-like shape=(batch, 6)
    """
    # evaluate each system in the batch independently and stack the results
    stresses = []
    for positions, cell, mask in zip(POSITIONS, CELLS, MASKS):
        stresses.append(stress(positions, cell, mask, strain))
    return tf.convert_to_tensor(stresses)
# * One way list class
class LennardJones(Calculator):
"""A simple Tensorflow driven calculator.
"""
implemented_properties = ["energy", "forces", "stress"]
default_parameters = {"sigma": 1.0, "epsilon": 1.0}
def __init__(self, **kwargs):
    """Build the full TF1 graph (energy, forces, stress) once, up front,
    and keep a session plus feed placeholders for later evaluation."""
    Calculator.__init__(self, **kwargs)
    self.sess = tf.Session()

    # sigma and epsilon are trainable variables, shared by scope name
    # with the standalone energy() function above
    with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
        sigma = tf.get_variable(
            "sigma",
            dtype=tf.float64,
            initializer=tf.constant(self.parameters.sigma, dtype=tf.float64))
    with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
        epsilon = tf.get_variable(
            "epsilon",
            dtype=tf.float64,
            initializer=tf.constant(self.parameters.epsilon, dtype=tf.float64))
    self.sigma = sigma
    self.epsilon = epsilon

    # placeholders fed with atoms.positions / atoms.cell / a strain tensor
    self._positions = tf.placeholder(dtype=tf.float64, shape=(None, 3))
    self._cell = tf.placeholder(dtype=tf.float64, shape=(3, 3))
    self._strain = tf.placeholder(dtype=tf.float64, shape=(3, 3))

    with tf.name_scope("LennardJones"):
        with tf.name_scope("setup"):
            strain_tensor = tf.eye(3, dtype=self._cell.dtype) + self._strain
            strained_cell = tf.matmul(self._cell, strain_tensor)
            strained_positions = tf.matmul(self._positions, strain_tensor)

            sigma = self.sigma
            epsilon = self.epsilon

            rc = 3 * sigma

        with tf.name_scope("calculate_energy"):
            # e0 shifts the potential so it is zero at the cutoff rc
            e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

            _energy = 0.0
            inds, dists, displacements = get_neighbors_oneway(
                strained_positions, strained_cell, rc)

            m = dists < rc
            m.set_shape([None])
            r2 = tf.boolean_mask(dists, m)**2

            c6 = (sigma**2 / r2)**3
            c12 = c6**2
            n = tf.ones_like(r2)
            # no /2 here — the one-way neighbor list presumably yields
            # each pair only once (TODO confirm against neighborlist)
            _energy -= tf.reduce_sum(e0 * n)
            _energy += tf.reduce_sum(4 * epsilon * (c12 - c6))
        self._energy = tf.identity(_energy, name='_energy')

    with tf.name_scope("forces"):
        # forces are the negative position-gradient of the energy
        f = tf.gradients(-self._energy, self._positions)[0]
        self._forces = tf.identity(tf.convert_to_tensor(f), name='_forces')

    with tf.name_scope("stress"):
        with tf.name_scope("get_stress"):
            volume = tf.abs(tf.matrix_determinant(self._cell))
            g = tf.gradients(self._energy, self._strain)
            stress = tf.convert_to_tensor(g[0])
            stress /= volume
            # pick the Voigt components [xx, yy, zz, yz, xz, xy]
            stress = tf.gather(tf.reshape(stress, (9,)), [0, 4, 8, 5, 2, 1])
            self._stress = tf.identity(stress, name='_stress')
def calculate(self,
              atoms=None,
              properties=["energy"],
              system_changes=all_changes):
    """Run the calculator.

    You don't usually call this, it is usually called by methods on the Atoms.
    Fills self.results with "energy", "forces" and "stress".
    """
    Calculator.calculate(self, atoms, properties, system_changes)

    self.sess.run(tf.global_variables_initializer())

    # Fetch all three outputs in ONE sess.run: the energy subgraph is
    # shared by the force and stress gradients, so evaluating them
    # together avoids running the same graph three times with an
    # identical feed (the previous code issued three separate runs).
    feed = {self._positions: atoms.positions,
            self._cell: atoms.cell,
            self._strain: np.zeros_like(atoms.cell)}
    e, f, s = self.sess.run([self._energy, self._forces, self._stress],
                            feed_dict=feed)
    self.results["energy"] = e
    self.results["forces"] = f
    self.results["stress"] = s
def save(self, label):
    """Write the session's graph and variable values out under *label*."""
    tf.train.Saver().save(self.sess, label)
def load(self, label):
    """Restore the graph and variables previously written by `save`.

    Parameters
    ----------
    label: the checkpoint prefix that was passed to `save` (the meta
    graph lives at label + ".meta").
    """
    saver = tf.train.import_meta_graph(label + ".meta")
    # BUGFIX: restore() runs the restore ops itself and returns None, so
    # it must not be wrapped in sess.run(); it also takes the checkpoint
    # prefix, not the ".meta" filename.
    saver.restore(self.sess, label)

    g = tf.get_default_graph()
    # the variables were created inside identically-named variable
    # scopes, so their full names are "sigma/sigma:0" etc.
    self.sigma = g.get_tensor_by_name("sigma/sigma:0")
    self.epsilon = g.get_tensor_by_name("epsilon/epsilon:0")
    print(f'Loaded {self.sigma} and {self.epsilon}')
def train(self, label, dbfile, nepochs=10,
          learning_rate=0.001,
          shuffle=True, percenttest=0.1):
    """Train the potential against the data in a database.

    Parameters
    ----------
    label: string, used for saving the results.
    dbfile: the path to an ase database containing training examples.
    nepochs: int, number of passes over the training data.
    learning_rate: float, step size for the (currently disabled) optimizer.
    shuffle: boolean, if True, shuffle the data.
    percenttest: float, fraction of data to use only for testing

    NOTE(review): this method looks unfinished — the loss/optimizer lines
    are commented out and the loop only evaluates the energy tensor, so no
    parameters are actually updated. `label`, `learning_rate`,
    `known_energies`, `tf_energies` and `test_data` are currently unused.
    """
    with ase.db.connect(dbfile) as db:
        data = [(row.toatoms(), row.energy) for row in db.select()]

    if shuffle:
        import random
        random.shuffle(data)

    # split into training and held-out test sets
    N_train = int(len(data) * (1 - percenttest))
    train_data = data[0:N_train]
    test_data = data[N_train:]

    known_energies = tf.placeholder(tf.float64, None)
    tf_energies = tf.placeholder(tf.float64, None)

    #loss = tf.reduce_mean(tf.square(tf_energies - known_energies))
    #opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    for i in range(nepochs):
        for atoms, ke in train_data:
            atoms.set_calculator(self)
            # fetch the model energy tensor from the calculator's graph
            te = atoms.get_calculator()._energy
            _loss = self.sess.run([te])
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.