max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
30 Days of Code/Python/07 - Day 6 - Let's Review.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 6616351 | # ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/30-review-loop/problem
# Difficulty: Easy
# Max Score: 30
# Language: Python
# ========================
# Solution
# ========================
# For each of N input words, print the characters at even indices and the
# characters at odd indices as two space-separated words.
# Improvement: the original built both words one character at a time with a
# parity flag; string slicing does the same in O(n) and is idiomatic.
N = int(input())
for _ in range(N):
    word = input()
    # word[::2] -> characters at even positions, word[1::2] -> odd positions.
    print(word[::2], word[1::2])
| # ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/30-review-loop/problem
# Difficulty: Easy
# Max Score: 30
# Language: Python
# ========================
# Solution
# ========================
N = int(input())
for _ in range(N):
listChars = [char for char in input()]
word_1 = ""
word_2 = ""
check = True
for i in range(len(listChars)):
if check:
word_1 += listChars[i]
else:
word_2 += listChars[i]
check = not check
print(word_1, word_2)
| en | 0.677178 | # ======================== # Information # ======================== # Direct Link: https://www.hackerrank.com/challenges/30-review-loop/problem # Difficulty: Easy # Max Score: 30 # Language: Python # ======================== # Solution # ======================== | 3.510361 | 4 |
biograph/pymol.py | MachinLeninIC/biograph | 1 | 6616352 | <reponame>MachinLeninIC/biograph
import pymol
from pymol import cmd
from pymol.wizard import Wizard
from chempy import cpv
from pymol.cgo import *
import sys
def makePrimitive(cgo, name):
    """Load a CGO object into PyMOL as *name* without triggering auto-zoom.

    The current auto_zoom setting is saved, disabled for the load, and
    restored afterwards.
    """
    previous_zoom = cmd.get('auto_zoom', quiet=1)
    cmd.set('auto_zoom', 0, quiet=1)
    cmd.load_cgo(cgo, name)
    cmd.set('auto_zoom', previous_zoom, quiet=1)
def point(p):
    """CGO fragment: a small white sphere (radius 0.5) at position *p*.

    *p* is an (x, y, z) triple; coordinates are coerced to float.
    """
    px, py, pz = p
    return [COLOR, 1, 1, 1, SPHERE, float(px), float(py), float(pz), 0.5]
def line(p1, p2):
    """CGO fragment: a white cylinder of radius 0.25 from *p1* to *p2*.

    Both endpoints are (x, y, z) triples; coordinates are coerced to float.
    """
    start = [float(c) for c in p1]
    end = [float(c) for c in p2]
    # CYLINDER takes: start xyz, end xyz, radius, then RGB for each end.
    return [CYLINDER] + start + end + [0.25, 1, 1, 1, 1, 1, 1]
def triangle(corner1, corner2, corner3, normal):
    """CGO fragment drawing one triangle: corner spheres, edge cylinders,
    and a light-grey filled face.

    *normal* is currently unused — the NORMAL directive is commented out,
    matching the original behaviour.
    """
    fragment = []
    # Mark each corner with a sphere.
    for corner in (corner1, corner2, corner3):
        fragment.extend(point(corner))
    # Outline the triangle edges.
    for start, end in ((corner1, corner2), (corner2, corner3), (corner3, corner1)):
        fragment.extend(line(start, end))
    # Filled face as a triangle strip.
    fragment.extend([COLOR, 0.8, 0.8, 0.8])
    fragment.extend([BEGIN, TRIANGLE_STRIP])
    # fragment.append(NORMAL)
    # fragment.extend(normal)
    for corner in (corner1, corner2, corner3, corner1):
        fragment.append(VERTEX)
        fragment.extend(corner)
    fragment.append(END)
    return fragment
def planeFromPoints(point1, point2, point3, facetSize):
    """Build a CGO triangle patch through three points.

    Fix: the original computed the "normal" as the cross product of the two
    *position* vectors (corner1 x corner2), which is not the plane normal.
    The normal of the plane through the three points is the cross product of
    the two edge vectors (point2 - point1) and (point3 - point1).  Note that
    triangle() currently ignores both *normal* and *facetSize*, so this fix
    does not change the rendered output.
    """
    edge_a = cpv.sub(point2, point1)
    edge_b = cpv.sub(point3, point1)
    normal = cpv.cross_product(edge_a, edge_b)
    return triangle(point1, point2, point3, normal)
class PlaneWizard(Wizard):
    """Interactive PyMOL wizard that draws triangular plane facets.

    gudhi_topology() fetches a protein, builds its alpha-complex structure,
    and renders the faces of the "core" tetrahedra as CGO triangles.  A
    panel button lets the user delete all drawn planes.
    """

    def __init__(self):
        Wizard.__init__(self)

        # some attributes to do with picking
        self.pick_count = 0
        self.object_count = 0  # gives each plane CGO object a unique name
        self.object_prefix = "pw"

        # the plane facet size (the 'radius' of the section of plane we show)
        self.facetSize = 5

        # Remember the current selection mode so cleanup() can restore it.
        self.selection_mode = cmd.get_setting_legacy("mouse_selection_mode")
        cmd.set("mouse_selection_mode", 0)  # set selection mode to atomic
        cmd.deselect()

    def draw_triangle(self, point1, point2, point3):
        """Create one CGO triangle through the three points and show it."""
        plane = planeFromPoints(point1, point2, point3, self.facetSize)
        planeName = "plane-%02d" % self.object_count
        self.object_count += 1
        makePrimitive(plane, planeName)
        cmd.show("cgo", "plane*")

    def gudhi_topology(self, pdb_name):
        """Fetch *pdb_name* and draw the faces of its topological core.

        The core is the set of simplices present at the filtration step
        whose Betti numbers are [1, 0, 0]; every tetrahedron face is
        rendered via draw_triangle().
        """
        from biograph.protein import Protein
        # from biograph.structure import Perseus
        from itertools import combinations

        print(pdb_name)
        protein = Protein.fetch(pdb_name, base_path="/tmp")
        # Drop rows with no coordinates before building the structure.
        protein.df = protein.df[~protein.df.coord.isnull()]
        # Keep only alpha-carbon atoms.
        structure = protein.generate_structure(lambda row: row["full_id"][4][0] == "CA")
        b3_step = structure.get_step_for_topology(topology=[1, 0, 0])
        core = protein.structure.get_simplices_by_step(b3_step)
        for i, tetrahedron in enumerate(core):
            # time.sleep(1)
            # Each tetrahedron contributes its four triangular faces.
            for face in combinations(tetrahedron, 3):
                point1 = structure.points[face[0]]
                point2 = structure.points[face[1]]
                point3 = structure.points[face[2]]
                self.draw_triangle(point1, point2, point3)

    def reset(self):
        """Delete wizard-created objects/selections and refresh the UI."""
        cmd.delete(self.object_prefix + "*")
        cmd.delete("sele*")
        cmd.delete("_indicate*")
        cmd.unpick()
        cmd.refresh_wizard()

    def delete_all(self):
        """Delete every drawn plane CGO object."""
        cmd.delete("plane*")

    def cleanup(self):
        """Restore PyMOL state when the wizard is dismissed."""
        cmd.set("mouse_selection_mode", self.selection_mode)  # restore selection mode
        self.reset()
        self.delete_all()

    def get_prompt(self):
        # This wizard shows no prompt text.
        self.prompt = None
        return self.prompt

    def get_panel(self):
        # Wizard panel: a title row plus a "delete all planes" action.
        return [
            [1, 'Plane Wizard', ''],
            [2, 'Delete All Planes', 'cmd.get_wizard().delete_all()'],
        ]
# Running on PyMol
wiz = PlaneWizard()
# add arg
# Expose gudhi_topology as the PyMOL command "delaunay" (takes a PDB id).
cmd.extend("delaunay", wiz.gudhi_topology)
# make this the active wizard
cmd.set_wizard(wiz)
| import pymol
from pymol import cmd
from pymol.wizard import Wizard
from chempy import cpv
from pymol.cgo import *
import sys
def makePrimitive(cgo, name):
az = cmd.get('auto_zoom', quiet=1)
cmd.set('auto_zoom', 0, quiet=1)
cmd.load_cgo(cgo, name)
cmd.set('auto_zoom', az, quiet=1)
def point(p):
x, y, z = p
return [COLOR, 1, 1, 1, SPHERE, float(x), float(y), float(z), 0.5]
def line(p1, p2):
x1, y1, z1 = p1
x2, y2, z2 = p2
return [CYLINDER, float(x1), float(y1), float(z1), float(x2), float(y2), float(z2), 0.25, 1, 1, 1, 1, 1, 1]
def triangle(corner1, corner2, corner3, normal):
planeObj = []
planeObj.extend(point(corner1))
planeObj.extend(point(corner2))
planeObj.extend(point(corner3))
planeObj.extend(line(corner1, corner2))
planeObj.extend(line(corner2, corner3))
planeObj.extend(line(corner3, corner1))
planeObj.extend([COLOR, 0.8, 0.8, 0.8])
planeObj.extend([BEGIN, TRIANGLE_STRIP])
#planeObj.append(NORMAL)
#planeObj.extend(normal)
for corner in [corner1, corner2, corner3, corner1]:
planeObj.append(VERTEX)
planeObj.extend(corner)
planeObj.append(END)
return planeObj
def planeFromPoints(point1, point2, point3, facetSize):
corner1 = point1
corner2 = point2
corner3 = point3
normal = cpv.cross_product(corner1, corner2)
return triangle(corner1, corner2, corner3, normal)
class PlaneWizard(Wizard):
def __init__(self):
Wizard.__init__(self)
# some attributes to do with picking
self.pick_count = 0
self.object_count = 0
self.object_prefix = "pw"
# the plane facet size (the 'radius' of the section of plane we show)
self.facetSize = 5
self.selection_mode = cmd.get_setting_legacy("mouse_selection_mode")
cmd.set("mouse_selection_mode",0) # set selection mode to atomic
cmd.deselect()
def draw_triangle(self, point1, point2, point3):
plane = planeFromPoints(point1, point2, point3, self.facetSize)
planeName = "plane-%02d" % self.object_count
self.object_count += 1
makePrimitive(plane, planeName)
cmd.show("cgo", "plane*")
def gudhi_topology(self, pdb_name):
from biograph.protein import Protein
#from biograph.structure import Perseus
from itertools import combinations
print(pdb_name)
protein = Protein.fetch(pdb_name, base_path="/tmp")
protein.df = protein.df[~protein.df.coord.isnull()]
structure = protein.generate_structure(lambda row: row["full_id"][4][0] == "CA")
b3_step = structure.get_step_for_topology(topology=[1,0,0])
core = protein.structure.get_simplices_by_step(b3_step)
for i, tetrahedron in enumerate(core):
#time.sleep(1)
for face in combinations(tetrahedron, 3):
point1 = structure.points[face[0]]
point2 = structure.points[face[1]]
point3 = structure.points[face[2]]
self.draw_triangle(point1, point2, point3)
def reset(self):
cmd.delete(self.object_prefix + "*")
cmd.delete("sele*")
cmd.delete("_indicate*")
cmd.unpick()
cmd.refresh_wizard()
def delete_all(self):
cmd.delete("plane*")
def cleanup(self):
cmd.set("mouse_selection_mode",self.selection_mode) # restore selection mode
self.reset()
self.delete_all()
def get_prompt(self):
self.prompt = None
return self.prompt
def get_panel(self):
return [
[ 1, 'Plane Wizard',''],
[ 2, 'Delete All Planes' , 'cmd.get_wizard().delete_all()'],
]
# Running on PyMol
wiz = PlaneWizard()
# add arg
cmd.extend("delaunay", wiz.gudhi_topology)
# make this the active wizard
cmd.set_wizard(wiz) | en | 0.661329 | #planeObj.append(NORMAL) #planeObj.extend(normal) # some attributes to do with picking # the plane facet size (the 'radius' of the section of plane we show) # set selection mode to atomic #from biograph.structure import Perseus #time.sleep(1) # restore selection mode # Running on PyMol # add arg # make this the active wizard | 2.18428 | 2 |
api/views/authentication/emailpasswordreset.py | smegurus/smegurus-django | 1 | 6616353 | <reponame>smegurus/smegurus-django
from django.core.management import call_command
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from rest_framework import generics, permissions, status, response, views
from rest_framework.permissions import AllowAny
from api.serializers.authentication import EmailSerializer
from smegurus.settings import env_var
class ActionViewMixin(object):
    """Mixin: validate POSTed data with EmailSerializer, then delegate.

    On valid input the view's action() hook is called with the serializer;
    on invalid input a 400 response carrying the serializer errors is
    returned.
    """

    def post(self, request):
        serializer = EmailSerializer(data=request.data)
        # Guard clause: reject invalid payloads immediately.
        if not serializer.is_valid():
            return response.Response(
                data=serializer.errors,
                status=status.HTTP_400_BAD_REQUEST,
            )
        return self.action(serializer)
class EmailPasswordResetViewSet(ActionViewMixin, views.APIView):
    """Public (unauthenticated) endpoint that triggers a password-reset e-mail."""

    serializer_class = EmailSerializer
    permission_classes = [AllowAny]

    def action(self, serializer):
        """Send the reset e-mail to the validated address and return 200."""
        # Fetch the email that the User inputted.
        target_email = serializer.data['email']
        # Send password reset email via the management command.
        call_command('send_password_reset_email', str(target_email))
        return response.Response(
            data={},
            status=status.HTTP_200_OK,
        )
| from django.core.management import call_command
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from rest_framework import generics, permissions, status, response, views
from rest_framework.permissions import AllowAny
from api.serializers.authentication import EmailSerializer
from smegurus.settings import env_var
class ActionViewMixin(object):
def post(self, request):
serializer = EmailSerializer(data=request.data)
if serializer.is_valid():
return self.action(serializer)
else:
return response.Response(
data=serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
class EmailPasswordResetViewSet(ActionViewMixin, views.APIView):
serializer_class = EmailSerializer
permission_classes = [AllowAny,]
def action(self, serializer):
# Fetch the email that the User inputted.
email = serializer.data['email']
# Send password reset email.
call_command('send_password_reset_email',str(email))
return response.Response(
data={},
status=status.HTTP_200_OK,
) | en | 0.769369 | # Fetch the email that the User inputted. # Send password reset email. | 2.09887 | 2 |
taller_estructuras_de_control/codigo python/ejercicio_14.py | lauralopez00/algoritmos_programacion | 0 | 6616354 | <reponame>lauralopez00/algoritmos_programacion
num_lectura = int(input("numero actual de la lectura: "))
num_lectura_pasada = int(input("numero actual de la lectura pasada: "))
costo_kilovatio = float(input("costo del kilovatio por hora: "))
total = num_lectura - num_lectura_pasada
total_pagar = total * costo_kilovatio
print(f"total a pagar de la eletricidad es de {total_pagar}$") | num_lectura = int(input("numero actual de la lectura: "))
num_lectura_pasada = int(input("numero actual de la lectura pasada: "))
costo_kilovatio = float(input("costo del kilovatio por hora: "))
total = num_lectura - num_lectura_pasada
total_pagar = total * costo_kilovatio
print(f"total a pagar de la eletricidad es de {total_pagar}$") | none | 1 | 3.923432 | 4 | |
__scraping__/ncbi.nlm.nih.gov/main.py | whitmans-max/python-examples | 140 | 6616355 |
#
# https://stackoverflow.com/a/47706195/1832058
#
import requests
import re
from bs4 import BeautifulSoup

# Create a variable with the url
url = 'https://www.ncbi.nlm.nih.gov/protein/EGW15053.1?report=fasta'

# Use requests to get the contents
r = requests.get(url)

# Get the text of the contents
html_content = r.text

# Convert the html content into a beautiful soup object
soup = BeautifulSoup(html_content, 'html.parser')

# Bug fixes vs. the original:
#  * find_all() returns a ResultSet, which has no .children attribute;
#    use find() to get the single matching <div> element.
#  * attrs must be a dict mapping attribute name to value, not a set.
#  * the keyword argument is spelled 'attrs', not 'aatrs'.
div = soup.find('div', attrs={'class': 'seq gbff'})
if div is not None:
    for each in div.children:
        print(each)
    # Sequence lines live in <span class="ff_line"> elements.
    ff_lines = div.find_all('span', attrs={'class': 'ff_line'})
That's actually easy: the result of div = soup.find_all('div', attrs={'class', 'seq gbff'}) contains the unique identifier for each page I want to access, so I just have to substitute that id into each URL.
url = 'https://www.ncbi.nlm.nih.gov/sviewer/viewer.fcgi?id=344258949&db=protein&report=fasta&extrafeat=0&fmt_mask=0&retmode=html&withmarkup=on&tool=portal&log$=seqview&maxdownloadsize=1000000'
I checked that the URL needs only three arguments to fetch the data: id=344258949&report=fasta&retmode=text
|
#
# https://stackoverflow.com/a/47706195/1832058
#
import requests
import re
from bs4 import BeautifulSoup
# Create a variable with the url
url = 'https://www.ncbi.nlm.nih.gov/protein/EGW15053.1?report=fasta'
# Use requests to get the contents
r = requests.get(url)
# Get the text of the contents
html_content = r.text
# Convert the html content into a beautiful soup object
soup = BeautifulSoup(html_content, 'html.parser')
div = soup.find_all('div', attrs={'class', 'seq gbff'})
for each in div.children:
print(each)
soup.find_all('span', aatrs={'class', 'ff_line'})
That's actually easy for me. div = soup.find_all('div', attrs={'class', 'seq gbff'}) contains the unique value for each page I want to access, just have to replace the id in each url.
url = 'https://www.ncbi.nlm.nih.gov/sviewer/viewer.fcgi?id=344258949&db=protein&report=fasta&extrafeat=0&fmt_mask=0&retmode=html&withmarkup=on&tool=portal&log$=seqview&maxdownloadsize=1000000'
I checked url need only three arguments to get data id=344258949&report=fasta&retmode=text
| en | 0.683557 | # # https://stackoverflow.com/a/47706195/1832058 # # Create a variable with the url # Use requests to get the contents # Get the text of the contents # Convert the html content into a beautiful soup object | 2.978927 | 3 |
scvae/data/sparse.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | 46 | 6616356 | # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
import scipy.sparse
class SparseRowMatrix(scipy.sparse.csr_matrix):
    """CSR matrix whose whole-matrix statistics count implicit zeros.

    ``size`` reports the full number of cells (rows * columns), and
    ``mean``/``var``/``std`` over the whole matrix are computed from the
    stored data without densifying.
    """

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        super().__init__(arg1, shape=shape, dtype=dtype, copy=copy)

    @property
    def size(self):
        """Total number of cells, including implicit zeros."""
        rows, columns = self.shape
        return rows * columns

    def mean(self, axis=None):
        """Mean along *axis*, or over every cell when *axis* is None."""
        if axis is not None:
            return super().mean(axis)
        # Integer data would otherwise yield an integer dtype result;
        # promote to float64 as numpy's own mean does.
        result_dtype = self.dtype.type
        if numpy.issubdtype(result_dtype, numpy.integer):
            result_dtype = numpy.float64
        overall_mean = self.data.sum() / self.size
        return overall_mean.astype(result_dtype)

    def std(self, axis=None, ddof=0):
        """Standard deviation: the square root of var()."""
        return numpy.sqrt(self.var(axis=axis, ddof=ddof))

    def var(self, axis=None, ddof=0):
        """Variance via E[X^2] - E[X]^2, with optional *ddof* correction."""
        variance = self.power(2).mean(axis) - numpy.power(self.mean(axis), 2)
        if ddof > 0:
            cell_count = numpy.prod(self.shape)
            variance = variance * cell_count / (cell_count - ddof)
        return variance
def sparsity(a, tolerance=1e-3, batch_size=None):
    """Fraction of entries of *a* whose value is below *tolerance*.

    Works on dense numpy arrays and scipy sparse matrices.  When
    *batch_size* is given, rows are processed in batches to bound memory
    use.
    """
    def above_threshold(chunk):
        # Count the entries considered non-zero (>= tolerance).
        return (chunk >= tolerance).sum()

    if scipy.sparse.issparse(a):
        total_cells = numpy.prod(a.shape)
    else:
        total_cells = a.size

    if batch_size:
        nonzero_total = sum(
            above_threshold(a[start:start + batch_size])
            for start in range(0, a.shape[0], batch_size)
        )
    else:
        nonzero_total = above_threshold(a)

    return 1 - nonzero_total / total_cells
| # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
import scipy.sparse
class SparseRowMatrix(scipy.sparse.csr_matrix):
def __init__(self, arg1, shape=None, dtype=None, copy=False):
super().__init__(arg1, shape=shape, dtype=dtype, copy=copy)
@property
def size(self):
return self.shape[0] * self.shape[1]
def mean(self, axis=None):
if axis is not None:
return super().mean(axis)
dtype = self.dtype.type
if numpy.issubdtype(dtype, numpy.integer):
dtype = numpy.float64
self_sum = self.data.sum()
self_mean = self_sum / self.size
self_mean = self_mean.astype(dtype)
return self_mean
def std(self, axis=None, ddof=0):
return numpy.sqrt(self.var(axis=axis, ddof=ddof))
def var(self, axis=None, ddof=0):
self_squared_mean = self.power(2).mean(axis)
self_mean_squared = numpy.power(self.mean(axis), 2)
var = self_squared_mean - self_mean_squared
if ddof > 0:
size = numpy.prod(self.shape)
var = var * size / (size - ddof)
return var
def sparsity(a, tolerance=1e-3, batch_size=None):
def count_nonzero_values(b):
return (b >= tolerance).sum()
if scipy.sparse.issparse(a):
size = numpy.prod(a.shape)
else:
size = a.size
if batch_size:
number_of_rows = a.shape[0]
nonzero_count = 0
for i in range(0, number_of_rows, batch_size):
nonzero_count += count_nonzero_values(a[i:i+batch_size])
else:
nonzero_count = count_nonzero_values(a)
a_sparsity = 1 - nonzero_count / size
return a_sparsity
| en | 0.772384 | # ======================================================================== # # # Copyright (c) 2017 - 2020 scVAE authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ======================================================================== # | 2.465096 | 2 |
avc_analyser.py | mdelhoneux/avc_analyser | 0 | 6616357 | from optparse import OptionParser
from uuparser.src import utils
import os, copy
from avc.src.options_manager import OptionsManager
from avc.src.utils import (collect_avc_info,
dump_all_vecs,
train_word2vec,
collect_finite_verb_info,
write_avcs)
from avc.src.task_manager import TaskManager
import config
def run(treebank, i, options, n_tot, all_results={}):
    """Run the selected pipeline stage for one treebank.

    Exactly one stage executes per call, chosen from the mutually
    exclusive option flags: gold-AVC creation, vector dumping, word2vec
    training, or task prediction/evaluation.

    NOTE(review): this is Python 2 code (print statements).  The mutable
    default for *all_results* is shared across calls -- callers always
    pass a dict, so it is harmless here, but it is a known pitfall.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; the stage elif-chain is read as nested under
    ``if not options.tasks:`` -- confirm against the original file.
    """
    word_types = options.word_types
    vec_types = options.vec_types
    # empty if we do not check for lemma
    lemma_aux_list = config.lemma_auxiliaries[treebank.iso_id]
    lemma_aux_list = [item.encode('utf-8') for item in lemma_aux_list]
    if not options.tasks:
        # Load CoNLL data up front; test data is optional.
        if treebank.testfile:
            testdata = list(utils.read_conll(treebank.testfile, False,
                                             treebank.iso_id))
        else:
            testdata = None
        traindata = list(utils.read_conll(treebank.trainfile, False,
                                          treebank.iso_id))
        if options.create_avc_gold:
            # Stage: write gold auxiliary-verb-construction (AVC) files.
            if word_types != ['finite_verb']:
                if 'finite_verb' in word_types:
                    # collect_avc_info mutates the data, so keep pristine
                    # copies for the finite-verb pass further down.
                    traindata_copy = copy.deepcopy(traindata)
                    if testdata:
                        testdata_copy = copy.deepcopy(testdata)
                avcs = collect_avc_info(traindata, style=options.style,
                                        lemma_aux_list=lemma_aux_list)
                print "writing to " + treebank.task_train
                write_avcs(avcs, treebank.task_train)
                if testdata:
                    avcs = collect_avc_info(testdata, style=options.style,
                                            lemma_aux_list=lemma_aux_list)
                    print "writing to " + treebank.task_test
                    write_avcs(avcs, treebank.task_test)
            if 'finite_verb' in word_types:
                if len(word_types) > 1:
                    # Restore the pristine copies saved above.
                    traindata = traindata_copy
                    if testdata:
                        testdata = testdata_copy
                finite_verbs = collect_finite_verb_info(traindata)
                print "writing to " + treebank.fv_train
                write_avcs(finite_verbs, treebank.fv_train)
                finite_verbs = collect_finite_verb_info(testdata)
                print "writing to " + treebank.fv_test
                write_avcs(finite_verbs, treebank.fv_test)
        elif options.dump_vecs:
            # Stage: parse the data and dump word/contextual vectors.
            if options.style == 'ud':
                base_model, compos_model = config.models
            elif options.style == 'ms':
                base_model, compos_model = config.transformed_models
            if 'composed' in vec_types:
                # Composed vectors need parser predictions, so run the
                # compositional model on deep copies of the data.
                traindata_copy = copy.deepcopy(traindata)
                testdata_copy = copy.deepcopy(testdata)
                traindata_copy, testdata_copy = utils.parser_process_data(treebank,
                                                                          compos_model,
                                                                          traindata_copy,
                                                                          testdata_copy,
                                                                          predict=True)
                collect_avc_info(traindata_copy, options.style, lemma_aux_list=lemma_aux_list)
                collect_avc_info(testdata_copy, options.style, lemma_aux_list=lemma_aux_list)
                if options.style == 'ud':
                    word_type = 'main_verb'
                else:
                    word_type = 'aux'
                dump_all_vecs(options, traindata_copy, testdata_copy, treebank, 'composed', word_type,
                              options.style)
                vec_types.remove('composed')
            if not (vec_types == []):  # any left
                traindata, testdata = utils.parser_process_data(treebank,
                                                                base_model,
                                                                traindata,
                                                                testdata)
                collect_avc_info(traindata, options.style, lemma_aux_list=lemma_aux_list)
                collect_avc_info(testdata, options.style, lemma_aux_list=lemma_aux_list)
                # TODO: this could also be done in parallel potentially
                for vec_type in vec_types:
                    for word_type in word_types:
                        if word_type == 'finite_verb' or word_type == 'punct':
                            collect_finite_verb_info(traindata)
                            collect_finite_verb_info(testdata)
                        dump_all_vecs(options, traindata, testdata, treebank, vec_type, word_type,
                                      options.style)
        elif options.train_word2vec:
            # Stage: train word2vec embeddings on the training data.
            train_word2vec(traindata, treebank)
        elif options.predict or options.evaluate:
            # Stage: run the probing tasks and/or evaluate their results.
            # NOTE(review): `om` is read as a module-level global, set in
            # the __main__ block below.
            tm = TaskManager(treebank, options, i, om.task_list, vec_types, word_types)
            print "Working on %s" % treebank.iso_id
            results = {}
            if options.predict and options.parallel:
                from joblib import Parallel, delayed
                import multiprocessing
                num_cores = multiprocessing.cpu_count()
                results = Parallel(n_jobs=num_cores)(delayed(tm.run)(results,
                                                                     vec_type,
                                                                     word_type)
                                                     for vec_type in options.vec_types
                                                     for word_type in options.word_types)
            else:
                for vec_type in options.vec_types:
                    for word_type in options.word_types:
                        tm.run(results, vec_type, word_type)
            if options.evaluate:
                tm.write_results(results)
if __name__ == '__main__':
    parser = OptionParser()
    # maybe some of these should also to the config file
    parser.add_option("--outdir", type="string", dest="output", default="taskEXP")
    parser.add_option("--task_prediction_output", type="string",
                      dest="taskpred_out", default="taskpred")
    parser.add_option("--style", type="string", dest="style", default="ud")
    parser.add_option("--wembed_dir", type="string", dest="wembed_dir",
                      default="wembed")
    # TODO: these should all be mutually exclusive
    parser.add_option("--create_avc_gold", action="store_true", dest="create_avc_gold", default=False)
    parser.add_option("--predict", action="store_true", dest="predict", default=False)
    parser.add_option("--evaluate", action="store_true", dest="evaluate", default=False)
    parser.add_option("--train_word2vec", action="store_true", dest="train_word2vec", default=False)
    parser.add_option("--parallel", action="store_true", dest="parallel", default=False)
    parser.add_option("--test_tasks", dest="tasks", default=None)
    parser.add_option("--classifier", dest="classifier", default='mlp')
    parser.add_option("--word_types", dest="word_types", default="main_verb finite_verb")
    parser.add_option("--vec_types", dest="vec_types", default="contextual type")
    parser.add_option("--dump_vecs", action="store_true", dest="dump_vecs", default=False)
    parser.add_option("--results-file", type="string", dest="results_file",
                      default="res.csv")
    parser.add_option("--datadir", dest="datadir", help="UD Dir -obligatory if\
 using include", default=None)
    parser.add_option("--n_seed", type="int", dest="n_seed", default=1)
    parser.add_option("--include", dest="include", default=None,
                      help="The languages to be run if using UD - None\
 by default - if None - need to specify dev,train,test.\
 \n Used in combination with multiling: trains a common \
 parser for all languages. Otherwise, train monolingual \
 parsers for each")
    (options, args) = parser.parse_args()
    # NOTE(review): `om` is also read as a global inside run() for the
    # predict/evaluate stage.
    om = OptionsManager(options)
    n_tot = len(om.languages) - 1
    all_results = {}
    # Process each selected treebank independently.
    for i, treebank in enumerate(om.languages):
        run(treebank, i, options, n_tot, all_results)
| from optparse import OptionParser
from uuparser.src import utils
import os, copy
from avc.src.options_manager import OptionsManager
from avc.src.utils import (collect_avc_info,
dump_all_vecs,
train_word2vec,
collect_finite_verb_info,
write_avcs)
from avc.src.task_manager import TaskManager
import config
def run(treebank, i, options, n_tot, all_results={}):
word_types = options.word_types
vec_types = options.vec_types
#empty if we do not check for lemma
lemma_aux_list = config.lemma_auxiliaries[treebank.iso_id]
lemma_aux_list = [item.encode('utf-8') for item in lemma_aux_list]
if not options.tasks:
if treebank.testfile:
testdata = list(utils.read_conll(treebank.testfile, False,
treebank.iso_id))
else:
testdata= None
traindata = list(utils.read_conll(treebank.trainfile, False,
treebank.iso_id))
if options.create_avc_gold:
if word_types != ['finite_verb']:
if 'finite_verb' in word_types:
traindata_copy = copy.deepcopy(traindata)
if testdata:
testdata_copy = copy.deepcopy(testdata)
avcs = collect_avc_info(traindata, style=options.style,
lemma_aux_list=lemma_aux_list)
print "writing to " + treebank.task_train
write_avcs(avcs, treebank.task_train)
if testdata:
avcs = collect_avc_info(testdata, style=options.style,
lemma_aux_list=lemma_aux_list)
print "writing to " + treebank.task_test
write_avcs(avcs, treebank.task_test)
if 'finite_verb' in word_types:
if len(word_types) > 1:
traindata = traindata_copy
if testdata:
testdata = testdata_copy
finite_verbs = collect_finite_verb_info(traindata)
print "writing to " + treebank.fv_train
write_avcs(finite_verbs, treebank.fv_train)
finite_verbs = collect_finite_verb_info(testdata)
print "writing to " + treebank.fv_test
write_avcs(finite_verbs, treebank.fv_test)
elif options.dump_vecs:
if options.style == 'ud':
base_model, compos_model = config.models
elif options.style == 'ms':
base_model, compos_model = config.transformed_models
if 'composed' in vec_types:
traindata_copy = copy.deepcopy(traindata)
testdata_copy = copy.deepcopy(testdata)
traindata_copy, testdata_copy = utils.parser_process_data(treebank,
compos_model,
traindata_copy,
testdata_copy,
predict=True)
collect_avc_info(traindata_copy, options.style, lemma_aux_list=lemma_aux_list)
collect_avc_info(testdata_copy, options.style, lemma_aux_list=lemma_aux_list)
if options.style == 'ud':
word_type = 'main_verb'
else:
word_type = 'aux'
dump_all_vecs(options,traindata_copy,testdata_copy,treebank,'composed',word_type,
options.style)
vec_types.remove('composed')
if not (vec_types == []): #any left
traindata, testdata = utils.parser_process_data(treebank,
base_model,
traindata,
testdata)
collect_avc_info(traindata, options.style, lemma_aux_list=lemma_aux_list)
collect_avc_info(testdata, options.style, lemma_aux_list=lemma_aux_list)
#TODO: this could also be done in parallel potentially
for vec_type in vec_types:
for word_type in word_types:
if word_type == 'finite_verb' or word_type =='punct':
collect_finite_verb_info(traindata)
collect_finite_verb_info(testdata)
dump_all_vecs(options,traindata,testdata,treebank,vec_type,word_type,
options.style)
elif options.train_word2vec:
train_word2vec(traindata,treebank)
elif options.predict or options.evaluate:
tm = TaskManager(treebank,options,i,om.task_list,vec_types,word_types)
print "Working on %s"%treebank.iso_id
results = {}
if options.predict and options.parallel:
from joblib import Parallel, delayed
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores)(delayed(tm.run)(results,
vec_type,
word_type)
for vec_type in options.vec_types
for word_type in options.word_types )
else:
for vec_type in options.vec_types:
for word_type in options.word_types:
tm.run(results, vec_type, word_type)
if options.evaluate:
tm.write_results(results)
if __name__ == '__main__':
parser = OptionParser()
#maybe some of these should also to the config file
parser.add_option("--outdir", type="string", dest="output", default="taskEXP")
parser.add_option("--task_prediction_output", type="string",
dest="taskpred_out", default="taskpred")
parser.add_option("--style", type="string", dest="style", default="ud")
parser.add_option("--wembed_dir", type="string", dest="wembed_dir",
default="wembed")
#TODO: these should all be mutually exclusive
parser.add_option("--create_avc_gold", action="store_true", dest="create_avc_gold", default=False)
parser.add_option("--predict", action="store_true", dest="predict", default=False)
parser.add_option("--evaluate", action="store_true", dest="evaluate", default=False)
parser.add_option("--train_word2vec", action="store_true", dest="train_word2vec", default=False)
parser.add_option("--parallel", action="store_true", dest="parallel", default=False)
parser.add_option("--test_tasks", dest="tasks", default=None)
parser.add_option("--classifier", dest="classifier", default='mlp')
parser.add_option("--word_types", dest="word_types", default="main_verb finite_verb")
parser.add_option("--vec_types", dest="vec_types", default="contextual type")
parser.add_option("--dump_vecs", action="store_true", dest="dump_vecs", default=False)
parser.add_option("--results-file", type="string", dest="results_file",\
default="res.csv")
parser.add_option("--datadir", dest="datadir", help="UD Dir -obligatory if\
using include", default=None)
parser.add_option("--n_seed", type="int", dest="n_seed", default=1)
parser.add_option("--include", dest="include", default =None,\
help="The languages to be run if using UD - None\
by default - if None - need to specify dev,train,test.\
\n Used in combination with multiling: trains a common \
parser for all languages. Otherwise, train monolingual \
parsers for each")
(options, args) = parser.parse_args()
om = OptionsManager(options)
n_tot = len(om.languages) - 1
all_results = {}
for i, treebank in enumerate(om.languages):
run(treebank, i, options, n_tot, all_results)
| en | 0.91246 | #empty if we do not check for lemma #any left #TODO: this could also be done in parallel potentially #maybe some of these should also to the config file #TODO: these should all be mutually exclusive | 2.309661 | 2 |
limbook_api/v1/auth/__init__.py | limvus/limbook-api | 0 | 6616358 | from limbook_api.v1.auth.utils import *
| from limbook_api.v1.auth.utils import *
| none | 1 | 1.066315 | 1 | |
src/cogs/general/__init__.py | re-nft/animetas-discord-bot | 0 | 6616359 | <filename>src/cogs/general/__init__.py
from .general import General
from .wallet import Wallet

# Explicit public API: the cogs this package exposes for registration.
__all__ = ["General", "Wallet"]
| <filename>src/cogs/general/__init__.py
from .general import General
from .wallet import Wallet
__all__ = ["General", "Wallet"]
| none | 1 | 1.170149 | 1 | |
layouter/__init__.py | frnhr/djangocms-layouter | 28 | 6616360 | # -*- coding: utf-8 -*-
__version__ = '1.0.0'
default_app_config = 'layouter.apps.LayouterConfig'
| # -*- coding: utf-8 -*-
__version__ = '1.0.0'
default_app_config = 'layouter.apps.LayouterConfig'
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.098087 | 1 |
src/app/basics/primitives/test_line.py | spartanPAGE/python-trigonometrics | 0 | 6616361 | import unittest
import app.basics.primitives.line as subject
class LineCreationCase(unittest.TestCase):
def test_creation(self):
l = subject.Line(1, 10, 'white', 'thick')
self.assertEqual(l.starting_position, 1)
self.assertEqual(l.ending_position, 10)
self.assertEqual(l.color, 'white')
self.assertEqual(l.thickness, 'thick')
| import unittest
import app.basics.primitives.line as subject
class LineCreationCase(unittest.TestCase):
def test_creation(self):
l = subject.Line(1, 10, 'white', 'thick')
self.assertEqual(l.starting_position, 1)
self.assertEqual(l.ending_position, 10)
self.assertEqual(l.color, 'white')
self.assertEqual(l.thickness, 'thick')
| none | 1 | 3.26906 | 3 | |
frequency.py | animesh-chouhan/ledstrip-sound-reactive | 0 | 6616362 | <reponame>animesh-chouhan/ledstrip-sound-reactive<gh_stars>0
import pyaudio
import numpy as np
CHUNK = 512
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 40
def get_color(freq):
if freq > 0 and freq <= 300:
# return "#800080"
return "\u001b[44m" + str(freq)
elif freq > 300 and freq <= 1500:
# return "#00FFFF"
return "\u001b[46m" + str(freq)
elif freq > 1500 and freq <= 5000:
# return "#98FF98"
return "\u001b[42m" + str(freq)
elif freq > 5000 and freq <= 19000:
# return "#FF0090"
return "\u001b[45m" + str(freq)
elif freq > 19000:
# return "#E3242B"
return "\u001b[41m" + str(freq)
def get_peak_frequency(data):
data = data * np.hanning(len(data)) # smooth the FFT by windowing data
fft = abs(np.fft.fft(data).real)
fft = fft[:int(len(fft)/2)]
fft_freq = np.fft.fftfreq(data.size, 1.0/RATE)
fft_freq = fft_freq[:int(len(fft))]
# focus on only the positive frequencies
pos_mask = np.where(fft_freq > 0)
fft_freq = fft_freq[pos_mask]
peak_freq = fft_freq[fft[pos_mask].argmax()]
# print(f"peak frequency: {peak_freq} Hz")
return int(peak_freq * 2)
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK)
print("Started Listening")
while True:
try:
raw_data = stream.read(CHUNK)
data = np.frombuffer(raw_data, dtype=np.int16)
freq = get_peak_frequency(data)
print(get_color(freq))
except KeyboardInterrupt:
print("\nStopped")
stream.stop_stream()
stream.close()
p.terminate()
| import pyaudio
import numpy as np
CHUNK = 512
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 40
def get_color(freq):
if freq > 0 and freq <= 300:
# return "#800080"
return "\u001b[44m" + str(freq)
elif freq > 300 and freq <= 1500:
# return "#00FFFF"
return "\u001b[46m" + str(freq)
elif freq > 1500 and freq <= 5000:
# return "#98FF98"
return "\u001b[42m" + str(freq)
elif freq > 5000 and freq <= 19000:
# return "#FF0090"
return "\u001b[45m" + str(freq)
elif freq > 19000:
# return "#E3242B"
return "\u001b[41m" + str(freq)
def get_peak_frequency(data):
data = data * np.hanning(len(data)) # smooth the FFT by windowing data
fft = abs(np.fft.fft(data).real)
fft = fft[:int(len(fft)/2)]
fft_freq = np.fft.fftfreq(data.size, 1.0/RATE)
fft_freq = fft_freq[:int(len(fft))]
# focus on only the positive frequencies
pos_mask = np.where(fft_freq > 0)
fft_freq = fft_freq[pos_mask]
peak_freq = fft_freq[fft[pos_mask].argmax()]
# print(f"peak frequency: {peak_freq} Hz")
return int(peak_freq * 2)
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK)
print("Started Listening")
while True:
try:
raw_data = stream.read(CHUNK)
data = np.frombuffer(raw_data, dtype=np.int16)
freq = get_peak_frequency(data)
print(get_color(freq))
except KeyboardInterrupt:
print("\nStopped")
stream.stop_stream()
stream.close()
p.terminate() | en | 0.709126 | # return "#800080" # return "#00FFFF" # return "#98FF98" # return "#FF0090" # return "#E3242B" # smooth the FFT by windowing data # focus on only the positive frequencies # print(f"peak frequency: {peak_freq} Hz") | 2.706493 | 3 |
cloudmesh_cmd3/plugins/cm_shell_usage.py | JulienPalard/cloudmesh | 0 | 6616363 | <reponame>JulienPalard/cloudmesh<filename>cloudmesh_cmd3/plugins/cm_shell_usage.py
# import os
from __future__ import print_function
from cloudmesh_base.logger import LOGGER
from cloudmesh.config.cm_config import cm_config
from cloudmesh.user.cm_user import cm_user
from cloudmesh.cm_mongo import cm_mongo
from cloudmesh_common.tables import row_table
from cmd3.console import Console
from cmd3.shell import command
import json
log = LOGGER(__file__)
class cm_shell_usage:
"""opt_example class"""
_id = "usage" # id for usage in cm_mongo
def activate_cm_shell_usage(self):
self.register_command_topic('cloud', 'usage')
pass
def get_cloud_name(self, cm_user_id):
"""Returns a default cloud name if exists
"""
try:
return self.cm_user.get_defaults(cm_user_id)['nova-cloud']
except KeyError:
log.error('Set OpenStack as a default cloud.'
'"stack" ONLY works with openstack platform.')
return None
@command
def do_usage(self, args, arguments):
"""
::
Usage:
usage [CLOUD] [--start=START] [--end=END] [--format=json]
usage help | -h
Usage data on a current project/tenant
Arguments:
CLOUD Cloud name to see the usage
START start date of usage (YYYY-MM-DD)
END end date of usage (YYYY-MM-DD)
help Prints this message
Options:
-v verbose mode
"""
self.cm_mongo = cm_mongo()
self.cm_config = cm_config()
self.cm_user = cm_user().username
if arguments["help"] or arguments["-h"]:
print (self.do_usage.__doc__)
else:
userid = self.cm_config.username()
self.cm_mongo.activate(userid)
cloudid = arguments["CLOUD"]
if cloudid is None:
cloudid = self.get_cloud_name(userid)
# if an id is still not found print error
if cloudid is None:
Console.error('Please set a default cloud.')
return
usage = self.cm_mongo.usage(cloudid, userid)
# server usages need to be supressed.
# e.g. {u'hours': 24.00000006388889, u'uptime': 1960234,
# u'started_at': u'2014-10-07T23:03:57.000000', u'ended_at': None,
# u'name': u'hrlee-server-2zuvke4wujud', u'tenant_id':
# u'3e6eaf1d913a48f694a7bc0fbb027507', u'instance_id':
# u'2c9d24e0-7453-4f83-84b7-f8c0254a574f', u'state':
# u'active', u'memory_mb': 2048, u'vcpus': 1, u'flavor':
# u'm1.small', u'local_gb': 20}
try:
usage['server_usages'] = str(
len(usage['server_usages'])) + " vms"
except:
pass
if arguments["--format"] is None:
print(
row_table(usage, order=None, labels=["Variable", "Value"]))
elif 'json' in arguments["--format"]:
print(json.dumps(usage, indent=4))
else:
Console.error('Usage is not supported.')
return usage
| # import os
from __future__ import print_function
from cloudmesh_base.logger import LOGGER
from cloudmesh.config.cm_config import cm_config
from cloudmesh.user.cm_user import cm_user
from cloudmesh.cm_mongo import cm_mongo
from cloudmesh_common.tables import row_table
from cmd3.console import Console
from cmd3.shell import command
import json
log = LOGGER(__file__)
class cm_shell_usage:
"""opt_example class"""
_id = "usage" # id for usage in cm_mongo
def activate_cm_shell_usage(self):
self.register_command_topic('cloud', 'usage')
pass
def get_cloud_name(self, cm_user_id):
"""Returns a default cloud name if exists
"""
try:
return self.cm_user.get_defaults(cm_user_id)['nova-cloud']
except KeyError:
log.error('Set OpenStack as a default cloud.'
'"stack" ONLY works with openstack platform.')
return None
@command
def do_usage(self, args, arguments):
"""
::
Usage:
usage [CLOUD] [--start=START] [--end=END] [--format=json]
usage help | -h
Usage data on a current project/tenant
Arguments:
CLOUD Cloud name to see the usage
START start date of usage (YYYY-MM-DD)
END end date of usage (YYYY-MM-DD)
help Prints this message
Options:
-v verbose mode
"""
self.cm_mongo = cm_mongo()
self.cm_config = cm_config()
self.cm_user = cm_user().username
if arguments["help"] or arguments["-h"]:
print (self.do_usage.__doc__)
else:
userid = self.cm_config.username()
self.cm_mongo.activate(userid)
cloudid = arguments["CLOUD"]
if cloudid is None:
cloudid = self.get_cloud_name(userid)
# if an id is still not found print error
if cloudid is None:
Console.error('Please set a default cloud.')
return
usage = self.cm_mongo.usage(cloudid, userid)
# server usages need to be supressed.
# e.g. {u'hours': 24.00000006388889, u'uptime': 1960234,
# u'started_at': u'2014-10-07T23:03:57.000000', u'ended_at': None,
# u'name': u'hrlee-server-2zuvke4wujud', u'tenant_id':
# u'3e6eaf1d913a48f694a7bc0fbb027507', u'instance_id':
# u'2c9d24e0-7453-4f83-84b7-f8c0254a574f', u'state':
# u'active', u'memory_mb': 2048, u'vcpus': 1, u'flavor':
# u'm1.small', u'local_gb': 20}
try:
usage['server_usages'] = str(
len(usage['server_usages'])) + " vms"
except:
pass
if arguments["--format"] is None:
print(
row_table(usage, order=None, labels=["Variable", "Value"]))
elif 'json' in arguments["--format"]:
print(json.dumps(usage, indent=4))
else:
Console.error('Usage is not supported.')
return usage | en | 0.26938 | # import os opt_example class # id for usage in cm_mongo Returns a default cloud name if exists :: Usage: usage [CLOUD] [--start=START] [--end=END] [--format=json] usage help | -h Usage data on a current project/tenant Arguments: CLOUD Cloud name to see the usage START start date of usage (YYYY-MM-DD) END end date of usage (YYYY-MM-DD) help Prints this message Options: -v verbose mode # if an id is still not found print error # server usages need to be supressed. # e.g. {u'hours': 24.00000006388889, u'uptime': 1960234, # u'started_at': u'2014-10-07T23:03:57.000000', u'ended_at': None, # u'name': u'hrlee-server-2zuvke4wujud', u'tenant_id': # u'3e6eaf1d913a48f694a7bc0fbb027507', u'instance_id': # u'2c9d24e0-7453-4f83-84b7-f8c0254a574f', u'state': # u'active', u'memory_mb': 2048, u'vcpus': 1, u'flavor': # u'm1.small', u'local_gb': 20} | 2.115674 | 2 |
chapter_4/4.1.py | frbaroni/ctci | 0 | 6616364 | import unittest
import collections
from mygraph import GraphNode
def hasRoute(origin, destination):
visited = set()
queue = collections.deque()
queue.append(origin)
while len(queue) > 0:
node = queue.popleft()
if node == destination:
return True
visited.add(node)
for child in node.children:
if not child in visited:
queue.append(child)
return False
class Playground(unittest.TestCase):
def test_has_route(self):
nodes = GraphNode.create([
'a -> b',
'b -> c',
'c -> d',
'd -> e',
'e -> f',
'a -> c',
'h -> j'
])
self.assertTrue(hasRoute(nodes['a'], nodes['e']))
self.assertTrue(hasRoute(nodes['c'], nodes['f']))
self.assertTrue(hasRoute(nodes['f'], nodes['b']))
self.assertTrue(hasRoute(nodes['j'], nodes['h']))
self.assertFalse(hasRoute(nodes['h'], nodes['a']))
if __name__ == '__main__':
unittest.main()
| import unittest
import collections
from mygraph import GraphNode
def hasRoute(origin, destination):
visited = set()
queue = collections.deque()
queue.append(origin)
while len(queue) > 0:
node = queue.popleft()
if node == destination:
return True
visited.add(node)
for child in node.children:
if not child in visited:
queue.append(child)
return False
class Playground(unittest.TestCase):
def test_has_route(self):
nodes = GraphNode.create([
'a -> b',
'b -> c',
'c -> d',
'd -> e',
'e -> f',
'a -> c',
'h -> j'
])
self.assertTrue(hasRoute(nodes['a'], nodes['e']))
self.assertTrue(hasRoute(nodes['c'], nodes['f']))
self.assertTrue(hasRoute(nodes['f'], nodes['b']))
self.assertTrue(hasRoute(nodes['j'], nodes['h']))
self.assertFalse(hasRoute(nodes['h'], nodes['a']))
if __name__ == '__main__':
unittest.main()
| none | 1 | 3.574977 | 4 | |
robot-vision/detectYellow.py | CarterFendley/2015-vision | 1 | 6616365 | import cv2
import numpy as np
from networktables import NetworkTable
def threshold_range(im, lo, hi):
unused, t1 = cv2.threshold(im, lo, 255, type=cv2.THRESH_BINARY)
unused, t2 = cv2.threshold(im, hi, 255, type=cv2.THRESH_BINARY_INV)
return cv2.bitwise_and(t1, t2)
'''def findContoursList(inputList, mode, method):
outputList = []
for i in range(0, len(inputList)):
outputList.append(cv2.findContours(inputList[i].copy(), mode, method)[1])
return outputList'''
NetworkTable.setIPAddress("localhost")
NetworkTable.setClientMode()
NetworkTable.initialize()
sd = NetworkTable.getTable("SmartDashboard")
cam = cv2.VideoCapture(0)
running = True
while(running):
#get image from webcam
frame = cam.read()[1]
img = frame
size = img.shape[:2]
#cv2.imshow('img', img)
#copy the image for later use
oimg = img.copy()
#bgr = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
#convert the color to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#split into images for each of those variables
h, s, v = cv2.split(hsv)
#look for a certain range of each variable
h = threshold_range(h, 20, 50)
#cv2.imshow('h', h)
s = threshold_range(s, 57, 255)
#cv2.imshow('s', s)
v = threshold_range(v, 49, 255)
#cv2.imshow('v', v)
#combine each of those images
combined = cv2.bitwise_and(h, cv2.bitwise_and(s,v))
#cv2.imshow('Combined', combined)
img2 = combined.copy()
#find contours
trash, contours, hierarchy = cv2.findContours(combined, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rs = 0
ls = 0
tx = size[1]
ty = size[0]
midy = ty/2
midx = tx/2
bottomContours = []
#Tells how many contours are on each side of the image
#also which contours are on the bottom and which are on the top
for contour in range(0, len(contours)):
c = contours[contour]
#counters for how many contours are on top and bottom
tcounter = 0
bcounter = 0
for co in range(0, len(c)):
#have to go through many layers in the array to get to the raw points
con = c[co]
for cont in range(0, len(con)):
conto = con[cont]
xc = conto[0]
yc = conto[1]
if yc >= midy:
bcounter = bcounter + 1
if xc >= midx:
rs = rs + 1
elif xc < midx:
ls = ls + 1
elif yc < midy:
tcounter = tcounter + 1
#if there are more points on the bottom add the contour to array of contours on the bottom
if bcounter > tcounter:
bottomContours.append(contours[contour])
#cv2.imshow('contouryb4', oimg)
#makes contours into polygons
p = []
for contour in range(0, len(bottomContours)):
t = cv2.approxPolyDP(bottomContours[contour], 100, True)
#print t
p.append(t)
bp = 0
pp = []
#Sorts small groups and shows bigger polygons
#print len(p), 'plength'
for contour in range(0, len(p)):
ap = cv2.arcLength(p[contour], True)
if ap > 400:
pp.append(p[contour])
#booleans to send over network tables
#boolean for yellow on bottom right
rb = False
#boolean for yellow on bottom left
lb = False
#boolean for any blob at all(Carter you can use that one or both of the others, your choice)
isBlob = False
if len(pp) > 0:
isBlob = True
if rs > ls:
rb = True
if ls <= rs:
lb = True
#print pp
#print ap
#x, y, xlen, ylen = cv2.boundingRect(pp)
#print p
sd.putBoolean("yellowDetected", isBlob)
sd.putBoolean("rightGrater", rb)
sd.putBoolean("leftGrater", lb)
'''if cv2.arcLength(p[contour], True) > 300:
nContours.append(p[conto ur])'''
cv2.drawContours(oimg, pp, -1,(0,0,255), 3)
cv2.imshow('contoury', oimg)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
| import cv2
import numpy as np
from networktables import NetworkTable
def threshold_range(im, lo, hi):
unused, t1 = cv2.threshold(im, lo, 255, type=cv2.THRESH_BINARY)
unused, t2 = cv2.threshold(im, hi, 255, type=cv2.THRESH_BINARY_INV)
return cv2.bitwise_and(t1, t2)
'''def findContoursList(inputList, mode, method):
outputList = []
for i in range(0, len(inputList)):
outputList.append(cv2.findContours(inputList[i].copy(), mode, method)[1])
return outputList'''
NetworkTable.setIPAddress("localhost")
NetworkTable.setClientMode()
NetworkTable.initialize()
sd = NetworkTable.getTable("SmartDashboard")
cam = cv2.VideoCapture(0)
running = True
while(running):
#get image from webcam
frame = cam.read()[1]
img = frame
size = img.shape[:2]
#cv2.imshow('img', img)
#copy the image for later use
oimg = img.copy()
#bgr = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
#convert the color to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#split into images for each of those variables
h, s, v = cv2.split(hsv)
#look for a certain range of each variable
h = threshold_range(h, 20, 50)
#cv2.imshow('h', h)
s = threshold_range(s, 57, 255)
#cv2.imshow('s', s)
v = threshold_range(v, 49, 255)
#cv2.imshow('v', v)
#combine each of those images
combined = cv2.bitwise_and(h, cv2.bitwise_and(s,v))
#cv2.imshow('Combined', combined)
img2 = combined.copy()
#find contours
trash, contours, hierarchy = cv2.findContours(combined, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rs = 0
ls = 0
tx = size[1]
ty = size[0]
midy = ty/2
midx = tx/2
bottomContours = []
#Tells how many contours are on each side of the image
#also which contours are on the bottom and which are on the top
for contour in range(0, len(contours)):
c = contours[contour]
#counters for how many contours are on top and bottom
tcounter = 0
bcounter = 0
for co in range(0, len(c)):
#have to go through many layers in the array to get to the raw points
con = c[co]
for cont in range(0, len(con)):
conto = con[cont]
xc = conto[0]
yc = conto[1]
if yc >= midy:
bcounter = bcounter + 1
if xc >= midx:
rs = rs + 1
elif xc < midx:
ls = ls + 1
elif yc < midy:
tcounter = tcounter + 1
#if there are more points on the bottom add the contour to array of contours on the bottom
if bcounter > tcounter:
bottomContours.append(contours[contour])
#cv2.imshow('contouryb4', oimg)
#makes contours into polygons
p = []
for contour in range(0, len(bottomContours)):
t = cv2.approxPolyDP(bottomContours[contour], 100, True)
#print t
p.append(t)
bp = 0
pp = []
#Sorts small groups and shows bigger polygons
#print len(p), 'plength'
for contour in range(0, len(p)):
ap = cv2.arcLength(p[contour], True)
if ap > 400:
pp.append(p[contour])
#booleans to send over network tables
#boolean for yellow on bottom right
rb = False
#boolean for yellow on bottom left
lb = False
#boolean for any blob at all(Carter you can use that one or both of the others, your choice)
isBlob = False
if len(pp) > 0:
isBlob = True
if rs > ls:
rb = True
if ls <= rs:
lb = True
#print pp
#print ap
#x, y, xlen, ylen = cv2.boundingRect(pp)
#print p
sd.putBoolean("yellowDetected", isBlob)
sd.putBoolean("rightGrater", rb)
sd.putBoolean("leftGrater", lb)
'''if cv2.arcLength(p[contour], True) > 300:
nContours.append(p[conto ur])'''
cv2.drawContours(oimg, pp, -1,(0,0,255), 3)
cv2.imshow('contoury', oimg)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
| en | 0.651266 | def findContoursList(inputList, mode, method): outputList = [] for i in range(0, len(inputList)): outputList.append(cv2.findContours(inputList[i].copy(), mode, method)[1]) return outputList #get image from webcam #cv2.imshow('img', img) #copy the image for later use #bgr = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR) #convert the color to hsv #split into images for each of those variables #look for a certain range of each variable #cv2.imshow('h', h) #cv2.imshow('s', s) #cv2.imshow('v', v) #combine each of those images #cv2.imshow('Combined', combined) #find contours #Tells how many contours are on each side of the image #also which contours are on the bottom and which are on the top #counters for how many contours are on top and bottom #have to go through many layers in the array to get to the raw points #if there are more points on the bottom add the contour to array of contours on the bottom #cv2.imshow('contouryb4', oimg) #makes contours into polygons #print t #Sorts small groups and shows bigger polygons #print len(p), 'plength' #booleans to send over network tables #boolean for yellow on bottom right #boolean for yellow on bottom left #boolean for any blob at all(Carter you can use that one or both of the others, your choice) #print pp #print ap #x, y, xlen, ylen = cv2.boundingRect(pp) #print p if cv2.arcLength(p[contour], True) > 300: nContours.append(p[conto ur]) | 2.920313 | 3 |
__main__.py | the-r3dshirt/image_sort | 0 | 6616366 | <reponame>the-r3dshirt/image_sort
import os
import image_sort
if __name__ == "__main__":
data_in = 'C:/Users/Sam/Documents/data_in/'
data_out = 'C:/Users/Sam/Documents/data_out/'
data_unsorted = '{}unsorted/'.format(data_out)
image_sort.setup(data_out, data_unsorted)
structure = {}
for root, sub_dirs, files in os.walk(data_in):
for f in files:
structure = image_sort.build_structure(structure, root, f, data_unsorted)
image_sort.print_structure(structure)
image_sort.output(structure, data_out)
| import os
import image_sort
if __name__ == "__main__":
data_in = 'C:/Users/Sam/Documents/data_in/'
data_out = 'C:/Users/Sam/Documents/data_out/'
data_unsorted = '{}unsorted/'.format(data_out)
image_sort.setup(data_out, data_unsorted)
structure = {}
for root, sub_dirs, files in os.walk(data_in):
for f in files:
structure = image_sort.build_structure(structure, root, f, data_unsorted)
image_sort.print_structure(structure)
image_sort.output(structure, data_out) | none | 1 | 2.950007 | 3 | |
insta/recursion_error.py | osambo/instagram | 0 | 6616367 | import sys
def recursive_function(n, sum):
if n < 1:
return sum
else:
return recursive_function(n-1, sum+n)
print(sys.getrecursionlimit())
sys.setrecursionlimit(1500) | import sys
def recursive_function(n, sum):
if n < 1:
return sum
else:
return recursive_function(n-1, sum+n)
print(sys.getrecursionlimit())
sys.setrecursionlimit(1500) | none | 1 | 3.438574 | 3 | |
src/topic_store/data.py | pet1330/topic_store | 0 | 6616368 | <reponame>pet1330/topic_store<filename>src/topic_store/data.py
# <NAME> (Tunstill) Copyright (c) 2020
# Email: <EMAIL>
# Provides the container for easily handling topic_store data. Exposed by topic_store.__init__
from __future__ import absolute_import, division, print_function
from datetime import datetime
import bson
import genpy
from topic_store.sanitation import rosify_dict, sanitise_dict
from topic_store.utils import ros_time_as_ms, time_as_ms
__all__ = ["TopicStore"]
_session_id = bson.ObjectId()
class TopicStore:
"""Storage container for message data .dict or [] returns python objects, .msgs or () returns ROS messages"""
def __init__(self, data_tree):
if not isinstance(data_tree, dict):
raise ValueError("Data tree must be a dict to construct a TopicStore")
# Ensure passed data tree does not contain ROS msgs
self.__data_tree = sanitise_dict(data_tree)
if "_id" not in self.__data_tree:
self.__data_tree["_id"] = bson.ObjectId()
if "_ts_meta" not in self.__data_tree:
self.__data_tree["_ts_meta"] = dict(session=_session_id, sys_time=time_as_ms(), ros_time=ros_time_as_ms())
# Cache for dict to ROS message parsing
self.__msgs = None
@property
def dict(self):
return self.__data_tree
@property
def msgs(self):
if self.__msgs is None:
self.__msgs = rosify_dict(self.dict)
return self.__msgs
# Expose document ID and meta fields
@property
def id(self):
return self["_id"]
@property
def session(self):
return self["_ts_meta"]["session"]
@property
def sys_time(self):
return self["_ts_meta"]["sys_time"]
@property
def ros_time(self):
return self["_ts_meta"]["ros_time"]
# TopicStore()[item] returns python type
def __getitem__(self, item):
return self.dict[item]
# TopicStore()(item) returns ros type
def __call__(self, item):
return self.msgs[item]
@staticmethod
def __get_size(obj, recurse=True, human_readable=True):
"""Sum size of object & members. Utility function for printing document size, used in __repr__."""
from types import ModuleType, FunctionType
from gc import get_referents
import sys
blacklisted_types = (type, ModuleType, FunctionType)
if isinstance(obj, blacklisted_types):
raise TypeError('getsize() does not take argument of type: ' + str(type(obj)))
size = 0
if recurse:
seen_ids = set()
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, blacklisted_types) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
else:
size = sys.getsizeof(obj)
if not human_readable:
return size
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if size < 1024.0:
break
size /= 1024.0
return "{:.2f}{}B".format(size, unit)
def __recurse_types(self, d=None, depth=1, tabs=1, sep='\n', print_size=False):
"""Used by __repr__ to recurse dict and print types and sizes"""
s = ""
if depth == 1:
s += "TopicStore Object {}: {}".format(
datetime.fromtimestamp(self.dict["_ts_meta"]["sys_time"]).strftime('%d-%m-%Y %H:%M:%S'), "{"
)
if d is None:
d = self.msgs
for k, v in d.items():
s += "{}{}{}{}: ".format(sep, "\t" * depth, k, ("(" + self.__get_size(v) + ")") if print_size else "")
if isinstance(v, dict):
s += "{" + self.__recurse_types(v, depth + tabs, tabs, sep, print_size) + sep + "\t" * depth + "}"
else:
s += "{}".format(type(v))
if depth == 1:
s += sep + "}"
return s
def __repr__(self, print_size=False):
return self.__recurse_types(self.msgs, print_size=print_size)
@staticmethod
def __ros_msg_dict_to_list(ros_msg_dict, return_keys=False, parent=""):
"""Useful for getting all ROS messages as flat list/dict. Only messages with _connection_header are returned."""
if not isinstance(ros_msg_dict, dict):
return
for key, value in ros_msg_dict.items():
if isinstance(value, genpy.Message):
if return_keys:
yield (parent + "." + key), value
else:
yield value
for ret in TopicStore.__ros_msg_dict_to_list(value, return_keys, key if not parent else parent + "." + key):
yield ret
def to_ros_msg_list(self):
# TODO: Cache this operation until self.__data_tree updated
return list(TopicStore.__ros_msg_dict_to_list(self.msgs))
def flatten_ros_msg_dict(self):
# TODO: Cache this operation until self.__data_tree updated
return {k: v for k, v in TopicStore.__ros_msg_dict_to_list(self.msgs, return_keys=True)}
| # <NAME> (Tunstill) Copyright (c) 2020
# Email: <EMAIL>
# Provides the container for easily handling topic_store data. Exposed by topic_store.__init__
from __future__ import absolute_import, division, print_function
from datetime import datetime
import bson
import genpy
from topic_store.sanitation import rosify_dict, sanitise_dict
from topic_store.utils import ros_time_as_ms, time_as_ms
__all__ = ["TopicStore"]
_session_id = bson.ObjectId()
class TopicStore:
"""Storage container for message data .dict or [] returns python objects, .msgs or () returns ROS messages"""
def __init__(self, data_tree):
if not isinstance(data_tree, dict):
raise ValueError("Data tree must be a dict to construct a TopicStore")
# Ensure passed data tree does not contain ROS msgs
self.__data_tree = sanitise_dict(data_tree)
if "_id" not in self.__data_tree:
self.__data_tree["_id"] = bson.ObjectId()
if "_ts_meta" not in self.__data_tree:
self.__data_tree["_ts_meta"] = dict(session=_session_id, sys_time=time_as_ms(), ros_time=ros_time_as_ms())
# Cache for dict to ROS message parsing
self.__msgs = None
@property
def dict(self):
return self.__data_tree
@property
def msgs(self):
if self.__msgs is None:
self.__msgs = rosify_dict(self.dict)
return self.__msgs
# Expose document ID and meta fields
@property
def id(self):
return self["_id"]
@property
def session(self):
return self["_ts_meta"]["session"]
@property
def sys_time(self):
return self["_ts_meta"]["sys_time"]
@property
def ros_time(self):
return self["_ts_meta"]["ros_time"]
# TopicStore()[item] returns python type
def __getitem__(self, item):
return self.dict[item]
# TopicStore()(item) returns ros type
def __call__(self, item):
return self.msgs[item]
@staticmethod
def __get_size(obj, recurse=True, human_readable=True):
"""Sum size of object & members. Utility function for printing document size, used in __repr__."""
from types import ModuleType, FunctionType
from gc import get_referents
import sys
blacklisted_types = (type, ModuleType, FunctionType)
if isinstance(obj, blacklisted_types):
raise TypeError('getsize() does not take argument of type: ' + str(type(obj)))
size = 0
if recurse:
seen_ids = set()
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, blacklisted_types) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
else:
size = sys.getsizeof(obj)
if not human_readable:
return size
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if size < 1024.0:
break
size /= 1024.0
return "{:.2f}{}B".format(size, unit)
def __recurse_types(self, d=None, depth=1, tabs=1, sep='\n', print_size=False):
"""Used by __repr__ to recurse dict and print types and sizes"""
s = ""
if depth == 1:
s += "TopicStore Object {}: {}".format(
datetime.fromtimestamp(self.dict["_ts_meta"]["sys_time"]).strftime('%d-%m-%Y %H:%M:%S'), "{"
)
if d is None:
d = self.msgs
for k, v in d.items():
s += "{}{}{}{}: ".format(sep, "\t" * depth, k, ("(" + self.__get_size(v) + ")") if print_size else "")
if isinstance(v, dict):
s += "{" + self.__recurse_types(v, depth + tabs, tabs, sep, print_size) + sep + "\t" * depth + "}"
else:
s += "{}".format(type(v))
if depth == 1:
s += sep + "}"
return s
def __repr__(self, print_size=False):
return self.__recurse_types(self.msgs, print_size=print_size)
@staticmethod
def __ros_msg_dict_to_list(ros_msg_dict, return_keys=False, parent=""):
"""Useful for getting all ROS messages as flat list/dict. Only messages with _connection_header are returned."""
if not isinstance(ros_msg_dict, dict):
return
for key, value in ros_msg_dict.items():
if isinstance(value, genpy.Message):
if return_keys:
yield (parent + "." + key), value
else:
yield value
for ret in TopicStore.__ros_msg_dict_to_list(value, return_keys, key if not parent else parent + "." + key):
yield ret
def to_ros_msg_list(self):
# TODO: Cache this operation until self.__data_tree updated
return list(TopicStore.__ros_msg_dict_to_list(self.msgs))
def flatten_ros_msg_dict(self):
# TODO: Cache this operation until self.__data_tree updated
return {k: v for k, v in TopicStore.__ros_msg_dict_to_list(self.msgs, return_keys=True)} | en | 0.605944 | # <NAME> (Tunstill) Copyright (c) 2020 # Email: <EMAIL> # Provides the container for easily handling topic_store data. Exposed by topic_store.__init__ Storage container for message data .dict or [] returns python objects, .msgs or () returns ROS messages # Ensure passed data tree does not contain ROS msgs # Cache for dict to ROS message parsing # Expose document ID and meta fields # TopicStore()[item] returns python type # TopicStore()(item) returns ros type Sum size of object & members. Utility function for printing document size, used in __repr__. Used by __repr__ to recurse dict and print types and sizes Useful for getting all ROS messages as flat list/dict. Only messages with _connection_header are returned. # TODO: Cache this operation until self.__data_tree updated # TODO: Cache this operation until self.__data_tree updated | 2.247059 | 2 |
gs/group/member/bounce/queries.py | groupserver/gs.group.member.bounce | 0 | 6616369 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2014 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
import datetime
from pytz import UTC
import sqlalchemy as sa
from zope.sqlalchemy import mark_changed
from gs.database import getTable, getSession
from .audit import SUBSYSTEM, DISABLE
LAST_NUM_DAYS = 60
class BounceQuery(object):
def __init__(self):
self.bounceTable = getTable('bounce')
self.auditEventTable = getTable('audit_event')
def addBounce(self, userId, groupId, siteId, email):
bt = self.bounceTable
i = bt.insert()
now = datetime.datetime.now()
session = getSession()
session.execute(i, params={'date': now, 'user_id': userId,
'group_id': groupId,
'site_id': siteId, 'email': email})
mark_changed(session)
def previousBounceDates(self, email):
""" Checks for the number of bounces from this email address
in the past LAST_NUM_DAYS, or since the address was last
disabled.
"""
now = datetime.datetime.now(UTC)
dateToCheck = (now-datetime.timedelta(LAST_NUM_DAYS))
lastDisabledDate = self.lastDisabledDate(email)
if lastDisabledDate:
lastDisabledDate = lastDisabledDate.replace(tzinfo=UTC)
if lastDisabledDate > dateToCheck:
dateToCheck = lastDisabledDate
daysChecked = (now.date() - dateToCheck.date()).days
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date))
s.append_whereclause(bt.c.email == email)
s.append_whereclause(bt.c.date > dateToCheck)
session = getSession()
r = session.execute(s)
bounces = []
if r.rowcount:
for row in r:
bounceDate = row['date'].strftime("%Y%m%d")
if bounceDate not in bounces:
bounces.append(bounceDate)
return (bounces, daysChecked)
def lastDisabledDate(self, email):
""" Checks for the last time this address was disabled, if ever.
"""
at = self.auditEventTable
# SELECT event_date
# FROM audit_event
# WHERE subsystem = 'groupserver.BounceHandling' AND event_code = '2'
# AND instance_user_id = userId
# AND instance_datum = email
# ORDER BY event_date DESC;
# Opted to leave this out instance_user_id for reasons of efficiency.
s = sa.select([at.c.event_date], order_by=sa.desc(at.c.event_date))
s.append_whereclause(at.c.subsystem == SUBSYSTEM)
s.append_whereclause(at.c.event_code == DISABLE)
s.append_whereclause(at.c.instance_datum == email)
retval = None
session = getSession()
r = session.execute(s).fetchone()
if r:
retval = r['event_date']
return retval
class GroupBounceQuery(object):
def __init__(self):
self.bounceTable = getTable('bounce')
@staticmethod
def map_x(x, items):
retval = {i: x[i] for i in items}
return retval
def get_bounces_for_group(self, siteId, groupId, limit=100):
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date), limit=limit)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.group_id == groupId)
session = getSession()
r = session.execute(s)
m = ['site_id', 'group_id', 'user_id', 'email', 'date']
retval = [self.map_x(x, m) for x in r]
assert type(retval) == list
return retval
def n_bounces_in_last_week(self, siteId, groupId):
bt = self.bounceTable
cols = [sa.func.count()]
s = sa.select(cols)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.group_id == groupId)
now = datetime.datetime.now(UTC)
lastWeek = (now - datetime.timedelta(7))
s.append_whereclause(bt.c.date >= lastWeek)
session = getSession()
r = session.execute(s)
retval = r.scalar()
if retval is None:
retval = 0
assert retval >= 0
return retval
class ProfileBounceQuery(GroupBounceQuery):
'Like the GroupBounceQuery, but for people'
def get_bounces_for_person(self, siteId, userId, limit=100):
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date), limit=limit)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.user_id == userId)
session = getSession()
r = session.execute(s)
m = ['site_id', 'group_id', 'user_id', 'email', 'date']
retval = [self.map_x(x, m) for x in r]
assert type(retval) == list
return retval
def n_bounces_in_last_week(self, siteId, userId):
bt = self.bounceTable
cols = [sa.func.count()]
s = sa.select(cols)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.user_id == userId)
now = datetime.datetime.now(UTC)
lastWeek = (now - datetime.timedelta(7))
s.append_whereclause(bt.c.date >= lastWeek)
session = getSession()
r = session.execute(s)
retval = r.scalar()
if retval is None:
retval = 0
assert retval >= 0
return retval | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2014 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
import datetime
from pytz import UTC
import sqlalchemy as sa
from zope.sqlalchemy import mark_changed
from gs.database import getTable, getSession
from .audit import SUBSYSTEM, DISABLE
LAST_NUM_DAYS = 60
class BounceQuery(object):
def __init__(self):
self.bounceTable = getTable('bounce')
self.auditEventTable = getTable('audit_event')
def addBounce(self, userId, groupId, siteId, email):
bt = self.bounceTable
i = bt.insert()
now = datetime.datetime.now()
session = getSession()
session.execute(i, params={'date': now, 'user_id': userId,
'group_id': groupId,
'site_id': siteId, 'email': email})
mark_changed(session)
def previousBounceDates(self, email):
""" Checks for the number of bounces from this email address
in the past LAST_NUM_DAYS, or since the address was last
disabled.
"""
now = datetime.datetime.now(UTC)
dateToCheck = (now-datetime.timedelta(LAST_NUM_DAYS))
lastDisabledDate = self.lastDisabledDate(email)
if lastDisabledDate:
lastDisabledDate = lastDisabledDate.replace(tzinfo=UTC)
if lastDisabledDate > dateToCheck:
dateToCheck = lastDisabledDate
daysChecked = (now.date() - dateToCheck.date()).days
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date))
s.append_whereclause(bt.c.email == email)
s.append_whereclause(bt.c.date > dateToCheck)
session = getSession()
r = session.execute(s)
bounces = []
if r.rowcount:
for row in r:
bounceDate = row['date'].strftime("%Y%m%d")
if bounceDate not in bounces:
bounces.append(bounceDate)
return (bounces, daysChecked)
def lastDisabledDate(self, email):
""" Checks for the last time this address was disabled, if ever.
"""
at = self.auditEventTable
# SELECT event_date
# FROM audit_event
# WHERE subsystem = 'groupserver.BounceHandling' AND event_code = '2'
# AND instance_user_id = userId
# AND instance_datum = email
# ORDER BY event_date DESC;
# Opted to leave this out instance_user_id for reasons of efficiency.
s = sa.select([at.c.event_date], order_by=sa.desc(at.c.event_date))
s.append_whereclause(at.c.subsystem == SUBSYSTEM)
s.append_whereclause(at.c.event_code == DISABLE)
s.append_whereclause(at.c.instance_datum == email)
retval = None
session = getSession()
r = session.execute(s).fetchone()
if r:
retval = r['event_date']
return retval
class GroupBounceQuery(object):
def __init__(self):
self.bounceTable = getTable('bounce')
@staticmethod
def map_x(x, items):
retval = {i: x[i] for i in items}
return retval
def get_bounces_for_group(self, siteId, groupId, limit=100):
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date), limit=limit)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.group_id == groupId)
session = getSession()
r = session.execute(s)
m = ['site_id', 'group_id', 'user_id', 'email', 'date']
retval = [self.map_x(x, m) for x in r]
assert type(retval) == list
return retval
def n_bounces_in_last_week(self, siteId, groupId):
bt = self.bounceTable
cols = [sa.func.count()]
s = sa.select(cols)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.group_id == groupId)
now = datetime.datetime.now(UTC)
lastWeek = (now - datetime.timedelta(7))
s.append_whereclause(bt.c.date >= lastWeek)
session = getSession()
r = session.execute(s)
retval = r.scalar()
if retval is None:
retval = 0
assert retval >= 0
return retval
class ProfileBounceQuery(GroupBounceQuery):
'Like the GroupBounceQuery, but for people'
def get_bounces_for_person(self, siteId, userId, limit=100):
bt = self.bounceTable
s = bt.select(order_by=sa.desc(bt.c.date), limit=limit)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.user_id == userId)
session = getSession()
r = session.execute(s)
m = ['site_id', 'group_id', 'user_id', 'email', 'date']
retval = [self.map_x(x, m) for x in r]
assert type(retval) == list
return retval
def n_bounces_in_last_week(self, siteId, userId):
bt = self.bounceTable
cols = [sa.func.count()]
s = sa.select(cols)
s.append_whereclause(bt.c.site_id == siteId)
s.append_whereclause(bt.c.user_id == userId)
now = datetime.datetime.now(UTC)
lastWeek = (now - datetime.timedelta(7))
s.append_whereclause(bt.c.date >= lastWeek)
session = getSession()
r = session.execute(s)
retval = r.scalar()
if retval is None:
retval = 0
assert retval >= 0
return retval | en | 0.61512 | # -*- coding: utf-8 -*- ############################################################################ # # Copyright © 2014 OnlineGroups.net and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################ Checks for the number of bounces from this email address in the past LAST_NUM_DAYS, or since the address was last disabled. Checks for the last time this address was disabled, if ever. # SELECT event_date # FROM audit_event # WHERE subsystem = 'groupserver.BounceHandling' AND event_code = '2' # AND instance_user_id = userId # AND instance_datum = email # ORDER BY event_date DESC; # Opted to leave this out instance_user_id for reasons of efficiency. | 2.095399 | 2 |
registration/migrations/0005_auto_20210709_0330.py | Eddyjim/registrations-backend | 0 | 6616370 | # Generated by Django 3.2.5 on 2021-07-09 03:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0004_auto_20210709_0303'),
]
operations = [
migrations.AddField(
model_name='person',
name='birth_date',
field=models.DateField(default=datetime.datetime(2021, 7, 9, 3, 30, 21, 783368)),
),
migrations.AlterField(
model_name='person',
name='first_surname',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='person',
name='second_surname',
field=models.CharField(max_length=255, null=True),
),
]
| # Generated by Django 3.2.5 on 2021-07-09 03:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0004_auto_20210709_0303'),
]
operations = [
migrations.AddField(
model_name='person',
name='birth_date',
field=models.DateField(default=datetime.datetime(2021, 7, 9, 3, 30, 21, 783368)),
),
migrations.AlterField(
model_name='person',
name='first_surname',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='person',
name='second_surname',
field=models.CharField(max_length=255, null=True),
),
]
| en | 0.84063 | # Generated by Django 3.2.5 on 2021-07-09 03:30 | 1.792442 | 2 |
prog1/implementacoes/uri/1021.py | gabrielmbs/Tamburetei | 209 | 6616371 | <filename>prog1/implementacoes/uri/1021.py
# -*- coding: utf-8 -*-
dinheiro = int(float(input()) * 100)
notas = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 25, 10, 5, 1]
resposta = [0,0,0,0,0,0,0,0,0,0,0,0]
for nota in range(len(notas)):
while dinheiro >= notas[nota]:
dinheiro -= notas[nota]
resposta[nota] += 1
print ("NOTAS:")
print ("%d nota(s) de R$ 100.00" % (resposta[0]))
print ("%d nota(s) de R$ 50.00" % (resposta[1]))
print ("%d nota(s) de R$ 20.00" % (resposta[2]))
print ("%d nota(s) de R$ 10.00" % (resposta[3]))
print ("%d nota(s) de R$ 5.00" % (resposta[4]))
print ("%d nota(s) de R$ 2.00" % (resposta[5]))
print ("MOEDAS:")
print ("%d moeda(s) de R$ 1.00" % (resposta[6]))
print ("%d moeda(s) de R$ 0.50" % (resposta[7]))
print ("%d moeda(s) de R$ 0.25" % (resposta[8]))
print ("%d moeda(s) de R$ 0.10" % (resposta[9]))
print ("%d moeda(s) de R$ 0.05" % (resposta[10]))
print ("%d moeda(s) de R$ 0.01" % (resposta[11]))
| <filename>prog1/implementacoes/uri/1021.py
# -*- coding: utf-8 -*-
dinheiro = int(float(input()) * 100)
notas = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 25, 10, 5, 1]
resposta = [0,0,0,0,0,0,0,0,0,0,0,0]
for nota in range(len(notas)):
while dinheiro >= notas[nota]:
dinheiro -= notas[nota]
resposta[nota] += 1
print ("NOTAS:")
print ("%d nota(s) de R$ 100.00" % (resposta[0]))
print ("%d nota(s) de R$ 50.00" % (resposta[1]))
print ("%d nota(s) de R$ 20.00" % (resposta[2]))
print ("%d nota(s) de R$ 10.00" % (resposta[3]))
print ("%d nota(s) de R$ 5.00" % (resposta[4]))
print ("%d nota(s) de R$ 2.00" % (resposta[5]))
print ("MOEDAS:")
print ("%d moeda(s) de R$ 1.00" % (resposta[6]))
print ("%d moeda(s) de R$ 0.50" % (resposta[7]))
print ("%d moeda(s) de R$ 0.25" % (resposta[8]))
print ("%d moeda(s) de R$ 0.10" % (resposta[9]))
print ("%d moeda(s) de R$ 0.05" % (resposta[10]))
print ("%d moeda(s) de R$ 0.01" % (resposta[11]))
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.542115 | 4 |
Tests/Methods/Mesh/Mesh/test_convert_MeshVTK.py | tobsen2code/pyleecan | 95 | 6616372 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.MeshVTK import MeshVTK
from numpy.testing import assert_array_almost_equal
from Tests import TEST_DATA_DIR
import numpy as np
from os.path import join
@pytest.mark.MeshSol
def test_convert_MeshVTK():
"""test convert method of MeshVTK with some vtu file"""
mesh = MeshVTK(
path=join(TEST_DATA_DIR, "StructElmer"), name="case_t0001", format="vtu"
)
meshmat = mesh.convert(meshtype="MeshMat", scale=1)
nodes_pv = mesh.get_node()
nodes = meshmat.get_node()
assert_array_almost_equal(nodes_pv, nodes, decimal=6)
cells_pv, _, _ = mesh.get_cell()
cells, _, _ = meshmat.get_cell()
assert_array_almost_equal(cells_pv["quad9"], cells["quad9"], decimal=1)
if __name__ == "__main__":
test_convert_MeshVTK()
| # -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.MeshVTK import MeshVTK
from numpy.testing import assert_array_almost_equal
from Tests import TEST_DATA_DIR
import numpy as np
from os.path import join
@pytest.mark.MeshSol
def test_convert_MeshVTK():
"""test convert method of MeshVTK with some vtu file"""
mesh = MeshVTK(
path=join(TEST_DATA_DIR, "StructElmer"), name="case_t0001", format="vtu"
)
meshmat = mesh.convert(meshtype="MeshMat", scale=1)
nodes_pv = mesh.get_node()
nodes = meshmat.get_node()
assert_array_almost_equal(nodes_pv, nodes, decimal=6)
cells_pv, _, _ = mesh.get_cell()
cells, _, _ = meshmat.get_cell()
assert_array_almost_equal(cells_pv["quad9"], cells["quad9"], decimal=1)
if __name__ == "__main__":
test_convert_MeshVTK() | en | 0.835774 | # -*- coding: utf-8 -*- test convert method of MeshVTK with some vtu file | 2.366208 | 2 |
x7/lib/annotations.py | gribbg/x7-lib | 0 | 6616373 | """
Annotations to support maketests
"""
import typing
from typing import Callable, Dict, Set, Union
from x7.lib.inspect_more import item_name, item_lookup
__all__ = ['tests', 'tested_by']
ModuleType = type(typing)
all_tests = dict() # type: Dict[Callable,Set[str]]
StrOrCallable = Union[str, Callable, ModuleType]
def tests(arg0: StrOrCallable, *args: StrOrCallable) -> Callable:
"""Annotation to track what function(s) are tested by the wrapped function/class"""
def fixup(func_or_class: Callable):
def best_name(thing):
if isinstance(thing, str):
return item_name(item_lookup(thing))
else:
return item_name(thing)
names = [best_name(arg) for arg in [arg0]+list(args)]
all_tests.setdefault(func_or_class, set()).update(names)
return func_or_class
return fixup
def tested_by(func_or_class: Callable) -> Set[str]:
return all_tests.get(func_or_class, set())
| """
Annotations to support maketests
"""
import typing
from typing import Callable, Dict, Set, Union
from x7.lib.inspect_more import item_name, item_lookup
__all__ = ['tests', 'tested_by']
ModuleType = type(typing)
all_tests = dict() # type: Dict[Callable,Set[str]]
StrOrCallable = Union[str, Callable, ModuleType]
def tests(arg0: StrOrCallable, *args: StrOrCallable) -> Callable:
"""Annotation to track what function(s) are tested by the wrapped function/class"""
def fixup(func_or_class: Callable):
def best_name(thing):
if isinstance(thing, str):
return item_name(item_lookup(thing))
else:
return item_name(thing)
names = [best_name(arg) for arg in [arg0]+list(args)]
all_tests.setdefault(func_or_class, set()).update(names)
return func_or_class
return fixup
def tested_by(func_or_class: Callable) -> Set[str]:
return all_tests.get(func_or_class, set())
| en | 0.73681 | Annotations to support maketests # type: Dict[Callable,Set[str]] Annotation to track what function(s) are tested by the wrapped function/class | 2.712148 | 3 |
bank_class.py | alosoft/bank_app | 0 | 6616374 | """Contains all Class methods and functions and their implementation"""
import random
def account_number():
"""generates account number"""
num = '300126'
for _ in range(7):
num += str(random.randint(0, 9))
return int(num)
def make_dict(string, integer):
"""makes a dictionary of Account Names and Account Numbers"""
return {string: integer}
class Account:
"""
This Class contains Balance and Name with functions like Withdraw, Deposit and Account History
"""
def __init__(self, name, balance=0, total_deposit=0, total_withdrawal=0):
"""Constructor of __init__"""
self.name = name.title()
self.balance = balance
self.records = [f'Default Balance: \t${self.balance}']
self.total_deposit = total_deposit
self.total_withdrawal = total_withdrawal
self.account = account_number()
def __str__(self):
"""returns a string when called"""
return f'Account Name:\t\t{self.name} \nAccount Balance:\t${str(self.balance)} ' \
f'\nAccount History:\t{self.records} \nAccount Number:\t\t{ self.account}'
def __len__(self):
"""returns balance"""
return self.balance
def history(self):
"""returns Account Information"""
return self.records
def print_records(self, history):
"""Prints Account Records"""
line = ' \n'
print(line.join(history) + f' \n\nTotal Deposit: \t\t${str(self.total_deposit)} '
f'\nTotal Withdrawal: \t${str(self.total_withdrawal)} '
f'\nTotal Balance: \t\t${str(self.balance)} ')
def deposit(self, amount):
"""Deposit function"""
self.total_deposit += amount
self.balance += amount
self.records.append(f'Deposited: ${amount}')
return f'Deposited: ${amount}'
def withdraw(self, amount):
"""Withdrawal function"""
if self.balance >= amount:
self.total_withdrawal += amount
self.balance -= amount
self.records.append(f'Withdrew: ${amount}')
return f'Withdrew: ${amount}'
self.records.append(
f'Balance: ${str(self.balance)} '
f'is less than intended Withdrawal Amount: ${amount}')
return f'Invalid command \nBalance: ${str(self.balance)} ' \
f'is less than intended Withdrawal Amount: ${amount}'
| """Contains all Class methods and functions and their implementation"""
import random
def account_number():
"""generates account number"""
num = '300126'
for _ in range(7):
num += str(random.randint(0, 9))
return int(num)
def make_dict(string, integer):
"""makes a dictionary of Account Names and Account Numbers"""
return {string: integer}
class Account:
"""
This Class contains Balance and Name with functions like Withdraw, Deposit and Account History
"""
def __init__(self, name, balance=0, total_deposit=0, total_withdrawal=0):
"""Constructor of __init__"""
self.name = name.title()
self.balance = balance
self.records = [f'Default Balance: \t${self.balance}']
self.total_deposit = total_deposit
self.total_withdrawal = total_withdrawal
self.account = account_number()
def __str__(self):
"""returns a string when called"""
return f'Account Name:\t\t{self.name} \nAccount Balance:\t${str(self.balance)} ' \
f'\nAccount History:\t{self.records} \nAccount Number:\t\t{ self.account}'
def __len__(self):
"""returns balance"""
return self.balance
def history(self):
"""returns Account Information"""
return self.records
def print_records(self, history):
"""Prints Account Records"""
line = ' \n'
print(line.join(history) + f' \n\nTotal Deposit: \t\t${str(self.total_deposit)} '
f'\nTotal Withdrawal: \t${str(self.total_withdrawal)} '
f'\nTotal Balance: \t\t${str(self.balance)} ')
def deposit(self, amount):
"""Deposit function"""
self.total_deposit += amount
self.balance += amount
self.records.append(f'Deposited: ${amount}')
return f'Deposited: ${amount}'
def withdraw(self, amount):
"""Withdrawal function"""
if self.balance >= amount:
self.total_withdrawal += amount
self.balance -= amount
self.records.append(f'Withdrew: ${amount}')
return f'Withdrew: ${amount}'
self.records.append(
f'Balance: ${str(self.balance)} '
f'is less than intended Withdrawal Amount: ${amount}')
return f'Invalid command \nBalance: ${str(self.balance)} ' \
f'is less than intended Withdrawal Amount: ${amount}'
| en | 0.772185 | Contains all Class methods and functions and their implementation generates account number makes a dictionary of Account Names and Account Numbers This Class contains Balance and Name with functions like Withdraw, Deposit and Account History Constructor of __init__ returns a string when called returns balance returns Account Information Prints Account Records Deposit function Withdrawal function | 4.322957 | 4 |
model/architectures/optimizers.py | ratschlab/ncl | 7 | 6616375 | import gin
import tensorflow as tf
Adam_keras = gin.external_configurable(tf.keras.optimizers.Adam, name='Adam')
SGD_keras = gin.external_configurable(tf.keras.optimizers.SGD, name='SGD')
@gin.configurable('LinearWarmupCosineDecay')
class LinearWarmupCosineDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
"""A LearningRateSchedule that uses a Cosine decay schedule."""
def __init__(self,
initial_learning_rate,
warmup_steps,
warm_learning_rate,
decay_steps,
alpha=None,
name=None):
super(LinearWarmupCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.warm_learning_rate = warm_learning_rate
self.decay_steps = decay_steps
self.rate = (warm_learning_rate - initial_learning_rate) / warmup_steps
if not alpha:
self.alpha = initial_learning_rate / warm_learning_rate
else:
self.alpha = alpha
self.name = name
self.cosine_decay = tf.keras.experimental.CosineDecay(self.warm_learning_rate, self.decay_steps, self.alpha)
@tf.function(experimental_relax_shapes=True)
def __call__(self, step):
with tf.name_scope(self.name or "LinearWarmupCosineDecay") as name:
if step < self.warmup_steps:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
rate = tf.cast(self.rate, dtype)
global_step_recomp = tf.cast(step, dtype)
p = rate * global_step_recomp + initial_learning_rate
return p
else:
return self.cosine_decay(step - self.warmup_steps)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"warmup_steps": self.warmup_steps,
"rate": self.rate,
"name": self.name
}
| import gin
import tensorflow as tf
Adam_keras = gin.external_configurable(tf.keras.optimizers.Adam, name='Adam')
SGD_keras = gin.external_configurable(tf.keras.optimizers.SGD, name='SGD')
@gin.configurable('LinearWarmupCosineDecay')
class LinearWarmupCosineDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
"""A LearningRateSchedule that uses a Cosine decay schedule."""
def __init__(self,
initial_learning_rate,
warmup_steps,
warm_learning_rate,
decay_steps,
alpha=None,
name=None):
super(LinearWarmupCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.warm_learning_rate = warm_learning_rate
self.decay_steps = decay_steps
self.rate = (warm_learning_rate - initial_learning_rate) / warmup_steps
if not alpha:
self.alpha = initial_learning_rate / warm_learning_rate
else:
self.alpha = alpha
self.name = name
self.cosine_decay = tf.keras.experimental.CosineDecay(self.warm_learning_rate, self.decay_steps, self.alpha)
@tf.function(experimental_relax_shapes=True)
def __call__(self, step):
with tf.name_scope(self.name or "LinearWarmupCosineDecay") as name:
if step < self.warmup_steps:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
rate = tf.cast(self.rate, dtype)
global_step_recomp = tf.cast(step, dtype)
p = rate * global_step_recomp + initial_learning_rate
return p
else:
return self.cosine_decay(step - self.warmup_steps)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"warmup_steps": self.warmup_steps,
"rate": self.rate,
"name": self.name
}
| en | 0.603092 | A LearningRateSchedule that uses a Cosine decay schedule. | 2.581072 | 3 |
raiden/tests/unit/test_transfer.py | destenson/raiden-network--raiden | 0 | 6616376 | <reponame>destenson/raiden-network--raiden
# -*- coding: utf-8 -*-
import gevent
import pytest
from coincurve import PrivateKey
from raiden.messages import (
Ack,
decode,
DirectTransfer,
Lock,
MediatedTransfer,
Ping,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
)
from raiden.tests.utils.transport import UnreliableTransport
from raiden.tests.utils.messages import (
setup_messages_cb,
make_refund_transfer,
)
from raiden.tests.utils.transport import (
MessageLoggerTransport,
)
from raiden.tests.utils.transfer import (
assert_synched_channels,
channel,
direct_transfer,
transfer,
)
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.factories import (
UNIT_SECRET,
UNIT_HASHLOCK,
make_address,
make_privkey_address,
)
from raiden.utils import (
sha3,
privatekey_to_address,
)
from raiden.raiden_service import create_default_identifier
from raiden.tests.utils.blockchain import wait_until_block
from raiden.network.protocol import (
NODE_NETWORK_REACHABLE,
NODE_NETWORK_UNKNOWN,
NODE_NETWORK_UNREACHABLE,
)
# pylint: disable=too-many-locals,too-many-statements,line-too-long
HASH2 = sha3(b'terribleweathermuchstayinside___')
def get_messages_by_type(messages, type_):
return [
m
for m in messages
if isinstance(m, type_)
]
def assert_ack_for(receiver, message, message_list):
direct_hash = sha3(message.encode() + receiver.raiden.address)
assert any(
ack.echo == direct_hash
for ack in message_list
if isinstance(ack, Ack)
)
def sign_and_send(message, key, address, app):
message.sign(key, address)
message_data = bytes(message.packed().data)
app.raiden.protocol.receive(message_data)
# Give it some time to see if the unknown sender causes an error in the logic
gevent.sleep(3)
class MediatedTransferTestHelper:
def __init__(self, raiden_network, graph):
self.raiden_network = raiden_network
self.graph = graph
self.token_address = graph.token_address
self.nodes = {
app.raiden.address: app
for app in self.raiden_network
}
def get_channel(self, from_, to_):
raiden = self.nodes[from_].raiden
graph = raiden.token_to_channelgraph[self.token_address]
return graph.partneraddress_to_channel[to_]
def get_paths_of_length(self, initiator_address, num_hops):
"""
Search for paths of length=num_of_hops starting from initiator_address
"""
paths_length = self.graph.get_paths_of_length(
initiator_address,
num_hops,
)
assert paths_length, 'path must not be empty'
for path in paths_length:
assert len(path) == num_hops + 1
assert path[0] == initiator_address
return paths_length[0]
def assert_path_in_shortest_paths(self, path, initiator_address, num_hops):
_, _, charlie_address = path
shortest_paths = list(self.graph.get_shortest_paths(
initiator_address,
charlie_address,
))
assert path in shortest_paths
assert min(len(path) for path in shortest_paths) == num_hops + 1
def get_app_from_address(self, address):
for app in self.raiden_network:
if address == app.raiden.address:
return app
return None
@pytest.mark.parametrize('number_of_nodes', [2])
def test_direct_transfer(raiden_network):
app0, app1 = raiden_network # pylint: disable=unbalanced-tuple-unpacking
messages = setup_messages_cb()
graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
balance0 = channel0.balance
balance1 = channel1.balance
assert graph0.token_address == graph1.token_address
assert app1.raiden.address in graph0.partneraddress_to_channel
amount = 10
target = app1.raiden.address
result = app0.raiden.direct_transfer_async(
graph0.token_address,
amount,
target,
identifier=1,
)
assert result.wait(timeout=10)
gevent.sleep(5)
assert_synched_channels(
channel0, balance0 - amount, [],
channel1, balance1 + amount, []
)
decoded_messages = [decode(m) for m in messages]
direct_messages = get_messages_by_type(decoded_messages, DirectTransfer)
assert len(direct_messages) == 1
assert direct_messages[0].transferred_amount == amount
assert_ack_for(
app1,
direct_messages[0],
decoded_messages,
)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_transfer_channels(raiden_network, token_addresses):
""" When the node has no channels it should fail without raising exceptions. """
token_address = token_addresses[0]
app0, app1 = raiden_network
amount = 10
async_result = app0.raiden.mediated_transfer_async(
token_address,
amount,
app1.raiden.address,
identifier=1,
)
assert async_result.wait() is False
@pytest.mark.parametrize('number_of_nodes', [4])
@pytest.mark.parametrize('channels_per_node', [1])
def test_transfer_noroutes(raiden_network, token_addresses):
""" When there are no routes it should fail without raising exceptions. """
# Topology:
# App0 <-> App1 App2 <-> App3
#
app0, _, app2, app3 = raiden_network
token_address = token_addresses[0]
amount = 10
async_result = app0.raiden.mediated_transfer_async(
token_address,
amount,
app2.raiden.address,
identifier=1,
)
assert async_result.wait() is False
async_result = app0.raiden.mediated_transfer_async(
token_address,
amount,
app3.raiden.address,
identifier=1,
)
assert async_result.wait() is False
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('number_of_nodes', [4])
def test_mediated_transfer(raiden_network):
alice_app = raiden_network[0]
graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
token_address = graph.token_address
mt_helper = MediatedTransferTestHelper(raiden_network, graph)
initiator_address = alice_app.raiden.address
path = mt_helper.get_paths_of_length(initiator_address, 2)
mt_helper.assert_path_in_shortest_paths(path, initiator_address, 2)
alice_address, bob_address, charlie_address = path
# channels (alice <-> bob <-> charlie)
channel_ab = mt_helper.get_channel(alice_address, bob_address)
channel_ba = mt_helper.get_channel(bob_address, alice_address)
channel_bc = mt_helper.get_channel(bob_address, charlie_address)
channel_cb = mt_helper.get_channel(charlie_address, bob_address)
initial_balance_ab = channel_ab.balance
initial_balance_ba = channel_ba.balance
initial_balance_bc = channel_bc.balance
initial_balance_cb = channel_cb.balance
amount = 10
result = alice_app.raiden.mediated_transfer_async(
token_address,
amount,
charlie_address,
identifier=1,
)
# The assert is for an in-flight transfer
assert not result.ready()
assert channel_ab.locked == amount
# Cannot assert the intermediary state of the channels since the code is
# concurrently executed.
# assert channel_ba.outstanding == amount
# assert channel_bc.locked == amount
# assert channel_cb.outstanding == amount
assert result.wait(timeout=1)
gevent.sleep(.1) # wait for the other nodes to sync
assert initial_balance_ab - amount == channel_ab.balance
assert initial_balance_ba + amount == channel_ba.balance
assert initial_balance_bc - amount == channel_bc.balance
assert initial_balance_cb + amount == channel_cb.balance
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_direct_transfer_exceeding_distributable(raiden_network, token_addresses, deposit):
    """A direct transfer larger than the distributable amount must fail."""
    sender_app, receiver_app = raiden_network
    token = token_addresses[0]
    # Twice the deposited amount can never be distributable.
    excessive_amount = deposit * 2
    async_result = sender_app.raiden.direct_transfer_async(
        token,
        excessive_amount,
        receiver_app.raiden.address,
        identifier=1,
    )
    # The transfer must not complete successfully.
    assert not async_result.wait(timeout=10)
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('number_of_nodes', [3])
def test_mediated_transfer_with_entire_deposit(raiden_network, token_addresses, deposit):
    """A mediated transfer may lock a channel's full deposit, and afterwards
    the accumulated capacity can flow back in the opposite direction.
    """
    alice_app, bob_app, charlie_app = raiden_network
    token_address = token_addresses[0]
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        deposit,
        charlie_app.raiden.address,
        identifier=1,
    )
    # While in flight the whole deposit is locked; nothing is distributable.
    channel_ab = channel(alice_app, bob_app, token_address)
    assert channel_ab.locked == deposit
    assert channel_ab.outstanding == 0
    assert channel_ab.distributable == 0
    assert result.wait(timeout=10)
    gevent.sleep(.1)  # wait for charlie to sync
    # Charlie now owns his own deposit plus the received amount, so he can
    # send deposit * 2 back through bob to alice.
    result = charlie_app.raiden.mediated_transfer_async(
        token_address,
        deposit * 2,
        alice_app.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
@pytest.mark.parametrize('privatekey_seed', ['cancel_transfer:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('transport_class', [MessageLoggerTransport])
def test_cancel_transfer(raiden_chain, token_addresses, deposit):
    """ A failed transfer must send a refund back.
    TODO:
        - Unlock the token on refund #1091
        - Clear the merkletree and update the locked amount #193
        - Remove the refund message type #490
    """
    # Topology:
    #
    #  0 -> 1 -> 2
    #
    app0, app1, app2 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    token = token_addresses[0]
    # Both channels start balanced at `deposit` with no pending locks.
    assert_synched_channels(
        channel(app0, app1, token), deposit, [],
        channel(app1, app0, token), deposit, []
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit, [],
        channel(app2, app1, token), deposit, []
    )
    # make a transfer to test the path app0 -> app1 -> app2
    identifier_path = 1
    amount_path = 1
    transfer(app0, app2, token, amount_path, identifier_path)
    # drain the channel app1 -> app2
    identifier_drain = 2
    amount_drain = int(deposit * 0.8)
    direct_transfer(app1, app2, token, amount_drain, identifier_drain)
    # wait for the nodes to sync
    gevent.sleep(0.2)
    assert_synched_channels(
        channel(app0, app1, token), deposit - amount_path, [],
        channel(app1, app0, token), deposit + amount_path, []
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit - amount_path - amount_drain, [],
        channel(app2, app1, token), deposit + amount_path + amount_drain, []
    )
    # app0 -> app1 -> app2 is the only available path but the channel app1 ->
    # app2 doesnt have resources and needs to send a RefundTransfer down the
    # path
    identifier_refund = 3
    amount_refund = 50
    async_result = app0.raiden.mediated_transfer_async(
        token,
        amount_refund,
        app2.raiden.address,
        identifier_refund,
    )
    assert async_result.wait() is False, 'there is no path with capacity, the transfer must fail'
    gevent.sleep(0.2)
    # A lock structure with the correct amount
    # The last MediatedTransfer app0 sent towards app2 is the failed attempt.
    app0_messages = app0.raiden.protocol.transport.get_sent_messages(app0.raiden.address)
    mediated_message = list(
        message
        for message in app0_messages
        if isinstance(message, MediatedTransfer) and message.target == app2.raiden.address
    )[-1]
    assert mediated_message
    # app1 must have answered with a RefundTransfer back to app0.
    app1_messages = app1.raiden.protocol.transport.get_sent_messages(app1.raiden.address)
    refund_message = next(
        message
        for message in app1_messages
        if isinstance(message, RefundTransfer) and message.recipient == app0.raiden.address
    )
    assert refund_message
    # The refund mirrors the original lock but with a shorter expiration.
    assert mediated_message.lock.amount == refund_message.lock.amount
    assert mediated_message.lock.hashlock == refund_message.lock.hashlock
    assert mediated_message.lock.expiration > refund_message.lock.expiration
    # Both channels have the amount locked because of the refund message
    assert_synched_channels(
        channel(app0, app1, token), deposit - amount_path, [refund_message.lock],
        channel(app1, app0, token), deposit + amount_path, [mediated_message.lock],
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit - amount_path - amount_drain, [],
        channel(app2, app1, token), deposit + amount_path + amount_drain, []
    )
@pytest.mark.parametrize('number_of_nodes', [2])
def test_healthcheck_with_normal_peer(raiden_network, token_addresses):
    """Two channel-connected nodes must health check each other with Pings."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    messages = setup_messages_cb()
    token_address = token_addresses[0]
    address0 = app0.raiden.address
    address1 = app1.raiden.address
    graph0 = app0.raiden.token_to_channelgraph[token_address]
    graph1 = app1.raiden.token_to_channelgraph[token_address]
    # check the nodes have a channel
    assert graph0.token_address == graph1.token_address
    assert address1 in graph0.partneraddress_to_channel
    assert address0 in graph1.partneraddress_to_channel
    # check both have started the healthcheck
    assert address0 in app1.raiden.protocol.addresses_events
    assert address1 in app0.raiden.protocol.addresses_events
    # wait for the healthcheck task to send a ping
    gevent.sleep(app0.raiden.protocol.nat_keepalive_timeout)
    gevent.sleep(app1.raiden.protocol.nat_keepalive_timeout)
    # `set` deduplicates retransmitted raw messages before decoding.
    decoded_messages = [decode(m) for m in set(messages)]
    ping_messages = get_messages_by_type(decoded_messages, Ping)
    assert ping_messages
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_healthcheck_with_unconnected_node(raiden_network, nat_keepalive_timeout):
    """ Nodes start at the unknown state. """
    first, second = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    first_address = first.raiden.address
    second_address = second.raiden.address
    # Without channels neither node has probed the other yet.
    assert first.raiden.protocol.nodeaddresses_networkstatuses[second_address] == NODE_NETWORK_UNKNOWN
    assert second.raiden.protocol.nodeaddresses_networkstatuses[first_address] == NODE_NETWORK_UNKNOWN
    # Only the first node starts health checking its peer.
    first.raiden.start_health_check_for(second_address)
    gevent.sleep(nat_keepalive_timeout)
    # The probing node learns the peer is reachable; the passive one does not.
    assert first.raiden.protocol.nodeaddresses_networkstatuses[second_address] == NODE_NETWORK_REACHABLE
    assert second.raiden.protocol.nodeaddresses_networkstatuses[first_address] == NODE_NETWORK_UNKNOWN
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('transport_class', [UnreliableTransport])
def test_healthcheck_with_bad_peer(raiden_network, nat_keepalive_retries, nat_keepalive_timeout):
    """ If the Ping messages are not answered, the node must be set to
    unreachable.
    """
    node_a, node_b = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    addr_a = node_a.raiden.address
    addr_b = node_b.raiden.address
    # Channel setup already proved both peers reachable.
    assert node_a.raiden.protocol.nodeaddresses_networkstatuses[addr_b] == NODE_NETWORK_REACHABLE
    assert node_b.raiden.protocol.nodeaddresses_networkstatuses[addr_a] == NODE_NETWORK_REACHABLE
    # Drop all Ping and Ack messages
    node_a.raiden.protocol.transport.droprate = 1
    node_b.raiden.protocol.transport.droprate = 1
    # Allow every keepalive retry (plus slack) to time out.
    wait_seconds = (nat_keepalive_retries + 2) * nat_keepalive_timeout
    gevent.sleep(wait_seconds)
    assert node_a.raiden.protocol.nodeaddresses_networkstatuses[addr_b] == NODE_NETWORK_UNREACHABLE
    assert node_b.raiden.protocol.nodeaddresses_networkstatuses[addr_a] == NODE_NETWORK_UNREACHABLE
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_directtransfer_unknown(raiden_network):
    """A direct transfer from an unknown sender must not crash the node."""
    receiver_app = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    channelgraph = list(receiver_app.raiden.token_to_channelgraph.values())[0]
    # Fabricate a sender the node has no channel with.
    stranger_key, stranger_address = make_privkey_address()
    bogus_transfer = DirectTransfer(
        identifier=1,
        nonce=1,
        token=channelgraph.token_address,
        channel=stranger_address,
        transferred_amount=10,
        recipient=receiver_app.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    sign_and_send(bogus_transfer, stranger_key, stranger_address, receiver_app)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_mediatedtransfer_unknown(raiden_network):
    """A mediated transfer from an unknown sender must not crash the node."""
    receiver_app = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    channelgraph = list(receiver_app.raiden.token_to_channelgraph.values())[0]
    # Fabricate a sender the node has no channel with.
    stranger_key, stranger_address = make_privkey_address()
    amount = 10
    bogus_transfer = MediatedTransfer(
        identifier=1,
        nonce=1,
        token=channelgraph.token_address,
        channel=stranger_address,
        transferred_amount=amount,
        recipient=receiver_app.raiden.address,
        locksroot=UNIT_HASHLOCK,
        lock=Lock(amount, 1, UNIT_HASHLOCK),
        target=make_address(),
        initiator=stranger_address,
        fee=0,
    )
    sign_and_send(bogus_transfer, stranger_key, stranger_address, receiver_app)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_hashlocktransfer_unknown(raiden_network):
    """Hashlock-related messages from an unknown sender must be handled
    gracefully: RefundTransfer, Secret, SecretRequest and RevealSecret.
    """
    app0 = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    # Deterministic key pair derived from the module-level HASH2 seed.
    other_key = PrivateKey(HASH2)
    other_address = privatekey_to_address(HASH2)
    amount = 10
    refund_transfer = make_refund_transfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=other_address,
        transferred_amount=amount,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
        amount=amount,
        hashlock=UNIT_HASHLOCK,
    )
    sign_and_send(refund_transfer, other_key, other_address, app0)
    secret = Secret(
        identifier=1,
        nonce=1,
        channel=make_address(),
        transferred_amount=amount,
        locksroot=UNIT_HASHLOCK,
        secret=UNIT_SECRET,
    )
    sign_and_send(secret, other_key, other_address, app0)
    secret_request = SecretRequest(1, UNIT_HASHLOCK, 1)
    sign_and_send(secret_request, other_key, other_address, app0)
    reveal_secret = RevealSecret(UNIT_SECRET)
    sign_and_send(reveal_secret, other_key, other_address, app0)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_outoforder(raiden_network, private_keys):
    """A resent/stale DirectTransfer reusing an already-consumed nonce must
    be rejected without corrupting the channel state.
    """
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
    balance0 = channel0.balance
    balance1 = channel1.balance
    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel
    amount = 10
    target = app1.raiden.address
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)
    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )
    # and now send one more direct transfer with the same nonce, simulating
    # an out-of-order/resent message that arrives late
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=1,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_receive_mediatedtransfer_outoforder(raiden_network, private_keys):
    """A replayed MediatedTransfer reusing an old nonce must be rejected."""
    alice_app = raiden_network[0]
    bob_app = raiden_network[1]
    charlie_app = raiden_network[2]
    graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
    token_address = graph.token_address
    channel0 = channel(
        alice_app,
        bob_app,
        token_address,
    )
    amount = 10
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        amount,
        charlie_app.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    lock = Lock(amount, 1, UNIT_HASHLOCK)
    identifier = create_default_identifier()
    # nonce=1 was already consumed by the successful transfer above.
    mediated_transfer = MediatedTransfer(
        identifier=identifier,
        nonce=1,
        token=token_address,
        channel=channel0.channel_address,
        transferred_amount=amount,
        recipient=bob_app.raiden.address,
        locksroot=UNIT_HASHLOCK,
        lock=lock,
        target=charlie_app.raiden.address,
        initiator=alice_app.raiden.address,
        fee=0
    )
    alice_key = PrivateKey(private_keys[0])
    # send the invalid mediated transfer from alice to bob with the same nonce
    sign_and_send(
        mediated_transfer,
        alice_key,
        alice_app.raiden.address,
        bob_app,
    )
@pytest.mark.parametrize('number_of_nodes', [5])
@pytest.mark.parametrize('channels_per_node', [2])
def test_receive_mediatedtransfer_invalid_address(raiden_network, private_keys):
    """A replayed MediatedTransfer delivered to a node outside the transfer
    path must be ignored without crashing that node.
    """
    alice_app = raiden_network[0]
    bob_app = raiden_network[1]
    graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
    token_address = graph.token_address
    channel0 = channel(alice_app, bob_app, token_address)
    mt_helper = MediatedTransferTestHelper(raiden_network, graph)
    initiator_address = alice_app.raiden.address
    path = mt_helper.get_paths_of_length(initiator_address, 2)
    alice_address, bob_address, charlie_address = path
    amount = 10
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        amount,
        charlie_address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1.)
    # and now send one more mediated transfer with the same nonce, simulating
    # an out-of-order/resent message that arrives late
    lock = Lock(amount, 1, UNIT_HASHLOCK)
    identifier = create_default_identifier()
    mediated_transfer = MediatedTransfer(
        identifier=identifier,
        nonce=1,
        token=token_address,
        channel=channel0.channel_address,
        transferred_amount=amount,
        recipient=bob_address,
        locksroot=UNIT_HASHLOCK,
        lock=lock,
        target=charlie_address,
        initiator=initiator_address,
        fee=0
    )
    alice_key = PrivateKey(private_keys[0])
    # Deliver the stale message to some node that is not part of the path.
    target_app = None
    for app in raiden_network:
        if app.raiden.address not in path:
            target_app = app
            break
    sign_and_send(mediated_transfer, alice_key, alice_address, target_app)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_wrongtoken(raiden_network, private_keys):
    """A DirectTransfer referencing an unknown token address must be rejected."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
    balance0 = channel0.balance
    balance1 = channel1.balance
    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel
    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)
    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )
    # and now send one more direct transfer with a mistaken token address
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=2,
        token=make_address(),
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidlocksroot(raiden_network, private_keys):
    """A DirectTransfer whose locksroot disagrees with the channel's merkle
    tree must be rejected.
    """
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
    balance0 = channel0.balance
    balance1 = channel1.balance
    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel
    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)
    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )
    # and now send one more direct transfer with the locksroot not set correctly
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=2,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('settle_timeout', [30])
def test_transfer_from_outdated(raiden_network, settle_timeout):
    """After a channel is closed and settled, a stale DirectTransfer for that
    channel must be rejected by the receiving node.
    """
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
    balance0 = channel0.balance
    balance1 = channel1.balance
    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel
    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )
    # Close the channel from app1's side and wait until both nodes see it.
    channel1.external_state.close(channel1.our_state.balance_proof)
    wait_until_block(
        app1.raiden.chain,
        app1.raiden.chain.block_number() + 1
    )
    assert channel0.external_state.close_event.wait(timeout=25)
    assert channel1.external_state.close_event.wait(timeout=25)
    assert channel0.external_state.closed_block != 0
    assert channel1.external_state.closed_block != 0
    # Mine past the settlement window so the channel fully settles.
    wait_until_block(
        app0.raiden.chain,
        app0.raiden.chain.block_number() + settle_timeout,
    )
    assert channel0.external_state.settle_event.wait(timeout=25)
    assert channel1.external_state.settle_event.wait(timeout=25)
    assert channel0.external_state.settled_block != 0
    assert channel1.external_state.settled_block != 0
    # and now receive one more transfer from the closed channel
    direct_transfer_message = DirectTransfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    # Fix: the stale message is addressed to app0 (see ``recipient`` above),
    # so it must be delivered to app0.  The previous code injected it back
    # into app1 -- the node that signed it -- where it would be discarded for
    # the wrong recipient, leaving the "reject stale transfer" path untested.
    sign_and_send(
        direct_transfer_message,
        app1.raiden.private_key,
        app1.raiden.address,
        app0,
    )
# -*- coding: utf-8 -*-
import gevent
import pytest
from coincurve import PrivateKey
from raiden.messages import (
Ack,
decode,
DirectTransfer,
Lock,
MediatedTransfer,
Ping,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
)
from raiden.tests.utils.transport import UnreliableTransport
from raiden.tests.utils.messages import (
setup_messages_cb,
make_refund_transfer,
)
from raiden.tests.utils.transport import (
MessageLoggerTransport,
)
from raiden.tests.utils.transfer import (
assert_synched_channels,
channel,
direct_transfer,
transfer,
)
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.factories import (
UNIT_SECRET,
UNIT_HASHLOCK,
make_address,
make_privkey_address,
)
from raiden.utils import (
sha3,
privatekey_to_address,
)
from raiden.raiden_service import create_default_identifier
from raiden.tests.utils.blockchain import wait_until_block
from raiden.network.protocol import (
NODE_NETWORK_REACHABLE,
NODE_NETWORK_UNKNOWN,
NODE_NETWORK_UNREACHABLE,
)
# pylint: disable=too-many-locals,too-many-statements,line-too-long
# Deterministic 32-byte digest used as a reproducible private-key seed below.
HASH2 = sha3(b'terribleweathermuchstayinside___')
def get_messages_by_type(messages, type_):
    """Return the messages that are instances of *type_*, preserving order."""
    matching = []
    for message in messages:
        if isinstance(message, type_):
            matching.append(message)
    return matching
def assert_ack_for(receiver, message, message_list):
    """Assert *message_list* contains an Ack echoing *message* for *receiver*."""
    expected_echo = sha3(message.encode() + receiver.raiden.address)
    acks = [candidate for candidate in message_list if isinstance(candidate, Ack)]
    assert any(ack.echo == expected_echo for ack in acks)
def sign_and_send(message, key, address, app):
    """Sign *message* with *key*/*address* and feed its wire bytes to *app*."""
    message.sign(key, address)
    packed = message.packed()
    app.raiden.protocol.receive(bytes(packed.data))
    # Give it some time to see if the unknown sender causes an error in the logic
    gevent.sleep(3)
class MediatedTransferTestHelper:
    """Convenience wrapper around a raiden network and one channel graph."""

    def __init__(self, raiden_network, graph):
        self.raiden_network = raiden_network
        self.graph = graph
        self.token_address = graph.token_address
        # Map each node address to its application for quick lookups.
        self.nodes = dict()
        for app in self.raiden_network:
            self.nodes[app.raiden.address] = app

    def get_channel(self, from_, to_):
        """Return the channel of node *from_* towards partner *to_*."""
        raiden_node = self.nodes[from_].raiden
        channelgraph = raiden_node.token_to_channelgraph[self.token_address]
        return channelgraph.partneraddress_to_channel[to_]

    def get_paths_of_length(self, initiator_address, num_hops):
        """
        Search for paths of length=num_hops starting from initiator_address
        """
        candidate_paths = self.graph.get_paths_of_length(
            initiator_address,
            num_hops,
        )
        assert candidate_paths, 'path must not be empty'
        for candidate in candidate_paths:
            assert len(candidate) == num_hops + 1
            assert candidate[0] == initiator_address
        return candidate_paths[0]

    def assert_path_in_shortest_paths(self, path, initiator_address, num_hops):
        """Assert that *path* is among the shortest paths to its target."""
        _, _, target_address = path
        shortest_paths = list(self.graph.get_shortest_paths(
            initiator_address,
            target_address,
        ))
        assert path in shortest_paths
        assert min(len(candidate) for candidate in shortest_paths) == num_hops + 1

    def get_app_from_address(self, address):
        """Return the app whose raiden node owns *address*, or None."""
        return next(
            (app for app in self.raiden_network if app.raiden.address == address),
            None,
        )
@pytest.mark.parametrize('number_of_nodes', [2])
def test_direct_transfer(raiden_network):
    """A direct transfer must update both channel ends and be acknowledged."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    messages = setup_messages_cb()
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]
    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]
    balance0 = channel0.balance
    balance1 = channel1.balance
    # Sanity: both nodes share the token and have a channel with each other.
    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel
    amount = 10
    target = app1.raiden.address
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(5)
    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )
    # Exactly one DirectTransfer went over the wire and it was acknowledged.
    decoded_messages = [decode(m) for m in messages]
    direct_messages = get_messages_by_type(decoded_messages, DirectTransfer)
    assert len(direct_messages) == 1
    assert direct_messages[0].transferred_amount == amount
    assert_ack_for(
        app1,
        direct_messages[0],
        decoded_messages,
    )
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_transfer_channels(raiden_network, token_addresses):
    """ When the node has no channels it should fail without raising exceptions. """
    token = token_addresses[0]
    sender_app, receiver_app = raiden_network
    transfer_amount = 10
    async_result = sender_app.raiden.mediated_transfer_async(
        token,
        transfer_amount,
        receiver_app.raiden.address,
        identifier=1,
    )
    # Without any channel there is no route, so the transfer must fail cleanly.
    assert async_result.wait() is False
@pytest.mark.parametrize('number_of_nodes', [4])
@pytest.mark.parametrize('channels_per_node', [1])
def test_transfer_noroutes(raiden_network, token_addresses):
    """ When there are no routes it should fail without raising exceptions. """
    # Topology:
    #  App0 <-> App1    App2 <-> App3
    #
    sender_app, _, peer2, peer3 = raiden_network
    token = token_addresses[0]
    transfer_amount = 10
    # App0 is only connected to App1, so neither App2 nor App3 is reachable.
    for unreachable_app in (peer2, peer3):
        async_result = sender_app.raiden.mediated_transfer_async(
            token,
            transfer_amount,
            unreachable_app.raiden.address,
            identifier=1,
        )
        assert async_result.wait() is False
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('number_of_nodes', [4])
def test_mediated_transfer(raiden_network):
    """Transfer alice -> bob -> charlie over a two-hop path and verify the
    balances of all four channel endpoints once the transfer completes.
    """
    alice_app = raiden_network[0]
    graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
    token_address = graph.token_address
    mt_helper = MediatedTransferTestHelper(raiden_network, graph)
    initiator_address = alice_app.raiden.address
    # Pick a two-hop path starting at alice and check it is a shortest path.
    path = mt_helper.get_paths_of_length(initiator_address, 2)
    mt_helper.assert_path_in_shortest_paths(path, initiator_address, 2)
    alice_address, bob_address, charlie_address = path
    # channels (alice <-> bob <-> charlie)
    channel_ab = mt_helper.get_channel(alice_address, bob_address)
    channel_ba = mt_helper.get_channel(bob_address, alice_address)
    channel_bc = mt_helper.get_channel(bob_address, charlie_address)
    channel_cb = mt_helper.get_channel(charlie_address, bob_address)
    initial_balance_ab = channel_ab.balance
    initial_balance_ba = channel_ba.balance
    initial_balance_bc = channel_bc.balance
    initial_balance_cb = channel_cb.balance
    amount = 10
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        amount,
        charlie_address,
        identifier=1,
    )
    # The assert is for an in-flight transfer
    assert not result.ready()
    assert channel_ab.locked == amount
    # Cannot assert the intermediary state of the channels since the code is
    # concurrently executed.
    # assert channel_ba.outstanding == amount
    # assert channel_bc.locked == amount
    # assert channel_cb.outstanding == amount
    assert result.wait(timeout=1)
    gevent.sleep(.1)  # wait for the other nodes to sync
    # The amount moved one hop at a time, so each endpoint shifted by `amount`.
    assert initial_balance_ab - amount == channel_ab.balance
    assert initial_balance_ba + amount == channel_ba.balance
    assert initial_balance_bc - amount == channel_bc.balance
    assert initial_balance_cb + amount == channel_cb.balance
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('number_of_nodes', [2])
def test_direct_transfer_exceeding_distributable(raiden_network, token_addresses, deposit):
    """A direct transfer larger than the distributable amount must fail."""
    sender_app, receiver_app = raiden_network
    token = token_addresses[0]
    # Twice the deposited amount can never be distributable.
    excessive_amount = deposit * 2
    async_result = sender_app.raiden.direct_transfer_async(
        token,
        excessive_amount,
        receiver_app.raiden.address,
        identifier=1,
    )
    # The transfer must not complete successfully.
    assert not async_result.wait(timeout=10)
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('number_of_nodes', [3])
def test_mediated_transfer_with_entire_deposit(raiden_network, token_addresses, deposit):
    """A mediated transfer may lock a channel's full deposit, and afterwards
    the accumulated capacity can flow back in the opposite direction.
    """
    alice_app, bob_app, charlie_app = raiden_network
    token_address = token_addresses[0]
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        deposit,
        charlie_app.raiden.address,
        identifier=1,
    )
    # While in flight the whole deposit is locked; nothing is distributable.
    channel_ab = channel(alice_app, bob_app, token_address)
    assert channel_ab.locked == deposit
    assert channel_ab.outstanding == 0
    assert channel_ab.distributable == 0
    assert result.wait(timeout=10)
    gevent.sleep(.1)  # wait for charlie to sync
    # Charlie now owns his own deposit plus the received amount, so he can
    # send deposit * 2 back through bob to alice.
    result = charlie_app.raiden.mediated_transfer_async(
        token_address,
        deposit * 2,
        alice_app.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
@pytest.mark.parametrize('privatekey_seed', ['cancel_transfer:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
@pytest.mark.parametrize('transport_class', [MessageLoggerTransport])
def test_cancel_transfer(raiden_chain, token_addresses, deposit):
    """ A failed transfer must send a refund back.
    TODO:
        - Unlock the token on refund #1091
        - Clear the merkletree and update the locked amount #193
        - Remove the refund message type #490
    """
    # Topology:
    #
    #  0 -> 1 -> 2
    #
    app0, app1, app2 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    token = token_addresses[0]
    # Both channels start balanced at `deposit` with no pending locks.
    assert_synched_channels(
        channel(app0, app1, token), deposit, [],
        channel(app1, app0, token), deposit, []
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit, [],
        channel(app2, app1, token), deposit, []
    )
    # make a transfer to test the path app0 -> app1 -> app2
    identifier_path = 1
    amount_path = 1
    transfer(app0, app2, token, amount_path, identifier_path)
    # drain the channel app1 -> app2
    identifier_drain = 2
    amount_drain = int(deposit * 0.8)
    direct_transfer(app1, app2, token, amount_drain, identifier_drain)
    # wait for the nodes to sync
    gevent.sleep(0.2)
    assert_synched_channels(
        channel(app0, app1, token), deposit - amount_path, [],
        channel(app1, app0, token), deposit + amount_path, []
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit - amount_path - amount_drain, [],
        channel(app2, app1, token), deposit + amount_path + amount_drain, []
    )
    # app0 -> app1 -> app2 is the only available path but the channel app1 ->
    # app2 doesnt have resources and needs to send a RefundTransfer down the
    # path
    identifier_refund = 3
    amount_refund = 50
    async_result = app0.raiden.mediated_transfer_async(
        token,
        amount_refund,
        app2.raiden.address,
        identifier_refund,
    )
    assert async_result.wait() is False, 'there is no path with capacity, the transfer must fail'
    gevent.sleep(0.2)
    # A lock structure with the correct amount
    # The last MediatedTransfer app0 sent towards app2 is the failed attempt.
    app0_messages = app0.raiden.protocol.transport.get_sent_messages(app0.raiden.address)
    mediated_message = list(
        message
        for message in app0_messages
        if isinstance(message, MediatedTransfer) and message.target == app2.raiden.address
    )[-1]
    assert mediated_message
    # app1 must have answered with a RefundTransfer back to app0.
    app1_messages = app1.raiden.protocol.transport.get_sent_messages(app1.raiden.address)
    refund_message = next(
        message
        for message in app1_messages
        if isinstance(message, RefundTransfer) and message.recipient == app0.raiden.address
    )
    assert refund_message
    # The refund mirrors the original lock but with a shorter expiration.
    assert mediated_message.lock.amount == refund_message.lock.amount
    assert mediated_message.lock.hashlock == refund_message.lock.hashlock
    assert mediated_message.lock.expiration > refund_message.lock.expiration
    # Both channels have the amount locked because of the refund message
    assert_synched_channels(
        channel(app0, app1, token), deposit - amount_path, [refund_message.lock],
        channel(app1, app0, token), deposit + amount_path, [mediated_message.lock],
    )
    assert_synched_channels(
        channel(app1, app2, token), deposit - amount_path - amount_drain, [],
        channel(app2, app1, token), deposit + amount_path + amount_drain, []
    )
@pytest.mark.parametrize('number_of_nodes', [2])
def test_healthcheck_with_normal_peer(raiden_network, token_addresses):
    """Two channel-connected nodes must health check each other with Pings."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    messages = setup_messages_cb()
    token_address = token_addresses[0]
    address0 = app0.raiden.address
    address1 = app1.raiden.address
    graph0 = app0.raiden.token_to_channelgraph[token_address]
    graph1 = app1.raiden.token_to_channelgraph[token_address]
    # check the nodes have a channel
    assert graph0.token_address == graph1.token_address
    assert address1 in graph0.partneraddress_to_channel
    assert address0 in graph1.partneraddress_to_channel
    # check both have started the healthcheck
    assert address0 in app1.raiden.protocol.addresses_events
    assert address1 in app0.raiden.protocol.addresses_events
    # wait for the healthcheck task to send a ping
    gevent.sleep(app0.raiden.protocol.nat_keepalive_timeout)
    gevent.sleep(app1.raiden.protocol.nat_keepalive_timeout)
    # `set` deduplicates retransmitted raw messages before decoding.
    decoded_messages = [decode(m) for m in set(messages)]
    ping_messages = get_messages_by_type(decoded_messages, Ping)
    assert ping_messages
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_healthcheck_with_unconnected_node(raiden_network, nat_keepalive_timeout):
    """ Nodes start at the unknown state. """
    first, second = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    first_address = first.raiden.address
    second_address = second.raiden.address
    # Without channels neither node has probed the other yet.
    assert first.raiden.protocol.nodeaddresses_networkstatuses[second_address] == NODE_NETWORK_UNKNOWN
    assert second.raiden.protocol.nodeaddresses_networkstatuses[first_address] == NODE_NETWORK_UNKNOWN
    # Only the first node starts health checking its peer.
    first.raiden.start_health_check_for(second_address)
    gevent.sleep(nat_keepalive_timeout)
    # The probing node learns the peer is reachable; the passive one does not.
    assert first.raiden.protocol.nodeaddresses_networkstatuses[second_address] == NODE_NETWORK_REACHABLE
    assert second.raiden.protocol.nodeaddresses_networkstatuses[first_address] == NODE_NETWORK_UNKNOWN
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('transport_class', [UnreliableTransport])
def test_healthcheck_with_bad_peer(raiden_network, nat_keepalive_retries, nat_keepalive_timeout):
    """ If the Ping messages are not answered, the node must be set to
    unreachable.
    """
    node_a, node_b = raiden_network  # pylint: disable=unbalanced-tuple-unpacking
    addr_a = node_a.raiden.address
    addr_b = node_b.raiden.address
    # Channel setup already proved both peers reachable.
    assert node_a.raiden.protocol.nodeaddresses_networkstatuses[addr_b] == NODE_NETWORK_REACHABLE
    assert node_b.raiden.protocol.nodeaddresses_networkstatuses[addr_a] == NODE_NETWORK_REACHABLE
    # Drop all Ping and Ack messages
    node_a.raiden.protocol.transport.droprate = 1
    node_b.raiden.protocol.transport.droprate = 1
    # Allow every keepalive retry (plus slack) to time out.
    wait_seconds = (nat_keepalive_retries + 2) * nat_keepalive_timeout
    gevent.sleep(wait_seconds)
    assert node_a.raiden.protocol.nodeaddresses_networkstatuses[addr_b] == NODE_NETWORK_UNREACHABLE
    assert node_b.raiden.protocol.nodeaddresses_networkstatuses[addr_a] == NODE_NETWORK_UNREACHABLE
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_directtransfer_unknown(raiden_network):
    """A DirectTransfer referencing an unknown channel/sender must be handled
    without crashing the receiving node."""
    app0 = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]

    other_key, other_address = make_privkey_address()
    direct_transfer_message = DirectTransfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=other_address,
        transferred_amount=10,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    sign_and_send(direct_transfer_message, other_key, other_address, app0)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_mediatedtransfer_unknown(raiden_network):
    """A MediatedTransfer from an unknown channel/sender must be handled
    without crashing the receiving node."""
    app0 = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]

    other_key, other_address = make_privkey_address()
    amount = 10
    mediated_transfer = MediatedTransfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=other_address,
        transferred_amount=amount,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
        lock=Lock(amount, 1, UNIT_HASHLOCK),
        target=make_address(),
        initiator=other_address,
        fee=0
    )
    sign_and_send(mediated_transfer, other_key, other_address, app0)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_receive_hashlocktransfer_unknown(raiden_network):
    """Every hashlock-related message type (refund, secret, secret request,
    reveal secret) from an unknown channel must be handled gracefully."""
    app0 = raiden_network[0]  # pylint: disable=unbalanced-tuple-unpacking
    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]

    other_key = PrivateKey(HASH2)
    other_address = privatekey_to_address(HASH2)
    amount = 10
    refund_transfer = make_refund_transfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=other_address,
        transferred_amount=amount,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
        amount=amount,
        hashlock=UNIT_HASHLOCK,
    )
    sign_and_send(refund_transfer, other_key, other_address, app0)

    secret = Secret(
        identifier=1,
        nonce=1,
        channel=make_address(),
        transferred_amount=amount,
        locksroot=UNIT_HASHLOCK,
        secret=UNIT_SECRET,
    )
    sign_and_send(secret, other_key, other_address, app0)

    secret_request = SecretRequest(1, UNIT_HASHLOCK, 1)
    sign_and_send(secret_request, other_key, other_address, app0)

    reveal_secret = RevealSecret(UNIT_SECRET)
    sign_and_send(reveal_secret, other_key, other_address, app0)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_outoforder(raiden_network, private_keys):
    """A resent/out-of-order DirectTransfer reusing an old nonce must not
    change the already-synchronized channel balances."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]

    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]

    balance0 = channel0.balance
    balance1 = channel1.balance

    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel

    amount = 10
    target = app1.raiden.address
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)

    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )

    # and now send one more direct transfer with the same nonce, simulating
    # an out-of-order/resent message that arrives late
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=1,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_receive_mediatedtransfer_outoforder(raiden_network, private_keys):
    """A MediatedTransfer resent with an already-used nonce (after a
    completed alice -> bob -> charlie transfer) must be rejected by bob."""
    alice_app = raiden_network[0]
    bob_app = raiden_network[1]
    charlie_app = raiden_network[2]

    graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
    token_address = graph.token_address

    channel0 = channel(
        alice_app,
        bob_app,
        token_address,
    )

    amount = 10
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        amount,
        charlie_app.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)

    lock = Lock(amount, 1, UNIT_HASHLOCK)
    identifier = create_default_identifier()
    mediated_transfer = MediatedTransfer(
        identifier=identifier,
        nonce=1,
        token=token_address,
        channel=channel0.channel_address,
        transferred_amount=amount,
        recipient=bob_app.raiden.address,
        locksroot=UNIT_HASHLOCK,
        lock=lock,
        target=charlie_app.raiden.address,
        initiator=alice_app.raiden.address,
        fee=0
    )

    alice_key = PrivateKey(private_keys[0])

    # send the invalid mediated transfer from alice to bob with the same nonce
    sign_and_send(
        mediated_transfer,
        alice_key,
        alice_app.raiden.address,
        bob_app,
    )
@pytest.mark.parametrize('number_of_nodes', [5])
@pytest.mark.parametrize('channels_per_node', [2])
def test_receive_mediatedtransfer_invalid_address(raiden_network, private_keys):
    """A stale MediatedTransfer delivered to a node that is not part of the
    payment path must be handled without crashing that node."""
    alice_app = raiden_network[0]
    bob_app = raiden_network[1]

    graph = list(alice_app.raiden.token_to_channelgraph.values())[0]
    token_address = graph.token_address

    channel0 = channel(alice_app, bob_app, token_address)

    mt_helper = MediatedTransferTestHelper(raiden_network, graph)
    initiator_address = alice_app.raiden.address
    path = mt_helper.get_paths_of_length(initiator_address, 2)
    alice_address, bob_address, charlie_address = path

    amount = 10
    result = alice_app.raiden.mediated_transfer_async(
        token_address,
        amount,
        charlie_address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1.)

    # and now send one more mediated transfer with the same nonce, simulating
    # an out-of-order/resent message that arrives late
    lock = Lock(amount, 1, UNIT_HASHLOCK)
    identifier = create_default_identifier()
    mediated_transfer = MediatedTransfer(
        identifier=identifier,
        nonce=1,
        token=token_address,
        channel=channel0.channel_address,
        transferred_amount=amount,
        recipient=bob_address,
        locksroot=UNIT_HASHLOCK,
        lock=lock,
        target=charlie_address,
        initiator=initiator_address,
        fee=0
    )
    alice_key = PrivateKey(private_keys[0])

    # pick some node that is not on the alice -> bob -> charlie path
    target_app = None
    for app in raiden_network:
        if app.raiden.address not in path:
            target_app = app
            break
    sign_and_send(mediated_transfer, alice_key, alice_address, target_app)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_wrongtoken(raiden_network, private_keys):
    """A DirectTransfer carrying a token address the channel does not hold
    must be rejected by the receiver."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]

    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]

    balance0 = channel0.balance
    balance1 = channel1.balance

    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel

    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)

    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )

    # and now send one more direct transfer with a mistaken token address
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=2,
        token=make_address(),
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
def test_receive_directtransfer_invalidlocksroot(raiden_network, private_keys):
    """A DirectTransfer whose locksroot does not match the channel's merkle
    tree must be rejected by the receiver."""
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]

    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]

    balance0 = channel0.balance
    balance1 = channel1.balance

    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel

    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)
    gevent.sleep(1)

    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )

    # and now send one more direct transfer with the locksroot not set correctly
    identifier = create_default_identifier()
    direct_transfer_message = DirectTransfer(
        identifier=identifier,
        nonce=2,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app1.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    app0_key = PrivateKey(private_keys[0])
    sign_and_send(direct_transfer_message, app0_key, app0.raiden.address, app1)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [1])
@pytest.mark.parametrize('settle_timeout', [30])
def test_transfer_from_outdated(raiden_network, settle_timeout):
    """After a channel is closed and settled on-chain, a late DirectTransfer
    for that channel must be handled gracefully by the receiver.

    (Fix: the original final line was corrupted by fused dataset metadata;
    the closing parenthesis is restored here.)
    """
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    graph0 = list(app0.raiden.token_to_channelgraph.values())[0]
    graph1 = list(app1.raiden.token_to_channelgraph.values())[0]

    channel0 = graph0.partneraddress_to_channel[app1.raiden.address]
    channel1 = graph1.partneraddress_to_channel[app0.raiden.address]

    balance0 = channel0.balance
    balance1 = channel1.balance

    assert graph0.token_address == graph1.token_address
    assert app1.raiden.address in graph0.partneraddress_to_channel

    amount = 10
    result = app0.raiden.direct_transfer_async(
        graph0.token_address,
        amount,
        target=app1.raiden.address,
        identifier=1,
    )
    assert result.wait(timeout=10)

    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, []
    )

    # close the channel on-chain and wait for both nodes to observe it
    channel1.external_state.close(channel1.our_state.balance_proof)

    wait_until_block(
        app1.raiden.chain,
        app1.raiden.chain.block_number() + 1
    )

    assert channel0.external_state.close_event.wait(timeout=25)
    assert channel1.external_state.close_event.wait(timeout=25)

    assert channel0.external_state.closed_block != 0
    assert channel1.external_state.closed_block != 0

    # advance past the settlement window and wait for settlement
    wait_until_block(
        app0.raiden.chain,
        app0.raiden.chain.block_number() + settle_timeout,
    )

    assert channel0.external_state.settle_event.wait(timeout=25)
    assert channel1.external_state.settle_event.wait(timeout=25)

    assert channel0.external_state.settled_block != 0
    assert channel1.external_state.settled_block != 0

    # and now receive one more transfer from the closed channel
    direct_transfer_message = DirectTransfer(
        identifier=1,
        nonce=1,
        token=graph0.token_address,
        channel=channel0.channel_address,
        transferred_amount=10,
        recipient=app0.raiden.address,
        locksroot=UNIT_HASHLOCK,
    )
    sign_and_send(
        direct_transfer_message,
        app1.raiden.private_key,
        app1.raiden.address,
        app1,
    )
# pylint: disable=unbalanced-tuple-unpacking # Drop all Ping and Ack messages # pylint: disable=unbalanced-tuple-unpacking # pylint: disable=unbalanced-tuple-unpacking # pylint: disable=unbalanced-tuple-unpacking # pylint: disable=unbalanced-tuple-unpacking # and now send one more direct transfer with the same nonce, simulating # an out-of-order/resent message that arrives late # send the invalid mediated transfer from alice to bob with the same nonce # and now send one more mediated transfer with the same nonce, simulating # an out-of-order/resent message that arrives late # pylint: disable=unbalanced-tuple-unpacking # and now send one more direct transfer with a mistaken token address # pylint: disable=unbalanced-tuple-unpacking # and now send one more direct transfer with the locksroot not set correctly # pylint: disable=unbalanced-tuple-unpacking # and now receive one more transfer from the closed channel | 1.917302 | 2 |
src/python/src/poweredup/protocol/messages.py | Hertattack/ev3 | 0 | 6616377 | <reponame>Hertattack/ev3
from . import ValueMapping, ProtocolError
class MessageType(ValueMapping):
    """One-byte wire identifiers for hub protocol messages.

    Values appear to follow the LEGO Powered Up wireless protocol message
    types — TODO confirm against the spec before relying on this mapping.
    """

    # Hub Related
    HUB_PROPERTY = b'\x01'
    HUB_ACTION = b'\x02'
    HUB_ALERT = b'\x03'
    HUB_ATTACHED_IO = b'\x04'
    GENERIC_ERROR_MSG = b'\x05'
    HW_NW_COMMAND = b'\x08'
    FW_UPDATE_BOOT = b'\x10'
    FW_UPDATE_LOCK_MEM = b'\x11'
    FW_UPDATE_LOCK_STATUS_REQ = b'\x12'
    FW_LOCK_STATUS = b'\x13'

    # Port related
    PORT_INFO_REQ = b'\x21'
    PORT_MODE_INFO_REQ = b'\x22'
    PORT_INPUT_FORMAT_SETUP_SINGLE = b'\x41'
    PORT_INPUT_FORMAT_SETUP_COMBINED = b'\x42'
    PORT_INFO = b'\x43'
    PORT_MODE_INFO = b'\x44'
    PORT_VALUE_SINGLE = b'\x45'
    PORT_VALUE_COMBINED = b'\x46'
    PORT_INPUT_FORMAT_SINGLE = b'\x47'
    PORT_INPUT_FORMAT_COMBINED = b'\x48'
    VIRTUAL_PORT_SETUP = b'\x61'
    PORT_OUTPUT_COMMAND = b'\x81'
    PORT_OUTPUT_COMMAND_FEEDBACK = b'\x82'
class CommonMessageHeader:
    """
    Common Header for all messages

    Size = 3 bytes or 4 bytes depending on the length of the message.
    Layout: [length: 1 or 2 bytes] [hub id: 1 byte] [message type: 1 byte].
    Total lengths up to 127 use the short form; longer messages set the high
    bit of the first byte and encode length = multiplier * 128 + remainder.
    """

    HUB_ID = b'\x00'
    # Low 7 bits of the first byte carry the length remainder in the 4-byte form.
    REMAINDER_MASK = int("01111111", 2)

    @classmethod
    def parse_bytes(cls, message_header_bytes: bytes):
        """Parse a raw 3- or 4-byte header.

        Raises ProtocolError for any other size.
        """
        header_length = len(message_header_bytes)
        # BUGFIX: the original guard `if 3 > header_length > 4:` is a chained
        # comparison that can never be true, so invalid sizes slipped through.
        if header_length < 3 or header_length > 4:
            raise ProtocolError(f"Unsupported header length {header_length}")

        if header_length == 3:
            message_length = int.from_bytes(message_header_bytes[0:1], byteorder="big", signed=False) - header_length
            return CommonMessageHeader(message_length, message_header_bytes[2:3])

        multiplier = int.from_bytes(message_header_bytes[1:2], byteorder="big", signed=False)
        remainder = int.from_bytes(message_header_bytes[0:1], byteorder="big",
                                   signed=False) & CommonMessageHeader.REMAINDER_MASK
        message_length = multiplier * 128 + remainder - header_length
        return CommonMessageHeader(message_length, message_header_bytes[3:])

    def __init__(self, message_length: int, message_type: bytes):
        # message_length is the payload size, excluding the header itself.
        self.message_length = message_length
        self.message_type = MessageType(message_type)

    @property
    def value(self) -> bytes:
        """Serialize the header back to its 3- or 4-byte wire form."""
        if self.message_length + 3 <= 127:
            actual_length = self.message_length + 3
            return actual_length.to_bytes(1, byteorder="big", signed=False) + \
                   CommonMessageHeader.HUB_ID + \
                   self.message_type.value
        else:
            actual_length = self.message_length + 4
            remainder = actual_length % 128
            multiplier = (actual_length - remainder) // 128
            return (remainder + 128).to_bytes(1, byteorder='big', signed=False) + \
                   multiplier.to_bytes(1, byteorder='big', signed=False) + \
                   CommonMessageHeader.HUB_ID + self.message_type.value
def build_index():
    """Index concrete Message subclasses by their declared MESSAGE_TYPE byte."""
    Message.IMPLEMENTATIONS = {
        subclass.MESSAGE_TYPE: subclass
        for subclass in Message.__subclasses__()
        if hasattr(subclass, "MESSAGE_TYPE")
    }
class Message:
    """Base class for protocol messages.

    Dispatches raw bytes to the subclass registered for the header's
    message type (see build_index).
    """

    # Lazily-built map of MESSAGE_TYPE byte -> Message subclass.
    IMPLEMENTATIONS = None
    # High bit of the first byte signals the extended (4-byte) header form.
    LENGTH_MASK = int("10000000", 2)

    @classmethod
    def parse_bytes(cls, message_bytes: bytes):
        """Parse a full raw message and delegate to the matching subclass.

        Raises ProtocolError when no implementation is registered for the
        message type.
        """
        if Message.IMPLEMENTATIONS is None:
            build_index()

        header_length = 3
        if (message_bytes[0] & Message.LENGTH_MASK) == 128:
            header_length = 4

        header: CommonMessageHeader = CommonMessageHeader.parse_bytes(message_bytes[0:header_length])

        # idiom: membership test instead of calling __contains__ directly
        if header.message_type.value not in Message.IMPLEMENTATIONS:
            raise ProtocolError(f"Unknown message type: {header.message_type.name}")

        implementation = Message.IMPLEMENTATIONS[header.message_type.value]
        return implementation.parse_bytes(message_bytes[header_length:])

    def __eq__(self, other):
        # Messages compare by serialized value when they expose one, otherwise
        # by identity; raw bytes may be compared against the value directly.
        has_value = "value" in dir(self)
        if type(self) == type(other):
            return self.value == other.value if has_value else self is other
        if isinstance(other, bytes):
            return self.value == other if has_value else False
        return False
| from . import ValueMapping, ProtocolError
class MessageType(ValueMapping):
    """One-byte wire identifiers for hub protocol messages.

    Values appear to follow the LEGO Powered Up wireless protocol message
    types — TODO confirm against the spec before relying on this mapping.
    """

    # Hub Related
    HUB_PROPERTY = b'\x01'
    HUB_ACTION = b'\x02'
    HUB_ALERT = b'\x03'
    HUB_ATTACHED_IO = b'\x04'
    GENERIC_ERROR_MSG = b'\x05'
    HW_NW_COMMAND = b'\x08'
    FW_UPDATE_BOOT = b'\x10'
    FW_UPDATE_LOCK_MEM = b'\x11'
    FW_UPDATE_LOCK_STATUS_REQ = b'\x12'
    FW_LOCK_STATUS = b'\x13'

    # Port related
    PORT_INFO_REQ = b'\x21'
    PORT_MODE_INFO_REQ = b'\x22'
    PORT_INPUT_FORMAT_SETUP_SINGLE = b'\x41'
    PORT_INPUT_FORMAT_SETUP_COMBINED = b'\x42'
    PORT_INFO = b'\x43'
    PORT_MODE_INFO = b'\x44'
    PORT_VALUE_SINGLE = b'\x45'
    PORT_VALUE_COMBINED = b'\x46'
    PORT_INPUT_FORMAT_SINGLE = b'\x47'
    PORT_INPUT_FORMAT_COMBINED = b'\x48'
    VIRTUAL_PORT_SETUP = b'\x61'
    PORT_OUTPUT_COMMAND = b'\x81'
    PORT_OUTPUT_COMMAND_FEEDBACK = b'\x82'
class CommonMessageHeader:
    """
    Common Header for all messages

    Size = 3 bytes or 4 bytes depending on the length of the message.
    Layout: [length: 1 or 2 bytes] [hub id: 1 byte] [message type: 1 byte].
    Total lengths up to 127 use the short form; longer messages set the high
    bit of the first byte and encode length = multiplier * 128 + remainder.
    """

    HUB_ID = b'\x00'
    # Low 7 bits of the first byte carry the length remainder in the 4-byte form.
    REMAINDER_MASK = int("01111111", 2)

    @classmethod
    def parse_bytes(cls, message_header_bytes: bytes):
        """Parse a raw 3- or 4-byte header.

        Raises ProtocolError for any other size.
        """
        header_length = len(message_header_bytes)
        # BUGFIX: the original guard `if 3 > header_length > 4:` is a chained
        # comparison that can never be true, so invalid sizes slipped through.
        if header_length < 3 or header_length > 4:
            raise ProtocolError(f"Unsupported header length {header_length}")

        if header_length == 3:
            message_length = int.from_bytes(message_header_bytes[0:1], byteorder="big", signed=False) - header_length
            return CommonMessageHeader(message_length, message_header_bytes[2:3])

        multiplier = int.from_bytes(message_header_bytes[1:2], byteorder="big", signed=False)
        remainder = int.from_bytes(message_header_bytes[0:1], byteorder="big",
                                   signed=False) & CommonMessageHeader.REMAINDER_MASK
        message_length = multiplier * 128 + remainder - header_length
        return CommonMessageHeader(message_length, message_header_bytes[3:])

    def __init__(self, message_length: int, message_type: bytes):
        # message_length is the payload size, excluding the header itself.
        self.message_length = message_length
        self.message_type = MessageType(message_type)

    @property
    def value(self) -> bytes:
        """Serialize the header back to its 3- or 4-byte wire form."""
        if self.message_length + 3 <= 127:
            actual_length = self.message_length + 3
            return actual_length.to_bytes(1, byteorder="big", signed=False) + \
                   CommonMessageHeader.HUB_ID + \
                   self.message_type.value
        else:
            actual_length = self.message_length + 4
            remainder = actual_length % 128
            multiplier = (actual_length - remainder) // 128
            return (remainder + 128).to_bytes(1, byteorder='big', signed=False) + \
                   multiplier.to_bytes(1, byteorder='big', signed=False) + \
                   CommonMessageHeader.HUB_ID + self.message_type.value
def build_index():
    """Index concrete Message subclasses by their declared MESSAGE_TYPE byte."""
    Message.IMPLEMENTATIONS = {
        subclass.MESSAGE_TYPE: subclass
        for subclass in Message.__subclasses__()
        if hasattr(subclass, "MESSAGE_TYPE")
    }
class Message:
    """Base class for protocol messages.

    Dispatches raw bytes to the subclass registered for the header's
    message type (see build_index).

    (Fix: also strips dataset metadata that was fused onto the final line.)
    """

    # Lazily-built map of MESSAGE_TYPE byte -> Message subclass.
    IMPLEMENTATIONS = None
    # High bit of the first byte signals the extended (4-byte) header form.
    LENGTH_MASK = int("10000000", 2)

    @classmethod
    def parse_bytes(cls, message_bytes: bytes):
        """Parse a full raw message and delegate to the matching subclass.

        Raises ProtocolError when no implementation is registered for the
        message type.
        """
        if Message.IMPLEMENTATIONS is None:
            build_index()

        header_length = 3
        if (message_bytes[0] & Message.LENGTH_MASK) == 128:
            header_length = 4

        header: CommonMessageHeader = CommonMessageHeader.parse_bytes(message_bytes[0:header_length])

        # idiom: membership test instead of calling __contains__ directly
        if header.message_type.value not in Message.IMPLEMENTATIONS:
            raise ProtocolError(f"Unknown message type: {header.message_type.name}")

        implementation = Message.IMPLEMENTATIONS[header.message_type.value]
        return implementation.parse_bytes(message_bytes[header_length:])

    def __eq__(self, other):
        # Messages compare by serialized value when they expose one, otherwise
        # by identity; raw bytes may be compared against the value directly.
        has_value = "value" in dir(self)
        if type(self) == type(other):
            return self.value == other.value if has_value else self is other
        if isinstance(other, bytes):
            return self.value == other if has_value else False
        return False
webmails/roundcube/start.py | oli-ver/Mailu | 0 | 6616378 | <reponame>oli-ver/Mailu<gh_stars>0
#!/usr/bin/python3

# Container entrypoint for the Roundcube webmail image: configure PHP,
# fix data-directory permissions, then exec Apache as PID 1.

import os
import logging as log
import sys
from socrate import conf

log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))

# Derive the PHP upload limit (in MB) as ~66% of the mail size limit (bytes);
# the margin presumably accounts for MIME encoding overhead — TODO confirm.
os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576))

# Render the PHP configuration template with the current environment.
conf.jinja("/php.ini", os.environ, "/usr/local/etc/php/conf.d/roundcube.ini")

# Fix some permissions
os.system("mkdir -p /data/gpg")
os.system("chown -R www-data:www-data /data")

# Run apache (replaces this process; nothing after this line executes)
os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"])
#!/usr/bin/python3

# Container entrypoint for the Roundcube webmail image: configure PHP,
# fix data-directory permissions, then exec Apache as PID 1.
# (Fix: the original final line was corrupted by fused dataset metadata;
# restored to a clean os.execv call.)

import os
import logging as log
import sys
from socrate import conf

log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))

# Derive the PHP upload limit (in MB) as ~66% of the mail size limit (bytes).
os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576))

conf.jinja("/php.ini", os.environ, "/usr/local/etc/php/conf.d/roundcube.ini")

# Fix some permissions
os.system("mkdir -p /data/gpg")
os.system("chown -R www-data:www-data /data")

# Run apache (replaces this process; nothing after this line executes)
os.execv("/usr/local/bin/apache2-foreground", ["apache2-foreground"])
tests/conftest.py | Hoboneer/surfraw-elvis-generator | 10 | 6616379 | import logging
import pytest
@pytest.fixture
def placeholder_elvis_name():
    """Dummy elvis name used by tests that only need a syntactically valid name."""
    return "placeholder"
@pytest.fixture
def placeholder_domain():
    """Dummy domain for tests; RFC 2606 reserved so it never resolves to a real host."""
    return "example.com"
@pytest.fixture
def placeholder_url(placeholder_domain):
    """HTTPS URL built from the placeholder domain fixture."""
    return f"https://{placeholder_domain}"
@pytest.fixture
def caplog_cli_error(caplog):
    """pytest caplog pre-set to CRITICAL, for asserting on fatal CLI errors only."""
    caplog.set_level(logging.CRITICAL)
    return caplog
| import logging
import pytest
@pytest.fixture
def placeholder_elvis_name():
    """Dummy elvis name used by tests that only need a syntactically valid name."""
    return "placeholder"
@pytest.fixture
def placeholder_domain():
    """Dummy domain for tests; RFC 2606 reserved so it never resolves to a real host."""
    return "example.com"
@pytest.fixture
def placeholder_url(placeholder_domain):
    """HTTPS URL built from the placeholder domain fixture."""
    return f"https://{placeholder_domain}"
@pytest.fixture
def caplog_cli_error(caplog):
    """pytest caplog pre-set to CRITICAL, for asserting on fatal CLI errors only."""
    caplog.set_level(logging.CRITICAL)
    return caplog
| none | 1 | 1.870907 | 2 | |
detect.py | AllenMusk/Rotating-frame | 0 | 6616380 | import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_labels,
xyxy2xywh, plot_one_rotated_box, strip_optimizer, set_logging, rotate_non_max_suppression)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.evaluation_utils import rbox2txt
def detect(save_img=False):
    '''
    Run rotated-box object detection over the configured input source
    (images folder, video file, webcam or stream), writing annotated
    images/videos and per-image rotated-box txt results under opt.output.

    Fix: removed a leftover debug `print(img.shape)` from the inference loop.
    All other statements are unchanged; comments translated to English.
    '''
    # Unpack output folder, input path, weights and inference parameters.
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

    # Initialize
    set_logging()
    # Select the inference device (CPU or CUDA).
    device = select_device(opt.device)
    # Remove any previous output folder and create a fresh one.
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    # Use FP16 on GPU only.
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    # Load the FP32 model; check_img_size adjusts the requested resolution to
    # a multiple of the model's maximum stride (s=32) if necessary.
    '''
    model = Model(
      (model): Sequential(
           (0): Focus(...)
           (1): Conv(...)
           ...
           (24): Detect(...)
    )
    '''
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    # Convert to FP16 when running on GPU.
    if half:
        model.half()  # to FP16

    # Second-stage classifier (disabled by default)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader
    # Choose the data loading strategy based on the input source type.
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    # Class names, e.g. names = ['person', 'bicycle', 'car', ..., 'toothbrush']
    names = model.module.names if hasattr(model, 'module') else model.names
    # One random BGR color per class for drawing boxes.
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    # Warm-up pass to initialize the model; tensor shape (1, 3, imgsz, imgsz).
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once

    """
    path    image/video path, e.g. 'E:\\...\\bus.jpg'
    img     resized+padded image tensor of shape (3, img_height, img_width)
    im0s    original-size image of shape (img_height, img_width, 3)
    vid_cap None when reading images, the video source when reading video
    """
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        # Match the model's precision.
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        # Prepend a batch axis when missing.
        if img.ndimension() == 3:
            # (in_channels, size1, size2) -> (1, in_channels, img_height, img_width)
            img = img.unsqueeze(0)  # add a dimension at axis 0

        # Inference
        t1 = time_synchronized()
        """
        model:
        input: in_tensor (batch_size, 3, img_height, img_width)
        output: at inference time returns [z, x]
           z tensor: [small+medium+large_inference]  size=(batch_size, 3 * (small_size1*small_size2 + medium_size1*medium_size2 + large_size1*large_size2), nc)
           x list: [small_forward, medium_forward, large_forward]  eg: small_forward.size=(batch_size, 3 anchor scales, size1, size2, [xywh, score, num_classes])

        Forward pass; pred[0] has shape (1, num_boxes, nc).
        h, w are the network input height/width; rectangular inference is used,
        so h is not necessarily equal to w.
        num_boxes = 3 * h/32 * w/32 + 3 * h/16 * w/16 + 3 * h/8 * w/8
        pred[0][..., 0:4]      box coordinates in xywh (center + width/height)
        pred[0][..., 4]        objectness confidence
        pred[0][..., 5:5+nc]   class scores
        pred[0][..., 5+nc:]    theta (rotation angle) class scores
        """
        # pred : (batch_size, num_boxes, no)  batch_size=1
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS (rotated-box variant)
        # pred : list[tensor(batch_size, num_conf_nms, [xylsθ,conf,classid])] θ∈[0,179]
        pred = rotate_non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms, without_iouthres=False)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # i: image index  det: (num_nms_boxes, [xylsθ,conf,classid]) θ∈[0,179]
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)  # output path + image name
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :5] = scale_labels(img.shape[2:], det[:, :5], im0.shape).round()

                # Print results  det: (num_nms_boxes, [xylsθ,conf,classid]) θ∈[0,179]
                for c in det[:, -1].unique():  # unique() de-duplicates the class ids
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add '<count> <class>, ' to the string

                # Write results  det: (num_nms_boxes, [xywhθ,conf,classid]) θ∈[0,179]
                for *rbox, conf, cls in reversed(det):  # iterate in ascending class order
                    # rbox = [tensor(x), tensor(y), tensor(w), tensor(h), tensor(θ)]  θ∈[0,179]
                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        classname = '%s' % names[int(cls)]
                        conf_str = '%.3f' % conf
                        # Dump the rotated box to txt for later merging/evaluation.
                        rbox2txt(rbox, classname, conf_str, Path(p).stem, str(out + '/result_txt/result_before_merge'))
                        plot_one_rotated_box(rbox, im0, label=label, color=colors[int(cls)], line_thickness=1,
                                             pi_format=False)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results (live display)
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('   Results saved to %s' % Path(out))

    print('   All Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
    """
    weights:       path to the trained weights
    source:        test data; an image/video path, '0' (built-in webcam), or an rtsp/stream URL
    output:        where to save the predicted images/videos
    img-size:      network input image size
    conf-thres:    confidence threshold
    iou-thres:     IoU threshold used by NMS
    device:        compute device selection
    view-img:      whether to display predictions, default False
    save-txt:      whether to save predicted box coordinates as txt files, default False
    classes:       keep only the given classes, e.g. 0 or 0 2 3
    agnostic-nms:  treat all classes alike during NMS, default False
    augment:       test-time augmentation (multi-scale, flips) during inference
    update:        if True, strip_optimizer is run on all models to drop optimizer
                   state from the .pt files, default False
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='./weights/YOLOv5_DOTAv1.5_OBB.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--output', type=str, default='inference/output', help='output folder')  # output folder
    # parser.add_argument('--source', type=str, default='DOTA_demo_view/images', help='source')  # file/folder, 0 for webcam
    # parser.add_argument('--output', type=str, default='DOTA_demo_view/detection', help='output folder')  # output folder
    parser.add_argument('--img-size', type=int, default=1024, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.4, help='IOU threshold for NMS')
    parser.add_argument('--device', default='0,1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')  # restrict detection to given classes
    parser.add_argument('--agnostic-nms', action='store_true', default=True, help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    print(opt)

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                # strip optimizer state and other extras from the .pt file
                strip_optimizer(opt.weights)
        else:
            detect()
| import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_labels,
xyxy2xywh, plot_one_rotated_box, strip_optimizer, set_logging, rotate_non_max_suppression)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.evaluation_utils import rbox2txt
def detect(save_img=False):
    """Run rotated-box (OBB) inference on images, videos, or streams.

    Reads all options from the module-level ``opt`` namespace (argparse).
    Writes annotated images/videos to ``opt.output`` and per-image rotated
    box records via ``rbox2txt``.

    Args:
        save_img: force saving of annotated output images.
    """
    # Unpack output folder, input source, weights and options from `opt`.
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    # Stream mode: numeric camera id, a streaming URL, or a .txt list of sources.
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

    # Initialize
    set_logging()
    # Select the compute device.
    device = select_device(opt.device)
    # Recreate the output folder from scratch.
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    # Use FP16 only on GPU.
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    # Load the FP32 model and make sure the requested input resolution is a
    # multiple of the model's maximum stride (adjusted and returned otherwise).
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier (disabled by default).
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader: streaming sources vs. image/video files.
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Class names (e.g. ['plane', 'ship', ...]) and one random RGB color per class.
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    # Warm-up: one forward pass with a zero tensor of shape (1, 3, imgsz, imgsz).
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    # path:    image/video path
    # img:     resized + padded image, (3, h, w)
    # im0s:    original-size image, (H, W, 3)
    # vid_cap: None for images, the capture object for videos
    for path, img, im0s, vid_cap in dataset:
        print(img.shape)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        # Add a batch axis if missing: (3, h, w) -> (1, 3, h, w).
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        # Forward pass. pred has shape (1, num_boxes, no) where
        #   pred[..., 0:4]    are xywh box coordinates (center + size),
        #   pred[..., 4]      is the objectness confidence,
        #   pred[..., 5:5+nc] are the class scores,
        #   pred[..., 5+nc:]  are the angle-classification (theta) scores.
        pred = model(img, augment=opt.augment)[0]

        # Rotated NMS -> list[tensor(num_kept, [x, y, l, s, theta, conf, cls])],
        # theta in [0, 179].
        pred = rotate_non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms, without_iouthres=False)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections, one entry per image in the batch.
        for i, det in enumerate(pred):  # det: (num_kept, [xylstheta, conf, classid])
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)  # output image path + name
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from the network input size back to im0 size.
                det[:, :5] = scale_labels(img.shape[2:], det[:, :5], im0.shape).round()

                # Summarize per-class detection counts, e.g. "3 planes, ".
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])

                # Write/draw results, iterating from the smallest class id upward.
                for *rbox, conf, cls in reversed(det):
                    # rbox = [x, y, w, h, theta] with theta in [0, 179]
                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        classname = '%s' % names[int(cls)]
                        conf_str = '%.3f' % conf
                        # Record the rotated box for later merging/evaluation.
                        rbox2txt(rbox, classname, conf_str, Path(p).stem, str(out + '/result_txt/result_before_merge'))
                        plot_one_rotated_box(rbox, im0, label=label, color=colors[int(cls)], line_thickness=1,
                                             pi_format=False)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results in a window; press 'q' to quit.
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                    pass
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print(' Results saved to %s' % Path(out))

    print(' All Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
    # Command-line options:
    #   weights:      trained checkpoint path(s)
    #   source:       test data — image/video path, '0' (webcam), or rtsp/http stream
    #   output:       folder for annotated images/videos
    #   img-size:     network input resolution
    #   conf-thres:   confidence threshold
    #   iou-thres:    IoU threshold used by NMS
    #   device:       compute device selection
    #   view-img:     show predictions in a window (default False)
    #   save-txt:     save predicted box coordinates as .txt files (default False)
    #   classes:      keep only the given class ids, e.g. `0` or `0 2 3`
    #   agnostic-nms: treat all classes identically during NMS
    #   augment:      test-time augmentation (multi-scale, flips) during inference
    #   update:       if True, run strip_optimizer on the listed models to drop
    #                 optimizer state from the .pt files (default False)
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='./weights/YOLOv5_DOTAv1.5_OBB.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--output', type=str, default='inference/output', help='output folder')  # output folder
    parser.add_argument('--img-size', type=int, default=1024, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.4, help='IOU threshold for NMS')
    parser.add_argument('--device', default='0,1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')  # restrict detection to given class ids
    parser.add_argument('--agnostic-nms', action='store_true', default=True, help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    print(opt)

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                # Strip optimizer state (and similar metadata) from the .pt file.
                strip_optimizer(opt.weights)
        else:
            detect()
| zh | 0.278887 | input: save_img_flag output(result): # 获取输出文件夹,输入路径,权重,参数等参数 # Initialize # 获取设备 # 移除之前的输出文件夹,并新建输出文件夹 # delete output folder # make new output folder # 如果设备为gpu,使用Float16 # half precision only supported on CUDA # Load model # 加载Float32模型,确保用户设定的输入图片分辨率能整除最大步长s=32(如不能则调整为能整除并返回) model = Model( (model): Sequential( (0): Focus(...) (1): Conv(...) ... (24): Detect(...) ) # load FP32 model # check img_size # 设置Float16 # to FP16 # Second-stage classifier # initialize # load weights # Set Dataloader # 通过不同的输入源来设置不同的数据加载方式 # set True to speed up constant image size inference # Get names and colors # 获取类别名字 names = ['person', 'bicycle', 'car',...,'toothbrush'] # 设置画框的颜色 colors = [[178, 63, 143], [25, 184, 176], [238, 152, 129],....,[235, 137, 120]]随机设置RGB颜色 # Run inference # 进行一次前向推理,测试程序是否正常 向量维度(1,3,imgsz,imgsz) # init img # run once path 图片/视频路径 'E:\...\bus.jpg' img 进行resize+pad之后的图片 1*3*re_size1*resize2的张量 (3,img_height,img_weight) img0 原size图片 (img_height,img_weight,3) cap 当读取图片时为None,读取视频时为视频源 # 图片也设置为Float16 # uint8 to fp16/32 # 0 - 255 to 0.0 - 1.0 # 没有batch_size的话则在最前面添加一个轴 # (in_channels,size1,size2) to (1,in_channels,img_height,img_weight) # 在[0]维增加一个维度 # Inference model: input: in_tensor (batch_size, 3, img_height, img_weight) output: 推理时返回 [z,x] z tensor: [small+medium+large_inference] size=(batch_size, 3 * (small_size1*small_size2 + medium_size1*medium_size2 + large_size1*large_size2), nc) x list: [small_forward, medium_forward, large_forward] eg:small_forward.size=( batch_size, 3种scale框, size1, size2, [xywh,score,num_classes]) ''' 前向传播 返回pred[0]的shape是(1, num_boxes, nc) h,w为传入网络图片的长和宽,注意dataset在检测时使用了矩形推理,所以这里h不一定等于w num_boxes = 3 * h/32 * w/32 + 3 * h/16 * w/16 + 3 * h/8 * w/8 pred[0][..., 0:4] 预测框坐标为xywh(中心点+宽长)格式 pred[0][..., 4]为objectness置信度 pred[0][..., 5:5+nc]为分类结果 pred[0][..., 5+nc:]为Θ分类结果 # pred : (batch_size, num_boxes, no) batch_size=1 # Apply NMS # 进行NMS # pred : list[tensor(batch_size, num_conf_nms, [xylsθ,conf,classid])] 
θ∈[0,179] #pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) # Apply Classifier # Process detections # i:image index det:(num_nms_boxes, [xylsθ,conf,classid]) θ∈[0,179] # batch_size >= 1 # 图片保存路径+图片名字 #print(txt_path) # print string # normalization gain whwh # Rescale boxes from img_size to im0 size # Print results det:(num_nms_boxes, [xylsθ,conf,classid]) θ∈[0,179] # unique函数去除其中重复的元素,并按元素(类别)由大到小返回一个新的无元素重复的元组或者列表 # detections per class 每个类别检测出来的素含量 # add to string 输出‘数量 类别,’ # Write results det:(num_nms_boxes, [xywhθ,conf,classid]) θ∈[0,179] # 翻转list的排列结果,改为类别由小到大的排列 # rbox=[tensor(x),tensor(y),tensor(w),tensor(h),tsneor(θ)] θ∈[0,179] # if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh # with open(txt_path + '.txt', 'a') as f: # f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format # Add bbox to image #classname = '' #plot_one_box(rbox, im0, label=label, color=colors[int(cls)], line_thickness=2) # Print time (inference + NMS) # Stream results 播放结果 # q to quit # Save results (image with detections) # new video # release previous video writer # output video codec weights:训练的权重 source:测试数据,可以是图片/视频路径,也可以是'0'(电脑自带摄像头),也可以是rtsp等视频流 output:网络预测之后的图片/视频的保存路径 img-size:网络输入图片大小 conf-thres:置信度阈值 iou-thres:做nms的iou阈值 device:设置设备 view-img:是否展示预测之后的图片/视频,默认False save-txt:是否将预测的框坐标以txt文件形式保存,默认False classes:设置只保留某一部分类别,形如0或者0 2 3 agnostic-nms:进行nms是否将所有类别框一视同仁,默认False augment:推理的时候进行多尺度,翻转等操作(TTA)推理 update:如果为True,则对所有模型进行strip_optimizer操作,去除pt文件中的优化器等信息,默认为False # file/folder, 0 for webcam # output folder # parser.add_argument('--source', type=str, default='DOTA_demo_view/images', help='source') # file/folder, 0 for webcam # parser.add_argument('--output', type=str, default='DOTA_demo_view/detection', help='output folder') # output folder #指定类别检测 # update all models (to fix SourceChangeWarning) # 去除pt文件中的优化器等信息 | 2.114174 | 2 |
cacreader/swig-4.0.2/Examples/test-suite/python/li_std_wstring_inherit_runme.py | kyletanyag/LL-Smartcard | 1,031 | 6616381 | import li_std_wstring_inherit
import sys
# Runtime test for SWIG's std::wstring typemaps with inheritance
# (li_std_wstring_inherit test case).  Python 2 syntax (`print s, x`).

x = u"hello"

# The wstring wrapper must support construction from, and concatenation
# with, native (unicode) strings.
s = li_std_wstring_inherit.wstring(u"he")
s = s + u"llo"

if s != x:
    print s, x
    raise RuntimeError("bad string mapping")

# Slicing must behave like a native unicode string.
if s[1:4] != x[1:4]:
    raise RuntimeError("bad string mapping")

# A inherits from std::wstring; concatenation must still work both with
# wrapped and native strings.
a = li_std_wstring_inherit.A(s)
b = li_std_wstring_inherit.wstring(" world")

if a + b != "hello world":
    raise RuntimeError("bad string mapping")

if a + " world" != "hello world":
    raise RuntimeError("bad string mapping")

# This is expected to fail if -builtin is used
# Reverse operators not supported in builtin types
if not li_std_wstring_inherit.is_python_builtin():
    if "hello" + b != "hello world":
        raise RuntimeError("bad string mapping")

    # Reverse concatenation must return the wrapped type (find_last_of is a
    # std::wstring method, not available on native str).
    c = "hello" + b
    if c.find_last_of("l") != 9:
        raise RuntimeError("bad string mapping")

# Member assignment: wstring-typed and A-typed attributes accept wrapped
# values and compare equal to native strings.
b = li_std_wstring_inherit.B("hi")
b.name = li_std_wstring_inherit.wstring(u"hello")
if b.name != "hello":
    raise RuntimeError("bad string mapping")

b.a = li_std_wstring_inherit.A("hello")
if b.a != u"hello":
    raise RuntimeError("bad string mapping")
| import li_std_wstring_inherit
import sys
# Runtime test for SWIG's std::wstring typemaps with inheritance
# (li_std_wstring_inherit test case).  Python 2 syntax (`print s, x`).

x = u"hello"

# The wstring wrapper must support construction from, and concatenation
# with, native (unicode) strings.
s = li_std_wstring_inherit.wstring(u"he")
s = s + u"llo"

if s != x:
    print s, x
    raise RuntimeError("bad string mapping")

# Slicing must behave like a native unicode string.
if s[1:4] != x[1:4]:
    raise RuntimeError("bad string mapping")

# A inherits from std::wstring; concatenation must still work both with
# wrapped and native strings.
a = li_std_wstring_inherit.A(s)
b = li_std_wstring_inherit.wstring(" world")

if a + b != "hello world":
    raise RuntimeError("bad string mapping")

if a + " world" != "hello world":
    raise RuntimeError("bad string mapping")

# This is expected to fail if -builtin is used
# Reverse operators not supported in builtin types
if not li_std_wstring_inherit.is_python_builtin():
    if "hello" + b != "hello world":
        raise RuntimeError("bad string mapping")

    # Reverse concatenation must return the wrapped type (find_last_of is a
    # std::wstring method, not available on native str).
    c = "hello" + b
    if c.find_last_of("l") != 9:
        raise RuntimeError("bad string mapping")

# Member assignment: wstring-typed and A-typed attributes accept wrapped
# values and compare equal to native strings.
b = li_std_wstring_inherit.B("hi")
b.name = li_std_wstring_inherit.wstring(u"hello")
if b.name != "hello":
    raise RuntimeError("bad string mapping")

b.a = li_std_wstring_inherit.A("hello")
if b.a != u"hello":
    raise RuntimeError("bad string mapping")
| en | 0.953739 | # This is expected to fail if -builtin is used # Reverse operators not supported in builtin types | 2.954914 | 3 |
scale/scheduler/test/sync/test_scheduler_manager.py | kaydoh/scale | 121 | 6616382 | from __future__ import unicode_literals
import django
from django.test import TestCase
from scheduler.manager import SchedulerManager
from scheduler.models import Scheduler
class TestSchedulerManager(TestCase):
    """Tests for the SchedulerManager database synchronization."""

    def setUp(self):
        """Initialize the Django framework before each test."""
        django.setup()

    def test_successful_update(self):
        """Syncing with the database should complete without raising."""
        scheduler_manager = SchedulerManager()
        scheduler_manager.sync_with_database()
| from __future__ import unicode_literals
import django
from django.test import TestCase
from scheduler.manager import SchedulerManager
from scheduler.models import Scheduler
class TestSchedulerManager(TestCase):
    """Tests for the SchedulerManager database synchronization."""

    def setUp(self):
        """Initialize the Django framework before each test."""
        django.setup()

    def test_successful_update(self):
        """Syncing with the database should complete without raising."""
        scheduler_manager = SchedulerManager()
        scheduler_manager.sync_with_database()
| en | 0.829827 | Tests doing a successful database update | 1.90436 | 2 |
change_dataset.py | kutao207/SiamGCN | 2 | 6616383 | import os
import os.path as osp
import shutil
import glob
import copy
import re
import logging
from itertools import repeat, product
import errno
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from torch import Tensor
from torch_sparse import SparseTensor, cat
from torch.utils.data.dataloader import default_collate
from torch._six import container_abcs, string_classes, int_classes
import torch_geometric
from torch_geometric.data import DataLoader, Data, InMemoryDataset, Batch
from utils import load_las, extract_area
from utils import makedirs, files_exist, to_list, find_file
from imblearn.over_sampling import RandomOverSampler
class ChangeBatch(Batch):
    """A :class:`torch_geometric.data.Batch` variant for paired point clouds.

    Each sample carries two clouds (``x`` and ``x2``); in addition to the
    standard ``batch``/``ptr`` (built from ``x``), this batch maintains a
    second assignment vector ``batch2`` and pointer ``ptr2`` built from the
    per-sample ``x2`` point counts, so both clouds can be scattered
    independently.
    """

    def __init__(self, batch=None, ptr=None, **kwargs):
        super().__init__(batch=batch, ptr=ptr, **kwargs)

    @classmethod
    def from_data_list(cls, data_list, follow_batch, exclude_keys):
        r'''Collate ``data_list`` into a single :class:`ChangeBatch`.

        Mirrors PyTorch Geometric's ``Batch.from_data_list``, additionally
        building ``batch2``/``ptr2`` from each sample's ``x2``.

        Args:
            data_list: a list with `batch_size` elements, each a `Data` object.
            follow_batch: keys for which an extra ``<key>_batch`` assignment
                vector is created.
            exclude_keys: keys dropped from the batch entirely.
        '''
        keys = list(set(data_list[0].keys) - set(exclude_keys))  # e.g. ['y', 'x', 'x2', 'scene_num']
        assert 'batch' not in keys and 'ptr' not in keys
        batch = cls()

        # Pre-declare every (non-dunder) attribute of the Data class, e.g.
        # ['x', 'edge_index', 'edge_attr', 'y', 'pos', 'normal', 'face', 'x2', 'scene_num'].
        for key in data_list[0].__dict__.keys():
            if key[:2] != '__' and key[-2:] != '__':
                batch[key] = None

        batch.__num_graphs__ = len(data_list)
        batch.__data_class__ = data_list[0].__class__
        for key in keys + ['batch'] + ['batch2']:
            batch[key] = []
        batch['ptr'] = [0]
        batch['ptr2'] = [0]

        device = None
        slices = {key: [0] for key in keys}   # per-key slice boundaries between samples
        cumsum = {key: [0] for key in keys}   # per-key cumulative index offsets
        cat_dims = {}                          # per-key concatenation dimension
        num_nodes_list = []
        for i, data in enumerate(data_list):
            for key in keys:
                item = data[key]

                # Increase values by `cumsum` value (index-style attributes
                # must be shifted by the sizes of all previous samples).
                cum = cumsum[key][-1]
                if isinstance(item, Tensor) and item.dtype != torch.bool:
                    if not isinstance(cum, int) or cum != 0:
                        item = item + cum
                elif isinstance(item, SparseTensor):
                    value = item.storage.value()
                    if value is not None and value.dtype != torch.bool:
                        if not isinstance(cum, int) or cum != 0:
                            value = value + cum
                        item = item.set_value(value, layout='coo')
                elif isinstance(item, (int, float)):
                    item = item + cum

                # Treat 0-dimensional tensors as 1-dimensional.
                if isinstance(item, Tensor) and item.dim() == 0:
                    item = item.unsqueeze(0)

                batch[key].append(item)

                # Gather the size of the `cat` dimension.
                size = 1
                cat_dim = data.__cat_dim__(key, data[key])
                cat_dims[key] = cat_dim
                if isinstance(item, Tensor):
                    size = item.size(cat_dim)
                    device = item.device
                elif isinstance(item, SparseTensor):
                    size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
                    device = item.device()

                slices[key].append(size + slices[key][-1])
                inc = data.__inc__(key, item)
                if isinstance(inc, (tuple, list)):
                    inc = torch.tensor(inc)
                cumsum[key].append(inc + cumsum[key][-1])

                if key in follow_batch:
                    # Emit an extra `<key>[_j]_batch` assignment vector mapping
                    # every entry of this attribute back to its sample index.
                    if isinstance(size, Tensor):
                        for j, size in enumerate(size.tolist()):
                            tmp = f'{key}_{j}_batch'
                            batch[tmp] = [] if i == 0 else batch[tmp]
                            batch[tmp].append(
                                torch.full((size, ), i, dtype=torch.long,
                                           device=device))
                    else:
                        tmp = f'{key}_batch'
                        batch[tmp] = [] if i == 0 else batch[tmp]
                        batch[tmp].append(
                            torch.full((size, ), i, dtype=torch.long,
                                       device=device))

            if hasattr(data, '__num_nodes__'):
                num_nodes_list.append(data.__num_nodes__)
            else:
                num_nodes_list.append(None)

            # Assignment vector / pointer for the first cloud (`x`).
            num_nodes = data.num_nodes
            if num_nodes is not None:
                item = torch.full((num_nodes, ), i, dtype=torch.long,
                                  device=device)
                batch.batch.append(item)
                batch.ptr.append(batch.ptr[-1] + num_nodes)

            # Assignment vector / pointer for the second cloud (`x2`).
            num_nodes2 = data.x2.size(0)
            if num_nodes2 is not None:
                item = torch.full((num_nodes2, ), i, dtype=torch.long,
                                  device=device)
                batch.batch2.append(item)
                batch.ptr2.append(batch.ptr2[-1] + num_nodes2)

        # Fix initial slice values (self-subtraction yields a type-matching zero):
        for key in keys:
            slices[key][0] = slices[key][1] - slices[key][1]

        batch.batch = None if len(batch.batch) == 0 else batch.batch
        batch.ptr = None if len(batch.ptr) == 1 else batch.ptr
        batch.batch2 = None if len(batch.batch2) == 0 else batch.batch2
        batch.ptr2 = None if len(batch.ptr2) == 1 else batch.ptr2
        batch.__slices__ = slices
        batch.__cumsum__ = cumsum
        batch.__cat_dims__ = cat_dims
        batch.__num_nodes_list__ = num_nodes_list

        # Finalize: concatenate each key's per-sample pieces into one object.
        ref_data = data_list[0]
        for key in batch.keys:
            items = batch[key]
            item = items[0]
            if isinstance(item, Tensor):
                batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, SparseTensor):
                batch[key] = cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, (int, float)):
                batch[key] = torch.tensor(items)

        if torch_geometric.is_debug_enabled():
            batch.debug()
        return batch.contiguous()
class ChangeCollater(object):
    """Callable that merges a list of samples into a single batch.

    `Data` samples are routed through :meth:`ChangeBatch.from_data_list`;
    every other element type follows the standard PyTorch collation rules,
    recursing into mappings, namedtuples, and sequences.
    """

    def __init__(self, follow_batch, exclude_keys):
        self.follow_batch = follow_batch
        self.exclude_keys = exclude_keys

    def collate(self, batch):
        """Collate *batch* according to the type of its first element."""
        first = batch[0]
        if isinstance(first, Data):
            # Graph samples: delegate to the paired-cloud batching logic.
            return ChangeBatch.from_data_list(batch, self.follow_batch,
                                              self.exclude_keys)
        if isinstance(first, torch.Tensor):
            return default_collate(batch)
        if isinstance(first, float):
            return torch.tensor(batch, dtype=torch.float)
        if isinstance(first, int_classes):
            return torch.tensor(batch)
        if isinstance(first, string_classes):
            # Strings are kept as a plain list (checked before Sequence below).
            return batch
        if isinstance(first, container_abcs.Mapping):
            # Collate each dict field independently.
            return {field: self.collate([sample[field] for sample in batch])
                    for field in first}
        if isinstance(first, tuple) and hasattr(first, '_fields'):
            # NamedTuple: collate positionally and rebuild the same type.
            return type(first)(*(self.collate(group) for group in zip(*batch)))
        if isinstance(first, container_abcs.Sequence):
            return [self.collate(group) for group in zip(*batch)]

        raise TypeError('DataLoader found invalid type: {}'.format(type(first)))

    def __call__(self, batch):
        return self.collate(batch)
class MyDataLoader(torch.utils.data.DataLoader):
    """DataLoader wired to :class:`ChangeCollater` so that paired point-cloud
    samples are merged into :class:`ChangeBatch` objects.

    A caller-supplied ``collate_fn`` is silently discarded, since batching
    must go through :class:`ChangeCollater`.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False, follow_batch=None,
                 exclude_keys=None, **kwargs):
        # None sentinels instead of mutable default lists (which would be
        # shared across every instance of this class).
        follow_batch = [] if follow_batch is None else follow_batch
        exclude_keys = [] if exclude_keys is None else exclude_keys
        # Batching is fixed to ChangeCollater; drop a conflicting collate_fn.
        if "collate_fn" in kwargs:
            del kwargs["collate_fn"]

        # Save for Pytorch Lightning...
        self.follow_batch = follow_batch
        super().__init__(dataset, batch_size, shuffle,
                         collate_fn=ChangeCollater(follow_batch,
                                                   exclude_keys), **kwargs)
def __repr__(obj):
    """Stringify *obj* for persistence/staleness checks.

    Returns ``'None'`` for ``None``; otherwise ``repr(obj)`` with everything
    between the first whitespace inside ``<...>`` and the closing ``>``
    removed (e.g. stripping memory addresses from default object reprs).
    """
    if obj is None:
        return 'None'
    raw = obj.__repr__()
    return re.sub('(<.*?)\\s.*(>)', r'\1\2', raw)
class ChangeDataset(InMemoryDataset):
    """In-memory dataset of paired 2016/2020 point-cloud patches for change detection.

    Expected layout under ``root``:
      * ``2016/`` and ``2020/`` — one ``<scene>_*.laz`` cloud per scene and year,
      * ``train/`` and ``test/`` — CSV files with object centers (``x, y, z``)
        and a ``classification`` column holding one of the change-label names.

    Each sample is a :class:`Data` with ``x`` (2016 patch), ``x2`` (2020 patch),
    ``y`` (label index) and ``scene_num``.
    """

    def __init__(self, root, train=True, clearance=3, ignore_labels=None,
                 transform=None, pre_transform=None, pre_filter=None):
        """Build (or load) the processed dataset.

        Args:
            root: dataset root directory.
            train: load the training split if True, otherwise the test split.
            clearance: half-size of the square patch cut around each center.
            ignore_labels: class names to drop entirely (default: keep all).
            transform / pre_transform / pre_filter: standard PyG dataset hooks.
        """
        # None sentinel instead of a mutable default list (shared across calls).
        ignore_labels = [] if ignore_labels is None else list(ignore_labels)
        self.root = root
        self.train = train
        self.clearance = clearance
        self.ignore_labels = ignore_labels
        self.data_2016 = osp.join(root, '2016')
        self.data_2020 = osp.join(root, '2020')
        self.train_csv_dir = osp.join(root, 'train')
        self.class_labels = ['nochange', 'removed', "added", 'change', "color_change"]
        if len(self.ignore_labels) > 0:
            rm_labels = []
            for l in self.ignore_labels:
                if l in self.class_labels:
                    self.class_labels.remove(l)
                    rm_labels.append(l)
            print(f"Labels {rm_labels} have been removed!")
            # Fixed: check the labels that REMAIN rather than the ignore list
            # itself (the old `len(self.ignore_labels) == 0` test could never
            # be true inside this branch, so the guard was dead code).
            if len(self.class_labels) == 0:
                raise ValueError("All labels have been ignored!!")
        self.labels_to_names_dict = {i: v for i, v in enumerate(self.class_labels)}
        self.names_to_labels_dict = {v: i for i, v in enumerate(self.class_labels)}
        super().__init__(root, transform, pre_transform, pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    def labels_to_names(self, labels):
        """Map a sequence of label indices to class-name strings."""
        return [self.class_labels[i] for i in labels]

    def names_to_labels(self, names):
        """Map a sequence of class-name strings to label indices."""
        return [self.names_to_labels_dict[n] for n in names]

    @property
    def processed_file_names(self):
        # One processed file per split, keyed by the clearance used to build it.
        return ['training_' + f'{self.clearance}' + '.pt', 'test_' + f'{self.clearance}' + '.pt']

    def process(self):
        """Process and persist both splits."""
        torch.save(self.process_set('train'), self.processed_paths[0])
        torch.save(self.process_set('test'), self.processed_paths[1])

    def _process(self):
        """Run :meth:`process` unless the requested split already exists on disk.

        Warns when the stored pre_transform / pre_filter / class_labels differ
        from the current arguments (stale processed cache).
        """
        # Fixed: the original read 'pre_transoform_<c>.pt' (typo) — a file that
        # is never written — so this staleness warning could never fire.
        f = osp.join(self.processed_dir, 'pre_transform_' + f'{self.clearance}' + '.pt')
        if osp.exists(f) and torch.load(f) != __repr__(self.pre_transform):
            logging.warning(
                'The `pre_transform` argument differs from the one used in '
                'the pre-processed version of this dataset. If you really '
                'want to make use of another pre-processing technique, make '
                'sure to delete `{}` first.'.format(self.processed_dir))
        f = osp.join(self.processed_dir, 'pre_filter_' + f'{self.clearance}' + '.pt')
        if osp.exists(f) and torch.load(f) != __repr__(self.pre_filter):
            logging.warning(
                'The `pre_filter` argument differs from the one used in the '
                'pre-processed version of this dataset. If you really want to '
                'make use of another pre-filtering technique, make sure to '
                'delete `{}` first.'.format(self.processed_dir))
        f = osp.join(self.processed_dir, 'label_names_' + f'{self.clearance}' + '.pt')
        if osp.exists(f) and torch.load(f) != '_'.join(self.class_labels):
            logging.warning('The `class_labels` argument differs from last used one. You may have ignored some class names.')

        path = self.processed_paths[0] if self.train else self.processed_paths[1]
        if osp.exists(path):  # pragma: no cover
            return

        print('Processing...')
        makedirs(self.processed_dir)
        self.process()

        # Record the settings used, so future runs can detect a stale cache.
        path = osp.join(self.processed_dir, 'pre_transform_' + f'{self.clearance}' + '.pt')
        torch.save(__repr__(self.pre_transform), path)
        path = osp.join(self.processed_dir, 'pre_filter_' + f'{self.clearance}' + '.pt')
        torch.save(__repr__(self.pre_filter), path)
        path = osp.join(self.processed_dir, 'label_names_' + f'{self.clearance}' + '.pt')
        torch.save('_'.join(self.class_labels), path)
        print('Done!')

    def process_set(self, dataset):
        """Build the collated data for one split (``'train'`` or ``'test'``).

        For every CSV row, cut a `clearance`-sized patch around the center
        from both years' clouds and attach the change label.
        """
        csv_files = sorted(glob.glob(osp.join(self.root, dataset, '*.csv')))
        files_2016 = glob.glob(osp.join(self.data_2016, '*.laz'))
        files_2020 = glob.glob(osp.join(self.data_2020, '*.laz'))

        data_list = []
        i = 0
        for file in csv_files:
            # Scene id is the leading token of the CSV file name.
            scene_num = osp.basename(file).split('_')[0]
            f16 = find_file(files_2016, scene_num)
            f20 = find_file(files_2020, scene_num)

            df = pd.read_csv(file)
            centers = df[["x", "y", "z"]].to_numpy()
            label_names = df["classification"].to_list()

            points_16, h16 = load_las(f16)
            points_20, h20 = load_las(f20)
            i += 1
            print(f"Processing {dataset} set {i}/{len(csv_files)} --> {osp.basename(file)} scene_num={scene_num} finding {len(centers)} objects")
            for center, label in zip(centers, label_names):
                if label in self.ignore_labels:
                    continue
                label = self.names_to_labels_dict[label]
                x1 = torch.tensor(extract_area(points_16, center[0:2], self.clearance), dtype=torch.float)
                data = Data(x=x1)
                data.x2 = torch.tensor(extract_area(points_20, center[0:2], self.clearance), dtype=torch.float)
                data.y = torch.tensor([label])
                data.scene_num = torch.tensor([int(scene_num)])
                data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]
        return self.collate(data_list)
if __name__ == '__main__':
    # Smoke test: build the train/test splits with unit normalization and
    # fixed-size point sampling, forcing (pre)processing on the first run.
    from transforms import NormalizeScale, SamplePoints
    pre_transform, transform = NormalizeScale(), SamplePoints(1024)

    root_dir = 'F:/shrec2021/data'  # NOTE(review): hard-coded local path — adjust per machine

    train_dataset = ChangeDataset(root_dir, train=True, clearance=3, transform=transform, pre_transform=pre_transform)
    test_dataset = ChangeDataset(root_dir, train=False, clearance=3, transform=transform, pre_transform=pre_transform)
    print("Dataset finished!")
| import os
import os.path as osp
import shutil
import glob
import copy
import re
import logging
from itertools import repeat, product
import errno
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from torch import Tensor
from torch_sparse import SparseTensor, cat
from torch.utils.data.dataloader import default_collate
from torch._six import container_abcs, string_classes, int_classes
import torch_geometric
from torch_geometric.data import DataLoader, Data, InMemoryDataset, Batch
from utils import load_las, extract_area
from utils import makedirs, files_exist, to_list, find_file
from imblearn.over_sampling import RandomOverSampler
class ChangeBatch(Batch):
    """A :class:`torch_geometric.data.Batch` variant for paired point clouds.

    Each sample carries two clouds (``x`` and ``x2``); in addition to the
    standard ``batch``/``ptr`` (built from ``x``), this batch maintains a
    second assignment vector ``batch2`` and pointer ``ptr2`` built from the
    per-sample ``x2`` point counts, so both clouds can be scattered
    independently.
    """

    def __init__(self, batch=None, ptr=None, **kwargs):
        super().__init__(batch=batch, ptr=ptr, **kwargs)

    @classmethod
    def from_data_list(cls, data_list, follow_batch, exclude_keys):
        r'''Collate ``data_list`` into a single :class:`ChangeBatch`.

        Mirrors PyTorch Geometric's ``Batch.from_data_list``, additionally
        building ``batch2``/``ptr2`` from each sample's ``x2``.

        Args:
            data_list: a list with `batch_size` elements, each a `Data` object.
            follow_batch: keys for which an extra ``<key>_batch`` assignment
                vector is created.
            exclude_keys: keys dropped from the batch entirely.
        '''
        keys = list(set(data_list[0].keys) - set(exclude_keys))  # e.g. ['y', 'x', 'x2', 'scene_num']
        assert 'batch' not in keys and 'ptr' not in keys
        batch = cls()

        # Pre-declare every (non-dunder) attribute of the Data class, e.g.
        # ['x', 'edge_index', 'edge_attr', 'y', 'pos', 'normal', 'face', 'x2', 'scene_num'].
        for key in data_list[0].__dict__.keys():
            if key[:2] != '__' and key[-2:] != '__':
                batch[key] = None

        batch.__num_graphs__ = len(data_list)
        batch.__data_class__ = data_list[0].__class__
        for key in keys + ['batch'] + ['batch2']:
            batch[key] = []
        batch['ptr'] = [0]
        batch['ptr2'] = [0]

        device = None
        slices = {key: [0] for key in keys}   # per-key slice boundaries between samples
        cumsum = {key: [0] for key in keys}   # per-key cumulative index offsets
        cat_dims = {}                          # per-key concatenation dimension
        num_nodes_list = []
        for i, data in enumerate(data_list):
            for key in keys:
                item = data[key]

                # Increase values by `cumsum` value (index-style attributes
                # must be shifted by the sizes of all previous samples).
                cum = cumsum[key][-1]
                if isinstance(item, Tensor) and item.dtype != torch.bool:
                    if not isinstance(cum, int) or cum != 0:
                        item = item + cum
                elif isinstance(item, SparseTensor):
                    value = item.storage.value()
                    if value is not None and value.dtype != torch.bool:
                        if not isinstance(cum, int) or cum != 0:
                            value = value + cum
                        item = item.set_value(value, layout='coo')
                elif isinstance(item, (int, float)):
                    item = item + cum

                # Treat 0-dimensional tensors as 1-dimensional.
                if isinstance(item, Tensor) and item.dim() == 0:
                    item = item.unsqueeze(0)

                batch[key].append(item)

                # Gather the size of the `cat` dimension.
                size = 1
                cat_dim = data.__cat_dim__(key, data[key])
                cat_dims[key] = cat_dim
                if isinstance(item, Tensor):
                    size = item.size(cat_dim)
                    device = item.device
                elif isinstance(item, SparseTensor):
                    size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
                    device = item.device()

                slices[key].append(size + slices[key][-1])
                inc = data.__inc__(key, item)
                if isinstance(inc, (tuple, list)):
                    inc = torch.tensor(inc)
                cumsum[key].append(inc + cumsum[key][-1])

                if key in follow_batch:
                    # Emit an extra `<key>[_j]_batch` assignment vector mapping
                    # every entry of this attribute back to its sample index.
                    if isinstance(size, Tensor):
                        for j, size in enumerate(size.tolist()):
                            tmp = f'{key}_{j}_batch'
                            batch[tmp] = [] if i == 0 else batch[tmp]
                            batch[tmp].append(
                                torch.full((size, ), i, dtype=torch.long,
                                           device=device))
                    else:
                        tmp = f'{key}_batch'
                        batch[tmp] = [] if i == 0 else batch[tmp]
                        batch[tmp].append(
                            torch.full((size, ), i, dtype=torch.long,
                                       device=device))

            if hasattr(data, '__num_nodes__'):
                num_nodes_list.append(data.__num_nodes__)
            else:
                num_nodes_list.append(None)

            # Assignment vector / pointer for the first cloud (`x`).
            num_nodes = data.num_nodes
            if num_nodes is not None:
                item = torch.full((num_nodes, ), i, dtype=torch.long,
                                  device=device)
                batch.batch.append(item)
                batch.ptr.append(batch.ptr[-1] + num_nodes)

            # Assignment vector / pointer for the second cloud (`x2`).
            num_nodes2 = data.x2.size(0)
            if num_nodes2 is not None:
                item = torch.full((num_nodes2, ), i, dtype=torch.long,
                                  device=device)
                batch.batch2.append(item)
                batch.ptr2.append(batch.ptr2[-1] + num_nodes2)

        # Fix initial slice values (self-subtraction yields a type-matching zero):
        for key in keys:
            slices[key][0] = slices[key][1] - slices[key][1]

        batch.batch = None if len(batch.batch) == 0 else batch.batch
        batch.ptr = None if len(batch.ptr) == 1 else batch.ptr
        batch.batch2 = None if len(batch.batch2) == 0 else batch.batch2
        batch.ptr2 = None if len(batch.ptr2) == 1 else batch.ptr2
        batch.__slices__ = slices
        batch.__cumsum__ = cumsum
        batch.__cat_dims__ = cat_dims
        batch.__num_nodes_list__ = num_nodes_list

        # Finalize: concatenate each key's per-sample pieces into one object.
        ref_data = data_list[0]
        for key in batch.keys:
            items = batch[key]
            item = items[0]
            if isinstance(item, Tensor):
                batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, SparseTensor):
                batch[key] = cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, (int, float)):
                batch[key] = torch.tensor(items)

        if torch_geometric.is_debug_enabled():
            batch.debug()
        return batch.contiguous()
class ChangeCollater(object):
    """Callable collate function dispatching on the batch's element type.

    ``Data`` elements are merged through ``ChangeBatch.from_data_list``;
    every other element type falls back to PyTorch's default collation
    rules (tensors, numbers, strings, mappings, namedtuples, sequences).
    """

    def __init__(self, follow_batch, exclude_keys):
        self.follow_batch = follow_batch
        self.exclude_keys = exclude_keys

    def collate(self, batch):
        """Collate a list of samples into one batched object."""
        first = batch[0]
        # NOTE: the order of these checks matters (strings are sequences,
        # namedtuples are tuples), mirroring torch's default_collate.
        if isinstance(first, Data):
            return ChangeBatch.from_data_list(batch, self.follow_batch,
                                              self.exclude_keys)
        if isinstance(first, torch.Tensor):
            return default_collate(batch)
        if isinstance(first, float):
            return torch.tensor(batch, dtype=torch.float)
        if isinstance(first, int_classes):
            return torch.tensor(batch)
        if isinstance(first, string_classes):
            return batch
        if isinstance(first, container_abcs.Mapping):
            return {key: self.collate([d[key] for d in batch]) for key in first}
        if isinstance(first, tuple) and hasattr(first, '_fields'):
            return type(first)(*(self.collate(s) for s in zip(*batch)))
        if isinstance(first, container_abcs.Sequence):
            return [self.collate(s) for s in zip(*batch)]
        raise TypeError('DataLoader found invalid type: {}'.format(type(first)))

    def __call__(self, batch):
        return self.collate(batch)
class MyDataLoader(torch.utils.data.DataLoader):
    """DataLoader that always collates with :class:`ChangeCollater`.

    A caller-supplied ``collate_fn`` is silently discarded so the custom
    two-point-cloud batching cannot be overridden by accident.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False, follow_batch=[], exclude_keys=[], **kwargs):
        # This loader owns collation; drop any user-provided collate_fn.
        kwargs.pop("collate_fn", None)
        # Save for Pytorch Lightning...
        self.follow_batch = follow_batch
        collater = ChangeCollater(follow_batch, exclude_keys)
        super().__init__(dataset, batch_size, shuffle, collate_fn=collater, **kwargs)
def __repr__(obj):
    """Return ``repr(obj)`` with '<Name at 0x...>' address details stripped.

    ``None`` maps to the literal string ``'None'``; for any other object the
    text between the first whitespace inside ``<...>`` and the closing ``>``
    is removed (e.g. ``<object object at 0x7f..>`` becomes ``<object>``).
    """
    if obj is None:
        return 'None'
    return re.sub(r'(<.*?)\s.*(>)', r'\1\2', obj.__repr__())
class ChangeDataset(InMemoryDataset):
    """SHREC'21 street-level change-detection dataset.

    Every sample pairs two point-cloud crops taken around one annotated
    object center: ``x`` from the 2016 scan and ``x2`` from the 2020 scan,
    labelled ``y`` with one of the change classes and tagged with its scene
    number.

    Args:
        root: dataset root containing ``2016/``, ``2020/`` and ``train/``.
        train: load the training split if True, the test split otherwise.
        clearance: half-width of the square crop around each object center.
        ignore_labels: class names to drop; their samples are skipped.
        transform, pre_transform, pre_filter: standard PyG dataset hooks.
    """

    def __init__(self, root, train=True, clearance = 3, ignore_labels=[], transform=None, pre_transform=None, pre_filter=None):
        self.root = root
        self.train = train
        self.clearance = clearance
        self.ignore_labels = ignore_labels
        self.data_2016 = osp.join(root, '2016')
        self.data_2020 = osp.join(root, '2020')
        self.train_csv_dir = osp.join(root, 'train')
        self.class_labels = ['nochange','removed',"added",'change',"color_change"]
        if len(self.ignore_labels) > 0:
            rm_labels = []
            for l in self.ignore_labels:
                if l in self.class_labels:
                    self.class_labels.remove(l)
                    rm_labels.append(l)
            print(f"Labels {rm_labels} have been removed!")
            # Bugfix: the original tested ``len(self.ignore_labels) == 0``
            # here, which can never be true inside this branch; the intent
            # is to fail when *every* class label has been removed.
            if len(self.class_labels) == 0:
                raise ValueError("All labels have been ignored!!")
        # Bidirectional mappings between integer labels and class names.
        self.labels_to_names_dict = {i:v for i, v in enumerate(self.class_labels)}
        self.names_to_labels_dict = {v:i for i, v in enumerate(self.class_labels)}
        super().__init__(root, transform, pre_transform, pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    def labels_to_names(self, labels):
        """Map a sequence of integer labels to class-name strings."""
        return [self.class_labels[i] for i in labels]

    def names_to_labels(self, names):
        """Map a sequence of class-name strings to integer labels."""
        return [self.names_to_labels_dict[n] for n in names]

    @property
    def processed_file_names(self):
        # One cache file per split, keyed by the crop clearance.
        return ['training_'+ f'{self.clearance}'+ '.pt', 'test_'+ f'{self.clearance}'+ '.pt']

    def process(self):
        torch.save(self.process_set('train'), self.processed_paths[0])
        torch.save(self.process_set('test'), self.processed_paths[1])

    def _process(self):
        # Warn when the cached data was produced with different settings.
        # Bugfix: the original checked the misspelled file name
        # 'pre_transoform_...' while the file is saved below as
        # 'pre_transform_...', so this warning could never fire.
        f = osp.join(self.processed_dir, 'pre_transform_'+ f'{self.clearance}'+ '.pt')
        if osp.exists(f) and torch.load(f) != __repr__(self.pre_transform):
            logging.warning(
                'The `pre_transform` argument differs from the one used in '
                'the pre-processed version of this dataset. If you really '
                'want to make use of another pre-processing technique, make '
                'sure to delete `{}` first.'.format(self.processed_dir))
        f = osp.join(self.processed_dir, 'pre_filter_'+ f'{self.clearance}'+ '.pt')
        if osp.exists(f) and torch.load(f) != __repr__(self.pre_filter):
            logging.warning(
                'The `pre_filter` argument differs from the one used in the '
                'pre-processed version of this dataset. If you really want to '
                'make use of another pre-fitering technique, make sure to '
                'delete `{}` first.'.format(self.processed_dir))
        f = osp.join(self.processed_dir, 'label_names_'+ f'{self.clearance}'+ '.pt')
        if osp.exists(f) and torch.load(f) != '_'.join(self.class_labels):
            logging.warning('The `class_labels` argument differs from last used one. You may have ignored some class names.')
        path = self.processed_paths[0] if self.train else self.processed_paths[1]
        # Only the requested split needs to exist for loading to succeed.
        if osp.exists(path):  # pragma: no cover
            return
        print('Processing...')
        makedirs(self.processed_dir)
        self.process()
        # Record the settings used to build this cache for the checks above.
        path = osp.join(self.processed_dir, 'pre_transform_'+ f'{self.clearance}'+ '.pt')
        torch.save(__repr__(self.pre_transform), path)
        path = osp.join(self.processed_dir, 'pre_filter_'+ f'{self.clearance}'+ '.pt')
        torch.save(__repr__(self.pre_filter), path)
        path = osp.join(self.processed_dir, 'label_names_'+ f'{self.clearance}'+ '.pt')
        torch.save('_'.join(self.class_labels), path)
        print('Done!')

    def process_set(self, dataset):
        """Build and collate the Data list for one split ('train'/'test')."""
        csv_files = sorted(glob.glob(osp.join(self.root, dataset, '*.csv')))
        files_2016 = glob.glob(osp.join(self.data_2016, '*.laz'))
        files_2020 = glob.glob(osp.join(self.data_2020, '*.laz'))
        data_list = []
        i = 0
        for file in csv_files:
            # The scene number is the leading token of the csv file name.
            scene_num = osp.basename(file).split('_')[0]
            f16 = find_file(files_2016, scene_num)
            f20 = find_file(files_2020, scene_num)
            df = pd.read_csv(file)
            centers = df[["x", "y", "z"]].to_numpy()
            label_names = df["classification"].to_list()
            points_16, h16 = load_las(f16)
            points_20, h20 = load_las(f20)
            i+=1
            print(f"Processing {dataset} set {i}/{len(csv_files)} --> {osp.basename(file)} scene_num={scene_num} finding {len(centers)} objects")
            for center, label in zip(centers, label_names):
                if label in self.ignore_labels:
                    continue
                label = self.names_to_labels_dict[label]
                # Crop the same square area from both epochs.
                x1 = torch.tensor(extract_area(points_16, center[0:2], self.clearance), dtype=torch.float)
                data = Data(x=x1)
                data.x2 = torch.tensor(extract_area(points_20, center[0:2], self.clearance), dtype=torch.float)
                data.y = torch.tensor([label])
                data.scene_num = torch.tensor([int(scene_num)])
                data_list.append(data)
        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]
        return self.collate(data_list)
if __name__ == '__main__':
    # Smoke test: build both splits of the change-detection dataset.
    from transforms import NormalizeScale, SamplePoints

    root_dir = 'F:/shrec2021/data'
    pre_transform = NormalizeScale()
    transform = SamplePoints(1024)
    train_dataset = ChangeDataset(root_dir, train=True, clearance=3,
                                  transform=transform, pre_transform=pre_transform)
    test_dataset = ChangeDataset(root_dir, train=False, clearance=3,
                                 transform=transform, pre_transform=pre_transform)
    print("Dataset finished!")
    # train_loader = DataLoader(train_dataset, batch_size=2, shuffle=False, num_workers=0)
    # for data in train_loader:
    #     print(data.x.shape)
| en | 0.357315 | datalist: A list object with `batch_size` elements, and each element is a `Data` object. # ['y', 'x', 'x2', 'scene_num'] # ['x', 'edge_index', 'edge_attr', 'y', 'pos', 'normal', 'face', 'x2', 'scene_num'] # keys: ['y', 'x', 'x2', 'scene_num'] # Increase values by `cumsum` value. # Treat 0-dimensional tensors as 1-dimensional. # Gather the size of the `cat` dimension. # Fix initial slice values: # Save for Pytorch Lightning... # return ['training.pt', 'test.pt'] # if files_exist(self.processed_paths): # pragma: no cover # return # pragma: no cover # scene_num = re.findall(r'^[0-9]', osp.basename(file)) # if len(scene_num) == 0: # continue # else: # scene_num = scene_num[0] # labels = self.names_to_labels(label_names) # train_loader = DataLoader(train_dataset, batch_size=2, shuffle=False, num_workers=0) # for data in train_loader: # print(data.x.shape) | 1.83468 | 2 |
autoencoder_run_DAE.py | Wang-zhechao/Denoising-Autoencoder-fault-detection | 3 | 6616384 | <gh_stars>1-10
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import pandas as pd
# Matplotlib: use SimHei so CJK titles render; fix the minus-sign glyph.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
######################################################
#########     Data loading and preparation   #########
######################################################
# ------------------------ read raw data ---------------------
fileData = pd.read_csv(r'./data/meltadata.txt', sep="\t", dtype=np.float32, header=None)
wholeData = fileData.values
# ---------------------- training-data range ----------------------
trx_start = 0
trx_datastep = 700
# ---------------------- test-data range ----------------------
tex_start = 0
tex_datastep = 1050
# ---------------------- learning rate ----------------------
LR = 0.001
# ---------------------- L1 regularization coefficient ----------------------
lambda1 = 0.02
# ---------------------- convert to tensors -------------------
# The first 13 columns are the process variables used by the model.
trX, teX = wholeData[trx_start:trx_start + trx_datastep , :13], wholeData[tex_start: tex_start + tex_datastep, :13]
Xtrain = trX.astype(np.float32)
Xtrain = torch.Tensor(Xtrain)
Xtest = teX.astype(np.float32)
Xtest = torch.Tensor(Xtest)
# Xtest_row: number of test samples; Xtest_list: number of variables (13).
Xtest_row, Xtest_list = Xtest.shape
######################################################
######### 自编码器模型与网络构建 #########
######################################################
# ----------------------网络构建-----------------------
class AutoEncoder(nn.Module):
    """Fully connected autoencoder over the 13 process variables.

    The encoder expands 13 -> 16 -> 32 -> 64 -> 128 with Tanh activations;
    the decoder mirrors it back to 13 and ends in a Sigmoid so outputs lie
    in [0, 1] like the normalized inputs.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        # Layer order inside each Sequential must not change: the pretrained
        # checkpoint's state_dict keys are index-based (encoder.0.weight, ...).
        self.encoder = nn.Sequential(
            nn.Linear(Xtest_list, 16), nn.Tanh(),
            nn.Linear(16, 32), nn.Tanh(),
            nn.Linear(32, 64), nn.Tanh(),
            nn.Linear(64, 128),
        )
        self.decoder = nn.Sequential(
            nn.Linear(128, 64), nn.Tanh(),
            nn.Linear(64, 32), nn.Tanh(),
            nn.Linear(32, 16), nn.Tanh(),
            nn.Linear(16, Xtest_list), nn.Sigmoid(),
        )

    def forward(self, x):
        """Return ``(latent code, reconstruction)`` for input ``x``."""
        code = self.encoder(x)
        recon = self.decoder(code)
        return code, recon
autoencoder = AutoEncoder()                                       # instantiate the model
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)     # optimizer (unused at inference time)
loss_func = nn.BCELoss()                                          # binary cross-entropy loss
loss_reconstruction = nn.MSELoss()                                # loss used for error reconstruction
autoencoder.load_state_dict(torch.load('E:\PycharmCode\Autoencoder_DAE_tanh(Ascending dimension)\model\\autoencoder_DAE_tanh_1000.pkl'))  # load pretrained weights
######################################################
######### 函数: 计算置信区间函数 #########
######################################################
def confidence(data, c=0.95):
    """Return the (lower, upper) normal-approximation confidence interval.

    Uses the sample mean and sample standard deviation with ddof=1 (the
    statistical N-1 convention; NumPy/SciPy ``std`` defaults to the
    population formula, hence the explicit ``ddof``).

    Args:
        data: array-like of statistic values.
        c: confidence level (defaults to 0.95).
    """
    center = data.mean()
    spread = data.std(ddof=1)
    bounds = stats.norm.interval(c, loc=center, scale=spread)
    lower, higher = bounds
    print(lower, higher)
    return lower, higher
######################################################
#########  Error reconstruction (gradient descent)  #########
######################################################
# Accumulates the per-sample reconstruction-error vectors f.
F=[]
def restructure(Xtest):
    """Estimate a sparse fault/error vector f by gradient descent.

    Iteratively updates f so that (Xtest - f) is well reconstructed by the
    pretrained autoencoder, with an L1 penalty (weight ``lambda1``) keeping
    f sparse.  Returns the f from the last completed iteration.

    NOTE(review): ``Xtest - torch.matmul(torch.ones(tex_datastep, 1), f)``
    assumes the input spans ``tex_datastep`` rows; when a single 1-D row is
    passed (method 2 below) this broadcasts to a (tex_datastep, 13) tensor
    of identical rows — confirm this replication is intended.
    """
    _, decoded_teX_update = autoencoder(Xtest) # run the data through the network
    flag = 0 # first-iteration marker: f is initialized exactly once
    echo = 0 # iteration counter
    loss_val_history = []
    loss_val = loss_reconstruction(decoded_teX_update, Xtest)
    # Run at least 20 iterations, then continue until loss <= 0.015 (cap 200).
    while echo-20< 0 or loss_val.data.numpy() > 0.015:
        if flag == 0 :
            f = Variable(torch.zeros(1,13), requires_grad=True)
            Xtest_update = Xtest
        Xtest_update = Xtest - torch.matmul(torch.ones(tex_datastep, 1), f) # corrected input: x = x - f
        f_history = f
        l1_regularization = lambda1 * torch.norm(f, 1) # L1 norm of the error vector f
        _, decoded_teX_update = autoencoder(Xtest_update) # decode the corrected input
        loss_val = loss_reconstruction(Xtest_update, decoded_teX_update)+ l1_regularization # L1 term keeps f sparse
        loss_val_history.append(loss_val.data.numpy())
        # Debug: print iteration count, current f, and loss.
        # print('echo:| ', echo, 'F:\n', f_history.data.numpy(), '\nchange loss: %.8f' % loss_val.data.numpy(), "\n")
        weight1 = torch.ones(loss_val.size()) # unit weights for the gradient
        loss_val_backward_first = torch.autograd.grad(loss_val, f, weight1, create_graph= True) # d(loss)/d(f)
        f = f - 0.1*loss_val_backward_first[0] # gradient-descent step on f
        flag = 1
        echo = echo + 1
        if echo >= 200:
            break
    return f_history # return the reconstructed error vector
######################################################
#########  Solve the error reconstruction (two ways)  #########
######################################################
# Method 1: one error vector for the whole data segment.
# f_history = restructure(Xtest)
# Method 2 (active): one error vector per test sample.
for i in range(Xtest_row):
    F.extend(restructure(Xtest[i,:]).data.numpy())
    print('Epoch [{}/{}]' .format(i, Xtest_row))
F = np.asarray(F)
######################################################
#########  Fault detection: H2 / SPE confidence limits  #########
######################################################
encoded_trX, decoded_trX = autoencoder(Xtrain) # training set -> latent code + reconstruction
encoded_teX, decoded_teX = autoencoder(Xtest) # test set -> latent code + reconstruction
# ---------------------- record shapes and create history lists -------------------
H2_trX_shape, _ = encoded_trX.shape
SPE_trX_shape, _ = decoded_trX.shape
H2_teX_shape, _ = encoded_teX.shape
SPE_teX_shape, _ = decoded_teX.shape
H2_trX_history = []
SPE_trX_history = []
H2_teX_history = []
SPE_teX_history = []
H2_rec_history = []
SPE_rec_history = []
# ---------------------- H2 statistic and its confidence limit ------------------
# Formula: H2 = h.T * h on each sample's latent code.
for i in range(H2_trX_shape):
    H2 = torch.matmul(encoded_trX[i, :].T, encoded_trX[i, :])
    H2_trX_history.append(H2.detach().numpy())
H2_trX_history = np.array(H2_trX_history)
l_H2_trX, h_H2_trX = confidence(H2_trX_history, 0.99)
for i in range(H2_teX_shape):
    H2 = torch.matmul(encoded_teX[i, :].T, encoded_teX[i, :])
    H2_teX_history.append(H2.detach().numpy())
H2_teX_history = np.array(H2_teX_history)
l_H2_teX, h_H2_teX = confidence(H2_teX_history, 0.99)
# np.savetxt(".\\SVDD\\SVDD\\data\\H2_teX_history.csv", H2_teX_history , delimiter=',')
# ---------------------- SPE statistic and its confidence limit ------------------
# NOTE(review): three alternative SPE formulations were present here as
# commented-out code in the original source: plain residual energy
# SPE = (x-x').T (x-x'), a Mahalanobis-style form with the inverse
# covariance, and one with the pseudo-inverse; a pair of loops over
# *_rec_history also sat disabled below.  They were condensed into this
# note for readability — recover them from version control if needed.
# Active formulation: sum of squared standardized residuals,
#   stat = sum([((x-x') - mean(x-x')) / var(x-x')]^2)
for i in range(SPE_teX_shape):
    x_teX = ((Xtest-decoded_teX)[i, :].T).detach().numpy().reshape(13,1)
    means = np.mean(x_teX,axis = 0)
    var = np.var(x_teX)
    mean_dataMat = x_teX - means
    mean_dataMat = mean_dataMat/var
    SPE = np.sum(mean_dataMat**2)
    SPE_teX_history.append(SPE)
SPE_teX_history = np.array(SPE_teX_history)
l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99)
# np.savetxt(".\\SVDD\\SVDD\\data\\SPE_teX_history.csv", SPE_teX_history , delimiter=',')
######################################################
#########              Plotting              #########
######################################################
font_title = {'family' : 'Times New Roman', 'weight': 'normal', 'size': 13}
font_lable = {'family' : 'Times New Roman', 'weight': 'normal', 'size': 10}
encode_show_trX = encoded_trX.cpu().detach().numpy() # training-set latent codes
test_show = Xtest.cpu().detach().numpy() # raw test data
decode_show_teX = decoded_teX.cpu().detach().numpy() # reconstructed test data
# Xtest_update_show = Xtest_update.data.numpy()
# --------------------- loss-curve visualization (disabled) -------------------
mumber_plt = 1
# NOTE(review): large disabled plotting sections (loss curve, per-variable
# comparisons of raw vs. decoded data for all 13 variables, a bar chart of
# f, and training-set H2/SPE charts) were condensed into this note for
# readability; recover them from version control if needed.
# ------------ per-variable reconstructed error vectors F ---------------
mumber_plt = mumber_plt + 1
plt.figure(mumber_plt)
plt.subplot(7, 2, 1)
plt.plot(F[:, 0])
plt.subplot(7, 2, 2)
plt.plot(F[:, 1])
plt.subplot(7, 2, 3)
plt.plot(F[:, 2])
plt.subplot(7, 2, 4)
plt.plot(F[:, 3])
plt.subplot(7, 2, 5)
plt.plot(F[:, 4])
plt.subplot(7, 2, 6)
plt.plot(F[:, 5])
plt.subplot(7, 2, 7)
plt.plot(F[:, 6])
plt.subplot(7, 2, 8)
plt.plot(F[:, 7])
plt.subplot(7, 2, 9)
plt.plot(F[:, 8])
plt.subplot(7, 2, 10)
plt.plot(F[:, 9])
plt.subplot(7, 2, 11)
plt.plot(F[:, 10])
plt.subplot(7, 2, 12)
plt.plot(F[:, 11])
plt.subplot(7, 2, 13)
plt.plot(F[:, 12],color = 'black')
# ------------ H2 and SPE control charts --------------
mumber_plt = mumber_plt + 1
plt.figure(mumber_plt)
plt.subplot(2,1,1)
plt.ylim(0,150)
plt.plot(H2_teX_history)
plt.plot([0, len(H2_teX_history)], [h_H2_teX, h_H2_teX], color = 'red')
plt.legend(labels= ["Original test set","confidence limit 99%"], loc = 'upper left')
plt.xlabel('Process data', font_lable)
plt.ylabel('Amplitude', font_lable)
plt.title("H^2 Statistics", font_title)
plt.subplot(2,1,2)
plt.plot(SPE_teX_history)
plt.plot([0, len(SPE_teX_history )], [h_SPE_teX, h_SPE_teX], color = 'red')
plt.legend(labels= ["Original test set","confidence limit 99%"], loc = 'upper left')
plt.xlabel('Process data', font_lable)
plt.ylabel('Amplitude', font_lable)
plt.title("SPE Statistics", font_title)
plt.show()
| import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import pandas as pd
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
######################################################
######### 数据加载和处理 #########
######################################################
# ------------------------数据读入---------------------
fileData = pd.read_csv(r'./data/meltadata.txt', sep="\t", dtype=np.float32, header=None)
wholeData = fileData.values
# ----------------------定义训练数据范围 ----------------------
trx_start = 0
trx_datastep = 700
# ----------------------定义测试数据范围 ----------------------
tex_start = 0
tex_datastep = 1050
# ----------------------定义学习速率 ----------------------
LR = 0.001
# ----------------------定义正则化系数 ----------------------
lambda1 = 0.02
# ----------------------数据格式转换-------------------
trX, teX = wholeData[trx_start:trx_start + trx_datastep , :13], wholeData[tex_start: tex_start + tex_datastep, :13]
Xtrain = trX.astype(np.float32)
Xtrain = torch.Tensor(Xtrain)
Xtest = teX.astype(np.float32)
Xtest = torch.Tensor(Xtest)
Xtest_row, Xtest_list = Xtest.shape
######################################################
######### 自编码器模型与网络构建 #########
######################################################
# ----------------------网络构建-----------------------
class AutoEncoder(nn.Module):
def __init__(self):
super(AutoEncoder, self).__init__()
self.encoder = nn.Sequential( # 编码网络层
nn.Linear(Xtest_list, 16),
nn.Tanh(),
nn.Linear(16, 32),
nn.Tanh(),
nn.Linear(32, 64),
nn.Tanh(),
nn.Linear(64, 128),
)
self.decoder = nn.Sequential( # 解码网络层
nn.Linear(128,64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32,16),
nn.Tanh(),
nn.Linear(16, Xtest_list),
nn.Sigmoid()
)
def forward(self, x): # 前向传递层
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return encoded, decoded
autoencoder = AutoEncoder() # 模型实例化
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR) # 优化器
loss_func = nn.BCELoss() # 交叉熵损失
loss_reconstruction = nn.MSELoss() # 误差重构中的损失函数
autoencoder.load_state_dict(torch.load('E:\PycharmCode\Autoencoder_DAE_tanh(Ascending dimension)\model\\autoencoder_DAE_tanh_1000.pkl')) # 加载模型
######################################################
######### 函数: 计算置信区间函数 #########
######################################################
def confidence(data, c=0.95 ):
# ddof取值为1是因为在统计学中样本的标准偏差除的是(N-1)而不是N,统计学中的标准偏差除的是N
# SciPy中的std计算默认是采用统计学中标准差的计算方式
mean, std = data.mean(), data.std(ddof=1) # 计算均值和标准差
# print(mean, std)
# 计算置信区间
# 默认0.95的置信水平
lower, higher = stats.norm.interval(c, loc=mean, scale=std)
print(lower, higher)
return lower, higher
######################################################
######### 函数: 误差重构(梯度下降) #########
######################################################
F=[]
def restructure(Xtest):
_, decoded_teX_update = autoencoder(Xtest) # 测试集经过网络并返回编码层和解码层数据
flag = 0 # 定义标签 判断是否是第一次进入循环
echo = 0
loss_val_history = []
loss_val = loss_reconstruction(decoded_teX_update, Xtest)
while echo-20< 0 or loss_val.data.numpy() > 0.015:
if flag == 0 :
f = Variable(torch.zeros(1,13), requires_grad=True)
Xtest_update = Xtest
Xtest_update = Xtest - torch.matmul(torch.ones(tex_datastep, 1), f) # 更新重构原始数据 x = x - f
f_history = f
l1_regularization = lambda1 * torch.norm(f, 1) # 求重构误差f的L1范数
_, decoded_teX_update = autoencoder(Xtest_update) # 求输入变量重构后的解码数据
loss_val = loss_reconstruction(Xtest_update, decoded_teX_update)+ l1_regularization # 在误差函数中加入f的L1正则项,使得f稀疏
loss_val_history.append(loss_val.data.numpy())
# 打印循环次数,偏差,损失函数
# print('echo:| ', echo, 'F:\n', f_history.data.numpy(), '\nchange loss: %.8f' % loss_val.data.numpy(), "\n")
weight1 = torch.ones(loss_val.size()) # 对误差loss_val构建权重为1的权重矩阵
loss_val_backward_first = torch.autograd.grad(loss_val, f, weight1, create_graph= True) # 误差函数loss_val对f求一阶导数
f = f - 0.1*loss_val_backward_first[0] # 梯度下降更新偏差
flag = 1
echo = echo + 1
if echo >= 200:
break
return f_history # 返回重构误差
######################################################
######### 误差重构求解(两种方法) #########
######################################################
# 方法①(选取一段数据求解这段数据的整体重构误差)
# f_history = restructure(Xtest)
# 方法②(选取一段数据对每个数据点进行误差重构)
for i in range(Xtest_row):
F.extend(restructure(Xtest[i,:]).data.numpy())
print('Epoch [{}/{}]' .format(i, Xtest_row))
F = np.asarray(F)
######################################################
######### 故障检测(计算H2和SPE的置信区间) #########
######################################################
encoded_trX, decoded_trX = autoencoder(Xtrain) # 训练集经过网络并返回编码层和解码层数据
encoded_teX, decoded_teX = autoencoder(Xtest) # 训练集经过网络并返回编码层和解码层数据
# ----------------------记录维度与创建数组-------------------
H2_trX_shape, _ = encoded_trX.shape
SPE_trX_shape, _ = decoded_trX.shape
H2_teX_shape, _ = encoded_teX.shape
SPE_teX_shape, _ = decoded_teX.shape
H2_trX_history = []
SPE_trX_history = []
H2_teX_history = []
SPE_teX_history = []
H2_rec_history = []
SPE_rec_history = []
# ----------------------计算H2统计量与其置信限------------------
# 公式:H2 = x.T*x
for i in range(H2_trX_shape):
H2 = torch.matmul(encoded_trX[i, :].T, encoded_trX[i, :])
H2_trX_history.append(H2.detach().numpy())
H2_trX_history = np.array(H2_trX_history)
l_H2_trX, h_H2_trX = confidence(H2_trX_history, 0.99)
for i in range(H2_teX_shape):
H2 = torch.matmul(encoded_teX[i, :].T, encoded_teX[i, :])
H2_teX_history.append(H2.detach().numpy())
H2_teX_history = np.array(H2_teX_history)
l_H2_teX, h_H2_teX = confidence(H2_teX_history, 0.99)
# np.savetxt(".\\SVDD\\SVDD\\data\\H2_teX_history.csv", H2_teX_history , delimiter=',')
# ----------------------计算SPE统计量与其置信限------------------
# 公式:SPE=(x-x').T*(x-x')
# for i in range(SPE_trX_shape):
# SPE = torch.matmul((Xtrain-decoded_trX)[i, :].T, (Xtrain-decoded_trX)[i, :])
# SPE_trX_history.append(SPE.detach().numpy())
# SPE_trX_history = np.array(SPE_trX_history)
# l_SPE_trX, h_SPE_trX = confidence(SPE_trX_history, 0.99)
# for i in range(SPE_teX_shape):
# SPE = torch.matmul((Xtest-decoded_teX)[i, :].T, (Xtest-decoded_teX)[i, :])
# SPE_teX_history.append(SPE.detach().numpy())
# SPE_teX_history = np.array(SPE_teX_history)
# l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99)
# ----------------------加入协方差(逆)------------------
# 公式:SPE=(x-x').T*E^(-1)*(x-x')
# for i in range(SPE_trX_shape):
# x_trX = ((Xtrain-decoded_trX)[i, :].T).detach().numpy().reshape(13,1)
# means = np.mean(x_trX,axis = 0)
# mean_dataMat = x_trX - means
# cov_trX = 1/13*(np.dot(mean_dataMat,mean_dataMat.T))
# cov_trX = np.linalg.inv(cov_trX) #逆
# E_trX = torch.from_numpy(cov_trX)
# SPE_temp = torch.matmul((Xtrain-decoded_trX)[i, :].T, E_trX)
# SPE = torch.matmul(SPE_temp,(Xtrain-decoded_trX)[i, :])
# SPE_trX_history.append(SPE.detach().numpy())
# SPE_trX_history = np.array(SPE_trX_history)
# l_SPE_trX, h_SPE_trX = confidence(SPE_trX_history, 0.99)
# ----------------------加入协方差(伪逆)------------------
# 公式:SPE=(x-x').T*E^(-1)*(x-x')
# for i in range(SPE_teX_shape):
# x_teX = ((Xtest-decoded_teX)[i, :].T).detach().numpy().reshape(13,1)
# means = np.mean(x_teX,axis = 0)
# mean_dataMat = x_teX - means
# cov_teX = 1/13*(np.dot(mean_dataMat,mean_dataMat.T))
# cov_teX = np.linalg.pinv(cov_teX) #伪逆
# E_teX = torch.from_numpy(cov_teX)
# SPE_temp = torch.matmul((Xtest-decoded_teX)[i, :].T, E_teX)
# SPE = torch.matmul(SPE_temp,(Xtest-decoded_teX)[i, :])
# SPE_teX_history.append(SPE.detach().numpy())
# SPE_teX_history = np.array(SPE_teX_history)
# l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99)
# ---------- Squared sum of each sample's standardized reconstruction error ----------
# Statistic: SPE = sum( (((x - x') - mean(x - x')) / var(x - x'))^2 )
# computed independently for every test sample over its reconstruction-error vector.
for i in range(SPE_teX_shape):
    # Per-sample reconstruction error as a column vector; -1 infers the
    # feature count instead of hard-coding 13, so other variable counts work too.
    err_vec = ((Xtest - decoded_teX)[i, :].T).detach().numpy().reshape(-1, 1)
    err_mean = np.mean(err_vec, axis=0)
    # population variance (ddof=0), a single scalar over all features of this sample
    err_var = np.var(err_vec)
    standardized = (err_vec - err_mean) / err_var
    SPE = np.sum(standardized ** 2)
    SPE_teX_history.append(SPE)
SPE_teX_history = np.array(SPE_teX_history)
# 99% confidence interval for the SPE statistic (l_* lower bound, h_* upper bound)
l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99)
# np.savetxt(".\\SVDD\\SVDD\\data\\SPE_teX_history.csv", SPE_teX_history , delimiter=',')
# for i in range(H2_rec_shape):
# H2 = torch.matmul(encoded_rec[i, :].T, encoded_rec[i, :])
# H2_rec_history.append(H2.detach().numpy())
# H2_rec_history = np.array(H2_rec_history)
# for i in range(SPE_rec_shape):
# SPE = torch.matmul((decoded_rec1-decoded_rec)[i, :].T, (decoded_rec1-decoded_rec)[i, :])
# SPE_rec_history.append(SPE.detach().numpy())
# SPE_rec_history = np.array(SPE_rec_history)
######################################################
#########       Result printing / plotting   #########
######################################################
# Matplotlib font settings used for panel titles and axis labels below.
font_title = dict(family='Times New Roman', weight='normal', size=13)
font_lable = dict(family='Times New Roman', weight='normal', size=10)
# Bring tensors back to host memory as NumPy arrays for plotting.
encode_show_trX = encoded_trX.cpu().detach().numpy()   # encoded training-set data
test_show = Xtest.cpu().detach().numpy()               # raw test-set data
decode_show_teX = decoded_teX.cpu().detach().numpy()   # decoded test-set data
# Xtest_update_show = Xtest_update.data.numpy()
# --------------------- loss-function visualization -------------------
mumber_plt = 1  # running figure counter
# plt.figure(mumber_plt)
# plt.plot(loss_val_history)
# plt.title("Loss value in the process of error reconstruction")
# # plt.ylim(0, 0.2)
# # ------------pytorch测试数据与解码数据对比---------------
# mumber_plt = mumber_plt + 1
# plt.figure(mumber_plt)
# plt.subplot(7,2,1), plt.plot(test_show[:,0]), plt.plot(decoded_show_rec[:,0] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据0",),plt.ylim(0, 1)
# plt.subplot(7,2,2), plt.plot(test_show[:,1]), plt.plot(decoded_show_rec[:,1] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据1"),plt.ylim(0, 1)
# plt.subplot(7,2,3), plt.plot(test_show[:,2]), plt.plot(decoded_show_rec[:,2] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据2"),plt.ylim(0, 1)
# plt.subplot(7,2,4), plt.plot(test_show[:,3]), plt.plot(decoded_show_rec[:,3] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据3"),plt.ylim(0, 1)
# plt.subplot(7,2,5), plt.plot(test_show[:,4]), plt.plot(decoded_show_rec[:,4] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据4"),plt.ylim(0, 1)
# plt.subplot(7,2,6), plt.plot(test_show[:,5]), plt.plot(decoded_show_rec[:,5] , color = 'red', linewidth = 1.0, linestyle = '--'),
# plt.title("数据5"),plt.ylim(0, 1)
# plt.subplot(7,2,7), plt.plot(test_show[:,6]), plt.plot(decode_show_teX[:,6] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据6"),plt.ylim(0, 1)
# plt.subplot(7,2,8), plt.plot(test_show[:,7]), plt.plot(decoded_show_rec[:,7] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据7"),plt.ylim(0, 1)
# plt.subplot(7,2,9), plt.plot(test_show[:,8]), plt.plot(decoded_show_rec[:,8] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据8"),plt.ylim(0, 1)
# plt.subplot(7,2,10), plt.plot(test_show[:,9]), plt.plot(decoded_show_rec[:,9] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据9"),plt.ylim(0, 1)
# plt.subplot(7,2,11), plt.plot(test_show[:,10]), plt.plot(decoded_show_rec[:,10] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据10"),plt.ylim(0, 1)
# plt.subplot(7,2,12), plt.plot(test_show[:,11]), plt.plot(decoded_show_rec[:,11] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据11"),plt.ylim(0, 1)
# plt.subplot(7,2,13), plt.plot(test_show[:,12]), plt.plot(decoded_show_rec[:,12] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据12"),plt.ylim(0, 1)
# # ------------pytorch重建数据--------------
# mumber_plt = mumber_plt + 1
# plt.figure(mumber_plt)
# plt.subplot(7,2,1), plt.plot(test_show[:,0]), plt.plot(decode_show_teX[:,0] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据0",),plt.ylim(0, 1)
# plt.subplot(7,2,2), plt.plot(test_show[:,1]), plt.plot(decode_show_teX[:,1] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据1"),plt.ylim(0, 1)
# plt.subplot(7,2,3), plt.plot(test_show[:,2]), plt.plot(decode_show_teX[:,2] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据2"),plt.ylim(0, 1)
# plt.subplot(7,2,4), plt.plot(test_show[:,3]), plt.plot(decode_show_teX[:,3] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据3"),plt.ylim(0, 1)
# plt.subplot(7,2,5), plt.plot(test_show[:,4]), plt.plot(decode_show_teX[:,4] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据4"),plt.ylim(0, 1)
# plt.subplot(7,2,6), plt.plot(test_show[:,5]), plt.plot(decode_show_teX[:,5] , color = 'red', linewidth = 1.0, linestyle = '--'),
# plt.title("数据5"),plt.ylim(0, 1)
# plt.subplot(7,2,7), plt.plot(test_show[:,6]), plt.plot(decode_show_teX[:,6] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据6"),plt.ylim(0, 1)
# plt.subplot(7,2,8), plt.plot(test_show[:,7]), plt.plot(decode_show_teX[:,7] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据7"),plt.ylim(0, 1)
# plt.subplot(7,2,9), plt.plot(test_show[:,8]), plt.plot(decode_show_teX[:,8] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据8"),plt.ylim(0, 1)
# plt.subplot(7,2,10), plt.plot(test_show[:,9]), plt.plot(decode_show_teX[:,9] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据9"),plt.ylim(0, 1)
# plt.subplot(7,2,11), plt.plot(test_show[:,10]), plt.plot(decode_show_teX[:,10] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据10"),plt.ylim(0, 1)
# plt.subplot(7,2,12), plt.plot(test_show[:,11]), plt.plot(decode_show_teX[:,11] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据11"),plt.ylim(0, 1)
# plt.subplot(7,2,13), plt.plot(test_show[:,12]), plt.plot(decode_show_teX[:,12] , color = 'red', linewidth = 1.0, linestyle = '--')
# plt.title("数据12"),plt.ylim(0, 1)
# ------------ Reconstructed-error data F, one subplot per process variable ------------
mumber_plt = mumber_plt + 1
plt.figure(mumber_plt)
# 13 process variables on a 7x2 grid; the last variable is drawn in black,
# matching the original highlight. Loop replaces 13 copy-pasted subplot calls.
for col in range(13):
    plt.subplot(7, 2, col + 1)
    if col == 12:
        plt.plot(F[:, col], color='black')
    else:
        plt.plot(F[:, col])
# plt.legend(labels= ["data1","data2","data3","data4","data5","data6","data7","data8","data9",
# "data10","data11","data12","data13"], loc = 'upper left')
# plt.xlabel('Data point', font_lable)
# plt.ylabel('Amplitude', font_lable)
# x=[1,2,3,4,5,6,7,8,9,10,11,12,13]
# plt.bar(x, f_history.data.numpy()[0,:])
# plt.axis([0.5,13.5,-1,1])
# plt.xlabel('Process data', font_lable)
# plt.ylabel('Amplitude', font_lable)
# # ------------pytorc H2与SPE显示--------------
# #H2显示
# mumber_plt = mumber_plt + 1
# plt.figure(mumber_plt)
# plt.subplot(2,1,1)
# plt.title("H2_trX")
# plt.ylim(0,150)
# plt.plot(H2_trX_history)
# plt.plot([0, len(H2_trX_history)], [h_H2_trX, h_H2_trX], color = 'red')
# plt.subplot(2,1,2)
# plt.title("SPE_trX")
# plt.ylim(0,1)
# plt.plot(SPE_trX_history)
# plt.plot([0, len(SPE_trX_history )], [h_SPE_trX, h_SPE_trX], color = 'red')
# ------------ H^2 and SPE monitoring charts for the test set ------------
mumber_plt = mumber_plt + 1
plt.figure(mumber_plt)
# (statistic history, 99% upper confidence limit, panel title, optional y-limits)
stat_panels = [
    (H2_teX_history, h_H2_teX, "H^2 Statistics", (0, 150)),
    (SPE_teX_history, h_SPE_teX, "SPE Statistics", None),
]
for panel_idx, (history, limit, title, ylim) in enumerate(stat_panels, start=1):
    plt.subplot(2, 1, panel_idx)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.plot(history)
    # horizontal red line marking the 99% confidence limit
    plt.plot([0, len(history)], [limit, limit], color='red')
    plt.legend(labels=["Original test set", "confidence limit 99%"], loc='upper left')
    plt.xlabel('Process data', font_lable)
    plt.ylabel('Amplitude', font_lable)
    plt.title(title, font_title)
plt.show()
----------------------计算H2统计量与其置信限------------------ # 公式:H2 = x.T*x # np.savetxt(".\\SVDD\\SVDD\\data\\H2_teX_history.csv", H2_teX_history , delimiter=',') # ----------------------计算SPE统计量与其置信限------------------ # 公式:SPE=(x-x').T*(x-x') # for i in range(SPE_trX_shape): # SPE = torch.matmul((Xtrain-decoded_trX)[i, :].T, (Xtrain-decoded_trX)[i, :]) # SPE_trX_history.append(SPE.detach().numpy()) # SPE_trX_history = np.array(SPE_trX_history) # l_SPE_trX, h_SPE_trX = confidence(SPE_trX_history, 0.99) # for i in range(SPE_teX_shape): # SPE = torch.matmul((Xtest-decoded_teX)[i, :].T, (Xtest-decoded_teX)[i, :]) # SPE_teX_history.append(SPE.detach().numpy()) # SPE_teX_history = np.array(SPE_teX_history) # l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99) # ----------------------加入协方差(逆)------------------ # 公式:SPE=(x-x').T*E^(-1)*(x-x') # for i in range(SPE_trX_shape): # x_trX = ((Xtrain-decoded_trX)[i, :].T).detach().numpy().reshape(13,1) # means = np.mean(x_trX,axis = 0) # mean_dataMat = x_trX - means # cov_trX = 1/13*(np.dot(mean_dataMat,mean_dataMat.T)) # cov_trX = np.linalg.inv(cov_trX) #逆 # E_trX = torch.from_numpy(cov_trX) # SPE_temp = torch.matmul((Xtrain-decoded_trX)[i, :].T, E_trX) # SPE = torch.matmul(SPE_temp,(Xtrain-decoded_trX)[i, :]) # SPE_trX_history.append(SPE.detach().numpy()) # SPE_trX_history = np.array(SPE_trX_history) # l_SPE_trX, h_SPE_trX = confidence(SPE_trX_history, 0.99) # ----------------------加入协方差(伪逆)------------------ # 公式:SPE=(x-x').T*E^(-1)*(x-x') # for i in range(SPE_teX_shape): # x_teX = ((Xtest-decoded_teX)[i, :].T).detach().numpy().reshape(13,1) # means = np.mean(x_teX,axis = 0) # mean_dataMat = x_teX - means # cov_teX = 1/13*(np.dot(mean_dataMat,mean_dataMat.T)) # cov_teX = np.linalg.pinv(cov_teX) #伪逆 # E_teX = torch.from_numpy(cov_teX) # SPE_temp = torch.matmul((Xtest-decoded_teX)[i, :].T, E_teX) # SPE = torch.matmul(SPE_temp,(Xtest-decoded_teX)[i, :]) # SPE_teX_history.append(SPE.detach().numpy()) # SPE_teX_history = 
np.array(SPE_teX_history) # l_SPE_teX, h_SPE_teX = confidence(SPE_teX_history, 0.99) # ----------------------变量误差减去均值除以方差的平方和------------------ # 公式:统计量 = sum([((x-x')-men(x-x'))/E]^2) # np.savetxt(".\\SVDD\\SVDD\\data\\SPE_teX_history.csv", SPE_teX_history , delimiter=',') # for i in range(H2_rec_shape): # H2 = torch.matmul(encoded_rec[i, :].T, encoded_rec[i, :]) # H2_rec_history.append(H2.detach().numpy()) # H2_rec_history = np.array(H2_rec_history) # for i in range(SPE_rec_shape): # SPE = torch.matmul((decoded_rec1-decoded_rec)[i, :].T, (decoded_rec1-decoded_rec)[i, :]) # SPE_rec_history.append(SPE.detach().numpy()) # SPE_rec_history = np.array(SPE_rec_history) ###################################################### ######### 数据打印 ######### ###################################################### # 训练集编码数据 # 测试集原始数据 # 测试集解码数据 # Xtest_update_show = Xtest_update.data.numpy() # ---------------------损失函数可视化------------------- # plt.figure(mumber_plt) # plt.plot(loss_val_history) # plt.title("Loss value in the process of error reconstruction") # # plt.ylim(0, 0.2) # # ------------pytorch测试数据与解码数据对比--------------- # mumber_plt = mumber_plt + 1 # plt.figure(mumber_plt) # plt.subplot(7,2,1), plt.plot(test_show[:,0]), plt.plot(decoded_show_rec[:,0] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据0",),plt.ylim(0, 1) # plt.subplot(7,2,2), plt.plot(test_show[:,1]), plt.plot(decoded_show_rec[:,1] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据1"),plt.ylim(0, 1) # plt.subplot(7,2,3), plt.plot(test_show[:,2]), plt.plot(decoded_show_rec[:,2] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据2"),plt.ylim(0, 1) # plt.subplot(7,2,4), plt.plot(test_show[:,3]), plt.plot(decoded_show_rec[:,3] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据3"),plt.ylim(0, 1) # plt.subplot(7,2,5), plt.plot(test_show[:,4]), plt.plot(decoded_show_rec[:,4] , color = 'red', linewidth = 1.0, linestyle = '--') # 
plt.title("数据4"),plt.ylim(0, 1) # plt.subplot(7,2,6), plt.plot(test_show[:,5]), plt.plot(decoded_show_rec[:,5] , color = 'red', linewidth = 1.0, linestyle = '--'), # plt.title("数据5"),plt.ylim(0, 1) # plt.subplot(7,2,7), plt.plot(test_show[:,6]), plt.plot(decode_show_teX[:,6] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据6"),plt.ylim(0, 1) # plt.subplot(7,2,8), plt.plot(test_show[:,7]), plt.plot(decoded_show_rec[:,7] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据7"),plt.ylim(0, 1) # plt.subplot(7,2,9), plt.plot(test_show[:,8]), plt.plot(decoded_show_rec[:,8] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据8"),plt.ylim(0, 1) # plt.subplot(7,2,10), plt.plot(test_show[:,9]), plt.plot(decoded_show_rec[:,9] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据9"),plt.ylim(0, 1) # plt.subplot(7,2,11), plt.plot(test_show[:,10]), plt.plot(decoded_show_rec[:,10] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据10"),plt.ylim(0, 1) # plt.subplot(7,2,12), plt.plot(test_show[:,11]), plt.plot(decoded_show_rec[:,11] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据11"),plt.ylim(0, 1) # plt.subplot(7,2,13), plt.plot(test_show[:,12]), plt.plot(decoded_show_rec[:,12] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据12"),plt.ylim(0, 1) # # ------------pytorch重建数据-------------- # mumber_plt = mumber_plt + 1 # plt.figure(mumber_plt) # plt.subplot(7,2,1), plt.plot(test_show[:,0]), plt.plot(decode_show_teX[:,0] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据0",),plt.ylim(0, 1) # plt.subplot(7,2,2), plt.plot(test_show[:,1]), plt.plot(decode_show_teX[:,1] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据1"),plt.ylim(0, 1) # plt.subplot(7,2,3), plt.plot(test_show[:,2]), plt.plot(decode_show_teX[:,2] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据2"),plt.ylim(0, 1) # plt.subplot(7,2,4), 
plt.plot(test_show[:,3]), plt.plot(decode_show_teX[:,3] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据3"),plt.ylim(0, 1) # plt.subplot(7,2,5), plt.plot(test_show[:,4]), plt.plot(decode_show_teX[:,4] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据4"),plt.ylim(0, 1) # plt.subplot(7,2,6), plt.plot(test_show[:,5]), plt.plot(decode_show_teX[:,5] , color = 'red', linewidth = 1.0, linestyle = '--'), # plt.title("数据5"),plt.ylim(0, 1) # plt.subplot(7,2,7), plt.plot(test_show[:,6]), plt.plot(decode_show_teX[:,6] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据6"),plt.ylim(0, 1) # plt.subplot(7,2,8), plt.plot(test_show[:,7]), plt.plot(decode_show_teX[:,7] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据7"),plt.ylim(0, 1) # plt.subplot(7,2,9), plt.plot(test_show[:,8]), plt.plot(decode_show_teX[:,8] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据8"),plt.ylim(0, 1) # plt.subplot(7,2,10), plt.plot(test_show[:,9]), plt.plot(decode_show_teX[:,9] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据9"),plt.ylim(0, 1) # plt.subplot(7,2,11), plt.plot(test_show[:,10]), plt.plot(decode_show_teX[:,10] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据10"),plt.ylim(0, 1) # plt.subplot(7,2,12), plt.plot(test_show[:,11]), plt.plot(decode_show_teX[:,11] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据11"),plt.ylim(0, 1) # plt.subplot(7,2,13), plt.plot(test_show[:,12]), plt.plot(decode_show_teX[:,12] , color = 'red', linewidth = 1.0, linestyle = '--') # plt.title("数据12"),plt.ylim(0, 1) # # # ------------pytorch重构数据显示--------------- # plt.legend(labels= ["data1","data2","data3","data4","data5","data6","data7","data8","data9", # "data10","data11","data12","data13"], loc = 'upper left') # plt.xlabel('Data point', font_lable) # plt.ylabel('Amplitude', font_lable) # x=[1,2,3,4,5,6,7,8,9,10,11,12,13] # plt.bar(x, f_history.data.numpy()[0,:]) # 
plt.axis([0.5,13.5,-1,1]) # plt.xlabel('Process data', font_lable) # plt.ylabel('Amplitude', font_lable) # # ------------pytorc H2与SPE显示-------------- # #H2显示 # mumber_plt = mumber_plt + 1 # plt.figure(mumber_plt) # plt.subplot(2,1,1) # plt.title("H2_trX") # plt.ylim(0,150) # plt.plot(H2_trX_history) # plt.plot([0, len(H2_trX_history)], [h_H2_trX, h_H2_trX], color = 'red') # plt.subplot(2,1,2) # plt.title("SPE_trX") # plt.ylim(0,1) # plt.plot(SPE_trX_history) # plt.plot([0, len(SPE_trX_history )], [h_SPE_trX, h_SPE_trX], color = 'red') # #SPE显示 | 2.241047 | 2 |
D_Network_analysis/solutions/ex2_1a.py | oercompbiomed/CBM101 | 7 | 6616385 | print("n_nodes : ", G.number_of_nodes())
print("n_edges : ", G.number_of_edges()) | print("n_nodes : ", G.number_of_nodes())
print("n_edges : ", G.number_of_edges()) | none | 1 | 2.204211 | 2 | |
test/misc/test_query_performance/archetype_builder.py | madineniguna/EHR | 23 | 6616386 | # -*- coding: utf-8 -*-
import json
from random import randint, uniform, choice
from pyehr.utils import decode_dict, cleanup_json
class ArchetypeBuilder(object):
def __init__(self, archetype_id, archetype_dir):
self.archetype_id = archetype_id
self.archetype_dir = archetype_dir
def _get_quantity(self, value, units):
return {
'magnitude': value,
'units': units
}
def _get_dv_text(self, text):
return {
'value' : text
}
def _get_dv_coded_text(self, coded_text_value, code_string):
return {
'value' : coded_text_value,
'defining_code' : {
'terminology_id' : {
'value' : 'local::'
},
'code_string' : code_string
}
}
def _clean_archetype(self, archetype):
cleaned = cleanup_json(archetype)
return decode_dict(cleaned)
def _get_dv_duration(self, value, unit):
return '%s%s' %(value, unit)
def _get_dv_date_time(self, datetime_str): #todo: check format
return datetime_str
def _get_dv_multimedia(self, media_type, size):
return {
'media_type' : media_type,
'size' : size
}
def _get_dv_proportion(self, proportion, value):
return {
"numerator" : proportion['numerator'],
"denominator" : proportion['denominator'],
"type" : {
"value" : value
}
}
def _load_file(self):
path = '/'.join([self.archetype_dir, self.archetype_id])
with open("%s.json" % path) as f:
doc = json.loads(f.read())
try:
doc = doc['archetype_details']
except KeyError:
raise Exception("Invalid archetype file: %s" % self.archetype_id)
else:
doc = decode_dict(doc)
return doc
def build(self):
raise NotImplementedError()
class BloodGlucose(ArchetypeBuilder):
def __init__(self, archetype_dir, test_name=None, specimen_arch_detail=None, diet_intake=None,
diet_duration=None, glucose_dose=None, glucose_timing=None,
insulin_dose=None, insulin_route=None, laboratory_result_id=None,
result_datetime= None):
archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-blood_glucose.v1'
self.test_name = test_name or None
self.specimen_arch_detail = specimen_arch_detail or None
self.diet_intake = diet_intake or 'at0.95'
self.diet_duration = diet_duration or 'P'
self.glucose_dose = glucose_dose or randint(3, 10)
self.glucose_timing = glucose_timing or 'at0.104'
self.insulin_dose = insulin_dose or randint(2, 10)
self.insulin_route = insulin_route or 'at0.113'
self.laboratory_result_id = laboratory_result_id or None
self.result_datetime = result_datetime or None
super(BloodGlucose, self).__init__(archetype_id, archetype_dir)
def build(self):
bg_doc = self._load_file()
if self.specimen_arch_detail: #decide about handling an example of nested archetipe
pass
if self.diet_intake:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.90'][0]['items']['at0.91'] = \
{"value" : self._get_dv_coded_text(self.diet_intake, 'at0.92' )}
if self.diet_duration:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.90'][0]['items']['at0.96'] = \
{'value' : self._get_dv_duration(self.diet_duration, 'H')}
if self.glucose_dose:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.98'][0]['items']['at0.100'] = \
{'value' : self._get_quantity(self.glucose_dose, 'gm')}
if self.glucose_timing:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.98'][0]['items']['at0.99'] = \
{'value': self._get_dv_coded_text(self.glucose_timing, 'at0.103')}
if self.insulin_dose:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.107'][0]['items']['at0.110'] = \
{'value' : self._get_quantity(self.insulin_dose, 'U')}
if self.insulin_route:
bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.107'][0]['items']['at0.111'] = \
{'value' : self._get_dv_coded_text(self.insulin_route, 'at0.112')}
if self.laboratory_result_id:
bg_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value' : self._get_dv_text(self.laboratory_result_id)}
if self.result_datetime:
bg_doc['protocol']['at0004'][0]['items']['at0075'] = {'value' : self._get_dv_date_time(self.result_datetime)}
return self.archetype_id, self._clean_archetype(bg_doc)
class BloodPressure(ArchetypeBuilder):
def __init__(self, archetype_dir, systolic=None, diastolic=None, mean_arterial=None, pulse=None):
archetype_id = 'openEHR-EHR-OBSERVATION.blood_pressure.v1'
self.systolic = systolic or randint(80, 250)
self.diastolic = diastolic or randint(60, 100)
self.mean_arterial = mean_arterial or randint(0, 1000)
self.pulse = pulse or randint(0,1000)
super(BloodPressure, self).__init__(archetype_id, archetype_dir)
def build(self):
bp_doc = self._load_file()
if self.systolic:
bp_doc['data']['at0001'][0]['events'][0]['at0006']['data']['at0003'][0]['items']['at0004'] =\
{'value': self._get_quantity(self.systolic, 'mm[Hg]')}
if self.diastolic:
bp_doc['data']['at0001'][0]['events'][0]['at0006']['data']['at0003'][0]['items']['at0005'] =\
{'value': self._get_quantity(self.diastolic, 'mm[Hg]')}
if self.mean_arterial:
bp_doc['data']['at0001'][0]['events'][0]['at0006']['data']['at0003'][0]['items']['at1006'] =\
{'value': self._get_quantity(self.mean_arterial, 'mm[Hg]')}
if self.pulse:
bp_doc['data']['at0001'][0]['events'][0]['at0006']['data']['at0003'][0]['items']['at1007'] =\
{'value': self._get_quantity(self.pulse, 'mm[Hg]')}
return self.archetype_id, self._clean_archetype(bp_doc)
class FullBloodCount(ArchetypeBuilder):
def __init__(self, archetype_dir, test_name=None, haemoglobin=None, mchc=None, mcv=None,
mch=None, lymphocytes=None, basophils=None, monocytes=None,
eosinophils=None, multimedia_representation=None,
laboratory_result_id=None, result_datetime=None):
archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-full_blood_count.v1'
self.test_name = test_name or None
self.haemoglobin = haemoglobin or randint(10, 40)
self.mchc = mchc or randint(25, 50)
self.mcv = mcv or randint(80, 100)
self.mch = mch or randint(30, 40)
self.lymphocytes = lymphocytes or round(uniform(2, 5), 2)
self.basophils = basophils or round(uniform(1, 3), 2)
self.monocytes = monocytes or round(uniform(0, 1.5), 2)
self.eosinophils = eosinophils or randint(3, 7)
self.multimedia_representation = multimedia_representation or None
self.laboratory_result_id = laboratory_result_id or None
self.result_datetime = result_datetime or None
super(FullBloodCount, self).__init__(archetype_id, archetype_dir)
def build(self):
fbc_doc = self._load_file()
if self.test_name:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0005'] =\
{'value' : self._get_dv_text(self.test_name)}
if self.haemoglobin:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.4'] = \
{'value' : self._get_quantity(self.haemoglobin, 'gm/l')}
if self.mchc:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.7'] = \
{'value' : self._get_quantity(self.mchc, 'gm/l')}
if self.mcv:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.8'] = \
{'value' : self._get_quantity(self.mcv, 'fl')}
if self.mch:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.9'] = \
{'value' : self._get_quantity(self.mcv, 'pg')}
if self.lymphocytes:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.14'][0]['items']['at0078.16]'] = \
{'value' : self._get_quantity(self.lymphocytes, '10*9/l')}
if self.basophils:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.14'][0]['items']['at0078.17]'] = \
{'value' : self._get_quantity(self.basophils, '10*9/l')}
if self.monocytes:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.14'][0]['items']['at0078.18]'] = \
{'value' : self._get_quantity(self.monocytes, '10*9/l')}
if self.eosinophils:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.14'][0]['items']['at0078.19]'] = \
{'value' : self._get_quantity(self.eosinophils, '10*9/l')}
if self.multimedia_representation:
fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0005'] =\
[{'value' : self._get_dv_multimedia(self.multimedia_representation['media_type'], self.multimedia_representation['size'])}]
if self.laboratory_result_id:
fbc_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value' : self._get_dv_text(self.laboratory_result_id)}
if self.result_datetime:
fbc_doc['protocol']['at0004'][0]['items']['at0075'] = {'value' : self._get_dv_date_time(self.result_datetime)}
return self.archetype_id, self._clean_archetype(fbc_doc)
class Lipids(ArchetypeBuilder):
def __init__(self, archetype_dir, test_name=None, specimen_detail=None, total_cholesterol=None,
tryglicerides=None, hdl=None, ldl=None, hdl_ldl_ratio=None,
laboratory_result_id=None, result_datetime= None):
archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-lipids.v1'
self.test_name = test_name or None
self.specimen_detail = specimen_detail or None
self.total_cholesterol = total_cholesterol or randint(150, 300)
self.tryglicerides = tryglicerides or randint(140, 550)
self.hdl = hdl or randint(50, 120)
self.ldl = ldl or randint(50, 120)
self.hdl_ldl_ratio = hdl_ldl_ratio or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
self.laboratory_result_id = laboratory_result_id or None
self.result_datetime = result_datetime or None
super(Lipids, self).__init__(archetype_id, archetype_dir)
def build(self):
lpd_doc = self._load_file()
if self.test_name:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0005'] =\
{'value' : self._get_dv_text(self.test_name)}
if self.specimen_detail:
pass #decide about handling an example of nested archetipe
if self.total_cholesterol:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.5'] = \
{'value' : self._get_quantity(self.total_cholesterol, "mg/dl")}
if self.tryglicerides:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.4'] = \
{'value' : self._get_quantity(self.tryglicerides, "mg/dl")}
if self.hdl:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.3'] = \
{'value' : self._get_quantity(self.hdl, "mg/dl")}
if self.ldl:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.2'] = \
{'value' : self._get_quantity(self.ldl, "mg/dl")}
if self.hdl_ldl_ratio:
lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0078.1'] = \
{'value' : self._get_dv_proportion(self.hdl_ldl_ratio, 1)}
if self.laboratory_result_id:
lpd_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value' : self._get_dv_text(self.laboratory_result_id)}
if self.result_datetime:
lpd_doc['protocol']['at0004'][0]['items']['at0075'] = {'value' : self._get_dv_date_time(self.result_datetime)}
return self.archetype_id, self._clean_archetype(lpd_doc)
class LiverFunction(ArchetypeBuilder):
def __init__(self, archetype_dir, test_name=None, alp=None, total_bilirubin=None, direct_bilirubin=None, indirect_bilirubin=None, alt=None, \
ast=None, ggt=None, albumin=None, total_protein=None, laboratory_result_id = None, result_datetime= None ):
archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-liver_function.v1'
self.test_name = test_name or None
self.alp = alp or randint(30, 45)
self.total_bilirubin = total_bilirubin or randint(1, 25)
self.direct_bilirubin = direct_bilirubin or randint (1, 9)
self.indirect_bilirubin = indirect_bilirubin or randint(10, 20)
self.alt = alt or randint(5, 50)
self.ast = ast or randint(10, 50)
self.ggt = ggt or randint(10, 50)
self.albumin = albumin or randint(30, 55)
self.total_protein = total_protein or randint(40, 65)
self.laboratory_result_id = laboratory_result_id or None
self.result_datetime = result_datetime or None
super(LiverFunction, self).__init__(archetype_id, archetype_dir)
def build(self):
lvf_doc = self._load_file()
if self.test_name:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['at0005'] =\
{'value' : self._get_dv_text(self.test_name)}
if self.alp:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.2'] =\
{'value' : self._get_quantity(self.alp, "U/l")}
if self.total_bilirubin:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.4'] =\
{'value' : self._get_quantity(self.total_bilirubin, "µmol/l")}
if self.direct_bilirubin:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.11'] =\
{'value' : self._get_quantity(self.direct_bilirubin, "µmol/l")}
if self.indirect_bilirubin:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.9'] =\
{'value' : self._get_quantity(self.indirect_bilirubin, "µmol/l")}
if self.alt:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.1'] =\
{'value' : self._get_quantity(self.alt, "U/l")}
if self.ast:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.3'] = \
{'value' : self._get_quantity(self.ast, "U/l")}
if self.ggt:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.3'] = \
{'value' : self._get_quantity(self.ggt, "U/l")}
if self.albumin:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.7'] = \
{'value' : self._get_quantity(self.albumin, "gm/l")}
if self.total_protein:
lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']['0078.10'] = \
{'value' : self._get_quantity(self.total_protein, "gm/l")}
if self.laboratory_result_id:
lvf_doc['protocol']['at0004'][0]['items']['at0068'] = {'value' : self._get_dv_text(self.laboratory_result_id)}
if self.result_datetime:
lvf_doc['protocol']['at0004'][0]['items']['at0075'] = {'value' : self._get_dv_date_time(self.result_datetime)}
return self.archetype_id, self._clean_archetype(lvf_doc)
class Thyroid(ArchetypeBuilder):
    """Builder for the openEHR thyroid panel lab-test observation.

    Unset values default to random numbers in plausible ranges so the
    builder can generate synthetic records.
    """

    def __init__(self, archetype_dir, test_name=None, tsh=None, ft3=None, total_t3=None,
                 ft4=None, total_t4=None, ft3_index=None, fti=None,
                 placer_id=None, filler_id=None, laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-thyroid.v1'
        self.test_name = test_name or None
        self.tsh = tsh or round(uniform(0, 4.5), 2)
        self.ft3 = ft3 or round(uniform(3, 7), 2)
        self.total_t3 = total_t3 or round(uniform(3, 7), 2)
        self.ft4 = ft4 or round(uniform(3, 20), 2)
        self.total_t4 = total_t4 or round(uniform(3, 20), 2)
        self.ft3_index = ft3_index or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
        self.fti = fti or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
        self.placer_id = placer_id or None
        self.filler_id = filler_id or None
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(Thyroid, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the thyroid values filled in."""
        thy_doc = self._load_file()
        items = thy_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.tsh:
            items['at0078.2'] = {'value': self._get_quantity(self.tsh, 'mIU/l')}
        if self.ft3:
            items['at0078.7'] = {'value': self._get_quantity(self.ft3, 'pmol/l')}
        if self.total_t3:
            items['at0078.8'] = {'value': self._get_quantity(self.total_t3, 'pmol/l')}
        if self.ft4:
            # BUG FIX: previously wrote self.ft3 into the FT4 node
            items['at0078.3'] = {'value': self._get_quantity(self.ft4, 'pmol/l')}
        if self.total_t4:
            # BUG FIX: previously wrote self.ft3 into the total-T4 node
            items['at0078.4'] = {'value': self._get_quantity(self.total_t4, 'pmol/l')}
        if self.ft3_index:
            items['at0078.9'] = {'value': self._get_dv_proportion(self.ft3_index, 1)}
        if self.fti:
            items['at0078.6'] = {'value': self._get_dv_proportion(self.fti, 1)}
        if self.placer_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0062'] = {'value': self.placer_id}
        if self.filler_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0063'] = {'value': self.filler_id}
        if self.laboratory_result_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            thy_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(thy_doc)
class UreaAndElectrolytes(ArchetypeBuilder):
    """Builder for the openEHR urea & electrolytes (U&E) lab-test observation.

    NOTE(review): the parameter is spelled 'sodum' (sic); kept unchanged for
    backward compatibility with existing callers.
    """

    def __init__(self, archetype_dir, test_name=None, sodum=None, potassium=None, chloride=None,
                 bicarbonate=None, urea=None, creatinine=None,
                 sp_ratio=None, laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-urea_and_electrolytes.v1'
        self.test_name = test_name or None
        self.sodum = sodum or randint(125, 150)
        self.potassium = potassium or round(uniform(3, 5.5), 2)
        self.chloride = chloride or randint(90, 120)
        self.bicarbonate = bicarbonate or randint(20, 30)
        self.urea = urea or round(uniform(1.5, 8), 2)
        self.creatinine = creatinine or round(uniform(0.1, 0.3), 2)
        self.sp_ratio = sp_ratio or {'numerator': randint(3, 4), 'denominator': randint(1, 4)}
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(UreaAndElectrolytes, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the U&E values filled in."""
        uae_doc = self._load_file()
        items = uae_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.sodum:
            items['at0078.5'] = {'value': self._get_quantity(self.sodum, 'mmol/l')}
        if self.potassium:
            items['at0078.4'] = {'value': self._get_quantity(self.potassium, 'mmol/l')}
        if self.chloride:
            items['at0078.3'] = {'value': self._get_quantity(self.chloride, 'mmol/l')}
        if self.bicarbonate:
            items['at0078.2'] = {'value': self._get_quantity(self.bicarbonate, 'mmol/l')}
        if self.urea:
            # BUG FIX: previously wrote self.bicarbonate into the urea node
            items['at0078.1'] = {'value': self._get_quantity(self.urea, 'mmol/l')}
        if self.creatinine:
            # BUG FIX: previously wrote self.bicarbonate into the creatinine node
            items['at0078.7'] = {'value': self._get_quantity(self.creatinine, 'mmol/l')}
        if self.sp_ratio:
            items['at0078.6'] = {'value': self._get_dv_proportion(self.sp_ratio, 1)}
        if self.laboratory_result_id:
            uae_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            uae_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(uae_doc)
class UrinAnalysis(ArchetypeBuilder):
    """Builder for the openEHR urinalysis observation.

    Each dipstick result is an at-code from the archetype's ordinal value
    set; unspecified results default to a randomly chosen code.
    """

    def __init__(self, archetype_dir, glucose=None, protein=None, bilirubin=None,
                 ketones=None, blood=None, ph=None, comments=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.urinalysis.v1'
        self.glucose = glucose or choice(['at0115', 'at0116', 'at0117', 'at0118', 'at0119', 'at0120'])
        self.protein = protein or choice(['at0096', 'at0097', 'at0098', 'at0099', 'at0100', 'at0101'])
        self.bilirubin = bilirubin or choice(['at0121', 'at0122', 'at0123', 'at0124'])
        self.ketones = ketones or choice(['at0109', 'at0110', 'at0111', 'at0112', 'at0113', 'at0114'])
        self.blood = blood or choice(['at0102', 'at0103', 'at0104', 'at0105', 'at0106', 'at0107', 'at0108'])
        self.ph = ph or choice(['at0127', 'at0128', 'at0129', 'at0130', 'at0131', 'at0132', 'at0133', 'at0134', 'at0176', 'at0177', 'at0179'])
        self.comments = comments or None
        super(UrinAnalysis, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the dipstick results set."""
        doc = self._load_file()
        items = doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        results = (
            ('at0050', self.glucose),
            ('at0095', self.protein),
            ('at0062', self.bilirubin),
            ('at0037', self.ketones),
            ('at0032', self.blood),
            ('at0126', self.ph),
        )
        for node_id, result in results:
            if result:
                items[node_id] = {'value': result}
        if self.comments:
            items['at0030'] = {'value': self._get_dv_text(self.comments)}
        return self.archetype_id, self._clean_archetype(doc)
class Composition(ArchetypeBuilder):
    """Builder for an openEHR encounter COMPOSITION wrapping child archetypes.

    The on-disk template is always the plain encounter archetype; *label* is
    only appended to the in-memory archetype id.
    """

    def __init__(self, archetype_dir, children, label):
        labelled_id = 'openEHR-EHR-COMPOSITION.encounter.v1.%s' % label
        self.archetype_file_name = 'openEHR-EHR-COMPOSITION.encounter.v1'
        self.children = children
        super(Composition, self).__init__(labelled_id, archetype_dir)

    def build(self):
        """Return (archetype_id, document) with the children attached (no cleanup pass)."""
        doc = self._load_file()
        other_context = doc['context']['event_context']['other_context']
        other_context['at0001'][0]['items']['at0002'] = self.children
        return self.archetype_id, doc

    def _load_file(self):
        # Override: load by fixed file name, not by the labelled archetype id.
        path = '/'.join([self.archetype_dir, self.archetype_file_name])
        with open("%s.json" % path) as source:
            raw = json.loads(source.read())
        try:
            details = raw['archetype_details']
        except KeyError:
            raise Exception("Invalid archetype file: %s" % self.archetype_id)
        return decode_dict(details)
# Registry mapping short builder names to their ArchetypeBuilder subclasses.
# Consumed by get_builder() below.
BUILDERS = {
    'blood_pressure' : BloodPressure,
    'blood_glucose' : BloodGlucose,
    'full_blood_count' : FullBloodCount,
    'lipids' : Lipids,
    'liver_function': LiverFunction,
    'thyroid' : Thyroid,
    'urea_and_electrolytes' : UreaAndElectrolytes,
    'urin_analysis' : UrinAnalysis,
    'composition' : Composition
}
def get_builder(name):
    """Return the builder class registered under *name*, or None if unknown."""
    try:
        return BUILDERS[name]
    except KeyError:
        return None
| # -*- coding: utf-8 -*-
import json
from random import randint, uniform, choice
from pyehr.utils import decode_dict, cleanup_json
class ArchetypeBuilder(object):
    """Base class for builders that fill openEHR archetype JSON templates.

    Subclasses choose an archetype_id, load the matching JSON template from
    archetype_dir via _load_file(), splice their values into it and return
    (archetype_id, document) from build().  The _get_dv_* helpers produce
    the small dict structures used for openEHR data values.
    """
    def __init__(self, archetype_id, archetype_dir):
        # archetype_id doubles as the template file name (without '.json')
        self.archetype_id = archetype_id
        self.archetype_dir = archetype_dir
    def _get_quantity(self, value, units):
        """Return a DV_QUANTITY-like dict for *value* in *units*."""
        return {
            'magnitude': value,
            'units': units
        }
    def _get_dv_text(self, text):
        """Return a DV_TEXT-like dict wrapping *text*."""
        return {
            'value' : text
        }
    def _get_dv_coded_text(self, coded_text_value, code_string):
        """Return a DV_CODED_TEXT-like dict using the 'local::' terminology."""
        return {
            'value' : coded_text_value,
            'defining_code' : {
                'terminology_id' : {
                    'value' : 'local::'
                },
                'code_string' : code_string
            }
        }
    def _clean_archetype(self, archetype):
        """Strip empty nodes from *archetype* (cleanup_json) and re-decode it."""
        cleaned = cleanup_json(archetype)
        return decode_dict(cleaned)
    def _get_dv_duration(self, value, unit):
        """Return a duration string such as '4H' by concatenating value and unit."""
        return '%s%s' %(value, unit)
    def _get_dv_date_time(self, datetime_str): #todo: check format
        # Passed through unchanged; the expected datetime format is not
        # validated here.
        return datetime_str
    def _get_dv_multimedia(self, media_type, size):
        """Return a DV_MULTIMEDIA-like dict (media type and size only)."""
        return {
            'media_type' : media_type,
            'size' : size
        }
    def _get_dv_proportion(self, proportion, value):
        """Return a DV_PROPORTION-like dict from a {'numerator', 'denominator'} mapping."""
        return {
            "numerator" : proportion['numerator'],
            "denominator" : proportion['denominator'],
            "type" : {
                "value" : value
            }
        }
    def _load_file(self):
        """Load '<archetype_dir>/<archetype_id>.json' and return its decoded
        'archetype_details' subtree.  Raises on a file missing that key."""
        path = '/'.join([self.archetype_dir, self.archetype_id])
        with open("%s.json" % path) as f:
            doc = json.loads(f.read())
        try:
            doc = doc['archetype_details']
        except KeyError:
            raise Exception("Invalid archetype file: %s" % self.archetype_id)
        else:
            doc = decode_dict(doc)
        return doc
    def build(self):
        # Subclasses must override and return (archetype_id, document).
        raise NotImplementedError()
class BloodGlucose(ArchetypeBuilder):
    """Builder for the openEHR blood glucose lab-test observation.

    Unset doses default to random values; unset event codes default to
    fixed at-codes from the archetype.
    NOTE(review): test_name and specimen_arch_detail are stored but never
    written into the document by build() -- confirm whether that is intended.
    """
    def __init__(self, archetype_dir, test_name=None, specimen_arch_detail=None, diet_intake=None,
        diet_duration=None, glucose_dose=None, glucose_timing=None,
        insulin_dose=None, insulin_route=None, laboratory_result_id=None,
        result_datetime= None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-blood_glucose.v1'
        self.test_name = test_name or None
        self.specimen_arch_detail = specimen_arch_detail or None
        # at0.* codes below are node ids from the blood_glucose archetype
        self.diet_intake = diet_intake or 'at0.95'
        self.diet_duration = diet_duration or 'P'
        self.glucose_dose = glucose_dose or randint(3, 10)
        self.glucose_timing = glucose_timing or 'at0.104'
        self.insulin_dose = insulin_dose or randint(2, 10)
        self.insulin_route = insulin_route or 'at0.113'
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(BloodGlucose, self).__init__(archetype_id, archetype_dir)
    def build(self):
        """Return (archetype_id, cleaned document) with the glucose event state filled in."""
        bg_doc = self._load_file()
        if self.specimen_arch_detail: #decide about handling an example of nested archetipe
            pass
        if self.diet_intake:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.90'][0]['items']['at0.91'] = \
            {"value" : self._get_dv_coded_text(self.diet_intake, 'at0.92' )}
        if self.diet_duration:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.90'][0]['items']['at0.96'] = \
            {'value' : self._get_dv_duration(self.diet_duration, 'H')}
        if self.glucose_dose:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.98'][0]['items']['at0.100'] = \
            {'value' : self._get_quantity(self.glucose_dose, 'gm')}
        if self.glucose_timing:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.98'][0]['items']['at0.99'] = \
            {'value': self._get_dv_coded_text(self.glucose_timing, 'at0.103')}
        if self.insulin_dose:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.107'][0]['items']['at0.110'] = \
            {'value' : self._get_quantity(self.insulin_dose, 'U')}
        if self.insulin_route:
            bg_doc['data']['at0001'][0]['events']['at0002']['state']['at0.89'][0]['items']['at0.107'][0]['items']['at0.111'] = \
            {'value' : self._get_dv_coded_text(self.insulin_route, 'at0.112')}
        if self.laboratory_result_id:
            bg_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value' : self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            bg_doc['protocol']['at0004'][0]['items']['at0075'] = {'value' : self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(bg_doc)
class BloodPressure(ArchetypeBuilder):
    """Builder for the openEHR blood pressure observation.

    All four readings default to random values when not supplied.
    NOTE(review): pulse is recorded with 'mm[Hg]' units like the pressures,
    which looks wrong -- confirm against the archetype before changing.
    """

    def __init__(self, archetype_dir, systolic=None, diastolic=None, mean_arterial=None, pulse=None):
        self.systolic = systolic or randint(80, 250)
        self.diastolic = diastolic or randint(60, 100)
        self.mean_arterial = mean_arterial or randint(0, 1000)
        self.pulse = pulse or randint(0, 1000)
        super(BloodPressure, self).__init__('openEHR-EHR-OBSERVATION.blood_pressure.v1', archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the pressure readings set."""
        bp_doc = self._load_file()
        items = bp_doc['data']['at0001'][0]['events'][0]['at0006']['data']['at0003'][0]['items']
        readings = (
            ('at0004', self.systolic),
            ('at0005', self.diastolic),
            ('at1006', self.mean_arterial),
            ('at1007', self.pulse),
        )
        for node_id, reading in readings:
            if reading:
                items[node_id] = {'value': self._get_quantity(reading, 'mm[Hg]')}
        return self.archetype_id, self._clean_archetype(bp_doc)
class FullBloodCount(ArchetypeBuilder):
    """Builder for the openEHR full blood count (FBC) lab-test observation."""

    def __init__(self, archetype_dir, test_name=None, haemoglobin=None, mchc=None, mcv=None,
                 mch=None, lymphocytes=None, basophils=None, monocytes=None,
                 eosinophils=None, multimedia_representation=None,
                 laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-full_blood_count.v1'
        self.test_name = test_name or None
        self.haemoglobin = haemoglobin or randint(10, 40)
        self.mchc = mchc or randint(25, 50)
        self.mcv = mcv or randint(80, 100)
        self.mch = mch or randint(30, 40)
        self.lymphocytes = lymphocytes or round(uniform(2, 5), 2)
        self.basophils = basophils or round(uniform(1, 3), 2)
        self.monocytes = monocytes or round(uniform(0, 1.5), 2)
        self.eosinophils = eosinophils or randint(3, 7)
        # assumes a {'media_type': ..., 'size': ...} mapping -- TODO confirm
        self.multimedia_representation = multimedia_representation or None
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(FullBloodCount, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the FBC values filled in."""
        fbc_doc = self._load_file()
        items = fbc_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.haemoglobin:
            items['at0078.4'] = {'value': self._get_quantity(self.haemoglobin, 'gm/l')}
        if self.mchc:
            items['at0078.7'] = {'value': self._get_quantity(self.mchc, 'gm/l')}
        if self.mcv:
            items['at0078.8'] = {'value': self._get_quantity(self.mcv, 'fl')}
        if self.mch:
            # BUG FIX: previously wrote self.mcv into the MCH node
            items['at0078.9'] = {'value': self._get_quantity(self.mch, 'pg')}
        # White-cell differential lives under the at0078.14 cluster.
        # BUG FIX: these node ids carried a stray ']' suffix (e.g. 'at0078.16]').
        if self.lymphocytes:
            items['at0078.14'][0]['items']['at0078.16'] = {'value': self._get_quantity(self.lymphocytes, '10*9/l')}
        if self.basophils:
            items['at0078.14'][0]['items']['at0078.17'] = {'value': self._get_quantity(self.basophils, '10*9/l')}
        if self.monocytes:
            items['at0078.14'][0]['items']['at0078.18'] = {'value': self._get_quantity(self.monocytes, '10*9/l')}
        if self.eosinophils:
            items['at0078.14'][0]['items']['at0078.19'] = {'value': self._get_quantity(self.eosinophils, '10*9/l')}
        if self.multimedia_representation:
            # NOTE(review): this reuses node 'at0005' and overwrites test_name
            # when both are set -- looks like a copy-paste slip; confirm the
            # correct multimedia node id before changing it.
            items['at0005'] = \
                [{'value': self._get_dv_multimedia(self.multimedia_representation['media_type'], self.multimedia_representation['size'])}]
        if self.laboratory_result_id:
            fbc_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            fbc_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(fbc_doc)
class Lipids(ArchetypeBuilder):
    """Builder for the openEHR lipid panel lab-test observation."""

    def __init__(self, archetype_dir, test_name=None, specimen_detail=None, total_cholesterol=None,
                 tryglicerides=None, hdl=None, ldl=None, hdl_ldl_ratio=None,
                 laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-lipids.v1'
        self.test_name = test_name or None
        self.specimen_detail = specimen_detail or None
        self.total_cholesterol = total_cholesterol or randint(150, 300)
        self.tryglicerides = tryglicerides or randint(140, 550)
        self.hdl = hdl or randint(50, 120)
        self.ldl = ldl or randint(50, 120)
        self.hdl_ldl_ratio = hdl_ldl_ratio or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(Lipids, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the lipid values filled in."""
        lpd_doc = self._load_file()
        items = lpd_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.specimen_detail:
            pass  # nested specimen archetype handling not implemented yet
        measurements = (
            ('at0078.5', self.total_cholesterol),
            ('at0078.4', self.tryglicerides),
            ('at0078.3', self.hdl),
            ('at0078.2', self.ldl),
        )
        for node_id, amount in measurements:
            if amount:
                items[node_id] = {'value': self._get_quantity(amount, 'mg/dl')}
        if self.hdl_ldl_ratio:
            items['at0078.1'] = {'value': self._get_dv_proportion(self.hdl_ldl_ratio, 1)}
        if self.laboratory_result_id:
            lpd_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            lpd_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(lpd_doc)
class LiverFunction(ArchetypeBuilder):
    """Builder for the openEHR liver function lab-test observation.

    Unset analytes default to random values in roughly physiological ranges.
    NOTE(review): unlike the other builders, the item node ids here lack the
    'at' prefix ('0078.2' vs 'at0078.2') -- verify against the archetype.
    """

    def __init__(self, archetype_dir, test_name=None, alp=None, total_bilirubin=None,
                 direct_bilirubin=None, indirect_bilirubin=None, alt=None,
                 ast=None, ggt=None, albumin=None, total_protein=None,
                 laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-liver_function.v1'
        self.test_name = test_name or None
        self.alp = alp or randint(30, 45)
        self.total_bilirubin = total_bilirubin or randint(1, 25)
        self.direct_bilirubin = direct_bilirubin or randint(1, 9)
        self.indirect_bilirubin = indirect_bilirubin or randint(10, 20)
        self.alt = alt or randint(5, 50)
        self.ast = ast or randint(10, 50)
        self.ggt = ggt or randint(10, 50)
        self.albumin = albumin or randint(30, 55)
        self.total_protein = total_protein or randint(40, 65)
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(LiverFunction, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the analyte values filled in."""
        lvf_doc = self._load_file()
        items = lvf_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.alp:
            items['0078.2'] = {'value': self._get_quantity(self.alp, "U/l")}
        if self.total_bilirubin:
            items['0078.4'] = {'value': self._get_quantity(self.total_bilirubin, "µmol/l")}
        if self.direct_bilirubin:
            items['0078.11'] = {'value': self._get_quantity(self.direct_bilirubin, "µmol/l")}
        if self.indirect_bilirubin:
            items['0078.9'] = {'value': self._get_quantity(self.indirect_bilirubin, "µmol/l")}
        if self.alt:
            items['0078.1'] = {'value': self._get_quantity(self.alt, "U/l")}
        if self.ast:
            items['0078.3'] = {'value': self._get_quantity(self.ast, "U/l")}
        if self.ggt:
            # BUG FIX: GGT previously reused node '0078.3' and silently
            # overwrote the AST value; '0078.5' is the GGT node in the
            # liver_function archetype -- TODO confirm against the ADL.
            items['0078.5'] = {'value': self._get_quantity(self.ggt, "U/l")}
        if self.albumin:
            items['0078.7'] = {'value': self._get_quantity(self.albumin, "gm/l")}
        if self.total_protein:
            items['0078.10'] = {'value': self._get_quantity(self.total_protein, "gm/l")}
        if self.laboratory_result_id:
            lvf_doc['protocol']['at0004'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            lvf_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(lvf_doc)
class Thyroid(ArchetypeBuilder):
    """Builder for the openEHR thyroid panel lab-test observation.

    Unset values default to random numbers in plausible ranges so the
    builder can generate synthetic records.
    """

    def __init__(self, archetype_dir, test_name=None, tsh=None, ft3=None, total_t3=None,
                 ft4=None, total_t4=None, ft3_index=None, fti=None,
                 placer_id=None, filler_id=None, laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-thyroid.v1'
        self.test_name = test_name or None
        self.tsh = tsh or round(uniform(0, 4.5), 2)
        self.ft3 = ft3 or round(uniform(3, 7), 2)
        self.total_t3 = total_t3 or round(uniform(3, 7), 2)
        self.ft4 = ft4 or round(uniform(3, 20), 2)
        self.total_t4 = total_t4 or round(uniform(3, 20), 2)
        self.ft3_index = ft3_index or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
        self.fti = fti or {'numerator': randint(1, 4), 'denominator': randint(1, 4)}
        self.placer_id = placer_id or None
        self.filler_id = filler_id or None
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(Thyroid, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the thyroid values filled in."""
        thy_doc = self._load_file()
        items = thy_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.tsh:
            items['at0078.2'] = {'value': self._get_quantity(self.tsh, 'mIU/l')}
        if self.ft3:
            items['at0078.7'] = {'value': self._get_quantity(self.ft3, 'pmol/l')}
        if self.total_t3:
            items['at0078.8'] = {'value': self._get_quantity(self.total_t3, 'pmol/l')}
        if self.ft4:
            # BUG FIX: previously wrote self.ft3 into the FT4 node
            items['at0078.3'] = {'value': self._get_quantity(self.ft4, 'pmol/l')}
        if self.total_t4:
            # BUG FIX: previously wrote self.ft3 into the total-T4 node
            items['at0078.4'] = {'value': self._get_quantity(self.total_t4, 'pmol/l')}
        if self.ft3_index:
            items['at0078.9'] = {'value': self._get_dv_proportion(self.ft3_index, 1)}
        if self.fti:
            items['at0078.6'] = {'value': self._get_dv_proportion(self.fti, 1)}
        if self.placer_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0062'] = {'value': self.placer_id}
        if self.filler_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0063'] = {'value': self.filler_id}
        if self.laboratory_result_id:
            thy_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            thy_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(thy_doc)
class UreaAndElectrolytes(ArchetypeBuilder):
    """Builder for the openEHR urea & electrolytes (U&E) lab-test observation.

    NOTE(review): the parameter is spelled 'sodum' (sic); kept unchanged for
    backward compatibility with existing callers.
    """

    def __init__(self, archetype_dir, test_name=None, sodum=None, potassium=None, chloride=None,
                 bicarbonate=None, urea=None, creatinine=None,
                 sp_ratio=None, laboratory_result_id=None, result_datetime=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.lab_test-urea_and_electrolytes.v1'
        self.test_name = test_name or None
        self.sodum = sodum or randint(125, 150)
        self.potassium = potassium or round(uniform(3, 5.5), 2)
        self.chloride = chloride or randint(90, 120)
        self.bicarbonate = bicarbonate or randint(20, 30)
        self.urea = urea or round(uniform(1.5, 8), 2)
        self.creatinine = creatinine or round(uniform(0.1, 0.3), 2)
        self.sp_ratio = sp_ratio or {'numerator': randint(3, 4), 'denominator': randint(1, 4)}
        self.laboratory_result_id = laboratory_result_id or None
        self.result_datetime = result_datetime or None
        super(UreaAndElectrolytes, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the U&E values filled in."""
        uae_doc = self._load_file()
        items = uae_doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        if self.test_name:
            items['at0005'] = {'value': self._get_dv_text(self.test_name)}
        if self.sodum:
            items['at0078.5'] = {'value': self._get_quantity(self.sodum, 'mmol/l')}
        if self.potassium:
            items['at0078.4'] = {'value': self._get_quantity(self.potassium, 'mmol/l')}
        if self.chloride:
            items['at0078.3'] = {'value': self._get_quantity(self.chloride, 'mmol/l')}
        if self.bicarbonate:
            items['at0078.2'] = {'value': self._get_quantity(self.bicarbonate, 'mmol/l')}
        if self.urea:
            # BUG FIX: previously wrote self.bicarbonate into the urea node
            items['at0078.1'] = {'value': self._get_quantity(self.urea, 'mmol/l')}
        if self.creatinine:
            # BUG FIX: previously wrote self.bicarbonate into the creatinine node
            items['at0078.7'] = {'value': self._get_quantity(self.creatinine, 'mmol/l')}
        if self.sp_ratio:
            items['at0078.6'] = {'value': self._get_dv_proportion(self.sp_ratio, 1)}
        if self.laboratory_result_id:
            uae_doc['protocol']['at0004'][0]['items']['at0013'][0]['items']['at0068'] = {'value': self._get_dv_text(self.laboratory_result_id)}
        if self.result_datetime:
            uae_doc['protocol']['at0004'][0]['items']['at0075'] = {'value': self._get_dv_date_time(self.result_datetime)}
        return self.archetype_id, self._clean_archetype(uae_doc)
class UrinAnalysis(ArchetypeBuilder):
    """Builder for the openEHR urinalysis observation.

    Each dipstick result is an at-code from the archetype's ordinal value
    set; unspecified results default to a randomly chosen code.
    """

    def __init__(self, archetype_dir, glucose=None, protein=None, bilirubin=None,
                 ketones=None, blood=None, ph=None, comments=None):
        archetype_id = 'openEHR-EHR-OBSERVATION.urinalysis.v1'
        self.glucose = glucose or choice(['at0115', 'at0116', 'at0117', 'at0118', 'at0119', 'at0120'])
        self.protein = protein or choice(['at0096', 'at0097', 'at0098', 'at0099', 'at0100', 'at0101'])
        self.bilirubin = bilirubin or choice(['at0121', 'at0122', 'at0123', 'at0124'])
        self.ketones = ketones or choice(['at0109', 'at0110', 'at0111', 'at0112', 'at0113', 'at0114'])
        self.blood = blood or choice(['at0102', 'at0103', 'at0104', 'at0105', 'at0106', 'at0107', 'at0108'])
        self.ph = ph or choice(['at0127', 'at0128', 'at0129', 'at0130', 'at0131', 'at0132', 'at0133', 'at0134', 'at0176', 'at0177', 'at0179'])
        self.comments = comments or None
        super(UrinAnalysis, self).__init__(archetype_id, archetype_dir)

    def build(self):
        """Return (archetype_id, cleaned document) with the dipstick results set."""
        doc = self._load_file()
        items = doc['data']['at0001'][0]['events']['at0002']['data']['at0003'][0]['items']
        results = (
            ('at0050', self.glucose),
            ('at0095', self.protein),
            ('at0062', self.bilirubin),
            ('at0037', self.ketones),
            ('at0032', self.blood),
            ('at0126', self.ph),
        )
        for node_id, result in results:
            if result:
                items[node_id] = {'value': result}
        if self.comments:
            items['at0030'] = {'value': self._get_dv_text(self.comments)}
        return self.archetype_id, self._clean_archetype(doc)
class Composition(ArchetypeBuilder):
    """Builder for an openEHR encounter COMPOSITION wrapping child archetypes.

    The on-disk template is always the plain encounter archetype; *label* is
    only appended to the in-memory archetype id.
    """

    def __init__(self, archetype_dir, children, label):
        labelled_id = 'openEHR-EHR-COMPOSITION.encounter.v1.%s' % label
        self.archetype_file_name = 'openEHR-EHR-COMPOSITION.encounter.v1'
        self.children = children
        super(Composition, self).__init__(labelled_id, archetype_dir)

    def build(self):
        """Return (archetype_id, document) with the children attached (no cleanup pass)."""
        doc = self._load_file()
        other_context = doc['context']['event_context']['other_context']
        other_context['at0001'][0]['items']['at0002'] = self.children
        return self.archetype_id, doc

    def _load_file(self):
        # Override: load by fixed file name, not by the labelled archetype id.
        path = '/'.join([self.archetype_dir, self.archetype_file_name])
        with open("%s.json" % path) as source:
            raw = json.loads(source.read())
        try:
            details = raw['archetype_details']
        except KeyError:
            raise Exception("Invalid archetype file: %s" % self.archetype_id)
        return decode_dict(details)
# Registry mapping short builder names to their ArchetypeBuilder subclasses.
# Consumed by get_builder() below.
BUILDERS = {
    'blood_pressure' : BloodPressure,
    'blood_glucose' : BloodGlucose,
    'full_blood_count' : FullBloodCount,
    'lipids' : Lipids,
    'liver_function': LiverFunction,
    'thyroid' : Thyroid,
    'urea_and_electrolytes' : UreaAndElectrolytes,
    'urin_analysis' : UrinAnalysis,
    'composition' : Composition
}
def get_builder(name):
    """Return the builder class registered under *name*, or None if unknown."""
    try:
        return BUILDERS[name]
    except KeyError:
        return None
| en | 0.669793 | # -*- coding: utf-8 -*- #todo: check format #decide about handling an example of nested archetipe #decide about handling an example of nested archetipe | 2.459836 | 2 |
async/__init__.py | FreeJournal/freejournal | 4 | 6616387 | from threading import Thread, Event
from functools import wraps
from time import sleep
def run_as_thread(func):
    """Decorator that runs the wrapped callable in a new daemon thread.

    Calling the decorated function starts the thread immediately and
    returns the Thread object instead of the callable's result.
    """
    @wraps(func)
    def launcher(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
        return worker
    return launcher
def repeat_periodic(interval):
    """Decorator factory: run the wrapped function every *interval* seconds.

    The decorated function returns a threading.Event; setting it stops the
    periodic loop.  The loop runs in a daemon thread (via run_as_thread),
    and the first invocation happens after one full interval.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            stop_event = Event()

            @run_as_thread
            def loop():
                while not stop_event.wait(interval):
                    try:
                        func(*args, **kwargs)
                    except Exception as e:
                        # BUG FIX: was a Python 2 print statement using the
                        # deprecated e.message attribute (a syntax error on
                        # Python 3); this form works on both 2 and 3.
                        print("Repeated job error'd with message: %s" % e)
            loop()
            return stop_event
        return wrapper
    return decorator
def wait_for_interrupt(func, stop_event, args=[]):
while True:
try:
sleep(10)
except (KeyboardInterrupt, SystemExit):
stop_event.set()
func(*args)
exit(0)
| from threading import Thread, Event
from functools import wraps
from time import sleep
def run_as_thread(func):
"""
Decorator for making a function run as a new thread
:param func: the function to run
:return: the resulting thread object
"""
@wraps(func)
def thread_func(*args, **kwargs):
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
return thread_func
def repeat_periodic(interval):
def decorator(func):
def wrapper(*args, **kwargs):
stop_event = Event()
@run_as_thread
def loop():
while not stop_event.wait(interval):
try:
func(*args, **kwargs)
except Exception as e:
print "Repeated job error'd with message:", e.message
loop()
return stop_event
return wrapper
return decorator
def wait_for_interrupt(func, stop_event, args=[]):
while True:
try:
sleep(10)
except (KeyboardInterrupt, SystemExit):
stop_event.set()
func(*args)
exit(0)
| en | 0.790809 | Decorator for making a function run as a new thread :param func: the function to run :return: the resulting thread object | 3.577283 | 4 |
currently_overdue_books.py | blaterwolf/lmspy | 0 | 6616388 | <gh_stars>0
from PyQt6 import QtCore, QtGui, QtWidgets
import sqlite3
class Ui_CurrentlyOverdueBooks(object):
def setupUi(self, CurrentlyOverdueBooks, MainMenu):
MainMenu.close()
CurrentlyOverdueBooks.setObjectName("CurrentlyOverdueBooks")
CurrentlyOverdueBooks.resize(740, 670)
CurrentlyOverdueBooks.setStyleSheet(
".QWidget{background-color: #CBB1A0;border-radius: 10px}")
CurrentlyOverdueBooks.setWindowFlags(
QtCore.Qt.WindowType.FramelessWindowHint)
self.verticalLayout = QtWidgets.QVBoxLayout(CurrentlyOverdueBooks)
self.verticalLayout.setObjectName("verticalLayout")
self.border = QtWidgets.QFrame(CurrentlyOverdueBooks)
self.border.setStyleSheet("#border{\n"
" color: #842a2d;\n"
"}")
self.border.setFrameShape(QtWidgets.QFrame.Shape.Box)
self.border.setLineWidth(5)
self.border.setMidLineWidth(5)
self.border.setObjectName("border")
self.gridLayout = QtWidgets.QGridLayout(self.border)
self.gridLayout.setObjectName("gridLayout")
self.overdue_title_label = QtWidgets.QLabel(self.border)
font = QtGui.QFont()
font.setPointSize(24)
font.setBold(True)
self.overdue_title_label.setFont(font)
self.overdue_title_label.setStyleSheet(
".QWidget{background-color: #CBB1A0;border-radius: 10px}")
self.overdue_title_label.setAlignment(
QtCore.Qt.AlignmentFlag.AlignCenter)
self.overdue_title_label.setObjectName("overdue_title_label")
self.gridLayout.addWidget(self.overdue_title_label, 0, 0, 1, 1)
self.tableWidget = QtWidgets.QTableWidget(self.border)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(6)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
self.tableWidget.horizontalHeader().setDefaultSectionSize(200)
self.gridLayout.addWidget(self.tableWidget, 1, 0, 1, 1)
self.back_button = QtWidgets.QPushButton(self.border)
self.back_button.setCursor(QtGui.QCursor(
QtCore.Qt.CursorShape.PointingHandCursor))
self.back_button.setStyleSheet("QPushButton{\n"
" color: #842a2d;\n"
" font: 17pt \"Franklin Gothic Book\";\n"
" border: 2px solid #842a2d;\n"
" padding: 2px;\n"
" border-radius: 10px;\n"
" opacity: 100;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" background-color: #842a2d;\n"
" color: #CBB1A0;\n"
"}\n"
"QPushButton:pressed{\n"
" background-color: #b34044;\n"
" border: 5px solid #b34044;\n"
"}")
self.back_button.setObjectName("back_button")
self.back_button.clicked.connect(
lambda: self.return_action(CurrentlyOverdueBooks, MainMenu))
self.gridLayout.addWidget(self.back_button, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.border)
self.retranslateUi(CurrentlyOverdueBooks)
QtCore.QMetaObject.connectSlotsByName(CurrentlyOverdueBooks)
self.load_currently_overdue_books()
def return_action(self, CurrentlyOverdueBooks, MainMenu):
CurrentlyOverdueBooks.close()
MainMenu.show()
def load_currently_overdue_books(self):
con = sqlite3.connect('./db/library.db')
query = """
SELECT Borrow_ID, (Student_FirstName || ' ' || Student_LastName) AS Full_Name, Book_Title, Borrow_Date, Librarian_Name, Payment_Amount
FROM BORROW
LEFT JOIN STUDENT ON STUDENT.Student_ID = BORROW.Student_ID
LEFT JOIN BOOK ON BOOK.Book_ID = BORROW.Book_ID
LEFT JOIN LIBRARIAN ON LIBRARIAN.Librarian_Username = BORROW.Borrow_Issuer
LEFT JOIN PAYMENT ON PAYMENT.Payment_ID = BORROW.Payment_ID
WHERE Borrow_Overdue_Status = 1;
"""
result = con.execute(query)
self.tableWidget.setRowCount(0)
for row, form in enumerate(result):
self.tableWidget.insertRow(row)
for column, item in enumerate(form):
self.tableWidget.setItem(
row, column, QtWidgets.QTableWidgetItem(str(item)))
def retranslateUi(self, CurrentlyOverdueBooks):
_translate = QtCore.QCoreApplication.translate
CurrentlyOverdueBooks.setWindowTitle(
_translate("CurrentlyOverdueBooks", "Borrow History"))
self.overdue_title_label.setText(_translate(
"CurrentlyOverdueBooks", "CURRENTLY OVERDUE BOOKS"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("CurrentlyOverdueBooks", "Borrow ID"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("CurrentlyOverdueBooks", "Student Name"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("CurrentlyOverdueBooks", "Book Title"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("CurrentlyOverdueBooks", "Borrow Date"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("CurrentlyOverdueBooks", "Librarian Issuer"))
item = self.tableWidget.horizontalHeaderItem(5)
item.setText(_translate("CurrentlyOverdueBooks", "Payment Amount"))
self.back_button.setText(_translate("CurrentlyOverdueBooks", "BACK"))
| from PyQt6 import QtCore, QtGui, QtWidgets
import sqlite3
class Ui_CurrentlyOverdueBooks(object):
def setupUi(self, CurrentlyOverdueBooks, MainMenu):
MainMenu.close()
CurrentlyOverdueBooks.setObjectName("CurrentlyOverdueBooks")
CurrentlyOverdueBooks.resize(740, 670)
CurrentlyOverdueBooks.setStyleSheet(
".QWidget{background-color: #CBB1A0;border-radius: 10px}")
CurrentlyOverdueBooks.setWindowFlags(
QtCore.Qt.WindowType.FramelessWindowHint)
self.verticalLayout = QtWidgets.QVBoxLayout(CurrentlyOverdueBooks)
self.verticalLayout.setObjectName("verticalLayout")
self.border = QtWidgets.QFrame(CurrentlyOverdueBooks)
self.border.setStyleSheet("#border{\n"
" color: #842a2d;\n"
"}")
self.border.setFrameShape(QtWidgets.QFrame.Shape.Box)
self.border.setLineWidth(5)
self.border.setMidLineWidth(5)
self.border.setObjectName("border")
self.gridLayout = QtWidgets.QGridLayout(self.border)
self.gridLayout.setObjectName("gridLayout")
self.overdue_title_label = QtWidgets.QLabel(self.border)
font = QtGui.QFont()
font.setPointSize(24)
font.setBold(True)
self.overdue_title_label.setFont(font)
self.overdue_title_label.setStyleSheet(
".QWidget{background-color: #CBB1A0;border-radius: 10px}")
self.overdue_title_label.setAlignment(
QtCore.Qt.AlignmentFlag.AlignCenter)
self.overdue_title_label.setObjectName("overdue_title_label")
self.gridLayout.addWidget(self.overdue_title_label, 0, 0, 1, 1)
self.tableWidget = QtWidgets.QTableWidget(self.border)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(6)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
self.tableWidget.horizontalHeader().setDefaultSectionSize(200)
self.gridLayout.addWidget(self.tableWidget, 1, 0, 1, 1)
self.back_button = QtWidgets.QPushButton(self.border)
self.back_button.setCursor(QtGui.QCursor(
QtCore.Qt.CursorShape.PointingHandCursor))
self.back_button.setStyleSheet("QPushButton{\n"
" color: #842a2d;\n"
" font: 17pt \"Franklin Gothic Book\";\n"
" border: 2px solid #842a2d;\n"
" padding: 2px;\n"
" border-radius: 10px;\n"
" opacity: 100;\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" background-color: #842a2d;\n"
" color: #CBB1A0;\n"
"}\n"
"QPushButton:pressed{\n"
" background-color: #b34044;\n"
" border: 5px solid #b34044;\n"
"}")
self.back_button.setObjectName("back_button")
self.back_button.clicked.connect(
lambda: self.return_action(CurrentlyOverdueBooks, MainMenu))
self.gridLayout.addWidget(self.back_button, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.border)
self.retranslateUi(CurrentlyOverdueBooks)
QtCore.QMetaObject.connectSlotsByName(CurrentlyOverdueBooks)
self.load_currently_overdue_books()
def return_action(self, CurrentlyOverdueBooks, MainMenu):
CurrentlyOverdueBooks.close()
MainMenu.show()
def load_currently_overdue_books(self):
con = sqlite3.connect('./db/library.db')
query = """
SELECT Borrow_ID, (Student_FirstName || ' ' || Student_LastName) AS Full_Name, Book_Title, Borrow_Date, Librarian_Name, Payment_Amount
FROM BORROW
LEFT JOIN STUDENT ON STUDENT.Student_ID = BORROW.Student_ID
LEFT JOIN BOOK ON BOOK.Book_ID = BORROW.Book_ID
LEFT JOIN LIBRARIAN ON LIBRARIAN.Librarian_Username = BORROW.Borrow_Issuer
LEFT JOIN PAYMENT ON PAYMENT.Payment_ID = BORROW.Payment_ID
WHERE Borrow_Overdue_Status = 1;
"""
result = con.execute(query)
self.tableWidget.setRowCount(0)
for row, form in enumerate(result):
self.tableWidget.insertRow(row)
for column, item in enumerate(form):
self.tableWidget.setItem(
row, column, QtWidgets.QTableWidgetItem(str(item)))
def retranslateUi(self, CurrentlyOverdueBooks):
_translate = QtCore.QCoreApplication.translate
CurrentlyOverdueBooks.setWindowTitle(
_translate("CurrentlyOverdueBooks", "Borrow History"))
self.overdue_title_label.setText(_translate(
"CurrentlyOverdueBooks", "CURRENTLY OVERDUE BOOKS"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("CurrentlyOverdueBooks", "Borrow ID"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("CurrentlyOverdueBooks", "Student Name"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("CurrentlyOverdueBooks", "Book Title"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("CurrentlyOverdueBooks", "Borrow Date"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("CurrentlyOverdueBooks", "Librarian Issuer"))
item = self.tableWidget.horizontalHeaderItem(5)
item.setText(_translate("CurrentlyOverdueBooks", "Payment Amount"))
self.back_button.setText(_translate("CurrentlyOverdueBooks", "BACK")) | en | 0.399443 | #CBB1A0;border-radius: 10px}") #842a2d;\n" #CBB1A0;border-radius: 10px}") #842a2d;\n" #842a2d;\n" #842a2d;\n" #CBB1A0;\n" #b34044;\n" #b34044;\n" SELECT Borrow_ID, (Student_FirstName || ' ' || Student_LastName) AS Full_Name, Book_Title, Borrow_Date, Librarian_Name, Payment_Amount FROM BORROW LEFT JOIN STUDENT ON STUDENT.Student_ID = BORROW.Student_ID LEFT JOIN BOOK ON BOOK.Book_ID = BORROW.Book_ID LEFT JOIN LIBRARIAN ON LIBRARIAN.Librarian_Username = BORROW.Borrow_Issuer LEFT JOIN PAYMENT ON PAYMENT.Payment_ID = BORROW.Payment_ID WHERE Borrow_Overdue_Status = 1; | 2.614314 | 3 |
functions/src/SiteMonitor/index.py | developerDemetri/lambda-utils | 0 | 6616389 | <gh_stars>0
from datetime import datetime
import logging
import os
import boto3
import requests
from requests.exceptions import ReadTimeout
IS_DEBUGGING = str(os.environ.get("DEBUGGING", "no")).strip().lower() == "yes"
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG if IS_DEBUGGING else logging.INFO)
LOGGER.debug("Loading lambda...")
AWS_REGION = str(os.environ.get("AWS_REGION", "us-west-2")).strip()
MAX_TIME = 3
SITE_DYNAMO_TABLE = str(os.environ.get("SITE_DYNAMO_TABLE", "demo")).strip()
STATS_DYNAMO_TABLE = str(os.environ.get("STATS_DYNAMO_TABLE", "demo")).strip()
SNS_TOPIC = str(os.environ.get("SNS_TOPIC", "demo")).strip()
def get_sites(dynamo_client):
LOGGER.info("Retrieving list of sites...")
sites = []
for item in dynamo_client.scan(TableName=SITE_DYNAMO_TABLE, ConsistentRead=True)["Items"]:
sites.append({
"id": int(item["site_id"]["N"]),
"name": str(item["site_name"]["S"]),
"is_down": bool(item["is_down"]["BOOL"])
})
LOGGER.debug(str(sites))
LOGGER.info("Successfully retrieved list of {} sites.".format(len(sites)))
return sites
def save_stats(dynamo_client, sites):
LOGGER.info("Saving all Site info in Dynamo...")
for site in sites:
LOGGER.info("Saving info in Dynamo for {}...".format(site["name"]))
dynamo_client.update_item(
TableName=SITE_DYNAMO_TABLE,
Key={"site_id": {"N": str(site["id"])}},
UpdateExpression="SET is_down = :val",
ExpressionAttributeValues={":val": {"BOOL": site["is_down"]}}
)
LOGGER.debug("Successfully updated Site table for {}.".format(site["name"]))
dynamo_client.put_item(
TableName=STATS_DYNAMO_TABLE,
Item={
"site_id": {"N": str(site["id"])},
"site_name": {"S": site["name"]},
"timestamp": {"S": site["check_time"]},
"is_down": {"BOOL": site["is_down"]},
"response_code": {"N": str(site["response_code"])},
"response_time": {"N": str(site["response_time"])}
}
)
LOGGER.debug("Successfully updated Stats table for {}.".format(site["name"]))
LOGGER.info("Successfully saved all Site info in Dynamo.")
def send_alert(account_id, down_site_list):
if down_site_list:
LOGGER.debug("Down Site List: {}".format(down_site_list))
sns_client = boto3.client("sns")
LOGGER.warning("Alerting for {} site(s) down...".format(len(down_site_list)))
message_parts = ["The following site(s) returned unhealthy responses:"]
for site_info in down_site_list:
message_parts.append("\t{}\t{}".format(site_info["name"], site_info["response_code"]))
message_parts.append("DeveloperDemetri Site Monitor")
LOGGER.debug("\n".join(message_parts))
sns_client.publish(
TopicArn="arn:aws:sns:{}:{}:{}".format(AWS_REGION, account_id, SNS_TOPIC),
Subject="Site Monitor Alert: Webite(s) Down",
Message="\n".join(message_parts)
)
LOGGER.info("Sucessfully alerted for {} site(s) down...".format(len(down_site_list)))
else:
LOGGER.info("No Sites to alert on :)")
def site_monitor_handler(event, context):
LOGGER.debug("Running site monitor...")
dynamo_client = boto3.client("dynamodb")
crashed_sites = []
sites = get_sites(dynamo_client)
for site in sites:
already_down = site["is_down"]
site["check_time"] = datetime.now().isoformat()
try:
resp = requests.get("https://{}".format(site["name"]), timeout=MAX_TIME)
site["is_down"] = bool(resp.status_code != 200)
site["response_code"] = int(resp.status_code)
site["response_time"] = float(resp.elapsed.total_seconds())
except ReadTimeout as err:
LOGGER.warning("Site check {} timed out: {}".format(site["name"], err))
site["is_down"] = True
site["response_code"] = 504
site["response_time"] = MAX_TIME
LOGGER.debug(str(site))
if site["is_down"] and not already_down:
crashed_sites.append(site)
account_id = context.invoked_function_arn.split(":")[4]
save_stats(dynamo_client, sites)
send_alert(account_id, crashed_sites)
LOGGER.info("Successfully ran site monitor.")
| from datetime import datetime
import logging
import os
import boto3
import requests
from requests.exceptions import ReadTimeout
IS_DEBUGGING = str(os.environ.get("DEBUGGING", "no")).strip().lower() == "yes"
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG if IS_DEBUGGING else logging.INFO)
LOGGER.debug("Loading lambda...")
AWS_REGION = str(os.environ.get("AWS_REGION", "us-west-2")).strip()
MAX_TIME = 3
SITE_DYNAMO_TABLE = str(os.environ.get("SITE_DYNAMO_TABLE", "demo")).strip()
STATS_DYNAMO_TABLE = str(os.environ.get("STATS_DYNAMO_TABLE", "demo")).strip()
SNS_TOPIC = str(os.environ.get("SNS_TOPIC", "demo")).strip()
def get_sites(dynamo_client):
LOGGER.info("Retrieving list of sites...")
sites = []
for item in dynamo_client.scan(TableName=SITE_DYNAMO_TABLE, ConsistentRead=True)["Items"]:
sites.append({
"id": int(item["site_id"]["N"]),
"name": str(item["site_name"]["S"]),
"is_down": bool(item["is_down"]["BOOL"])
})
LOGGER.debug(str(sites))
LOGGER.info("Successfully retrieved list of {} sites.".format(len(sites)))
return sites
def save_stats(dynamo_client, sites):
LOGGER.info("Saving all Site info in Dynamo...")
for site in sites:
LOGGER.info("Saving info in Dynamo for {}...".format(site["name"]))
dynamo_client.update_item(
TableName=SITE_DYNAMO_TABLE,
Key={"site_id": {"N": str(site["id"])}},
UpdateExpression="SET is_down = :val",
ExpressionAttributeValues={":val": {"BOOL": site["is_down"]}}
)
LOGGER.debug("Successfully updated Site table for {}.".format(site["name"]))
dynamo_client.put_item(
TableName=STATS_DYNAMO_TABLE,
Item={
"site_id": {"N": str(site["id"])},
"site_name": {"S": site["name"]},
"timestamp": {"S": site["check_time"]},
"is_down": {"BOOL": site["is_down"]},
"response_code": {"N": str(site["response_code"])},
"response_time": {"N": str(site["response_time"])}
}
)
LOGGER.debug("Successfully updated Stats table for {}.".format(site["name"]))
LOGGER.info("Successfully saved all Site info in Dynamo.")
def send_alert(account_id, down_site_list):
if down_site_list:
LOGGER.debug("Down Site List: {}".format(down_site_list))
sns_client = boto3.client("sns")
LOGGER.warning("Alerting for {} site(s) down...".format(len(down_site_list)))
message_parts = ["The following site(s) returned unhealthy responses:"]
for site_info in down_site_list:
message_parts.append("\t{}\t{}".format(site_info["name"], site_info["response_code"]))
message_parts.append("DeveloperDemetri Site Monitor")
LOGGER.debug("\n".join(message_parts))
sns_client.publish(
TopicArn="arn:aws:sns:{}:{}:{}".format(AWS_REGION, account_id, SNS_TOPIC),
Subject="Site Monitor Alert: Webite(s) Down",
Message="\n".join(message_parts)
)
LOGGER.info("Sucessfully alerted for {} site(s) down...".format(len(down_site_list)))
else:
LOGGER.info("No Sites to alert on :)")
def site_monitor_handler(event, context):
LOGGER.debug("Running site monitor...")
dynamo_client = boto3.client("dynamodb")
crashed_sites = []
sites = get_sites(dynamo_client)
for site in sites:
already_down = site["is_down"]
site["check_time"] = datetime.now().isoformat()
try:
resp = requests.get("https://{}".format(site["name"]), timeout=MAX_TIME)
site["is_down"] = bool(resp.status_code != 200)
site["response_code"] = int(resp.status_code)
site["response_time"] = float(resp.elapsed.total_seconds())
except ReadTimeout as err:
LOGGER.warning("Site check {} timed out: {}".format(site["name"], err))
site["is_down"] = True
site["response_code"] = 504
site["response_time"] = MAX_TIME
LOGGER.debug(str(site))
if site["is_down"] and not already_down:
crashed_sites.append(site)
account_id = context.invoked_function_arn.split(":")[4]
save_stats(dynamo_client, sites)
send_alert(account_id, crashed_sites)
LOGGER.info("Successfully ran site monitor.") | none | 1 | 2.20432 | 2 | |
docs/cornell CS class/lesson 19. Dictionaries/demos/indef.py | LizzieDeng/kalman_fliter_analysis | 0 | 6616390 | <filename>docs/cornell CS class/lesson 19. Dictionaries/demos/indef.py
"""
Module to demonstrate how keyword expansion works in a
function definition.
In a function definition, keyword expansion packages all
of the arguments into a single dictions for processing.
This allows you to have functions (the one below) which
have many parameters, but most of them are optional. This
is very popular in GUI libraries where there are a lot of
ways to specify a window or button.
This can be confusing, so you will want to run this one
in the Python Tutor for full effect.
Author: <NAME> (wmw2)
Date: June 7, 2019
"""
def area_of_rectangle(**kw):
"""
Returns the area of the specified rectangle.
By default, a rectangle is specified as four numbers:
(left,bottom,right, top). Left and right are the
x-coordinates of the left and right edge. Bottom and
top are the y-coordinates of the bottoma and top edges.
We assume that normal high school coordinates where
top is greater than bottom and right is greater than
left (yes, there are situations this may not be true).
However, this function allows full flexibility for
defining a rectangle. Want to specify (left,bottom,
width,height) instead? Or how about (top,right,width,
height)? We even allow (center,middle,width,height),
where convention is that center is the x-coordinate of
the rectangle center and middle is the y-coordinate of
the center.
If the user provides contradictory arguments (e.g.
left=1, center=3, and width=10), parameters are
prioritized as described below.
Parameter left: The left edge of the rectangle
Precondition: left is an int or float
Parameter right: The right edge of the rectangle
Precondition: right is an int or float
Parameter width: The width of the rectangle (ignored if both left & right provided)
Precondition: width is an int or float
Parameter center: The horizontal center of the rectangle
(ignored if any two of left, right, and width provided)
Precondition: center is an int or float
Parameter bottom: The bottom edge of the rectangle
Precondition: bottom is an int or float
Parameter top: The top edge of the rectangle
Precondition: top is an int or float
Parameter height: The height of the rectangle (ignored if both bottom & top provided)
Precondition: height is an int or float
Parameter middle: The vertical center of the rectangle
(ignored if any two of bottom, top, and height provided)
Precondition: right is an int or float
"""
# Compute the width of the rectangle
width = None
if 'left' in kw and 'right' in kw:
width = kw['right']-kw['left']
elif 'width' in kw:
width = kw['width']
elif 'center' in kw:
if 'left' in kw:
width = 2*(kw['center']-kw['left'])
elif 'right' in kw:
width = 2*(kw['right']-kw['center'])
assert width != None, 'There were not enough arguments to determine the width'
# Compute the height of the rectangle
height = None
if 'bottom' in kw and 'top' in kw:
height = kw['top']-kw['bottom']
elif 'height' in kw:
height = kw['height']
elif 'middle' in kw:
if 'bottom' in kw:
height = 2*(kw['center']-kw['bottom'])
elif 'top' in kw:
height = 2*(kw['top']-kw['center'])
assert height != None, 'There were not enough arguments to determine the width'
return width*height
# Try this in the Python Tutor
#a = area_of_rectangle(left=1,bottom=0,right=4,top=5)
#b = area_of_rectangle(center=1,bottom=0,right=4,height=5)
| <filename>docs/cornell CS class/lesson 19. Dictionaries/demos/indef.py
"""
Module to demonstrate how keyword expansion works in a
function definition.
In a function definition, keyword expansion packages all
of the arguments into a single dictions for processing.
This allows you to have functions (the one below) which
have many parameters, but most of them are optional. This
is very popular in GUI libraries where there are a lot of
ways to specify a window or button.
This can be confusing, so you will want to run this one
in the Python Tutor for full effect.
Author: <NAME> (wmw2)
Date: June 7, 2019
"""
def area_of_rectangle(**kw):
"""
Returns the area of the specified rectangle.
By default, a rectangle is specified as four numbers:
(left,bottom,right, top). Left and right are the
x-coordinates of the left and right edge. Bottom and
top are the y-coordinates of the bottoma and top edges.
We assume that normal high school coordinates where
top is greater than bottom and right is greater than
left (yes, there are situations this may not be true).
However, this function allows full flexibility for
defining a rectangle. Want to specify (left,bottom,
width,height) instead? Or how about (top,right,width,
height)? We even allow (center,middle,width,height),
where convention is that center is the x-coordinate of
the rectangle center and middle is the y-coordinate of
the center.
If the user provides contradictory arguments (e.g.
left=1, center=3, and width=10), parameters are
prioritized as described below.
Parameter left: The left edge of the rectangle
Precondition: left is an int or float
Parameter right: The right edge of the rectangle
Precondition: right is an int or float
Parameter width: The width of the rectangle (ignored if both left & right provided)
Precondition: width is an int or float
Parameter center: The horizontal center of the rectangle
(ignored if any two of left, right, and width provided)
Precondition: center is an int or float
Parameter bottom: The bottom edge of the rectangle
Precondition: bottom is an int or float
Parameter top: The top edge of the rectangle
Precondition: top is an int or float
Parameter height: The height of the rectangle (ignored if both bottom & top provided)
Precondition: height is an int or float
Parameter middle: The vertical center of the rectangle
(ignored if any two of bottom, top, and height provided)
Precondition: right is an int or float
"""
# Compute the width of the rectangle
width = None
if 'left' in kw and 'right' in kw:
width = kw['right']-kw['left']
elif 'width' in kw:
width = kw['width']
elif 'center' in kw:
if 'left' in kw:
width = 2*(kw['center']-kw['left'])
elif 'right' in kw:
width = 2*(kw['right']-kw['center'])
assert width != None, 'There were not enough arguments to determine the width'
# Compute the height of the rectangle
height = None
if 'bottom' in kw and 'top' in kw:
height = kw['top']-kw['bottom']
elif 'height' in kw:
height = kw['height']
elif 'middle' in kw:
if 'bottom' in kw:
height = 2*(kw['center']-kw['bottom'])
elif 'top' in kw:
height = 2*(kw['top']-kw['center'])
assert height != None, 'There were not enough arguments to determine the width'
return width*height
# Try this in the Python Tutor
#a = area_of_rectangle(left=1,bottom=0,right=4,top=5)
#b = area_of_rectangle(center=1,bottom=0,right=4,height=5)
| en | 0.76929 | Module to demonstrate how keyword expansion works in a function definition. In a function definition, keyword expansion packages all of the arguments into a single dictions for processing. This allows you to have functions (the one below) which have many parameters, but most of them are optional. This is very popular in GUI libraries where there are a lot of ways to specify a window or button. This can be confusing, so you will want to run this one in the Python Tutor for full effect. Author: <NAME> (wmw2) Date: June 7, 2019 Returns the area of the specified rectangle. By default, a rectangle is specified as four numbers: (left,bottom,right, top). Left and right are the x-coordinates of the left and right edge. Bottom and top are the y-coordinates of the bottoma and top edges. We assume that normal high school coordinates where top is greater than bottom and right is greater than left (yes, there are situations this may not be true). However, this function allows full flexibility for defining a rectangle. Want to specify (left,bottom, width,height) instead? Or how about (top,right,width, height)? We even allow (center,middle,width,height), where convention is that center is the x-coordinate of the rectangle center and middle is the y-coordinate of the center. If the user provides contradictory arguments (e.g. left=1, center=3, and width=10), parameters are prioritized as described below. 
Parameter left: The left edge of the rectangle Precondition: left is an int or float Parameter right: The right edge of the rectangle Precondition: right is an int or float Parameter width: The width of the rectangle (ignored if both left & right provided) Precondition: width is an int or float Parameter center: The horizontal center of the rectangle (ignored if any two of left, right, and width provided) Precondition: center is an int or float Parameter bottom: The bottom edge of the rectangle Precondition: bottom is an int or float Parameter top: The top edge of the rectangle Precondition: top is an int or float Parameter height: The height of the rectangle (ignored if both bottom & top provided) Precondition: height is an int or float Parameter middle: The vertical center of the rectangle (ignored if any two of bottom, top, and height provided) Precondition: right is an int or float # Compute the width of the rectangle # Compute the height of the rectangle # Try this in the Python Tutor #a = area_of_rectangle(left=1,bottom=0,right=4,top=5) #b = area_of_rectangle(center=1,bottom=0,right=4,height=5) | 4.222881 | 4 |
ccn/constraints_group.py | atatomir/CCN | 0 | 6616391 | import numpy as np
import networkx as nx
from torch import neg_
from .constraint import Constraint
from .literal import Literal
class ConstraintsGroup:
def __init__(self, arg):
if isinstance(arg, str):
# ConstraintGroup(string)
with open(arg, 'r') as f:
self.constraints = [Constraint(line) for line in f]
else:
# ConstraintGroup([Constraint])
self.constraints = arg
# Keep the initial order of constraints for coherent_with
self.constraints_list = self.constraints
self.constraints = frozenset(self.constraints_list)
def __add__(self, other):
return ConstraintsGroup(self.constraints.union(other.constraints))
def __str__(self):
return '\n'.join([str(constraint) for constraint in sorted(self.constraints)])
def __iter__(self):
return iter(self.constraints)
def __eq__(self, other):
if not isinstance(other, ConstraintsGroup): return False
return self.constraints == other.constraints
def __len__(self):
return len(self.constraints)
def head_encoded(self, num_classes):
pos_head = []
neg_head = []
for constraint in self.constraints:
pos, neg = constraint.head_encoded(num_classes)
pos_head.append(pos)
neg_head.append(neg)
return np.array(pos_head), np.array(neg_head)
def body_encoded(self, num_classes):
pos_body = []
neg_body = []
for constraint in self.constraints:
pos, neg = constraint.body_encoded(num_classes)
pos_body.append(pos)
neg_body.append(neg)
return np.array(pos_body), np.array(neg_body)
def encoded(self, num_classes):
head = self.head_encoded(num_classes)
body = self.body_encoded(num_classes)
return head, body
def coherent_with(self, preds):
coherent = [constraint.coherent_with(preds) for constraint in self.constraints_list]
return np.array(coherent).transpose()
def atoms(self):
atoms = set()
for constraint in self.constraints:
atoms = atoms.union(constraint.atoms())
return atoms
def heads(self):
heads = set()
for constraint in self.constraints:
heads.add(constraint.head.atom)
return heads
def graph(self):
G = nx.DiGraph()
G.add_nodes_from(self.atoms())
for constraint in self.constraints:
for lit in constraint.body:
x = lit.atom
y = constraint.head.atom
G.add_edge(x, y)
G[x][y]['body'] = lit.positive
G[x][y]['head'] = constraint.head.positive
return G
def duograph(self):
atoms = self.atoms()
pos_atoms = [str(Literal(atom, True)) for atom in atoms]
neg_atoms = [str(Literal(atom, False)) for atom in atoms]
G = nx.DiGraph()
G.add_nodes_from(pos_atoms + neg_atoms)
for constraint in self.constraints:
for lit in constraint.body:
G.add_edge(str(lit), str(constraint.head))
return G
def stratify(self):
G = self.graph()
for node in G.nodes():
G.nodes[node]['deps'] = 0
G.nodes[node]['constraints'] = []
for x, y in G.edges():
G.nodes[y]['deps'] += 1
for constraint in self.constraints:
G.nodes[constraint.head.atom]['constraints'].append(constraint)
result = []
ready = [node for node in G.nodes() if G.nodes[node]['deps'] == 0]
while len(ready) > 0:
resolved = [cons for node in ready for cons in G.nodes[node]['constraints']]
if len(resolved) > 0:
result.append(ConstraintsGroup(resolved))
next = []
for node in ready:
for other in G[node]:
G.nodes[other]['deps'] -= 1
if G.nodes[other]['deps'] == 0:
next.append(other)
ready = next
return result
def test_str():
cons0 = Constraint('0 :- 1 n2')
cons1 = Constraint('n0 :- 1')
cons2 = Constraint('1 :- n2')
group = ConstraintsGroup([cons0, cons1, cons2])
assert str(group) == "n0 :- 1\n0 :- 1 n2\n1 :- n2"
def test_from_file():
group = ConstraintsGroup('../constraints/example')
assert str(group) == "n0 :- 1\n0 :- 1 n2\n1 :- n2"
def test_coherent_with():
group = ConstraintsGroup('../constraints/example')
assert (group.coherent_with(np.array([
[0.1, 0.2, 0.3, 0.4],
[0.7, 0.2, 0.3, 0.4],
[0.8, 0.2, 0.9, 0.4]
])) == np.array(
[[False, True, False],
[ True, True, False],
[ True, False, True]])).all()
def test_add():
c1 = Constraint('n0 :- 1 n2 3')
c2 = Constraint('0 :- n1 n2 4')
group0 = ConstraintsGroup([c1])
group1 = ConstraintsGroup([c2])
group = group0 + group1
assert group == ConstraintsGroup([c1, c2])
def test_atoms():
group = ConstraintsGroup('../constraints/full')
assert group.atoms() == set(range(41))
def test_graph():
group = ConstraintsGroup('../constraints/example')
graph = group.graph()
assert set(graph.nodes()) == {0, 1, 2}
assert set(graph.edges()) == {(1, 0), (2, 1), (2, 0)}
def test_duograph():
group = ConstraintsGroup('../constraints/example')
graph = group.duograph()
print(graph)
print(graph.nodes())
print(graph.edges())
assert set(graph.nodes()) == {'0', '1', '2', 'n0', 'n1', 'n2'}
assert set(graph.edges()) == {('1', '0'), ('1', 'n0'), ('n2', '1'), ('n2', '0')}
def test_heads():
group = ConstraintsGroup('../constraints/example')
assert group.heads() == {0, 1}
def test_stratify():
group = ConstraintsGroup([
Constraint('1 :- 0'),
Constraint('n2 :- n0 4'),
Constraint('3 :- n1 2')
])
groups = group.stratify()
assert len(groups) == 2
assert groups[0].heads() == {1, 2}
assert groups[1].heads() == {3}
| import numpy as np
import networkx as nx
from torch import neg_
from .constraint import Constraint
from .literal import Literal
class ConstraintsGroup:
def __init__(self, arg):
if isinstance(arg, str):
# ConstraintGroup(string)
with open(arg, 'r') as f:
self.constraints = [Constraint(line) for line in f]
else:
# ConstraintGroup([Constraint])
self.constraints = arg
# Keep the initial order of constraints for coherent_with
self.constraints_list = self.constraints
self.constraints = frozenset(self.constraints_list)
def __add__(self, other):
return ConstraintsGroup(self.constraints.union(other.constraints))
def __str__(self):
return '\n'.join([str(constraint) for constraint in sorted(self.constraints)])
def __iter__(self):
return iter(self.constraints)
def __eq__(self, other):
if not isinstance(other, ConstraintsGroup): return False
return self.constraints == other.constraints
def __len__(self):
return len(self.constraints)
def head_encoded(self, num_classes):
pos_head = []
neg_head = []
for constraint in self.constraints:
pos, neg = constraint.head_encoded(num_classes)
pos_head.append(pos)
neg_head.append(neg)
return np.array(pos_head), np.array(neg_head)
def body_encoded(self, num_classes):
pos_body = []
neg_body = []
for constraint in self.constraints:
pos, neg = constraint.body_encoded(num_classes)
pos_body.append(pos)
neg_body.append(neg)
return np.array(pos_body), np.array(neg_body)
def encoded(self, num_classes):
head = self.head_encoded(num_classes)
body = self.body_encoded(num_classes)
return head, body
def coherent_with(self, preds):
coherent = [constraint.coherent_with(preds) for constraint in self.constraints_list]
return np.array(coherent).transpose()
def atoms(self):
atoms = set()
for constraint in self.constraints:
atoms = atoms.union(constraint.atoms())
return atoms
def heads(self):
heads = set()
for constraint in self.constraints:
heads.add(constraint.head.atom)
return heads
def graph(self):
G = nx.DiGraph()
G.add_nodes_from(self.atoms())
for constraint in self.constraints:
for lit in constraint.body:
x = lit.atom
y = constraint.head.atom
G.add_edge(x, y)
G[x][y]['body'] = lit.positive
G[x][y]['head'] = constraint.head.positive
return G
def duograph(self):
atoms = self.atoms()
pos_atoms = [str(Literal(atom, True)) for atom in atoms]
neg_atoms = [str(Literal(atom, False)) for atom in atoms]
G = nx.DiGraph()
G.add_nodes_from(pos_atoms + neg_atoms)
for constraint in self.constraints:
for lit in constraint.body:
G.add_edge(str(lit), str(constraint.head))
return G
def stratify(self):
G = self.graph()
for node in G.nodes():
G.nodes[node]['deps'] = 0
G.nodes[node]['constraints'] = []
for x, y in G.edges():
G.nodes[y]['deps'] += 1
for constraint in self.constraints:
G.nodes[constraint.head.atom]['constraints'].append(constraint)
result = []
ready = [node for node in G.nodes() if G.nodes[node]['deps'] == 0]
while len(ready) > 0:
resolved = [cons for node in ready for cons in G.nodes[node]['constraints']]
if len(resolved) > 0:
result.append(ConstraintsGroup(resolved))
next = []
for node in ready:
for other in G[node]:
G.nodes[other]['deps'] -= 1
if G.nodes[other]['deps'] == 0:
next.append(other)
ready = next
return result
def test_str():
cons0 = Constraint('0 :- 1 n2')
cons1 = Constraint('n0 :- 1')
cons2 = Constraint('1 :- n2')
group = ConstraintsGroup([cons0, cons1, cons2])
assert str(group) == "n0 :- 1\n0 :- 1 n2\n1 :- n2"
def test_from_file():
group = ConstraintsGroup('../constraints/example')
assert str(group) == "n0 :- 1\n0 :- 1 n2\n1 :- n2"
def test_coherent_with():
group = ConstraintsGroup('../constraints/example')
assert (group.coherent_with(np.array([
[0.1, 0.2, 0.3, 0.4],
[0.7, 0.2, 0.3, 0.4],
[0.8, 0.2, 0.9, 0.4]
])) == np.array(
[[False, True, False],
[ True, True, False],
[ True, False, True]])).all()
def test_add():
c1 = Constraint('n0 :- 1 n2 3')
c2 = Constraint('0 :- n1 n2 4')
group0 = ConstraintsGroup([c1])
group1 = ConstraintsGroup([c2])
group = group0 + group1
assert group == ConstraintsGroup([c1, c2])
def test_atoms():
group = ConstraintsGroup('../constraints/full')
assert group.atoms() == set(range(41))
def test_graph():
group = ConstraintsGroup('../constraints/example')
graph = group.graph()
assert set(graph.nodes()) == {0, 1, 2}
assert set(graph.edges()) == {(1, 0), (2, 1), (2, 0)}
def test_duograph():
group = ConstraintsGroup('../constraints/example')
graph = group.duograph()
print(graph)
print(graph.nodes())
print(graph.edges())
assert set(graph.nodes()) == {'0', '1', '2', 'n0', 'n1', 'n2'}
assert set(graph.edges()) == {('1', '0'), ('1', 'n0'), ('n2', '1'), ('n2', '0')}
def test_heads():
group = ConstraintsGroup('../constraints/example')
assert group.heads() == {0, 1}
def test_stratify():
group = ConstraintsGroup([
Constraint('1 :- 0'),
Constraint('n2 :- n0 4'),
Constraint('3 :- n1 2')
])
groups = group.stratify()
assert len(groups) == 2
assert groups[0].heads() == {1, 2}
assert groups[1].heads() == {3}
| en | 0.577452 | # ConstraintGroup(string) # ConstraintGroup([Constraint]) # Keep the initial order of constraints for coherent_with | 2.436182 | 2 |
# geotrek/feedback/tests/test_suricate_sync.py
import io
import os
import uuid
from unittest import mock
from unittest.mock import MagicMock
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.urls.base import reverse
from django.utils.translation import gettext_lazy as _
from mapentity.tests.factories import SuperUserFactory, UserFactory
from geotrek.authent.tests.factories import UserProfileFactory
from geotrek.common.models import Attachment
from geotrek.feedback.forms import ReportForm
from geotrek.feedback.helpers import SuricateMessenger, SuricateRequestManager
from geotrek.feedback.models import (AttachedMessage, Report, ReportActivity,
ReportProblemMagnitude, ReportStatus, WorkflowManager)
from geotrek.feedback.tests.factories import (ReportFactory,
ReportStatusFactory,
WorkflowManagerFactory)
# Connection settings for the Suricate "standard" API (report submission),
# injected into the SURICATE_REPORT_SETTINGS Django setting by the tests below.
# Keys and auth are left empty because every HTTP call is mocked.
SURICATE_REPORT_SETTINGS = {
    "URL": "http://suricate.wsstandard.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}

# Connection settings for the Suricate "management" API (alert retrieval /
# status updates), same shape as the report settings above.
SURICATE_MANAGEMENT_SETTINGS = {
    "URL": "http://suricate.wsmanagement.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}

# Workflow tuning used in workflow-mode tests: report timers (in days) and the
# message sent back to Suricate when a report is relocated outside the district.
SURICATE_WORKFLOW_SETTINGS = {
    "TIMER_FOR_WAITING_REPORTS_IN_DAYS": 6,
    "TIMER_FOR_PROGRAMMED_REPORTS_IN_DAYS": 7,
    "SURICATE_RELOCATED_REPORT_MESSAGE": "Le Signalement ne concerne pas le Département du Gard - Relocalisé hors du Département"
}
def mocked_json(file_name):
    """Return the content of a JSON fixture from the tests ``data`` dir as UTF-8 bytes.

    The file is decoded with an explicit UTF-8 encoding (instead of the
    platform locale default) so fixtures containing accented French text
    load identically on any system.
    """
    filename = os.path.join(os.path.dirname(__file__), "data", file_name)
    with open(filename, "r", encoding="UTF-8") as f:
        return bytes(f.read(), encoding="UTF-8")
def mocked_image(file_name):
    """Load a binary image fixture from the tests ``data`` directory as a bytearray."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    with open(os.path.join(data_dir, file_name), "rb") as image_file:
        return bytearray(image_file.read())
@override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
@override_settings(SURICATE_MANAGEMENT_SETTINGS=SURICATE_MANAGEMENT_SETTINGS)
class SuricateTests(TestCase):
    """Base class providing mocked Suricate HTTP endpoints and common fixtures."""

    def build_get_request_patch(self, mocked: MagicMock, cause_JPG_error=False, remove_one_alert=False):
        """Route mocked GET requests to the matching Suricate fixture.

        ``cause_JPG_error`` makes attachment downloads answer 404;
        ``remove_one_alert`` serves the alert list where one report has
        moved outside of the bounding box.
        """
        def fake_get(url, params=None, **kwargs):
            fake_response = MagicMock()
            if "GetActivities" in url:
                fake_response.status_code = 200
                fake_response.content = mocked_json("suricate_activities.json")
                return fake_response
            if "GetStatusList" in url:
                fake_response.status_code = 200
                fake_response.content = mocked_json("suricate_statuses.json")
                return fake_response
            if "GetAlerts" in url:
                alerts_file = "suricate_alerts_later.json" if remove_one_alert else "suricate_alerts.json"
                fake_response.status_code = 200
                fake_response.content = mocked_json(alerts_file)
                return fake_response
            if any(endpoint in url for endpoint in ("wsLockAlert", "wsUnlockAlert", "wsUpdateGPS")):
                fake_response.status_code = 200
                fake_response.content = mocked_json("suricate_positive.json")
                return fake_response
            # Attachment failure must be checked before the extension match
            # so that image URLs answer 404 when the error is simulated.
            if cause_JPG_error:
                fake_response.status_code = 404
                return fake_response
            if any(extension in url for extension in (".jpg", ".png", ".JPG")):
                fake_response.status_code = 200
                fake_response.content = mocked_image("theme-fauna.png")
                return fake_response
            fake_response.status_code = 404
            return fake_response

        mocked.side_effect = fake_get

    def build_post_request_patch(self, mocked: MagicMock):
        """Mock POST requests: report/status/message endpoints succeed, anything else is a 404."""
        def fake_post(url, params=None, **kwargs):
            fake_response = MagicMock()
            if any(endpoint in url for endpoint in ("SendReport", "UpdateStatus", "MessageSentinel")):
                fake_response.status_code = 200
                fake_response.content = mocked_json("suricate_positive.json")
            else:
                fake_response.status_code = 404
            return fake_response

        mocked.side_effect = fake_post

    def build_failed_request_patch(self, mocked: MagicMock):
        """Make every mocked call answer a Suricate-side failure (HTTP 400 + negative payload)."""
        failure = mock.Mock()
        failure.content = mocked_json("suricate_negative.json")
        failure.status_code = 400
        mocked.return_value = failure

    def build_timeout_request_patch(self, mocked: MagicMock):
        """Make every mocked call answer an HTTP 408 request timeout with an empty payload."""
        timeout = mock.Mock()
        timeout.status_code = 408  # request timeout
        timeout.content = {}
        mocked.return_value = timeout

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory()
        UserProfileFactory.create(user=cls.user)
        cls.workflow_manager = WorkflowManagerFactory(user=cls.user)
        cls.admin = SuperUserFactory(username="Admin", password="<PASSWORD>")

    def setUp(self):
        self.client.force_login(self.admin)
class SuricateAPITests(SuricateTests):
    """Exercise the ``sync_suricate`` command and Suricate helpers against mocked endpoints."""

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_statuses(self, mocked_get, mocked_logger):
        """Test GET requests on Statuses endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", statuses=True)
        self.assertEqual(ReportStatus.objects.count(), 5)
        mocked_logger.info.assert_called_with("New status - id: classified, label: Classé sans suite")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities(self, mocked_get, mocked_logger):
        """Test GET requests on Activities endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", activities=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        mocked_logger.info.assert_called_with("New activity - id: 51, label: Roller, Skateboard")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities_and_statuses(self, mocked):
        """Test GET requests on both Activities and Statuses endpoint creates objects"""
        self.build_get_request_patch(mocked)
        call_command("sync_suricate", activities=True, statuses=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        self.assertEqual(ReportStatus.objects.count(), 5)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.management.commands.sync_suricate.logger")
    def test_command_disabled(self, mocked):
        """Test sync_suricate command is disabled when setting is False"""
        call_command("sync_suricate", activities=True, statuses=True)
        mocked.error.assert_called_with("To use this command, please activate setting SURICATE_MANAGEMENT_ENABLED or SURICATE_WORKFLOW_ENABLED.")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail(self, mocked_get, mocked_logger):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        self.assertIn("New reports have been imported from Suricate", sent_mail.body)
        self.assertIn("Please consult your reports in Geotrek-Admin", sent_mail.body)
        for report in Report.objects.all():
            self.assertIn(report.full_url, sent_mail.body)
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are not assigned to workflow manager when mode is management
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIsNone(r.assigned_user)
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)
        # Test sync specific report overwrites local info
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=r.pk, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Ne pas prendre la route Départementale 155 en direction de Malons")
        # Test sync last report overwrites local info
        r = Report.objects.get(external_uuid="7EE5DF25-5056-AA2B-DDBEEFA5768CD53E")
        self.assertEqual(r.comment, "Lames cassées")
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=0, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Lames cassées")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.ContentFile.__init__")
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_sync_handles_malformed_images(self, mocked_get, mocked_logger, mocked_save):
        """Test Suricate sync is not interrupted by corrupted images"""
        self.build_get_request_patch(mocked_get)
        mocked_save.side_effect = Exception("This image is bad")
        call_command("sync_suricate", verbosity=2)
        mocked_logger.error.assert_called()

    @override_settings(SURICATE_WORKFLOW_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail_and_assign(self, mocked_get):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        # Test update report does not send email and saves
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are assigned to workflow manager
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIn(r.assigned_user.pk, list(WorkflowManager.objects.values_list('user', flat=True)))
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_failed_attachments_are_downloaded_on_next_sync(self, mocked_get, mocked_logger):
        """Test failed requests to download attachments are retried on next sync"""
        self.assertEqual(Attachment.objects.count(), 0)
        # Fail to download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # All attachments are missing their image file
            self.assertFalse(atta.attachment_file.name)
        # Succesfully download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=False)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)
        # Succesfully download all images a second time to cover "skip file" case
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)

    @override_settings(PAPERCLIP_ENABLE_LINK=False)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    def test_sync_needs_paperclip_enabled(self):
        """Test Suricate sync refuses to run when PAPERCLIP_ENABLE_LINK is disabled"""
        with self.assertRaises(Exception):
            call_command("sync_suricate", verbosity=2)

    @override_settings(SURICATE_REPORT_ENABLED=True)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_report_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Report Mode"""
        report = Report.objects.create()
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_management_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Management Mode"""
        # Create a report with an UID - emulates report from Suricate
        uid = uuid.uuid4()
        Report.objects.create(external_uuid=uid)
        post_report.assert_not_called()
        # Create a report with no UID - emulates new report from Geotrek
        report = Report.objects.create(external_uuid=None)
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_save_on_report_doesnt_post_to_suricate_in_no_suricate_mode(self, post_report):
        """Test save does not post to suricate on save Report in No Suricate Mode"""
        Report.objects.create()
        post_report.assert_not_called()

    @mock.patch("geotrek.feedback.helpers.requests.post")
    def test_post_request_to_suricate(self, mock_post):
        """Test post request itself
        Request post is mock
        """
        # Create a report without saving it
        report = ReportFactory.build()
        # Define a mock response
        self.build_post_request_patch(mock_post)
        # Call the function with the report
        result = SuricateMessenger().post_report(report)
        self.assertEqual(result, None)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_2(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 400
        self.build_failed_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test(self, mock_get, mocked_stdout):
        """Assert connection test command outputs OK
        """
        # Mock successful responses from both APIs
        self.build_get_request_patch(mock_get)
        call_command("sync_suricate", test=True)
        # Assert outputs OK
        self.assertEqual(mocked_stdout.getvalue(), 'API Standard :\nOK\nAPI Gestion :\nOK\n')

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_API(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on Suricate API side
        """
        # Mock negative response
        self.build_failed_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 400\nAPI Gestion :\nKO - Status code: 400\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_HTTP(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on HTTP
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 408\nAPI Gestion :\nKO - Status code: 408\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_relocated_report_is_deleted_on_next_sync(self, mocked_get, mocked_logger):
        """Test reports relocated outside of BBOX are deleted on next sync"""
        self.build_get_request_patch(mocked_get, remove_one_alert=False)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 1)
        self.assertEqual(Report.objects.count(), 8)
        # Sync again with the alert list where one report moved out of the BBOX
        self.build_get_request_patch(mocked_get, remove_one_alert=True)
        call_command("sync_suricate", verbosity=2)
        # One out of the 9 was removed from response because this report now lives outside of BBOX according to Suricate
        # 7 out of 8 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 0)
        self.assertEqual(Report.objects.count(), 7)
class SuricateInterfaceTests(SuricateTests):
    """Check the admin import interface with Suricate management on and off."""

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_disabled(self, mocked):
        """The Suricate import form is hidden and inoperative when management mode is off."""
        interface_user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(interface_user)
        self.build_get_request_patch(mocked)
        import_url = reverse('common:import_dataset')
        page = self.client.get(import_url)
        self.assertEqual(page.status_code, 200)
        self.assertNotContains(page, 'import-suricate')
        self.assertNotContains(page, _('Data to import from Suricate'))
        self.client.post(
            import_url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        # Posting the form must not have created any report
        self.assertEqual(Report.objects.count(), 0)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.SuricateParser.get_alerts")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_enabled(self, mocked_get, mocked_parser):
        """The Suricate import form is shown and triggers the parser when management mode is on."""
        interface_user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(interface_user)
        self.build_get_request_patch(mocked_get)
        import_url = reverse('common:import_dataset')
        page = self.client.get(import_url)
        self.assertEqual(page.status_code, 200)
        self.assertContains(page, 'import-suricate')
        self.assertContains(page, _('Data to import from Suricate'))
        post_response = self.client.post(
            import_url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        self.assertEqual(post_response.status_code, 200)
        mocked_parser.assert_called_once()

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """A request timeout (HTTP 408) makes the request manager raise.

        NOTE(review): this duplicates the test of the same name in
        SuricateAPITests — presumably a copy-paste leftover; consider
        removing one of the two.
        """
        self.build_timeout_request_patch(mock_get)
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")
class SuricateWorkflowTests(SuricateTests):
    """Base class for workflow-mode tests: pre-creates every report status used by the workflow."""

    fixtures = ['geotrek/maintenance/fixtures/basic.json']

    @classmethod
    def setUpTestData(cls):
        SuricateTests.setUpTestData()
        # (class attribute prefix, Suricate identifier, display label)
        status_definitions = [
            ('filed', 'filed', "Déposé"),
            ('classified', 'classified', "Classé sans suite"),
            ('programmed', 'programmed', "Programmé"),
            ('waiting', 'waiting', "En cours"),
            ('rejected', 'rejected', "Rejeté"),
            ('late_intervention', 'late_intervention', "Intervention en retard"),
            ('late_resolution', 'late_resolution', "Resolution en retard"),
            ('solved_intervention', 'solved_intervention', "Intervention terminée"),
            ('resolved', 'solved', "Résolu"),
        ]
        for attribute_prefix, identifier, label in status_definitions:
            setattr(cls, attribute_prefix + "_status", ReportStatusFactory(identifier=identifier, label=label))
        cls.report = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4())
        cls.admin = SuperUserFactory(username="Admiin", password="<PASSWORD>")
        cls.interv_report = ReportFactory(status=cls.programmed_status)
def raise_multiple(exceptions):
    """Re-raise every exception in *exceptions*, chained via ``__context__``.

    The recursion in ``finally`` makes each earlier exception replace the
    later one as it propagates, so the first exception of the sequence ends
    up outermost with the others reachable through ``__context__``.

    Unlike the naive ``list.pop()`` recipe, this works on a copy so the
    caller's list is not emptied as a side effect.
    """
    if not exceptions:  # sequence exhausted, recursion ends
        return
    remaining = list(exceptions)  # copy: do not mutate the caller's list
    try:
        raise remaining.pop()
    finally:
        raise_multiple(remaining)  # recursion
def test_for_all_suricate_modes(test_func):
    """Run *test_func* once per Suricate mode and report every failure at once.

    Assertion errors from each mode are collected and re-raised together via
    ``raise_multiple`` so one failing mode does not mask the others.
    """
    mode_table = [
        ("No Suricate", dict(SURICATE_REPORT_ENABLED=False, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False)),
        ("Suricate Report", dict(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False)),
        ("Suricate Management", dict(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False)),
        ("Suricate Workflow", dict(SURICATE_REPORT_ENABLED=False, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=True)),
    ]

    def inner(self, *args, **kwargs):
        failures = []
        for mode_name, settings_overrides in mode_table:
            try:
                with override_settings(LANGUAGE_CODE='fr', **settings_overrides):
                    test_func(self, *args, **kwargs)
            except AssertionError as error:
                error.args += ("Failed for '%s' mode" % mode_name,)
                failures.append(error)
        raise_multiple(failures)
    return inner
def test_for_report_and_basic_modes(test_func):
    """Run *test_func* under the 'No Suricate' and 'Suricate Report' modes.

    Failures from both modes are collected and re-raised together via
    ``raise_multiple``.
    """
    mode_table = [
        ("No Suricate", dict(SURICATE_REPORT_ENABLED=False, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False)),
        ("Suricate Report", dict(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False)),
    ]

    def inner(self, *args, **kwargs):
        failures = []
        for mode_name, settings_overrides in mode_table:
            try:
                with override_settings(LANGUAGE_CODE='fr', **settings_overrides):
                    test_func(self, *args, **kwargs)
            except AssertionError as error:
                error.args += ("Failed for '%s' mode" % mode_name,)
                failures.append(error)
        raise_multiple(failures)
    return inner
def test_for_management_and_workflow_modes(test_func):
    """Run *test_func* under both 'Suricate Management' and 'Suricate Workflow' modes.

    Assertion failures from each mode are collected and re-raised together so
    one failing mode does not mask the other.
    """
    def inner(self, *args, **kwargs):
        exceptions = []
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Management' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=True, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Workflow' mode",)
            # BUG FIX: previously the failure was annotated but never appended
            # (nor re-raised), silently swallowing workflow-mode assertion errors.
            exceptions.append(e)
        raise_multiple(exceptions)
    return inner
def test_for_workflow_mode(test_func):
    """Run *test_func* with full Suricate workflow mode enabled (French locale),
    tagging any assertion failure with the mode name before re-raising."""
    def inner(self, *args, **kwargs):
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=True, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as error:
            error.args += ("Failed for 'Suricate Workflow' mode",)
            raise
    return inner
def test_for_management_mode(test_func):
    """Run *test_func* with Suricate management mode enabled (workflow off, French locale),
    tagging any assertion failure with the mode name before re-raising."""
    def inner(self, *args, **kwargs):
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as error:
            error.args += ("Failed for 'Suricate Management' mode",)
            raise
    return inner
class TestWorkflowFirstSteps(SuricateWorkflowTests):
    """Workflow first steps: classifying a freshly filed report, with and without workflow mode."""

    @classmethod
    def setUpTestData(cls):
        SuricateWorkflowTests.setUpTestData()
        # Two filed reports assigned to the admin, one per test below.
        cls.report_filed_1 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)
        cls.report_filed_2 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)

    @override_settings(SURICATE_WORKFLOW_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    @mock.patch("geotrek.feedback.helpers.requests.post")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
    def test_classify_alert_notifies_suricate_when_workflow_enabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
        """Classifying a report in workflow mode messages the sentinel and pushes the status to Suricate."""
        form = ReportForm(
            instance=self.report_filed_1,
            data={
                'geom': self.report_filed_1.geom,
                'email': self.report_filed_1.email,
                'status': self.classified_status.pk,
                'message_sentinel': "Problème déjà réglé"
            }
        )
        # Bug fix: is_valid is a method — `assertTrue(form.is_valid)` only checked
        # the bound method's truthiness, which is always True.
        self.assertTrue(form.is_valid())
        form.save()
        mocked_mail_sentinel.assert_called_once_with(self.report_filed_1.formatted_external_uuid, "Problème déjà réglé")
        mocked_notify_suricate_status.assert_called_once_with(self.report_filed_1.formatted_external_uuid, self.classified_status.identifier, "Problème déjà réglé")

    @override_settings(SURICATE_WORKFLOW_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    @mock.patch("geotrek.feedback.helpers.requests.post")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
    def test_classify_alert_does_not_notify_suricate_when_workflow_disabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
        """Outside workflow mode, classifying a report must not contact Suricate at all."""
        form = ReportForm(
            instance=self.report_filed_2,
            data={
                'geom': self.report_filed_2.geom,
                'email': self.report_filed_2.email,
                'status': self.classified_status.pk,
                'message_sentinel': "Problème déjà réglé"
            }
        )
        self.assertTrue(form.is_valid())  # fixed: call the method
        form.save()
        mocked_mail_sentinel.assert_not_called()
        mocked_notify_suricate_status.assert_not_called()
| import io
import os
import uuid
from unittest import mock
from unittest.mock import MagicMock
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.urls.base import reverse
from django.utils.translation import gettext_lazy as _
from mapentity.tests.factories import SuperUserFactory, UserFactory
from geotrek.authent.tests.factories import UserProfileFactory
from geotrek.common.models import Attachment
from geotrek.feedback.forms import ReportForm
from geotrek.feedback.helpers import SuricateMessenger, SuricateRequestManager
from geotrek.feedback.models import (AttachedMessage, Report, ReportActivity,
ReportProblemMagnitude, ReportStatus, WorkflowManager)
from geotrek.feedback.tests.factories import (ReportFactory,
ReportStatusFactory,
WorkflowManagerFactory)
# Connection settings for the Suricate "standard" (report) API, injected via override_settings.
SURICATE_REPORT_SETTINGS = {
    "URL": "http://suricate.wsstandard.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}
# Connection settings for the Suricate "management" API (same shape, different host).
SURICATE_MANAGEMENT_SETTINGS = {
    "URL": "http://suricate.wsmanagement.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}
# Workflow-mode tuning: timers (in days) and the canned message sent when a report
# is relocated outside the department.
SURICATE_WORKFLOW_SETTINGS = {
    "TIMER_FOR_WAITING_REPORTS_IN_DAYS": 6,
    "TIMER_FOR_PROGRAMMED_REPORTS_IN_DAYS": 7,
    "SURICATE_RELOCATED_REPORT_MESSAGE": "Le Signalement ne concerne pas le Département du Gard - Relocalisé hors du Département"
}
def mocked_json(file_name):
    """Read a JSON fixture from the tests/data directory and return it as UTF-8 bytes."""
    fixture_path = os.path.join(os.path.dirname(__file__), "data", file_name)
    with open(fixture_path, "r") as fixture:
        return fixture.read().encode("UTF-8")
def mocked_image(file_name):
    """Read an image fixture from the tests/data directory and return it as a bytearray."""
    fixture_path = os.path.join(os.path.dirname(__file__), "data", file_name)
    with open(fixture_path, "rb") as fixture:
        return bytearray(fixture.read())
@override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
@override_settings(SURICATE_MANAGEMENT_SETTINGS=SURICATE_MANAGEMENT_SETTINGS)
class SuricateTests(TestCase):
    """Test Suricate API

    Base class providing request-mocking helpers (GET/POST/error/timeout)
    and shared fixtures (a workflow manager and an admin user).
    """
    def build_get_request_patch(self, mocked: MagicMock, cause_JPG_error=False, remove_one_alert=False):
        """Mock get requests to Suricate API

        cause_JPG_error: image URLs answer 404, simulating failed attachment downloads.
        remove_one_alert: serve the 'later' alerts fixture where one alert was relocated.
        """
        def build_response_patch(url, params=None, **kwargs):
            # Route each mocked endpoint to its canned JSON/image fixture.
            mock_response = MagicMock()
            if "GetActivities" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json("suricate_activities.json")
            elif "GetStatusList" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json("suricate_statuses.json")
            elif "GetAlerts" in url and not remove_one_alert:
                mock_response.content = mocked_json("suricate_alerts.json")
                mock_response.status_code = 200
            elif "GetAlerts" in url and remove_one_alert:
                mock_response.content = mocked_json("suricate_alerts_later.json")
                mock_response.status_code = 200
            elif "wsLockAlert" in url or "wsUnlockAlert" in url or "wsUpdateGPS" in url:
                mock_response.content = mocked_json("suricate_positive.json")
                mock_response.status_code = 200
            elif cause_JPG_error:
                # Simulate a broken attachment download before matching image URLs.
                mock_response.status_code = 404
            elif ".jpg" in url or ".png" in url or ".JPG" in url:
                mock_response.content = mocked_image("theme-fauna.png")
                mock_response.status_code = 200
            else:
                mock_response.status_code = 404
            return mock_response
        mocked.side_effect = build_response_patch
    def build_post_request_patch(self, mocked: MagicMock):
        """Mock post requests to Suricate API"""
        def build_response_patch(url, params=None, **kwargs):
            mock_response = MagicMock()
            if "SendReport" in url or "UpdateStatus" in url or "MessageSentinel" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json(
                    "suricate_positive.json"
                )
            else:
                mock_response.status_code = 404
            return mock_response
        mocked.side_effect = build_response_patch
    def build_failed_request_patch(self, mocked: MagicMock):
        """Mock error responses from Suricate API (HTTP 400 with a negative payload)."""
        mock_response = mock.Mock()
        mock_response.content = mocked_json("suricate_negative.json")
        mock_response.status_code = 400
        mocked.return_value = mock_response
    def build_timeout_request_patch(self, mocked: MagicMock):
        """Mock error responses from Suricate API (HTTP 408 request timeout)."""
        mock_response = mock.Mock()
        mock_response.status_code = 408  # request timeout
        mock_response.content = {}
        mocked.return_value = mock_response
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: a workflow manager and an admin used by setUp's login.
        cls.user = UserFactory()
        UserProfileFactory.create(user=cls.user)
        cls.workflow_manager = WorkflowManagerFactory(user=cls.user)
        cls.admin = SuperUserFactory(username="Admin", password="<PASSWORD>")
    def setUp(self):
        self.client.force_login(self.admin)
class SuricateAPITests(SuricateTests):
    """Synchronisation tests against the mocked Suricate API: statuses, activities,
    alerts, attachments, connection checks and report posting.

    Fixes: `assertEquals` (deprecated alias, removed in Python 3.12) replaced by
    `assertEqual`; misplaced mid-function docstrings tidied; one mock patched the
    wrong target (see test_save_on_report_doesnt_post_to_suricate_in_no_suricate_mode).
    """

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_statuses(self, mocked_get, mocked_logger):
        """Test GET requests on Statuses endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", statuses=True)
        self.assertEqual(ReportStatus.objects.count(), 5)
        mocked_logger.info.assert_called_with("New status - id: classified, label: Classé sans suite")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities(self, mocked_get, mocked_logger):
        """Test GET requests on Activities endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", activities=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        mocked_logger.info.assert_called_with("New activity - id: 51, label: Roller, Skateboard")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities_and_statuses(self, mocked):
        """Test GET requests on both Activities and Statuses endpoint creates objects"""
        self.build_get_request_patch(mocked)
        call_command("sync_suricate", activities=True, statuses=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        self.assertEqual(ReportStatus.objects.count(), 5)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.management.commands.sync_suricate.logger")
    def test_command_disabled(self, mocked):
        """Test sync_suricate command is disabled when setting is False"""
        call_command("sync_suricate", activities=True, statuses=True)
        mocked.error.assert_called_with("To use this command, please activate setting SURICATE_MANAGEMENT_ENABLED or SURICATE_WORKFLOW_ENABLED.")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail(self, mocked_get, mocked_logger):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        self.assertIn("New reports have been imported from Suricate", sent_mail.body)
        self.assertIn("Please consult your reports in Geotrek-Admin", sent_mail.body)
        for report in Report.objects.all():
            self.assertIn(report.full_url, sent_mail.body)
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are not assigned to workflow manager when mode is management
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIsNone(r.assigned_user)
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)
        # Test sync specific report overwrites local info
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=r.pk, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Ne pas prendre la route Départementale 155 en direction de Malons")
        # Test sync last report overwrites local info
        r = Report.objects.get(external_uuid="7EE5DF25-5056-AA2B-DDBEEFA5768CD53E")
        self.assertEqual(r.comment, "Lames cassées")
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=0, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Lames cassées")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.ContentFile.__init__")
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_sync_handles_malformed_images(self, mocked_get, mocked_logger, mocked_save):
        """Test Suricate sync is not interrupted by corrupted images"""
        self.build_get_request_patch(mocked_get)
        mocked_save.side_effect = Exception("This image is bad")
        call_command("sync_suricate", verbosity=2)
        mocked_logger.error.assert_called()

    @override_settings(SURICATE_WORKFLOW_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail_and_assign(self, mocked_get):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        # Test update report does not send email and saves
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are assigned to workflow manager
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIn(r.assigned_user.pk, list(WorkflowManager.objects.values_list('user', flat=True)))
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_failed_attachments_are_downloaded_on_next_sync(self, mocked_get, mocked_logger):
        """Test failed requests to download attachments are retried on next sync"""
        self.assertEqual(Attachment.objects.count(), 0)
        # Fail to download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # All attachments are missing their image file
            self.assertFalse(atta.attachment_file.name)
        # Succesfully download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=False)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)
        # Succesfully download all images a second time to cover "skip file" case
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)

    @override_settings(PAPERCLIP_ENABLE_LINK=False)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    def test_sync_needs_paperclip_enabled(self):
        """Sync must refuse to run when Paperclip attachment links are disabled."""
        with self.assertRaises(Exception):
            call_command("sync_suricate", verbosity=2)

    @override_settings(SURICATE_REPORT_ENABLED=True)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_report_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Report Mode"""
        report = Report.objects.create()
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_management_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Management Mode"""
        # Create a report with an UID - emulates report from Suricate
        uid = uuid.uuid4()
        Report.objects.create(external_uuid=uid)
        post_report.assert_not_called()
        # Create a report with no UID - emulates new report from Geotrek
        report = Report.objects.create(external_uuid=None)
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_ENABLED=False)
    # Bug fix: the test asserted `post_report.assert_not_called()` but patched
    # `requests.get` — patch the messenger's post_report like the sibling tests.
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_doesnt_post_to_suricate_in_no_suricate_mode(self, post_report):
        """Test save does not post to suricate on save Report in No Suricate Mode"""
        Report.objects.create()
        post_report.assert_not_called()

    @mock.patch("geotrek.feedback.helpers.requests.post")
    def test_post_request_to_suricate(self, mock_post):
        """Test post request itself
        Request post is mocked
        """
        # Create a report without saving it
        report = ReportFactory.build()
        # Define a mock response
        self.build_post_request_patch(mock_post)
        # Call the function with the report
        result = SuricateMessenger().post_report(report)
        self.assertEqual(result, None)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_2(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 400
        self.build_failed_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test(self, mock_get, mocked_stdout):
        """Assert connection test command outputs OK
        """
        self.build_get_request_patch(mock_get)
        call_command("sync_suricate", test=True)
        # Assert outputs OK
        self.assertEqual(mocked_stdout.getvalue(), 'API Standard :\nOK\nAPI Gestion :\nOK\n')

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_API(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on Suricate API side
        """
        # Mock negative response
        self.build_failed_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 400\nAPI Gestion :\nKO - Status code: 400\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_HTTP(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on HTTP
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 408\nAPI Gestion :\nKO - Status code: 408\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_relocated_report_is_deleted_on_next_sync(self, mocked_get, mocked_logger):
        """Test reports relocated outside of BBOX are deleted on next sync"""
        self.build_get_request_patch(mocked_get, remove_one_alert=False)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 1)
        self.assertEqual(Report.objects.count(), 8)
        # Second sync: serve the fixture where one alert was relocated outside the BBOX.
        self.build_get_request_patch(mocked_get, remove_one_alert=True)
        call_command("sync_suricate", verbosity=2)
        # One out of the 9 was removed from response because this report now lives outside of BBOX according to Suricate
        # 7 out of 8 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 0)
        self.assertEqual(Report.objects.count(), 7)
class SuricateInterfaceTests(SuricateTests):
    """Tests for the Suricate section of the admin dataset-import interface."""
    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_disabled(self, mocked):
        """The Suricate import form is hidden and inoperative when management mode is off."""
        user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(user)
        self.build_get_request_patch(mocked)
        url = reverse('common:import_dataset')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'import-suricate')
        self.assertNotContains(response, _('Data to import from Suricate'))
        # Posting the form anyway must not import anything.
        response = self.client.post(
            url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        self.assertEqual(Report.objects.count(), 0)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.SuricateParser.get_alerts")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_enabled(self, mocked_get, mocked_parser):
        """The Suricate import form is shown and triggers the parser when management mode is on."""
        user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(user)
        self.build_get_request_patch(mocked_get)
        url = reverse('common:import_dataset')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'import-suricate')
        self.assertContains(response, _('Data to import from Suricate'))
        response = self.client.post(
            url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        self.assertEqual(response.status_code, 200)
        mocked_parser.assert_called_once()
    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")
class SuricateWorkflowTests(SuricateTests):
    """Base class for workflow tests: pre-creates every ReportStatus used by the workflow."""
    fixtures = ['geotrek/maintenance/fixtures/basic.json']
    @classmethod
    def setUpTestData(cls):
        SuricateTests.setUpTestData()
        # One ReportStatus per Suricate workflow state.
        cls.filed_status = ReportStatusFactory(identifier='filed', label="Déposé")
        cls.classified_status = ReportStatusFactory(identifier='classified', label="Classé sans suite")
        cls.programmed_status = ReportStatusFactory(identifier='programmed', label="Programmé")
        cls.waiting_status = ReportStatusFactory(identifier='waiting', label="En cours")
        cls.rejected_status = ReportStatusFactory(identifier='rejected', label="Rejeté")
        cls.late_intervention_status = ReportStatusFactory(identifier='late_intervention', label="Intervention en retard")
        cls.late_resolution_status = ReportStatusFactory(identifier='late_resolution', label="Resolution en retard")
        cls.solved_intervention_status = ReportStatusFactory(identifier='solved_intervention', label="Intervention terminée")
        cls.resolved_status = ReportStatusFactory(identifier='solved', label="Résolu")
        cls.report = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4())
        # NOTE(review): overrides the parent's admin with a distinct username ("Admiin"),
        # presumably to avoid a username collision — confirm the double "i" is intentional.
        cls.admin = SuperUserFactory(username="Admiin", password="<PASSWORD>")
        cls.interv_report = ReportFactory(status=cls.programmed_status)
def raise_multiple(exceptions):
    """Raise every exception in *exceptions* (the list is consumed in place).

    The last entry is raised first; each earlier one is raised from the
    recursive ``finally`` block, so the first list entry is the one that
    ultimately propagates, with the others chained as ``__context__``.
    """
    if exceptions:
        try:
            raise exceptions.pop()
        finally:
            raise_multiple(exceptions)
def test_for_all_suricate_modes(test_func):
    """Decorator: run *test_func* once per Suricate mode and report every failure at once."""
    modes = (
        # (label, REPORT_ENABLED, MANAGEMENT_ENABLED, WORKFLOW_ENABLED)
        ("No Suricate", False, False, False),
        ("Suricate Report", True, False, False),
        ("Suricate Management", True, True, False),
        ("Suricate Workflow", False, False, True),
    )
    def inner(self, *args, **kwargs):
        exceptions = []
        for label, report, management, workflow in modes:
            try:
                with override_settings(SURICATE_REPORT_ENABLED=report,
                                       SURICATE_MANAGEMENT_ENABLED=management,
                                       SURICATE_WORKFLOW_ENABLED=workflow,
                                       LANGUAGE_CODE='fr'):
                    test_func(self, *args, **kwargs)
            except AssertionError as err:
                err.args += ("Failed for '%s' mode" % label,)
                exceptions.append(err)
        raise_multiple(exceptions)
    return inner
def test_for_report_and_basic_modes(test_func):
    """Decorator: run *test_func* in 'No Suricate' and 'Suricate Report' modes, collecting failures."""
    modes = (
        ("No Suricate", False),
        ("Suricate Report", True),
    )
    def inner(self, *args, **kwargs):
        exceptions = []
        for label, report_enabled in modes:
            try:
                with override_settings(SURICATE_REPORT_ENABLED=report_enabled,
                                       SURICATE_MANAGEMENT_ENABLED=False,
                                       SURICATE_WORKFLOW_ENABLED=False,
                                       LANGUAGE_CODE='fr'):
                    test_func(self, *args, **kwargs)
            except AssertionError as err:
                err.args += ("Failed for '%s' mode" % label,)
                exceptions.append(err)
        raise_multiple(exceptions)
    return inner
def test_for_management_and_workflow_modes(test_func):
    """Decorator: run *test_func* under both 'Suricate Management' and
    'Suricate Workflow' settings, collecting failures from each mode."""
    def inner(self, *args, **kwargs):
        exceptions = []
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Management' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=True, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Workflow' mode",)
            # Bug fix: the exception was annotated but never appended, so a
            # workflow-mode failure was silently swallowed.
            exceptions.append(e)
        raise_multiple(exceptions)
    return inner
def test_for_workflow_mode(test_func):
    """Decorator: run *test_func* with full Suricate Workflow settings, tagging failures."""
    def inner(self, *args, **kwargs):
        try:
            workflow_settings = override_settings(
                SURICATE_REPORT_ENABLED=True,
                SURICATE_MANAGEMENT_ENABLED=True,
                SURICATE_WORKFLOW_ENABLED=True,
                LANGUAGE_CODE='fr',
            )
            with workflow_settings:
                test_func(self, *args, **kwargs)
        except AssertionError as err:
            err.args += ("Failed for 'Suricate Workflow' mode",)
            raise
    return inner
def test_for_management_mode(test_func):
    """Decorator: run *test_func* with Suricate Management settings, tagging failures."""
    def inner(self, *args, **kwargs):
        try:
            management_settings = override_settings(
                SURICATE_REPORT_ENABLED=True,
                SURICATE_MANAGEMENT_ENABLED=True,
                SURICATE_WORKFLOW_ENABLED=False,
                LANGUAGE_CODE='fr',
            )
            with management_settings:
                test_func(self, *args, **kwargs)
        except AssertionError as err:
            err.args += ("Failed for 'Suricate Management' mode",)
            raise
    return inner
class TestWorkflowFirstSteps(SuricateWorkflowTests):
    """Workflow first steps: classifying a freshly filed report, with and without workflow mode."""
    @classmethod
    def setUpTestData(cls):
        SuricateWorkflowTests.setUpTestData()
        # Two filed reports assigned to the admin, one per test below.
        cls.report_filed_1 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)
        cls.report_filed_2 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)
@override_settings(SURICATE_WORKFLOW_ENABLED=True)
@mock.patch("geotrek.feedback.helpers.requests.get")
@mock.patch("geotrek.feedback.helpers.requests.post")
@mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
@mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
def test_classify_alert_notifies_suricate_when_workflow_enabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
form = ReportForm(
instance=self.report_filed_1,
data={
'geom': self.report_filed_1.geom,
'email': self.report_filed_1.email,
'status': self.classified_status.pk,
'message_sentinel': "Problème déjà réglé"
}
)
self.assertTrue(form.is_valid)
form.save()
mocked_mail_sentinel.assert_called_once_with(self.report_filed_1.formatted_external_uuid, "Problème déjà réglé")
mocked_notify_suricate_status.assert_called_once_with(self.report_filed_1.formatted_external_uuid, self.classified_status.identifier, "Problème déjà réglé")
@override_settings(SURICATE_WORKFLOW_ENABLED=False)
@mock.patch("geotrek.feedback.helpers.requests.get")
@mock.patch("geotrek.feedback.helpers.requests.post")
@mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
@mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
def test_classify_alert_does_not_notify_suricate_when_workflow_disabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
form = ReportForm(
instance=self.report_filed_2,
data={
'geom': self.report_filed_2.geom,
'email': self.report_filed_2.email,
'status': self.classified_status.pk,
'message_sentinel': "Problème déjà réglé"
}
)
self.assertTrue(form.is_valid)
form.save()
mocked_mail_sentinel.assert_not_called()
mocked_notify_suricate_status.assert_not_called() | en | 0.876297 | Test Suricate API Mock get requests to Suricate API Mock post requests to Suricate API Mock error responses from Suricate API Mock error responses from Suricate API # reqest timeout Test GET requests on Statuses endpoint creates statuses objects Test GET requests on Activities endpoint creates statuses objects Test GET requests on both Activities and Statuses endpoint creates objects Test sync_suricate command is disabled when setting is False Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email # 8 out of 9 are imported because one of them is out of bbox by design # Fetch it again to verify 'super.save' was called (management mode) # Test new filed report are not assigned to workflow manager when mode is management # Assert no new mail on update # Test sync specific report overwrites local info # Test sync last report overwrites local info Test Suricate sync is not interupted by corruped images Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email # 8 out of 9 are imported because one of them is out of bbox by design # Test update report does not send email and saves # Fetch it again to verify 'super.save' was called (management mode) # Test new filed report are assigned to workflow manager # Assert no new mail on update Test failed requests to download attachments are retried on next sync # Fail to download all images # All attachments are missing their image file # Succesfully download all images # No attachments are missing their image file # Succesfully download all images a second time to cover "skip file" case # No attachments are missing their image file Test failed requests to download attachments are retried on next sync Test post to suricate on save Report in Suricate Report Mode Test post to suricate on save Report in Suricate Management Mode # Create a report with an UID - emulates report from Suricate # 
Create a report with no UID - emulates new report from Geotrek Test save does not post to suricate on save Report in No Suricate Mode Test post request itself Request post is mock # Create a report without saving it # Define a mock response # Call the function with the report Test get request itself fails # Mock error 408 # Get raises an exception Test get request itself fails # Mock error 400 # Get raises an exception Assert connection test command outputs OK # Mock error 408 # Assert outputs OK Assert connection test command outputs error when it fails on Suricate API side # Mock negative response # Assert outputs KO Assert connection test command outputs error when it fails on HTTP # Mock error 408 # Assert outputs KO Test reports relocated outside of BBOX are deleted on next sync # 8 out of 9 are imported because one of them is out of bbox by design Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email # One out of the 9 was removed from response because this report now lives outside of BBOX according to Suricate # 7 out of 8 are imported because one of them is out of bbox by design # mocked_parser = mock.Mock() Test get request itself fails # Mock error 408 # Get raises an exception # list emptied, recursion ends # pop removes list entries # recursion | 1.858662 | 2 |
align_rudder/envs/mazes/eight_rooms.py | ml-jku/align-rudder | 12 | 6616393 | <reponame>ml-jku/align-rudder<filename>align_rudder/envs/mazes/eight_rooms.py
import numpy as np
def _entrance_policies(width):
    """Build the four optimal policies for a first-layer room whose single
    door sits in the middle of the north, west, east or south wall.

    Action encoding (inferred from consistent usage across all policies —
    confirm against the environment): 0 = up, 1 = down, 2 = left, 3 = right.

    Returns (top, left, right, down) policy grids of shape (width, width).
    """
    half = int(width / 2)
    top = np.zeros([width, width], dtype=np.uint8)
    top[1, half:-1] = 2       # right of the door: walk left
    top[1, 0:half] = 3        # left of the door: walk right
    top[1, half] = 0          # below the door: step up through it
    left = np.zeros([width, width], dtype=np.uint8)
    left[:, :] = 2            # default: head for the west wall
    left[0:half, 1] = 1
    left[half:-1, 1] = 0
    left[half, 1] = 2
    right = np.zeros([width, width], dtype=np.uint8)
    right[:, :] = 3           # default: head for the east wall
    right[0:half, width - 2] = 1
    right[half:-1, width - 2] = 0
    right[half, width - 2] = 3
    down = np.zeros([width, width], dtype=np.uint8)
    down[:, :] = 1            # default: head for the south wall
    down[width - 2, 0:half] = 3
    down[width - 2, half:] = 2
    down[width - 2, half] = 1
    return top, left, right, down


def _chain_room_policy(width, exit_right):
    """Optimal policy for an intermediate chain room (rooms 2..7).

    The agent enters through the north-wall door and must leave through the
    east wall (exit_right=True, rooms 2/4/6) or the west wall
    (exit_right=False, rooms 3/5/7).
    """
    half = int(width / 2)
    col = width - 2 if exit_right else 1
    pol = np.zeros([width, width], dtype=np.uint8)
    pol[0, half] = 1                         # step down out of the entry door
    pol[1:, :] = 3 if exit_right else 2      # sweep toward the exit wall
    pol[0:half, col] = 1                     # align vertically with the exit
    pol[half:, col] = 0
    pol[half, col] = 3 if exit_right else 2  # step through the exit door
    return pol


def rooms_maze(width, rooms):
    """Build the eight-rooms maze used by the environment.

    Layout: ``rooms - 7`` first-layer rooms, each with one door on a random
    wall that leads into room 2; then a fixed chain of rooms 2..8 whose
    exits alternate east (rooms 2/4/6) and west (rooms 3/5/7); room 8 holds
    the goal. Assumes ``width >= 13`` and ``rooms >= 8``.

    Parameters
    ----------
    width : int
        Side length of each square room grid.
    rooms : int
        Total number of room layers.

    Returns
    -------
    x : np.ndarray, (width, width, rooms), uint8
        Maze grid: 1 = wall, 4 = door, 3 = goal (at [-2, -2] of the last
        room), 0 = free cell.
    doors : list
        ``[row, col, room]`` coordinates of every door, in creation order.
    paired_doors : list
        ``[door, [linked_doors...], action]`` entries describing which
        door(s) a door connects to and the action that traverses it.
    x_optimal_policy : np.ndarray, (width, width, rooms), uint8
        Per-room optimal policy (0=up, 1=down, 2=left, 3=right).
    """
    # Width should be greater than or equal to 13.
    np.random.seed(2)  # fixed seed keeps the random first-layer doors reproducible
    half = int(width / 2)
    x = np.zeros([width, width, rooms], dtype=np.uint8)
    x_optimal_policy = np.zeros([width, width, rooms], dtype=np.uint8)
    # Outer walls of every room.
    x[0, :, :] = 1
    x[:, 0, :] = 1
    x[:, -1, :] = 1
    x[-1, :, :] = 1

    n = rooms - 7       # number of random first-layer rooms
    room2 = rooms - 7   # layer index of room 2, the first chain room

    top_door, left_door, right_door, down_door = _entrance_policies(width)

    doors = []
    paired_doors = []
    # First-layer rooms: one door on a random wall, always leading to
    # room 2's north door.
    for i in range(n):
        wall = np.random.randint(4)
        if wall == 0:    # north wall
            door = [0, half, i]
            policy = top_door
            direction = 0
        elif wall == 1:  # west wall
            door = [half, 0, i]
            policy = left_door
            direction = 2
        elif wall == 2:  # east wall
            # BUGFIX: the paired entry previously used column -1 while the
            # doors list used width-1; both now use the explicit width-1.
            door = [half, width - 1, i]
            policy = right_door
            direction = 3
        else:            # south wall
            door = [width - 1, half, i]
            policy = down_door
            direction = 1
        doors.append(door)
        paired_doors.append([door, [[0, half, room2]], direction])
        x[door[0], door[1], i] = 4
        x_optimal_policy[:, :, i] = policy

    init_doors = len(doors)  # all random doors feed room 2's entry

    # Chain rooms 2..7. Each entry door pairs with the previous room's exit
    # (room 2's entry pairs with every random first-layer door).
    prev_exit = None
    for k in range(2, 8):
        layer = room2 + (k - 2)
        exit_right = (k % 2 == 0)
        entry = [0, half, layer]
        exit_door = [half, width - 1, layer] if exit_right else [half, 0, layer]
        doors.append(entry)
        doors.append(exit_door)
        x[entry[0], entry[1], layer] = 4
        x[exit_door[0], exit_door[1], layer] = 4
        if k == 2:
            paired_doors.append([entry, doors[0:init_doors], 0])
        else:
            # BUGFIX: room 3's entry previously paired with layer rooms-6
            # (its own layer) instead of room 2's exit at rooms-7; every
            # entry now pairs with the previous room's exit door.
            paired_doors.append([entry, [prev_exit], 0])
        paired_doors.append([exit_door, [[0, half, layer + 1]],
                             3 if exit_right else 2])
        x_optimal_policy[:, :, layer] = _chain_room_policy(width, exit_right)
        prev_exit = exit_door

    # Room 8: entry in the north wall; policy walks down, then right along
    # the bottom row toward the goal in the bottom-right corner.
    entry = [0, half, rooms - 1]
    doors.append(entry)
    x[0, half, rooms - 1] = 4
    paired_doors.append([entry, [prev_exit], 0])
    room_8 = np.zeros([width, width], dtype=np.uint8)
    room_8[:, :] = 1
    room_8[width - 2, :] = 3
    x_optimal_policy[:, :, rooms - 1] = room_8
    x[-2, -2, -1] = 3  # goal cell
    return x, doors, paired_doors, x_optimal_policy
| import numpy as np
def rooms_maze(width, rooms):
# Width should be greater than equal to 13
np.random.seed(2)
x = np.zeros([width, width, rooms], dtype=np.uint8)
x_optimal_policy = np.zeros([width, width, rooms], dtype=np.uint8)
x[0, :, :] = 1
x[:, 0, :] = 1
x[:, -1, :] = 1
x[-1, :, :] = 1
n = rooms - 7
# optimal policies for each door type
# Door in North
top_door = np.zeros([width, width], dtype=np.uint8)
top_door[1, int(width/2):-1] = 2
top_door[1, 0:int(width/2)] = 3
top_door[1, int(width/2)] = 0
# Door in West
left_door = np.zeros([width, width], dtype=np.uint8)
left_door[:, :] = 2
left_door[0:int(width/2), 1] = 1
left_door[int(width/2):-1, 1] = 0
left_door[int(width/2), 1] = 2
# Door in East
right_door = np.zeros([width, width], dtype=np.uint8)
right_door[:, :] = 3
right_door[0:int(width/2), width - 2] = 1
right_door[int(width/2):-1, width - 2] = 0
right_door[int(width/2), width - 2] = 3
# Door in south
down_door = np.zeros([width, width], dtype=np.uint8)
down_door[:, :] = 1
down_door[width - 2, 0:int(width/2)] = 3
down_door[width - 2, int(width/2):] = 2
down_door[width - 2, int(width/2)] = 1
# Room 2 policy
room_2 = np.zeros([width, width], dtype=np.uint8)
room_2[0, int(width/2)] = 1
room_2[1:, :] = 3
room_2[0:int(width/2), width - 2] = 1
room_2[int(width/2):, width - 2] = 0
room_2[int(width/2), width - 2] = 3
# Room 3 Policy
room_3 = np.zeros([width, width], dtype=np.uint8)
room_3[0, int(width/2)] = 1
room_3[1:, :] = 2
room_3[0:int(width/2), 1] = 1
room_3[int(width/2):, 1] = 0
room_3[int(width/2), 1] = 2
# Room 4 policy
room_4 = np.zeros([width, width], dtype=np.uint8)
room_4[0, int(width/2)] = 1
room_4[1:, :] = 3
room_4[0:int(width/2), width - 2] = 1
room_4[int(width/2):, width - 2] = 0
room_4[int(width/2), width - 2] = 3
# Room 5 Policy
room_5 = np.zeros([width, width], dtype=np.uint8)
room_5[0, int(width/2)] = 1
room_5[1:, :] = 2
room_5[0:int(width/2), 1] = 1
room_5[int(width/2):, 1] = 0
room_5[int(width/2), 1] = 2
# Room 6 policy
room_6 = np.zeros([width, width], dtype=np.uint8)
room_6[0, int(width/2)] = 1
room_6[1:, :] = 3
room_6[0:int(width/2), width - 2] = 1
room_6[int(width/2):, width - 2] = 0
room_6[int(width/2), width - 2] = 3
# Room 5 Policy
room_7 = np.zeros([width, width], dtype=np.uint8)
room_7[0, int(width/2)] = 1
room_7[1:, :] = 2
room_7[0:int(width/2), 1] = 1
room_7[int(width/2):, 1] = 0
room_7[int(width/2), 1] = 2
# Room 8 Policy
room_8 = np.zeros([width, width], dtype=np.uint8)
room_8[:, :] = 1
room_8[width - 2, :] = 3
doors = []
paired_doors = []
for i in range(n):
wall = np.random.randint(4)
if wall == 0:
doors.append([0, int(width/2), i])
paired_doors.append([[0, int(width/2), i], [[0, int(width/2), rooms - 7]], 0])
x[0, int(width/2), i] = 4
# North/top door: add to the optimal policy
x_optimal_policy[:, :, i] = top_door
elif wall == 1:
doors.append([int(width/2), 0, i])
paired_doors.append([[int(width/2), 0, i], [[0, int(width/2), rooms - 7]], 2])
x[int(width/2), 0, i] = 4
# Left door
x_optimal_policy[:, :, i] = left_door
elif wall == 2:
doors.append([int(width/2), width-1, i])
paired_doors.append([[int(width/2), -1, i], [[0, int(width / 2), rooms - 7]], 3])
x[int(width/2), width - 1, i] = 4
# Right door
x_optimal_policy[:, :, i] = right_door
else:
doors.append([width-1, int(width/2), i])
paired_doors.append([[width-1, int(width/2), i], [[0, int(width / 2), rooms - 7]], 1])
x[width-1, int(width/2), i] = 4
# down door
x_optimal_policy[:, :, i] = down_door
room_door_8 = 1
room_door_7 = 2
room_door_6 = 2
room_door_5 = 2
room_door_4 = 2
room_door_3 = 2
room_door_2 = 2
const_doors = room_door_8 + room_door_7 + room_door_6 + room_door_5 +\
room_door_4 + room_door_3 + room_door_2
init_doors = len(doors)
# Room 2
doors.append([0, int(width/2), rooms - 7])
doors.append([int(width/2), width-1, rooms - 7])
x[0, int(width/2), rooms - 7] = 4
x[int(width / 2), width-1, rooms - 7] = 4
# Entry door
paired_doors.append([[0, int(width / 2), rooms - 7], doors[0:init_doors], 0])
# Exit door
paired_doors.append([[int(width/2), width-1, rooms - 7], [[0, int(width/2), rooms - 6]], 3])
# Optimal Policy
x_optimal_policy[:, :, rooms - 7] = room_2
# Room 3
doors.append([0, int(width/2), rooms - 6])
doors.append([int(width / 2), 0, rooms - 6])
x[0, int(width/2), rooms - 6] = 4
x[int(width/2), 0, rooms - 6] = 4
# Entry door
paired_doors.append([[0, int(width/2), rooms - 6], [[int(width/2), width-1, rooms - 6]], 0])
# Exit door
paired_doors.append([[int(width / 2), 0, rooms - 6], [[0, int(width/2), rooms - 5]], 2])
# Optimal Policy
x_optimal_policy[:, :, rooms - 6] = room_3
# Room 4
doors.append([0, int(width/2), rooms - 5])
doors.append([int(width / 2), width - 1, rooms - 5])
x[0, int(width/2), rooms - 5] = 4
x[int(width/2), width - 1, rooms - 5] = 4
# Entry door
paired_doors.append([[0, int(width/2), rooms - 5], [[int(width/2), 0, rooms - 6]], 0])
# Exit door
paired_doors.append([[int(width / 2), width - 1, rooms - 5], [[0, int(width/2), rooms - 4]], 3])
# Optimal Policy
x_optimal_policy[:, :, rooms - 5] = room_4
# Room 5
doors.append([0, int(width/2), rooms - 4])
doors.append([int(width / 2), 0, rooms - 4])
x[0, int(width/2), rooms - 4] = 4
x[int(width/2), 0, rooms - 4] = 4
# Entry door
paired_doors.append([[0, int(width/2), rooms - 4], [[int(width/2), width-1, rooms - 5]], 0])
# Exit door
paired_doors.append([[int(width / 2), 0, rooms - 4], [[0, int(width/2), rooms - 3]], 2])
# Optimal Policy
x_optimal_policy[:, :, rooms - 4] = room_5
# Room 6
doors.append([0, int(width/2), rooms - 3])
doors.append([int(width / 2), width - 1, rooms - 3])
x[0, int(width/2), rooms - 3] = 4
x[int(width/2), width - 1, rooms - 3] = 4
# Entry door
paired_doors.append([[0, int(width/2), rooms - 3], [[int(width/2), 0, rooms - 4]], 0])
# Exit door
paired_doors.append([[int(width / 2), width - 1, rooms - 3], [[0, int(width/2), rooms - 2]], 3])
# Optimal Policy
x_optimal_policy[:, :, rooms - 3] = room_6
# Room 7
doors.append([0, int(width/2), rooms - 2])
doors.append([int(width / 2), 0, rooms - 2])
x[0, int(width/2), rooms - 2] = 4
x[int(width/2), 0, rooms - 2] = 4
# Entry door
paired_doors.append([[0, int(width/2), rooms - 2], [[int(width/2), width-1, rooms - 3]], 0])
# Exit door
paired_doors.append([[int(width / 2), 0, rooms - 2], [[0, int(width/2), rooms - 1]], 2])
# Optimal Policy
x_optimal_policy[:, :, rooms - 2] = room_7
# Room 8
doors.append([0, int(width/2), rooms - 1])
x[0, int(width/2), rooms - 1] = 4
paired_doors.append([[0, int(width/2), rooms - 1], [[int(width / 2), 0, rooms - 2]], 0])
# Optimal Policy
x_optimal_policy[:, :, rooms - 1] = room_8
x[-2, -2, -1] = 3
return x, doors, paired_doors, x_optimal_policy | en | 0.65808 | # Width should be greater than equal to 13 # optimal policies for each door type # Door in North # Door in West # Door in East # Door in south # Room 2 policy # Room 3 Policy # Room 4 policy # Room 5 Policy # Room 6 policy # Room 5 Policy # Room 8 Policy # North/top door: add to the optimal policy # Left door # Right door # down door # Room 2 # Entry door # Exit door # Optimal Policy # Room 3 # Entry door # Exit door # Optimal Policy # Room 4 # Entry door # Exit door # Optimal Policy # Room 5 # Entry door # Exit door # Optimal Policy # Room 6 # Entry door # Exit door # Optimal Policy # Room 7 # Entry door # Exit door # Optimal Policy # Room 8 # Optimal Policy | 3.008015 | 3 |
Residual/CIFAR10_pipeline.py | KWYi/PytorchStudy | 0 | 6616394 | import os
import numpy as np
import PIL
import random
from torch.utils.data import Dataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Lambda, ToTensor
from PIL import Image
class CustomCIFAR10(Dataset):
    """CIFAR-10 dataset with per-pixel mean normalization and, for the
    training split only, zero-pad + random-crop augmentation.

    ``__getitem__`` returns ``(image_tensor, label)``.
    """

    def __init__(self, train=True):
        """Load CIFAR-10 and precompute the per-pixel mean image.

        NOTE(review): the mean is computed over whichever split ``train``
        selects, so with ``train=False`` it is the *test* images' mean, not
        the training statistics — confirm this is intended.
        """
        super(CustomCIFAR10, self).__init__()
        self.cifar10_train = CIFAR10(root='datasets', train=train, download=True)
        # self.cifar10_train => index[0] = PIL image, index[1] = label
        images = [np.array(self.cifar10_train[i][0])
                  for i in range(len(self.cifar10_train))]
        self.per_pixel_mean_grid = np.mean(images, axis=0).astype(np.float32)
        print(self.per_pixel_mean_grid.shape)
        if not train:
            self.cifar10_test = CIFAR10(root='datasets', train=train, download=False)
        self.train = train

    def __getitem__(self, index):
        """Return the transformed (image, label) pair at ``index``."""
        transforms = [Lambda(self.__to_numpy),
                      Lambda(self.__per_pixel_mean_normalization)]
        if self.train:
            # Augmentation applies to the training split only.
            # if random.random() > 0.5:
            #     transforms.append(Lambda(self.__horizontal_flip))
            transforms.append(Lambda(self.__pad_and_random_crop))
        transforms.append(ToTensor())
        transforms = Compose(transforms)
        if self.train:
            return transforms(self.cifar10_train[index][0]), self.cifar10_train[index][1]
        return transforms(self.cifar10_test[index][0]), self.cifar10_test[index][1]

    def __len__(self):
        """Number of samples in the selected split."""
        if self.train:
            return len(self.cifar10_train)
        return len(self.cifar10_test)

    # Transform helpers (bound methods wrapped in Lambda above).
    def __to_numpy(self, x):
        """Convert a PIL image to a float32 numpy array (raises if not PIL)."""
        assert isinstance(x, PIL.Image.Image)
        return np.array(x).astype(np.float32)

    def __per_pixel_mean_normalization(self, x):
        """Subtract the precomputed per-pixel mean, then scale by 1/255."""
        return (x - self.per_pixel_mean_grid) / 255.

    def __pad_and_random_crop(self, x):
        """Zero-pad by 4 px on each side, then crop a random 32x32 window.

        BUGFIX: ``random.randint`` is inclusive on both ends, so the valid
        crop offsets are 0..2*p (9 positions for p=4); the original used
        ``randint(0, 2*p - 1)`` and could never select the bottom/right-most
        crop position.
        """
        p = 4
        x = np.pad(x, ((p, p), (p, p), (0, 0)), mode='constant', constant_values=0.)
        y_index = random.randint(0, 2 * p)
        x_index = random.randint(0, 2 * p)
        return x[y_index: y_index + 32, x_index: x_index + 32, :]
if __name__=='__main__':
dataset = CustomCIFAR10() | import os
import numpy as np
import PIL
import random
from torch.utils.data import Dataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Lambda, ToTensor
from PIL import Image
class CustomCIFAR10(Dataset):
def __init__(self, train=True):
super(CustomCIFAR10, self).__init__()
self.cifar10_train = CIFAR10(root='datasets', train = train, download=True)
# self._cifar10_train => index[0] = data, index[1] = label, format = PIL
images = list()
for i in range(len(self.cifar10_train)):
images.append(np.array(self.cifar10_train[i][0]))
self.per_pixel_mean_grid = np.mean(images, axis=0).astype(np.float32)
print(self.per_pixel_mean_grid.shape)
if not train:
self.cifar10_test = CIFAR10(root='datasets', train=train, download=False)
self.train = train
def __getitem__(self, index):
transforms = list()
transforms.append(Lambda(self.__to_numpy))
transforms.append(Lambda(self.__per_pixel_mean_normalization))
if self.train:
# if random.random() > 0.5:
# transforms.append(Lambda(self.__horizontal_flip))
transforms.append(Lambda(self.__pad_and_random_crop))
transforms.append(ToTensor())
transforms = Compose(transforms)
if self.train:
return transforms(self.cifar10_train[index][0]), self.cifar10_train[index][1]
else:
return transforms(self.cifar10_test[index][0]), self.cifar10_test[index][1]
def __len__(self):
if self.train:
return len(self.cifar10_train)
else:
return len(self.cifar10_test)
# Static Methods
def __to_numpy(self, x):
assert isinstance(x, PIL.Image.Image) # assert: 뒤의 객체가 True가 아니면 Error Raise
return np.array(x).astype(np.float32)
def __per_pixel_mean_normalization(self, x):
return (x-self.per_pixel_mean_grid)/255.
def __pad_and_random_crop(self, x):
p = 4
x = np.pad(x, ((p,p), (p,p), (0,0)), mode='constant', constant_values=0.)
y_index = random.randint(0, 2*p -1)
x_index = random.randint(0, 2*p -1)
x = x[y_index: y_index+32, x_index: x_index+32, :]
return x
if __name__=='__main__':
dataset = CustomCIFAR10() | en | 0.185934 | # self._cifar10_train => index[0] = data, index[1] = label, format = PIL # if random.random() > 0.5: # transforms.append(Lambda(self.__horizontal_flip)) # Static Methods # assert: 뒤의 객체가 True가 아니면 Error Raise | 2.513473 | 3 |
metrics/__init__.py | vios-s/DGNet | 28 | 6616395 | <gh_stars>10-100
from .dice_loss import *
from .focal_loss import *
from .gan_loss import * | from .dice_loss import *
from .focal_loss import *
from .gan_loss import * | none | 1 | 0.943133 | 1 | |
Dataset/Leetcode/test/62/442.py | kkcookies99/UAST | 0 | 6616396 | <gh_stars>0
class Solution:
    def XXX(self, m: int, n: int) -> int:
        """Count the distinct monotone (down/right) paths across an m x n
        grid, using a rolling one-dimensional DP row instead of the full
        (m+1) x (n+1) table."""
        row = [0] * (n + 1)
        row[1] = 1  # seed: one way to stand on the start cell
        for _ in range(m):
            for col in range(1, n + 1):
                row[col] += row[col - 1]
        return row[n]
| class Solution:
def XXX(self, m: int, n: int) -> int:
f = [[0 for c in range(n+1)] for r in range(m+1)]
f[1][1] = 1
for r in range(1,m+1):
for c in range(1,n+1):
f[r][c] = f[r][c] + f[r-1][c] + f[r][c-1]
return f[m][n] | none | 1 | 3.067096 | 3 | |
planer-uwr-scraping/scrap.py | Florian3k/planer-uwr | 1 | 6616397 | <reponame>Florian3k/planer-uwr
import os
from scrapy.settings import Settings
from scrapy.crawler import CrawlerProcess
from spiders.courses import CoursesSpider
from spiders.offer import OfferSpider
if __name__ == '__main__':
    basedir = os.path.dirname(__file__)
    # Make sure the crawlers' output directory exists before they start writing.
    if not os.path.exists(os.path.join(basedir, 'output')):
        os.makedirs(os.path.join(basedir, 'output'))
    # One shared Scrapy process/reactor for both spiders.
    process = CrawlerProcess(settings=Settings({
        'LOG_LEVEL': 'ERROR',
        'FEED_FORMAT': 'json',
        'COOKIES_ENABLED': False,
        'ROBOTSTXT_OBEY': False,
        # Present as a desktop Chrome browser.
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) ' +
                      'Chrome/85.0.4183.102 Safari/537.36'
    }))
    process.crawl(OfferSpider)
    process.crawl(CoursesSpider)
    process.start()  # blocks until both spiders finish
| import os
from scrapy.settings import Settings
from scrapy.crawler import CrawlerProcess
from spiders.courses import CoursesSpider
from spiders.offer import OfferSpider
if __name__ == '__main__':
basedir = os.path.dirname(__file__)
if not os.path.exists(os.path.join(basedir, 'output')):
os.makedirs(os.path.join(basedir, 'output'))
process = CrawlerProcess(settings=Settings({
'LOG_LEVEL': 'ERROR',
'FEED_FORMAT': 'json',
'COOKIES_ENABLED': False,
'ROBOTSTXT_OBEY': False,
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) ' +
'Chrome/85.0.4183.102 Safari/537.36'
}))
process.crawl(OfferSpider)
process.crawl(CoursesSpider)
process.start() | none | 1 | 2.100123 | 2 | |
src/naovoce/urls.py | jsmesami/naovoce | 18 | 6616398 | from django.contrib import admin
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
import utils.views
import user.views
import naovoce.views
app_name = "naovoce"
# URL routes: admin, versioned REST API, the feature apps, allauth
# authentication, plus plain-text robots.txt and the fruit map page.
urlpatterns = [
    url(r'^fruitadmin/', admin.site.urls),
    url(r'^api/v1/', include('naovoce.api.urls', namespace='api')),
    url(r'^fruit/', include('fruit.urls', namespace='fruit')),
    url(r'^gallery/', include('gallery.urls', namespace='gallery')),
    url(r'^newsletter/', include('newsletter.urls', namespace='newsletter')),
    url(r'^pickers/', include('user.urls', namespace='pickers')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^accounts/profile/$', user.views.accounts_profile),
    url(r'^robots\.txt$', utils.views.plain_text_view, dict(template_name='robots.txt'), name='robots'),
    url(r'^map/$', naovoce.views.map_view, name='map'),
]
# Serve uploaded media through Django (Django's static() helper only adds
# these routes when DEBUG is on).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.contrib import admin
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
import utils.views
import user.views
import naovoce.views
app_name = "naovoce"
urlpatterns = [
url(r'^fruitadmin/', admin.site.urls),
url(r'^api/v1/', include('naovoce.api.urls', namespace='api')),
url(r'^fruit/', include('fruit.urls', namespace='fruit')),
url(r'^gallery/', include('gallery.urls', namespace='gallery')),
url(r'^newsletter/', include('newsletter.urls', namespace='newsletter')),
url(r'^pickers/', include('user.urls', namespace='pickers')),
url(r'^accounts/', include('allauth.urls')),
url(r'^accounts/profile/$', user.views.accounts_profile),
url(r'^robots\.txt$', utils.views.plain_text_view, dict(template_name='robots.txt'), name='robots'),
url(r'^map/$', naovoce.views.map_view, name='map'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| none | 1 | 1.926821 | 2 | |
app/problem/models.py | pushyzheng/docker-oj-web | 2 | 6616399 | # encoding:utf-8
from app import db
from datetime import datetime
from utils import ModelParent
class Problem(db.Model, ModelParent):
    """SQLAlchemy model for an online-judge problem: statement, limits,
    difficulty, author and labels."""
    __tablename__ = 'problems'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(32))
    content = db.Column(db.Text)
    content_html = db.Column(db.Text)  # presumably the rendered HTML of `content` — confirm
    time_limit = db.Column(db.Integer)  # time limit
    memory_limit = db.Column(db.Integer)  # memory limit
    passing_rate = db.Column(db.Float)  # passing rate
    difficulty = db.Column(db.String(10))  # difficulty
    author = db.Column(db.String(32), db.ForeignKey("users.id"))  # author (FK to users.id)
    labels = db.relationship('Label', backref='problem', lazy='dynamic')  # labels
    timestamp = db.Column(db.DateTime, default=datetime.now)

    def to_dict(self):
        """Serialize to a dict via ModelParent.to_dict(), flattening
        `labels` to a list of label names."""
        result = super().to_dict()
        labels = []
        for each in self.labels:
            labels.append(each.name)
        result['labels'] = labels
        return result
| # encoding:utf-8
from app import db
from datetime import datetime
from utils import ModelParent
class Problem(db.Model, ModelParent):
__tablename__ = 'problems'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(32))
content = db.Column(db.Text)
content_html = db.Column(db.Text)
time_limit = db.Column(db.Integer) # 时间限制
memory_limit = db.Column(db.Integer) # 内存限制
passing_rate = db.Column(db.Float) # 通过率
difficulty = db.Column(db.String(10)) # 难度
author = db.Column(db.String(32), db.ForeignKey("users.id")) # 作者
labels = db.relationship('Label', backref='problem', lazy='dynamic') # 标签
timestamp = db.Column(db.DateTime, default=datetime.now)
def to_dict(self):
result = super().to_dict()
labels = []
for each in self.labels:
labels.append(each.name)
result['labels'] = labels
return result
| zh | 0.926716 | # encoding:utf-8 # 时间限制 # 内存限制 # 通过率 # 难度 # 作者 # 标签 | 2.511377 | 3 |
app/1nst@gram_Us3r_Hunt3r.py | wagrodrigo/pcsensor | 0 | 6616400 | <reponame>wagrodrigo/pcsensor
import os
import sys
import json
import requests
# Objects created
WHITELIST_FILE = "whitelist.txt"
QUERY_KEYS_FILE = "keywords.txt"
base_url = "https://www.instagram.com/%s"
base_url_search = "https://www.instagram.com/web/search/topsearch/?context=blended&query=%s&rank_token=0.879513604327528&include_reel=true"
client = requests.Session()
client.headers.update({'referer': "https://www.instagram.com/instagram/"})
def load_list_from_file(filename: str) -> list:
    """Read <filename> and return its lines, whitespace-stripped, as a list.

    Returns an empty list when the file does not exist or cannot be read
    (the error is printed in that case).
    """
    if not os.path.isfile(filename):
        return []
    try:
        with open(filename, "r") as handle:
            return [entry.strip() for entry in handle]
    except Exception as err:
        print(err)
        return []
# Whitelist load
def load_whitelist() -> list:
    """Return the whitelisted account names read from WHITELIST_FILE."""
    return load_list_from_file(WHITELIST_FILE)
# Query_keys load
def load_query_keys() -> list:
    """Return the search keywords read from QUERY_KEYS_FILE."""
    return load_list_from_file(QUERY_KEYS_FILE)
if __name__ == "__main__":
    query_keys = load_query_keys()
    # Run one top-search query per keyword.
    for entry in query_keys:
        # Raw JSON text from Instagram's top-search endpoint.
        result = client.get(base_url_search % entry).text
        # Convert the result to a dictionary.
        result_json = json.loads(result)
        # The whitelist is (re)loaded on every query iteration.
        whitelist_accounts = load_whitelist()
        # Print the profile URL of every unverified, non-whitelisted account.
        for user in result_json["users"]:
            if not user["user"]["is_verified"] and user["user"]["username"] not in whitelist_accounts:
                print(base_url % user["user"]["username"])
| import os
import sys
import json
import requests
# Objects created
WHITELIST_FILE = "whitelist.txt"
QUERY_KEYS_FILE = "keywords.txt"
base_url = "https://www.instagram.com/%s"
base_url_search = "https://www.instagram.com/web/search/topsearch/?context=blended&query=%s&rank_token=0.879513604327528&include_reel=true"
client = requests.Session()
client.headers.update({'referer': "https://www.instagram.com/instagram/"})
def load_list_from_file(filename: str) -> list:
"""Load contents of <filename> line by line to python list"""
if os.path.isfile(filename):
try:
with open(filename, "r") as file_handle:
data = [line.strip() for line in file_handle.readlines()]
return data
except Exception as ex:
print(ex)
return []
# Whitelist load
def load_whitelist() -> list:
return load_list_from_file(WHITELIST_FILE)
# Query_keys load
def load_query_keys() -> list:
return load_list_from_file(QUERY_KEYS_FILE)
if __name__ == "__main__":
query_keys = load_query_keys()
#search using query
for entry in query_keys:
# receive the raw response from API
result = client.get(base_url_search % entry).text
# Convert the result to a dictionary
result_json = json.loads(result)
# Reading of whitelist.txt that contains que whitelist user
whitelist_accounts = load_whitelist()
# Comparing the result with whitelist and verified users and returning results
for user in result_json["users"]:
if not user["user"]["is_verified"] and user["user"]["username"] not in whitelist_accounts:
print(base_url % user["user"]["username"]) | en | 0.8376 | # Objects created Load contents of <filename> line by line to python list # Whitelist load # Query_keys load #search using query # receive the raw response from API # Convert the result to a dictionary # Reading of whitelist.txt that contains que whitelist user # Comparing the result with whitelist and verified users and returning results | 3.252984 | 3 |
prisonarchitect/lexer/tokens.py | kylestev/prisonarchitect | 0 | 6616401 | <reponame>kylestev/prisonarchitect<gh_stars>0
from collections import namedtuple
# A lexed token: the raw matched text plus its type tag.
Token = namedtuple('Token', ['value', 'type'])
# Each *_re below is a (regex, token-type) pair.
float_re = (r'^\d+\.\d+$', 'V_FLOAT') # before int, so "1.5" is not misread
int_re = (r'^\d+$', 'V_INT')
bool_re = (r'^(true|false)$', 'V_BOOL')
section_begin_re = (r'^BEGIN$', 'SEC_START')
section_end_re = (r'^END$', 'SEC_END')
# Shared identifier sub-pattern reused by name_re and obj_prop_re.
name_mixin = r'([A-Za-z0-9]+(?:[A-Za-z0-9_]*)*)'
name_re = (r'^{0}$'.format(name_mixin), 'T_NAME')
obj_prop_re = (r'^{0}((?:\.[a-zA-Z\d_]*)+)$'.format(name_mixin), 'T_OBJ_PROP')  # dotted property access
quote_re = (r'^"$', 'T_QUOTE')
atom_re = (r'^.+$', 'V_ATOM') # last, catch-all
# Patterns in matching priority order: specific values first, atom last.
parse_order = [float_re, int_re, bool_re,
               section_begin_re, section_end_re,
               name_re, obj_prop_re, quote_re,
               atom_re]
| from collections import namedtuple
Token = namedtuple('Token', ['value', 'type'])
float_re = (r'^\d+\.\d+$', 'V_FLOAT') # before int
int_re = (r'^\d+$', 'V_INT')
bool_re = (r'^(true|false)$', 'V_BOOL')
section_begin_re = (r'^BEGIN$', 'SEC_START')
section_end_re = (r'^END$', 'SEC_END')
name_mixin = r'([A-Za-z0-9]+(?:[A-Za-z0-9_]*)*)'
name_re = (r'^{0}$'.format(name_mixin), 'T_NAME')
obj_prop_re = (r'^{0}((?:\.[a-zA-Z\d_]*)+)$'.format(name_mixin), 'T_OBJ_PROP')
quote_re = (r'^"$', 'T_QUOTE')
atom_re = (r'^.+$', 'V_ATOM') # last, catch-all
parse_order = [float_re, int_re, bool_re,
section_begin_re, section_end_re,
name_re, obj_prop_re, quote_re,
atom_re] | en | 0.583012 | # before int # last, catch-all | 2.399977 | 2 |
host/sign_lfs.py | adamjedrzejewski/ToddLinux | 1 | 6616402 | <reponame>adamjedrzejewski/ToddLinux<gh_stars>1-10
#!/usr/bin/env python3
import os
from datetime import datetime
import sys
from argparse import ArgumentParser
SIGN_FILE = "lfs_sign.lock"
def main() -> int:
    """CLI entry point: drop a read-only sign file into an LFS chroot
    directory.

    Returns 0 on success, 1 when the target directory is non-empty and
    ``--force`` was not supplied.
    """
    parser = ArgumentParser(description="Sign LFS Chroot Environment")
    parser.add_argument('path', help='path to lfs chroot environment', type=str)
    parser.add_argument("-f", "--force", help="Add sign file even when folder is not empty", action="store_true")
    args = parser.parse_args()

    target = os.path.abspath(args.path)
    os.chdir(target)

    # Refuse to sign a non-empty directory unless explicitly forced.
    if os.listdir("."):
        if not args.force:
            print("lfs path is not empty, use `--force` to overwrite")
            return 1
        print("Warning: adding sign file to non-empty directory")

    with open(SIGN_FILE, "w") as handle:
        handle.write(f"ToddLinux Chroot Environment created on {datetime.now()}")
    os.chmod(SIGN_FILE, 0o444)  # make the marker read-only
    print(f"added sign file to '{target}'")
    return 0
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python3
import os
from datetime import datetime
import sys
from argparse import ArgumentParser
SIGN_FILE = "lfs_sign.lock"
def main() -> int:
parser = ArgumentParser(description="Sign LFS Chroot Environment")
parser.add_argument('path', help='path to lfs chroot environment', type=str)
parser.add_argument("-f", "--force", help="Add sign file even when folder is not empty", action="store_true")
args = parser.parse_args()
force = args.force
lfs_dir = os.path.abspath(args.path)
os.chdir(lfs_dir)
if len(os.listdir(".")) != 0:
if force:
print("Warning: adding sign file to non-empty directory")
else:
print("lfs path is not empty, use `--force` to overwrite")
return 1
with open(SIGN_FILE, "w") as file:
file.write(f"ToddLinux Chroot Environment created on {datetime.now()}")
os.chmod(SIGN_FILE, 0o444)
print(f"added sign file to '{lfs_dir}'")
return 0
if __name__ == "__main__":
sys.exit(main()) | fr | 0.221828 | #!/usr/bin/env python3 | 3.075273 | 3 |
decrypt.py | Reagan1947/cryptoProject | 0 | 6616403 | <filename>decrypt.py
from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, GT, pair
from ttt import *
# type annotations
pk_t = {'g': G1, 'g2': G2, 'h': G1, 'f': G1, 'e_gg_alpha': GT}
mk_t = {'beta': ZR, 'g2_alpha': G2}
sk_t = {'D': G2, 'Dj': G2, 'Djp': G1, 'S': str}
ct_t = {'C_tilde': GT, 'C': G1, 'Cy': G1, 'Cyp': G2}
debug = False
class CPabe_BSW07(ABEnc):
    """Decryption side of the Bethencourt–Sahai–Waters (2007)
    ciphertext-policy ABE scheme over a bilinear pairing group.

    NOTE(review): only decrypt() is implemented in this file; setup/keygen/
    encrypt presumably live elsewhere in the project.
    """

    def __init__(self, groupObj):
        ABEnc.__init__(self)
        global util, group
        # Module-level helpers: `util` handles policy parsing/secret sharing,
        # `group` is the pairing group used by pair().
        util = SecretUtil(groupObj, verbose=False)
        group = groupObj

    @Input(pk_t, sk_t, ct_t)
    @Output(GT)
    def decrypt(self, pk, sk, ct):
        """Recover the GT-element message from ciphertext `ct` with key `sk`.

        Returns False when the key's attribute set sk['S'] does not satisfy
        the ciphertext's access policy.
        """
        policy = util.createPolicy(ct['policy'])
        pruned_list = util.prune(policy, sk['S'])
        if pruned_list == False:
            return False
        # Lagrange coefficients for the satisfied policy leaves.
        z = util.getCoefficients(policy)
        A = 1
        # Combine the satisfied leaves:
        #   A = prod_j ( e(Cy_j, D_j) / e(D'_j, C'_j) )^{z_j}
        for i in pruned_list:
            j = i.getAttributeAndIndex()
            k = i.getAttribute()
            A *= (pair(ct['Cy'][j], sk['Dj'][k]) / pair(sk['Djp'][k], ct['Cyp'][j])) ** z[j]
        return ct['C_tilde'] / (pair(ct['C'], sk['D']) / A)
from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, GT, pair
from ttt import *
# type annotations
pk_t = {'g': G1, 'g2': G2, 'h': G1, 'f': G1, 'e_gg_alpha': GT}
mk_t = {'beta': ZR, 'g2_alpha': G2}
sk_t = {'D': G2, 'Dj': G2, 'Djp': G1, 'S': str}
ct_t = {'C_tilde': GT, 'C': G1, 'Cy': G1, 'Cyp': G2}
debug = False
class CPabe_BSW07(ABEnc):
def __init__(self, groupObj):
ABEnc.__init__(self)
global util, group
util = SecretUtil(groupObj, verbose=False)
group = groupObj
@Input(pk_t, sk_t, ct_t)
@Output(GT)
def decrypt(self, pk, sk, ct):
policy = util.createPolicy(ct['policy'])
pruned_list = util.prune(policy, sk['S'])
if pruned_list == False:
return False
z = util.getCoefficients(policy)
A = 1
for i in pruned_list:
j = i.getAttributeAndIndex()
k = i.getAttribute()
A *= (pair(ct['Cy'][j], sk['Dj'][k]) / pair(sk['Djp'][k], ct['Cyp'][j])) ** z[j]
return ct['C_tilde'] / (pair(ct['C'], sk['D']) / A) | en | 0.517708 | # type annotations | 2.073065 | 2 |
deploy_tix/bugzilla_rest_client.py | rpappalax/deploy-tix | 0 | 6616404 | <reponame>rpappalax/deploy-tix<gh_stars>0
"""This module enables CRUD operations with Bugzilla 5.1 REST API
.. _Bugzilla REST API Docs:
https://wiki.mozilla.org/Bugzilla:REST_API
http://bugzilla.readthedocs.org/en/latest/api/index.html
"""
import os
import sys
import json
import requests
from output_helper import OutputHelper
PRODUCT_PROD = 'Cloud Services'
PRODUCT_DEV = 'Mozilla Services'
COMPONENT_PROD = 'Operations: Deployment Requests'
COMPONENT_DEV = 'General'
HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}
URL_BUGZILLA_PROD = 'https://bugzilla.mozilla.org'
if os.environ['BUGZILLA_USERNAME']:
BUGZILLA_USERNAME = os.environ['BUGZILLA_USERNAME']
if os.environ['BUGZILLA_PASSWORD']:
BUGZILLA_PASSWORD = os.environ['BUGZILLA_PASSWORD']
URL_BUGZILLA_DEV = 'https://bugzilla-dev.allizom.org'
if os.environ['BUGZILLA_DEV_USERNAME']:
BUGZILLA_DEV_USERNAME = os.environ['BUGZILLA_DEV_USERNAME']
if os.environ['BUGZILLA_DEV_PASSWORD']:
BUGZILLA_DEV_PASSWORD = os.environ['BUGZILLA_DEV_PASSWORD']
class InvalidCredentials(Exception):
pass
class BugzillaRESTClient(object):
    """Used for CRUD operations against the Bugzilla REST API."""

    def __init__(self, bugzilla_mozilla):
        """Select credentials, product/component and host.

        Parameters
        ----------
        bugzilla_mozilla : bool
            True targets bugzilla.mozilla.org, False targets
            bugzilla-dev.allizom.org.
        """
        self.output = OutputHelper()
        self.bugzilla_mozilla = bugzilla_mozilla
        # bugzilla-dev doesn't mirror the same components,
        # so we'll populate these conditionally
        if bugzilla_mozilla:
            self.username = BUGZILLA_USERNAME
            # Restored from redaction placeholder: prod password constant.
            self.password = BUGZILLA_PASSWORD
            self.bugzilla_product = PRODUCT_PROD
            self.bugzilla_component = COMPONENT_PROD
            self.host = URL_BUGZILLA_PROD
        else:
            self.username = BUGZILLA_DEV_USERNAME
            # Restored from redaction placeholder: dev password constant.
            self.password = BUGZILLA_DEV_PASSWORD
            self.bugzilla_product = PRODUCT_DEV
            self.bugzilla_component = COMPONENT_DEV
            self.host = URL_BUGZILLA_DEV
        self.token = self.get_token(self.host)

    def _get_json_create(
            self, release_num, application,
            environment, status, description, cc_mail=''
    ):
        """Return the payload dict to POST when creating a deployment bug."""
        # 'prod' is spelled out so the bug summary reads 'PRODUCTION'.
        if environment == 'prod':
            environment = 'PRODUCTION'

        short_desc = 'Please deploy {1} {0} to {2}'.format(
            release_num,
            application,
            environment.upper()
        )
        data = {
            'product': self.bugzilla_product,
            'component': self.bugzilla_component,
            'version': 'unspecified',
            'op_sys': 'All',
            'rep_platform': 'All',
            'short_desc': short_desc,
            'description': description,
            'status': status
        }
        # CC list is optional; only add the field when an address is given.
        if cc_mail:
            data.update(
                {
                    'cc': [cc_mail]
                }
            )
        return data

    def _get_json_update(self, comment, bug_id):
        """Return the payload dict to POST when commenting on a bug."""
        data = {
            'ids': [bug_id],
            'comment': comment
        }
        return data

    def _get_json_search(self, summary):
        """Return the query-params dict to GET when searching for bugs."""
        data = {
            'summary': summary,
            'product': self.bugzilla_product,
            'component': self.bugzilla_component
        }
        return data

    def get_token(self, host):
        """Fetch and return a bugzilla API token as a string.

        Exits the process with a readable message when login fails.
        """
        params = {
            'login': self.username,
            # Restored from redaction placeholder: the stored password.
            'password': self.password
        }
        url = '{0}/rest/login'.format(host)
        req = requests.get(url, params=params)
        decoded = json.loads(req.text)
        try:
            if 'token' not in decoded:
                raise InvalidCredentials
        except InvalidCredentials:
            err_header = self.output.get_header('BUGZILLA ERROR')
            err_msg = '{0}\n{1}\n{2}\n\n'.format(
                err_header,
                decoded['message'],
                decoded['documentation']
            )
            sys.exit(err_msg)
        else:
            return decoded['token']

    def bug_create(
            self, release_num, application, environment,
            status, description, cc_mail=''
    ):
        """Create a bugzilla bug with the given description.

        Note:
            On bugzilla-dev - available status:
            NEW, UNCONFIRMED, ASSIGNED, RESOLVED
            On bugzilla - available status:
            NEW, UNCONFIRMED, RESOLVED, REOPENED, VERIFIED
            FIXED, INVALID, WONTFIX, DUPLICATE, WORKSFORME, INCOMPLETE

        Returns:
            ID of the newly created bug.
        """
        self.output.log('Creating new bug via bugzilla REST API...', True)
        url = '{0}/rest/bug?token={1}'.format(self.host, self.token)
        data = self._get_json_create(
            release_num, application,
            environment, status, description, cc_mail
        )
        self.output.log(data)
        req = requests.post(url, data=json.dumps(data), headers=HEADERS)
        try:
            new_bug_id = req.json()['id']
        except KeyError:
            print('\nERROR: {0}!\n'.format(req.text))
            # sys.exit instead of the site-injected exit() builtin.
            sys.exit(1)
        self.output.log('\nNew bug ID: {0}\nDONE!\n\n'.format(new_bug_id))
        return new_bug_id

    def bug_update(self, application, comment, bug_id=''):
        """Add a comment to a bug (looked up by summary when no id given).

        Returns:
            ID of the newly created comment.
        """
        # Without an explicit bug id, fall back to the latest matching bug.
        if not bug_id:
            bug_id = self.bug_search(application)
        self.output.log(
            'Updating bug #{0} via bugzilla REST API...'.format(bug_id), True)
        url = '{0}/rest/bug/{1}/comment?token={2}'.format(
            self.host, bug_id, self.token)
        data = self._get_json_update(comment, bug_id)
        self.output.log(data)
        req = requests.post(url, data=json.dumps(data), headers=HEADERS)
        new_comment_id = req.json()['id']
        if new_comment_id:
            self.output.log(
                '\nComment created! - new comment ID: {0}\n \
                DONE!\n\n'.format(new_comment_id))
        else:
            # The failure message has no placeholder; the original called
            # .format(new_comment_id) on it as a no-op -- dropped here.
            self.output.log('\nERROR: Comment not created!\n\n')
        return new_comment_id

    def _bug_latest_matching(self, json_bugs_matching):
        """Return the id of the most recently created bug in a search result."""
        self.output.log('Retrieve all matching bugs', True)
        bugs_unsorted = []
        # Iterate the bug dicts directly instead of indexing by range(len(...)).
        for bug in json_bugs_matching["bugs"]:
            bugs_unsorted.append([bug["id"], bug["creation_time"]])
        self.output.log(bugs_unsorted)
        self.output.log('Sort bugs by creation_time', True)
        bugs_sorted = sorted(
            bugs_unsorted, key=lambda entry: entry[1])
        # Log the sorted list (the original logged the unsorted list twice).
        self.output.log(bugs_sorted)
        self.output.log('DONE!')
        self.output.log('Get last bug from sorted list', True)
        bug_latest = bugs_sorted[-1]
        # return id only
        return bug_latest[0]

    def bug_search(self, summary):
        """Search for bugs matching ``summary``.

        Returns:
            Bug id of the most recently created matching bug.
        """
        self.output.log('Searching bugs with summary: {0} \n \
            via bugzilla REST API...'.format(summary), True)
        url = '{0}/rest/bug'.format(self.host)
        print('----------')
        data = self._get_json_search(summary)
        self.output.log(data)
        req = requests.get(url, params=data)
        return self._bug_latest_matching(req.json())
def main():
    """Demo: create a bug on bugzilla-dev, then search for it by summary."""
    client = BugzillaRESTClient(False)
    # Example: bug create
    print(client.bug_create(
        release_num='0.18.0',
        application='Loop-Client',
        environment='STAGE',
        status='NEW',
        description='this release has been deployed to stage.',
    ))
    # Example: bug search
    print(client.bug_search(summary='Loop-Client'))
if __name__ == '__main__':
main()
| """This module enables CRUD operations with Bugzilla 5.1 REST API
.. _Bugzilla REST API Docs:
https://wiki.mozilla.org/Bugzilla:REST_API
http://bugzilla.readthedocs.org/en/latest/api/index.html
"""
import os
import sys
import json
import requests
from output_helper import OutputHelper
PRODUCT_PROD = 'Cloud Services'
PRODUCT_DEV = 'Mozilla Services'
COMPONENT_PROD = 'Operations: Deployment Requests'
COMPONENT_DEV = 'General'
HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}
URL_BUGZILLA_PROD = 'https://bugzilla.mozilla.org'
if os.environ['BUGZILLA_USERNAME']:
BUGZILLA_USERNAME = os.environ['BUGZILLA_USERNAME']
if os.environ['BUGZILLA_PASSWORD']:
BUGZILLA_PASSWORD = os.environ['BUGZILLA_PASSWORD']
URL_BUGZILLA_DEV = 'https://bugzilla-dev.allizom.org'
if os.environ['BUGZILLA_DEV_USERNAME']:
BUGZILLA_DEV_USERNAME = os.environ['BUGZILLA_DEV_USERNAME']
if os.environ['BUGZILLA_DEV_PASSWORD']:
BUGZILLA_DEV_PASSWORD = os.environ['BUGZILLA_DEV_PASSWORD']
class InvalidCredentials(Exception):
pass
class BugzillaRESTClient(object):
""""Used for CRUD operations against Bugzilla REST API"""
def __init__(self, bugzilla_mozilla):
self.output = OutputHelper()
self.bugzilla_mozilla = bugzilla_mozilla
# bugzilla-dev doesn't mirror the same components,
# so we'll populate these conditionally
if bugzilla_mozilla:
self.username = BUGZILLA_USERNAME
self.password = <PASSWORD>
self.bugzilla_product = PRODUCT_PROD
self.bugzilla_component = COMPONENT_PROD
self.host = URL_BUGZILLA_PROD
else:
self.username = BUGZILLA_DEV_USERNAME
self.password = <PASSWORD>
self.bugzilla_product = PRODUCT_DEV
self.bugzilla_component = COMPONENT_DEV
self.host = URL_BUGZILLA_DEV
self.token = self.get_token(self.host)
def _get_json_create(
self, release_num, application,
environment, status, description, cc_mail=''
):
"""Returns bugzilla JSON string to POST to REST API.
example:
short_desc = '[deployment] {0} {1} - {2}'.format(
application, release_num, environment)
"""
if environment == 'prod':
environment = 'PRODUCTION'
short_desc = 'Please deploy {1} {0} to {2}'.format(
release_num,
application,
environment.upper()
)
data = {
'product': self.bugzilla_product,
'component': self.bugzilla_component,
'version': 'unspecified',
'op_sys': 'All',
'rep_platform': 'All',
'short_desc': short_desc,
'description': description,
'status': status
}
if cc_mail:
data.update(
{
'cc': [cc_mail]
}
)
return data
def _get_json_update(self, comment, bug_id):
"""Returns bugzilla JSON as string to PUT to REST API."""
data = {
'ids': [bug_id],
'comment': comment
}
return data
def _get_json_search(self, summary):
"""Returns bugzilla JSON as string to GET from REST API."""
data = {
'summary': summary,
'product': self.bugzilla_product,
'component': self.bugzilla_component
}
return data
def get_token(self, host):
"""Fetch and return bugzilla token as string."""
params = {
'login': self.username,
'password': <PASSWORD>.password
}
url = '{0}/rest/login'.format(host)
req = requests.get(url, params=params)
decoded = json.loads(req.text)
try:
if 'token' not in decoded:
raise InvalidCredentials
except InvalidCredentials:
err_header = self.output.get_header('BUGZILLA ERROR')
err_msg = '{0}\n{1}\n{2}\n\n'.format(
err_header,
decoded['message'],
decoded['documentation']
)
sys.exit(err_msg)
else:
return decoded['token']
def bug_create(
self, release_num, application, environment,
status, description, cc_mail=''
):
"""Create bugzilla bug with description
Note:
On bugzilla-dev - available status:
NEW, UNCONFIRMED, ASSIGNED, RESOLVED
On bugzilla - available status:
NEW, UNCONFIRMED, RESOLVED, REOPENED, VERIFIED
FIXED, INVALID, WONTFIX, DUPLICATE, WORKSFORME, INCOMPLETE
Returns:
json string to POST to REST API
"""
self.output.log('Creating new bug via bugzilla REST API...', True)
url = '{0}/rest/bug?token={1}'.format(self.host, self.token)
data = self._get_json_create(
release_num, application,
environment, status, description, cc_mail
)
self.output.log(data)
req = requests.post(url, data=json.dumps(data), headers=HEADERS)
try:
new_bug_id = req.json()['id']
except KeyError:
print('\nERROR: {0}!\n'.format(req.text))
exit(1)
self.output.log('\nNew bug ID: {0}\nDONE!\n\n'.format(new_bug_id))
return new_bug_id
def bug_update(self, application, comment, bug_id=''):
"""Update bugzilla bug with new comment
Returns:
json string to POST to REST API
"""
if not bug_id:
bug_id = self.bug_search(application)
self.output.log(
'Updating bug #{0} via bugzilla REST API...'.format(bug_id), True)
url = '{0}/rest/bug/{1}/comment?token={2}'.format(
self.host, bug_id, self.token)
data = self._get_json_update(comment, bug_id)
self.output.log(data)
req = requests.post(url, data=json.dumps(data), headers=HEADERS)
new_comment_id = req.json()['id']
if new_comment_id:
self.output.log(
'\nComment created! - new comment ID: {0}\n \
DONE!\n\n'.format(new_comment_id))
else:
self.output.log(
'\nERROR: Comment not created!\n\n'.format(new_comment_id))
return new_comment_id
def _bug_latest_matching(self, json_bugs_matching):
"""Returns bug id from bug with latest time stamp from
json_search_results
Returns:
bug id as string
"""
self.output.log('Retrieve all matching bugs', True)
bugs_unsorted = []
bugs = json_bugs_matching["bugs"]
for i in range(len(bugs)):
id = bugs[i]["id"]
creation_time = bugs[i]["creation_time"]
bugs_unsorted.append([id, creation_time])
self.output.log(bugs_unsorted)
self.output.log('Sort bugs by creation_time', True)
bugs_sorted = sorted(
bugs_unsorted, key=lambda bugs_sorted: bugs_sorted[1])
self.output.log(bugs_unsorted)
self.output.log('DONE!')
self.output.log('Get last bug from sorted list', True)
bug_latest = bugs_sorted[-1]
# return id only
return bug_latest[0]
def bug_search(self, summary):
"""Search for bugzilla bugs matching summary string
Returns:
json string to GET from REST API
"""
self.output.log('Searching bugs with summary: {0} \n \
via bugzilla REST API...'.format(summary), True)
url = '{0}/rest/bug'.format(self.host)
print('----------')
data = self._get_json_search(summary)
self.output.log(data)
req = requests.get(url, params=data)
return self._bug_latest_matching(req.json())
def main():
# Example: bug create
bugzilla_mozilla = False
bz = BugzillaRESTClient(bugzilla_mozilla)
bug_info = {
'release_num': '0.18.0',
'application': 'Loop-Client',
'environment': 'STAGE',
'status': 'NEW',
'description': 'this release has been deployed to stage.'
}
print(bz.bug_create(**bug_info))
# Example: bug search
search_info = {
'summary': 'Loop-Client'
}
print(bz.bug_search(**search_info))
if __name__ == '__main__':
main() | en | 0.517444 | This module enables CRUD operations with Bugzilla 5.1 REST API .. _Bugzilla REST API Docs: https://wiki.mozilla.org/Bugzilla:REST_API http://bugzilla.readthedocs.org/en/latest/api/index.html "Used for CRUD operations against Bugzilla REST API # bugzilla-dev doesn't mirror the same components, # so we'll populate these conditionally Returns bugzilla JSON string to POST to REST API. example: short_desc = '[deployment] {0} {1} - {2}'.format( application, release_num, environment) Returns bugzilla JSON as string to PUT to REST API. Returns bugzilla JSON as string to GET from REST API. Fetch and return bugzilla token as string. Create bugzilla bug with description Note: On bugzilla-dev - available status: NEW, UNCONFIRMED, ASSIGNED, RESOLVED On bugzilla - available status: NEW, UNCONFIRMED, RESOLVED, REOPENED, VERIFIED FIXED, INVALID, WONTFIX, DUPLICATE, WORKSFORME, INCOMPLETE Returns: json string to POST to REST API Update bugzilla bug with new comment Returns: json string to POST to REST API #{0} via bugzilla REST API...'.format(bug_id), True) Returns bug id from bug with latest time stamp from json_search_results Returns: bug id as string # return id only Search for bugzilla bugs matching summary string Returns: json string to GET from REST API # Example: bug create # Example: bug search | 2.450239 | 2 |
setup.py | DercioBobo/escopil | 0 | 6616405 | <gh_stars>0
from setuptools import setup, find_packages
# Runtime dependencies are read verbatim from requirements.txt (one spec per line).
with open("requirements.txt") as f:
    install_requires = f.read().strip().split("\n")
# get version from __version__ variable in escopil/__init__.py
from escopil import __version__ as version
setup(
    name="escopil",
    version=version,
    description="Escopil Apps",
    author="Duys",
    # NOTE(review): author_email holds a redaction placeholder -- restore the real address.
    author_email="<EMAIL>",
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
| from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in escopil/__init__.py
from escopil import __version__ as version
setup(
name="escopil",
version=version,
description="Escopil Apps",
author="Duys",
author_email="<EMAIL>",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
) | en | 0.341552 | # get version from __version__ variable in escopil/__init__.py | 1.555483 | 2 |
ephesus/allenestplot.py | tdaylan/tesstarg | 1 | 6616406 | import allesfitter
import os
# allesfitter operates on a run directory; the trailing '/' makes the path a
# directory prefix -- presumably required by allesfitter's path handling (TODO confirm).
pathalle = os.getcwd() + '/'
# Produce allesfitter's initial-guess and nested-sampling outputs for this
# run directory (outputs are written by allesfitter itself -- see its docs).
allesfitter.show_initial_guess(pathalle)
allesfitter.ns_output(pathalle)
| import allesfitter
import os
pathalle = os.getcwd() + '/'
allesfitter.show_initial_guess(pathalle)
allesfitter.ns_output(pathalle)
| none | 1 | 1.410217 | 1 | |
sciutils/plot.py | tillahoffmann/sciutils | 0 | 6616407 | import collections
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy import special
def evaluate_pcolormesh_edges(x, scale='linear'):
    """
    Evaluate the `n + 1` edges of cells for a `pcolormesh` visualisation for `n` cell centroids.

    Parameters
    ----------
    x : array_like
        Centroids of the pcolormesh cells (at least two entries).
    scale : str or (callable, callable)
        'linear' for arithmetic midpoints, 'log' for geometric midpoints,
        'logit' for midpoints in logit space, or a `(forward, backward)`
        pair of transforms.

    Returns
    -------
    edges : np.ndarray
        Edges of pcolormesh cells.

    Raises
    ------
    ValueError
        If `scale` is not one of the recognised options.
    """
    if scale == 'log':
        forward = np.log
        backward = np.exp
    elif scale == 'linear':
        # Identity on the values, but coerces plain python sequences to
        # arrays -- matching np.log / special.logit in the other branches.
        # (The original `lambda x: x` left lists untouched, so linear scale
        # crashed on list input: `x[1:] + x[:-1]` concatenates lists.)
        def forward(values):
            return np.asarray(values)
        backward = forward
    elif scale == 'logit':
        forward = special.logit
        backward = special.expit
    elif isinstance(scale, (tuple, list)):
        forward, backward = scale
    else:
        raise ValueError(scale)
    # Coerce so custom `forward` callables returning sequences also work.
    x = np.asarray(forward(x))
    # Find the (n - 1) midpoints in transformed space.
    midpoints = (x[1:] + x[:-1]) / 2
    # Extrapolate the two endpoints by reflecting the nearest midpoint.
    left = 2 * x[0] - midpoints[0]
    right = 2 * x[-1] - midpoints[-1]
    # Construct the edges and map back to the original space.
    edges = np.concatenate([[left], midpoints, [right]])
    return backward(edges)
# Define all attributes that should be broadcast
_GEOMETRY_ELEMENTWISE_PROPERTIES = {'linestyle', 'facecolor', 'edgecolor', 'linewidth', 'array'}
def plot_geometry(geometries, aspect='equal', autoscale=True, scale=1, ax=None, **kwargs):
    """
    Plot a shapely geometry using a polygon collection.

    .. note::
        This function does not plot holes in polygons.

    Parameters
    ----------
    geometries :
        Geometry (shapely Polygon or MultiPolygon) to plot or sequence thereof.
    aspect : str or float, optional
        Aspect ratio of the plot.
    autoscale : bool, optional
        Whether to autoscale the plot.
    scale : float, optional
        Multiplicative factor applied to all exterior coordinates.
    ax : optional
        Axes to use for plotting.
    **kwargs : dict
        Keyword arguments passed to `matplotlib.collections.PolyCollection`.
        Non-string iterable values listed in _GEOMETRY_ELEMENTWISE_PROPERTIES
        are applied per input geometry, cycling when shorter than the input.

    Returns
    -------
    collection : matplotlib.collections.PolyCollection
        Collection of polygons.
    """
    # Imported lazily so shapely is only required when this function is used.
    import shapely.geometry
    ax = ax or plt.gca()
    # If a single geometry is passed, transform it to a list of geometries with one element
    if not isinstance(geometries, collections.abc.Iterable):
        geometries = [geometries]
    # Identify which properties have been provided elementwise
    elementwise_properties = _GEOMETRY_ELEMENTWISE_PROPERTIES & \
        {key for key, value in kwargs.items() if isinstance(value, collections.abc.Iterable)
         and not isinstance(value, str)}
    # Build up all the attributes and vertices
    vertices = []
    collection_kwargs = {}
    for i, geometry in enumerate(geometries):
        if isinstance(geometry, shapely.geometry.MultiPolygon):
            sub_geometries = geometry.geoms
        elif isinstance(geometry, shapely.geometry.Polygon):
            sub_geometries = [geometry]
        else:
            raise ValueError(geometry)
        for geometry in sub_geometries:
            coords = np.asarray(list(geometry.exterior.coords)) * scale
            vertices.append(coords)
            # Deal with elementwise attributes
            # NOTE(review): properties are indexed by the *input* geometry
            # index i, so all sub-polygons of one MultiPolygon share the same
            # styling; `i % len(value)` cycles short property sequences.
            for key in elementwise_properties:
                value = kwargs[key]
                collection_kwargs.setdefault(key, []).append(value[i % len(value)])
    # PolyCollection expects `array` as an ndarray for scalar-mappable coloring.
    array = collection_kwargs.get('array')
    if array is not None:
        collection_kwargs['array'] = np.asarray(array)
    # Copy over remaining kwargs
    kwargs.update(collection_kwargs)
    polys = mpl.collections.PolyCollection(vertices, **kwargs)
    ax.add_collection(polys)
    if aspect:
        ax.set_aspect(aspect)
    if autoscale:
        ax.autoscale_view()
    return polys
def alpha_cmap(color, name=''):
    """
    Create a monochrome colormap that maps scalars to varying transparencies.

    Parameters
    ----------
    color : str, int, or tuple
        Base color to use for the colormap. An integer ``i`` is shorthand for
        the matplotlib colour-cycle entry ``'Ci'``.
    name : str
        Name of the colormap.

    Returns
    -------
    cmap : mpl.colors.Colormap
        Colormap encoding scalars as transparencies.
    """
    if isinstance(color, int):
        color = f'C{color}'
    # Interpolate from fully transparent to fully opaque at the same hue.
    return mpl.colors.LinearSegmentedColormap.from_list(name, [
        mpl.colors.to_rgba(color, alpha=0.0),
        mpl.colors.to_rgba(color, alpha=1.0),
    ])
| import collections
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy import special
def evaluate_pcolormesh_edges(x, scale='linear'):
    """
    Evaluate the `n + 1` edges of cells for a `pcolormesh` visualisation for `n` cell centroids.

    Parameters
    ----------
    x : array_like
        Centroids of the pcolormesh cells (at least two entries).
    scale : str or (callable, callable)
        'linear' for arithmetic midpoints, 'log' for geometric midpoints,
        'logit' for midpoints in logit space, or a `(forward, backward)`
        pair of transforms.

    Returns
    -------
    edges : np.ndarray
        Edges of pcolormesh cells.

    Raises
    ------
    ValueError
        If `scale` is not one of the recognised options.
    """
    if scale == 'log':
        forward = np.log
        backward = np.exp
    elif scale == 'linear':
        # Identity on the values, but coerces plain python sequences to
        # arrays -- matching np.log / special.logit in the other branches.
        # (The original `lambda x: x` left lists untouched, so linear scale
        # crashed on list input: `x[1:] + x[:-1]` concatenates lists.)
        def forward(values):
            return np.asarray(values)
        backward = forward
    elif scale == 'logit':
        forward = special.logit
        backward = special.expit
    elif isinstance(scale, (tuple, list)):
        forward, backward = scale
    else:
        raise ValueError(scale)
    # Coerce so custom `forward` callables returning sequences also work.
    x = np.asarray(forward(x))
    # Find the (n - 1) midpoints in transformed space.
    midpoints = (x[1:] + x[:-1]) / 2
    # Extrapolate the two endpoints by reflecting the nearest midpoint.
    left = 2 * x[0] - midpoints[0]
    right = 2 * x[-1] - midpoints[-1]
    # Construct the edges and map back to the original space.
    edges = np.concatenate([[left], midpoints, [right]])
    return backward(edges)
# Define all attributes that should be broadcast
_GEOMETRY_ELEMENTWISE_PROPERTIES = {'linestyle', 'facecolor', 'edgecolor', 'linewidth', 'array'}
def plot_geometry(geometries, aspect='equal', autoscale=True, scale=1, ax=None, **kwargs):
"""
Plot a shapely geometry using a polygon collection.
.. note::
This function does not plot holes in polygons.
Parameters
----------
geometries :
Geometry to plot or sequence thereof.
aspect : str or float, optional
Aspect ratio of the plot.
autoscale : bool, optional
Whether to autoscale the plot.
ax : optional
Axes to use for plotting.
**kwargs : dict
Keyword arguments passed to `matplotlib.collections.PolyCollection`.
Returns
-------
collection : matplotlib.collections.PolyCollection
Collection of polygons.
"""
import shapely.geometry
ax = ax or plt.gca()
# If a single geometry is passed, transform it to a list of geometries with one element
if not isinstance(geometries, collections.abc.Iterable):
geometries = [geometries]
# Identify which properties have been provided elementwise
elementwise_properties = _GEOMETRY_ELEMENTWISE_PROPERTIES & \
{key for key, value in kwargs.items() if isinstance(value, collections.abc.Iterable)
and not isinstance(value, str)}
# Build up all the attributes and vertices
vertices = []
collection_kwargs = {}
for i, geometry in enumerate(geometries):
if isinstance(geometry, shapely.geometry.MultiPolygon):
sub_geometries = geometry.geoms
elif isinstance(geometry, shapely.geometry.Polygon):
sub_geometries = [geometry]
else:
raise ValueError(geometry)
for geometry in sub_geometries:
coords = np.asarray(list(geometry.exterior.coords)) * scale
vertices.append(coords)
# Deal with elementwise attributes
for key in elementwise_properties:
value = kwargs[key]
collection_kwargs.setdefault(key, []).append(value[i % len(value)])
array = collection_kwargs.get('array')
if array is not None:
collection_kwargs['array'] = np.asarray(array)
# Copy over remaining kwargs
kwargs.update(collection_kwargs)
polys = mpl.collections.PolyCollection(vertices, **kwargs)
ax.add_collection(polys)
if aspect:
ax.set_aspect(aspect)
if autoscale:
ax.autoscale_view()
return polys
def alpha_cmap(color, name=''):
"""
Create a monochrome colormap that maps scalars to varying transparencies.
Parameters
----------
color : str, int, or tuple
Base color to use for the colormap.
name : str
Name of the colormap.
**kwargs : dict
Keyword arguments passed to :meth:`mpl.colors.LinearSegmentedColormap.from_list`.
Returns
-------
cmap : mpl.colors.Colormap
Colormap encoding scalars as transparencies.
"""
if isinstance(color, int):
color = f'C{color}'
return mpl.colors.LinearSegmentedColormap.from_list(name, [
mpl.colors.to_rgba(color, alpha=0.0),
mpl.colors.to_rgba(color, alpha=1.0),
])
| en | 0.546824 | Evaluate the `n + 1` edges of cells for a `pcolormesh` visualisation for `n` cell centroids. Parameters ---------- x : np.ndarray Centroids of the pcolormesh cells. scale : str Find the arithmetic midpoints if `linear` and the geometric midpoints if `log`. Returns ------- edges : np.ndarray Edges of pcolormesh cells. # Find the (n - 1) midpoints # Find the endpoints # Construct the edges # Define all attributes that should be broadcast Plot a shapely geometry using a polygon collection. .. note:: This function does not plot holes in polygons. Parameters ---------- geometries : Geometry to plot or sequence thereof. aspect : str or float, optional Aspect ratio of the plot. autoscale : bool, optional Whether to autoscale the plot. ax : optional Axes to use for plotting. **kwargs : dict Keyword arguments passed to `matplotlib.collections.PolyCollection`. Returns ------- collection : matplotlib.collections.PolyCollection Collection of polygons. # If a single geometry is passed, transform it to a list of geometries with one element # Identify which properties have been provided elementwise # Build up all the attributes and vertices # Deal with elementwise attributes # Copy over remaining kwargs Create a monochrome colormap that maps scalars to varying transparencies. Parameters ---------- color : str, int, or tuple Base color to use for the colormap. name : str Name of the colormap. **kwargs : dict Keyword arguments passed to :meth:`mpl.colors.LinearSegmentedColormap.from_list`. Returns ------- cmap : mpl.colors.Colormap Colormap encoding scalars as transparencies. | 3.177671 | 3 |
pssgp/experiments/co2/mcmc.py | arplaboratory/python_gp_kalman_hyperparam | 10 | 6616408 | # Regression experiments on sinusoidal signals.
# Corresponds to the *** of paper.
import os
import gpflow as gpf
import numpy as np
import tensorflow as tf
import tqdm
from absl import app, flags
from gpflow import set_trainable
from gpflow.kernels import SquaredExponential
from gpflow.models import GPModel
from tensorflow_probability.python.distributions import Normal
from pssgp.experiments.co2.common import get_data, FLAGS
from pssgp.experiments.common import ModelEnum, get_model, \
run_one_mcmc, MCMC
from pssgp.kernels import Matern32, Periodic
flags.DEFINE_integer('np_seed', 42, "data model seed")
flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
flags.DEFINE_integer('n_runs', 10, "size of the logspace for n training samples")
flags.DEFINE_string('mcmc', MCMC.HMC.value, "MCMC method enum")
flags.DEFINE_integer('n_samples', 1000, "Number of samples required")
flags.DEFINE_integer('n_burnin', 100, "Number of burnin samples")
flags.DEFINE_float('step_size', 0.01, "Step size for the gradient based chain")
flags.DEFINE_float('n_leapfrogs', 10, "Num leapfrogs for HMC")
flags.DEFINE_boolean('plot', False, "Plot the result")
flags.DEFINE_boolean('run', True, "Run the result or load the data")
def set_gp_priors(gp_model: GPModel):
    """Freeze the observation-noise parameter of the model.

    The attribute holding the noise variance differs between the plain GP
    model and the other model variants, hence the dispatch on the flag.
    """
    is_plain_gp = FLAGS.model == ModelEnum.GP.value
    noise_param = gp_model.likelihood.variance if is_plain_gp else gp_model.noise_variance
    set_trainable(noise_param, False)
def get_covariance_function():
    """Build the CO2 covariance: (quasi-periodic * damping) + Matern-3/2.

    Normal priors are attached to the trainable hyperparameters so MCMC can
    sample them; parameters without a prior are frozen via set_trainable.
    """
    gp_dtype = gpf.config.default_float()
    # Matern 32
    m32_cov = Matern32(variance=1, lengthscales=100.)
    m32_cov.variance.prior = Normal(gp_dtype(1.), gp_dtype(0.1))
    m32_cov.lengthscales.prior = Normal(gp_dtype(100.), gp_dtype(50.))
    # Periodic base kernel
    periodic_base_cov = SquaredExponential(variance=5., lengthscales=1.)
    set_trainable(periodic_base_cov.variance, False)
    periodic_base_cov.lengthscales.prior = Normal(gp_dtype(5.), gp_dtype(1.))
    # Periodic
    # Period fixed at 1 (presumably one year in the data's time units -- TODO confirm).
    periodic_cov = Periodic(periodic_base_cov, period=1., order=FLAGS.qp_order)
    set_trainable(periodic_cov.period, False)
    # Periodic damping
    periodic_damping_cov = Matern32(variance=1e-1, lengthscales=50)
    periodic_damping_cov.variance.prior = Normal(gp_dtype(1e-1), gp_dtype(1e-3))
    periodic_damping_cov.lengthscales.prior = Normal(gp_dtype(50), gp_dtype(10.))
    # Final covariance
    co2_cov = periodic_cov * periodic_damping_cov + m32_cov
    return co2_cov
def run():
    """Run the CO2 MCMC experiment for each training size; save times and posteriors."""
    gpf.config.set_default_float(getattr(np, FLAGS.dtype))
    tf.random.set_seed(FLAGS.tf_seed)
    f_times = os.path.join("results", f"mcmc-times-{FLAGS.model}-{FLAGS.mcmc}")
    # TODO: we need a flag for this directory really.
    f_posterior = os.path.join("results", f"mcmc-posterior-{FLAGS.model}-{FLAGS.mcmc}")
    # Single training-set size used for the CO2 experiment.
    n_training_logspace = [3192]
    if FLAGS.run:
        cov_fun = get_covariance_function()
        times = np.empty(len(n_training_logspace), dtype=float)
        for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=len(n_training_logspace)):
            t, y = get_data(n_training)
            gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
                                 t.shape[0])
            set_gp_priors(gp_model)
            run_time, params_res = run_one_mcmc(n_training, gp_model)
            times[i] = run_time
            # One posterior archive per training size.
            np.savez(f_posterior + f"-{n_training}", **params_res)
        # Column 0: n_training, column 1: wall-clock run time.
        np.save(f_times, np.stack([n_training_logspace, times], axis=1))
def main(_):
    """absl entry point: execute the experiment on the configured device."""
    with tf.device(FLAGS.device):
        run()
if __name__ == '__main__':
if not os.path.exists("results"):
os.makedirs('results')
app.run(main)
| # Regression experiments on sinusoidal signals.
# Corresponds to the *** of paper.
import os
import gpflow as gpf
import numpy as np
import tensorflow as tf
import tqdm
from absl import app, flags
from gpflow import set_trainable
from gpflow.kernels import SquaredExponential
from gpflow.models import GPModel
from tensorflow_probability.python.distributions import Normal
from pssgp.experiments.co2.common import get_data, FLAGS
from pssgp.experiments.common import ModelEnum, get_model, \
run_one_mcmc, MCMC
from pssgp.kernels import Matern32, Periodic
flags.DEFINE_integer('np_seed', 42, "data model seed")
flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
flags.DEFINE_integer('n_runs', 10, "size of the logspace for n training samples")
flags.DEFINE_string('mcmc', MCMC.HMC.value, "MCMC method enum")
flags.DEFINE_integer('n_samples', 1000, "Number of samples required")
flags.DEFINE_integer('n_burnin', 100, "Number of burnin samples")
flags.DEFINE_float('step_size', 0.01, "Step size for the gradient based chain")
flags.DEFINE_float('n_leapfrogs', 10, "Num leapfrogs for HMC")
flags.DEFINE_boolean('plot', False, "Plot the result")
flags.DEFINE_boolean('run', True, "Run the result or load the data")
def set_gp_priors(gp_model: GPModel):
if FLAGS.model == ModelEnum.GP.value:
set_trainable(gp_model.likelihood.variance, False)
else:
set_trainable(gp_model.noise_variance, False)
def get_covariance_function():
gp_dtype = gpf.config.default_float()
# Matern 32
m32_cov = Matern32(variance=1, lengthscales=100.)
m32_cov.variance.prior = Normal(gp_dtype(1.), gp_dtype(0.1))
m32_cov.lengthscales.prior = Normal(gp_dtype(100.), gp_dtype(50.))
# Periodic base kernel
periodic_base_cov = SquaredExponential(variance=5., lengthscales=1.)
set_trainable(periodic_base_cov.variance, False)
periodic_base_cov.lengthscales.prior = Normal(gp_dtype(5.), gp_dtype(1.))
# Periodic
periodic_cov = Periodic(periodic_base_cov, period=1., order=FLAGS.qp_order)
set_trainable(periodic_cov.period, False)
# Periodic damping
periodic_damping_cov = Matern32(variance=1e-1, lengthscales=50)
periodic_damping_cov.variance.prior = Normal(gp_dtype(1e-1), gp_dtype(1e-3))
periodic_damping_cov.lengthscales.prior = Normal(gp_dtype(50), gp_dtype(10.))
# Final covariance
co2_cov = periodic_cov * periodic_damping_cov + m32_cov
return co2_cov
def run():
gpf.config.set_default_float(getattr(np, FLAGS.dtype))
tf.random.set_seed(FLAGS.tf_seed)
f_times = os.path.join("results", f"mcmc-times-{FLAGS.model}-{FLAGS.mcmc}")
# TODO: we need a flag for this directory really.
f_posterior = os.path.join("results", f"mcmc-posterior-{FLAGS.model}-{FLAGS.mcmc}")
n_training_logspace = [3192]
if FLAGS.run:
cov_fun = get_covariance_function()
times = np.empty(len(n_training_logspace), dtype=float)
for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=len(n_training_logspace)):
t, y = get_data(n_training)
gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
t.shape[0])
set_gp_priors(gp_model)
run_time, params_res = run_one_mcmc(n_training, gp_model)
times[i] = run_time
np.savez(f_posterior + f"-{n_training}", **params_res)
np.save(f_times, np.stack([n_training_logspace, times], axis=1))
def main(_):
device = tf.device(FLAGS.device)
with device:
run()
if __name__ == '__main__':
if not os.path.exists("results"):
os.makedirs('results')
app.run(main)
| en | 0.656231 | # Regression experiments on sinusoidal signals. # Corresponds to the *** of paper. # Matern 32 # Periodic base kernel # Periodic # Periodic damping # Final covariance # TODO: we need a flag for this directory really. | 2.265391 | 2 |
app.py | jwilmot-dev/animals | 0 | 6616409 | from datetime import date
from flask import Flask, render_template, session, redirect, url_for, request
from flask_session.__init__ import Session
from tempfile import mkdtemp
import csv
import random
import json
import requests
import os.path
from datetime import timedelta
from helper import *
app = Flask(__name__)
app.secret_key = "#ICS499sp22andthecowjumpedoverthemoonD1dDl3dIDDlE"
app.permanent_session_lifetime = timedelta(minutes=20)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
@app.route('/login')
def start():
if "board" not in session:
session["board"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["board"].append(row_list)
row_list = []
if "score" not in session:
session["score"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["score"].append(row_list)
row_list = []
return render_template("login.html", game=session["board"], score=session["score"], status=status, in_play = in_play, initial = initial, end_game=end_game, custom_check = custom_check, num_tries=int(num_tries), word_length=word_length, language=language, icon_one=icon_one, icon_two=icon_two, icon_three=icon_three, icon_four=icon_four, icon_five=icon_five)
@app.route('/login', methods=['POST'])
def login():
if "user" not in session:
session['user'] = request.form['user']
session.pop("board", None)
session.pop("score", None)
return redirect(url_for("user"))
else:
return redirect(url_for("index"))
@app.route("/user")
def user():
if "user" in session:
session.permanent = True
session['initial'] = True
session['status'] = True
session['in_play'] = False
session['language'] = language
session['end_game'] = False
session['c_id'] = ' '
session['custom_check'] = False
session['counter'] = 0
return redirect(url_for("index"))
else:
return redirect(url_for("start"))
@app.route("/logout")
def logout():
session.pop("user", None)
session.pop("board", None)
session.pop("score", None)
session.pop("message", None)
session.pop('custom_check', None)
session.pop('language', None)
session.pop('c_id', None)
session.pop('initial', None)
session.pop('wordlist', None)
session.pop('word', None)
session.pop('word_length', None)
session.pop('custom_success_flag', None)
session.pop('custom_message', None)
session.pop('status', None)
session.pop('in_play', None)
session.pop('end_game', None)
session.pop('word_array', None)
session.pop('guess', None)
session.pop('guess_array', None)
session.pop('guessbaseword_array', None)
session.pop('counter', None)
session.pop('baseword_array', None)
return redirect(url_for("start"))
@app.route('/')
@app.route('/myword/<lang>/<custom_id>')
def index(custom_id=None, lang=None):
# global custom_check
# global c_id
# global custom_success_flag
# global initial
global wordlist
global word
# global language
global word_length
global word_array
if "user" not in session:
return redirect(url_for("start"))
if session['initial'] == True and custom_id != None:
session.pop("board", None)
session.pop("score", None)
session.pop("message", None)
session['custom_check'] = True
session['language'] = lang
session['c_id'] = custom_id
# session['initial'] = False
wordlist = create_wordlist()
print("inital wordlist", wordlist)
word = choose_word(wordlist, language)
print("inital word", word)
word_length = len(word_array)
print("inital word_length", word_length)
# session['num_tries'] = num_tries
elif session['initial'] == True:
wordlist = create_wordlist()
print("inital wordlist", wordlist)
word = choose_word(wordlist, language)
print("inital word", word)
word_length = len(word_array)
session['custom_success_flag'] = False
session['custom_message'] = ''
if "board" not in session:
session["board"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["board"].append(row_list)
row_list = []
if "score" not in session:
session["score"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["score"].append(row_list)
row_list = []
if "message" not in session:
session["message"] = 'Welcome, ' + session['user'] + '! Please make your first guess'
return render_template("game.html", game=session["board"], score=session["score"], message=session["message"],
status=session['status'], in_play = session['in_play'], initial = session['initial'],
end_game=session['end_game'], custom_check = session['custom_check'],
num_tries=num_tries, word_length=word_length,
language=session['language'], icon_one=icon_one, icon_two=icon_two,
icon_three=icon_three, icon_four=icon_four, icon_five=icon_five)
@app.route('/myword')
def custom_form():
global custom_message
global custom_success_flag
# session["message"] = 'Enter a word with no repeating letters. '
return render_template("myword.html", message = session['custom_message'],
custom_flag = session['custom_success_flag'])
@app.route('/myword', methods=['POST'])
def custom_input():
# global language
global word_length
global custom_word
# global custom_message
# global custom_success_flag
cust_language = request.form['custom_lang']
custom_word = request.form['custom_input']
while custom_input_check(custom_word) == False:
session['custom_message'] = 'Oops. Make sure you are guessing a {word_length}-letter word that contains no repeating letters.'
return redirect(url_for("custom_form"))
cust_wordpath = 'static/custom_words_' + cust_language
if cust_language == 'Telugu':
set_encoding = "utf-8"
else:
set_encoding = "ascii"
if session['language'] == 'English':
custom_word = custom_word.lower()
if not os.path.exists(cust_wordpath):
with open(cust_wordpath, mode='w', encoding = set_encoding, newline='') as wf:
next_index = 1
new_custom_entry = [next_index, cust_language, custom_word, word_length] ######eliminate lenght
writer = csv.writer(wf)
writer.writerow(new_custom_entry)
else:
with open(cust_wordpath, mode= 'r', encoding = set_encoding ) as rcf:
data = rcf.readlines()
print(len(data))
print ("checkpoint pre-last row")
next_index = len(data) + 1
with open(cust_wordpath, mode = 'a', encoding = set_encoding, newline='') as af:
custom_entry = [next_index, cust_language, custom_word, word_length] ######eliminate lenght
writer = csv.writer(af)
writer.writerow(custom_entry)
session['custom_success_flag'] = True
# session['custom_message'] = "/myword/" + cust_language + "/" + str(next_index)
session['custom_message'] = "http://127.0.0.1:5000/myword/" + cust_language + "/" + str(next_index)
return redirect(url_for("custom_form"))
@app.route('/', methods=['POST'])
@app.route('/myword/<lang>/<custom_id>', methods=['POST'])
def play(lang = None, custom_id = None):
global num_tries
global word_length
global word_index
global wordlist
global word
global word_array
global baseword_array
global guess_array
session['initial'] = False
session['custom_message'] = ''
session['custom_success_flag'] = False
if not wordlist:
print("if not wordlist checkpoint")
wordlist = create_wordlist()
word = choose_word(wordlist, session['language'])
print("checkpoint 1 word: ", word)
word_array = get_wordarray(word)
guess = request.form['guess'].lower()
if guess != 'yes':
global counter
global status
session['in_play'] = True
while session['status'] == True:
while input_check(guess) == False:
session["message"] = 'Oops. Make sure you are guessing a word with no repeating letters.'
return redirect(url_for("index"))
guess_array = get_wordarray(guess)
if session['language'] =="Telugu":
guessbaseword_array = get_basearray(guess)
#process guess
for x in range(len(guess_array)):
session["board"][session['counter']][x] = guess_array[x]
if guess_array[x] in word_array:
if x == word_array.index(guess_array[x]):
session["score"][session['counter']][x] = 1
else:
session["score"][session['counter']][x] = 2
elif session['language'] == "Telugu" and guessbaseword_array[x] in baseword_array:
if x == baseword_array.index(guessbaseword_array[x]):
session["score"][session['counter']][x] = 3
else:
session["score"][session['counter']][x] = 4
else:
session["score"][session['counter']][x] = 5
session["score"][session['counter']].sort()
if session["score"][session['counter']].count(1) == word_length :
if session['c_id'] == ' ':
session["message"] = 'Congratulations. You guessed the word of the day!'
else:
session["message"] = 'Congratulations. You guessed the word!'
session['status'] = False
session['c_id'] = ' '
session['in_play'] = False
session['end_game'] = True
elif session['counter'] == num_tries - 1:
session["message"] = f'Sorry, you did not guess the word in the allowed number of tries. The word was "{word}".'
session['status'] = False
session['c_id'] = ' '
session['in_play'] = False
session['end_game'] = True
else:
session['counter'] += 1
session["message"] = 'Guess another word'
return redirect(url_for("index"))
else:
session['counter'] = 0
word_index = 0
session['status'] = True
session['initial'] = True
session['end_game'] = False
session.pop('board', None)
session.pop('score', None)
session.pop('message', None)
if session['custom_check']:
session['custom_check'] = False
session['c_id'] = ' '
word_index = 0
wordlist.clear()
word = ''
if session['language'] != request.form['lang_toggle'] or word_length != request.form['c_length'] or num_tries != int(request.form['c_numattempts']):
session['language'] = request.form['lang_toggle']
if session['language'] == 'English':
word_length = 5;
else:
word_length = 4;
# word_length = int(request.form['c_length'])
num_tries = int(request.form['c_numattempts'])
word_index = 0
wordlist.clear()
else:
wordlist.clear()
return redirect(url_for("index"))
def create_wordlist():
global wordlist
# global language
global word_length
wordlist = []
if session['custom_check']:
cust_wordpath = 'static/custom_words_' + session['language']
if session['language'] == 'Telugu':
set_encoding = "utf-8"
else:
set_encoding = "ascii"
with open(cust_wordpath, mode='r', encoding=set_encoding) as read_custom:
rline = csv.reader(read_custom, delimiter =',')
for row in rline:
print(row)
if int(row[0]) == int(session['c_id']):
session['language'] = row[1]
wordlist.append(row[2])
word_length = int(row[3])
break
print("After custom_check:")
print("language: ", session['language'])
print("checkpoint after custom word added to wordlist")
print(wordlist)
print("word length: " + str(word_length))
else:
#####temp
input_file = "static/words_" + session['language'].lower() + ".csv"
if session['language'] == "Telugu":
#open input file
with open(input_file, 'r', encoding='utf-8') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
#read each row in CSV file and store values in variables
for row in readCSV:
if row[0] == date.today().isoformat():
print("Today is: " + date.today().isoformat())
wordlist.append(row[1])
else:
#open input file
with open(input_file, 'r') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
#read each row in CSV file and store values in variables
for row in readCSV:
if row[0] == date.today().isoformat():
print("Today is: " + date.today().isoformat())
wordlist.append(row[1])
#random.shuffle(wordlist)
print("Wordlist: ", wordlist)
return wordlist
def choose_word(wordlist, language):
global word_index
global word
global word_array
global baseword_array
# global used_words
# global custom_check
global word_length
word = wordlist[0]
word_array = get_wordarray(word)
session['word_array'] = word_array
word_length = len(word_array)
if session['language'] == "Telugu":
baseword_array = get_basearray(word)
session['base_array'] = baseword_array
print("basearray LLLLL")
print(baseword_array)
word_length = len(word_array)
word = "".join(word_array)
return word
#validate user input
def input_check(input):
flag = True
global guess_array
guess_array = get_wordarray(input)
if len(guess_array) != word_length:
flag = False
else:
for char in guess_array:
print("character count: ", char, guess_array.count(char))
if guess_array.count(char) > 1:
flag = False
return flag
if __name__ == "__main__":
app.run( )
| from datetime import date
from flask import Flask, render_template, session, redirect, url_for, request
from flask_session.__init__ import Session
from tempfile import mkdtemp
import csv
import random
import json
import requests
import os.path
from datetime import timedelta
from helper import *
app = Flask(__name__)
app.secret_key = "#ICS499sp22andthecowjumpedoverthemoonD1dDl3dIDDlE"
app.permanent_session_lifetime = timedelta(minutes=20)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
@app.route('/login')
def start():
if "board" not in session:
session["board"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["board"].append(row_list)
row_list = []
if "score" not in session:
session["score"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["score"].append(row_list)
row_list = []
return render_template("login.html", game=session["board"], score=session["score"], status=status, in_play = in_play, initial = initial, end_game=end_game, custom_check = custom_check, num_tries=int(num_tries), word_length=word_length, language=language, icon_one=icon_one, icon_two=icon_two, icon_three=icon_three, icon_four=icon_four, icon_five=icon_five)
@app.route('/login', methods=['POST'])
def login():
if "user" not in session:
session['user'] = request.form['user']
session.pop("board", None)
session.pop("score", None)
return redirect(url_for("user"))
else:
return redirect(url_for("index"))
@app.route("/user")
def user():
if "user" in session:
session.permanent = True
session['initial'] = True
session['status'] = True
session['in_play'] = False
session['language'] = language
session['end_game'] = False
session['c_id'] = ' '
session['custom_check'] = False
session['counter'] = 0
return redirect(url_for("index"))
else:
return redirect(url_for("start"))
@app.route("/logout")
def logout():
session.pop("user", None)
session.pop("board", None)
session.pop("score", None)
session.pop("message", None)
session.pop('custom_check', None)
session.pop('language', None)
session.pop('c_id', None)
session.pop('initial', None)
session.pop('wordlist', None)
session.pop('word', None)
session.pop('word_length', None)
session.pop('custom_success_flag', None)
session.pop('custom_message', None)
session.pop('status', None)
session.pop('in_play', None)
session.pop('end_game', None)
session.pop('word_array', None)
session.pop('guess', None)
session.pop('guess_array', None)
session.pop('guessbaseword_array', None)
session.pop('counter', None)
session.pop('baseword_array', None)
return redirect(url_for("start"))
@app.route('/')
@app.route('/myword/<lang>/<custom_id>')
def index(custom_id=None, lang=None):
# global custom_check
# global c_id
# global custom_success_flag
# global initial
global wordlist
global word
# global language
global word_length
global word_array
if "user" not in session:
return redirect(url_for("start"))
if session['initial'] == True and custom_id != None:
session.pop("board", None)
session.pop("score", None)
session.pop("message", None)
session['custom_check'] = True
session['language'] = lang
session['c_id'] = custom_id
# session['initial'] = False
wordlist = create_wordlist()
print("inital wordlist", wordlist)
word = choose_word(wordlist, language)
print("inital word", word)
word_length = len(word_array)
print("inital word_length", word_length)
# session['num_tries'] = num_tries
elif session['initial'] == True:
wordlist = create_wordlist()
print("inital wordlist", wordlist)
word = choose_word(wordlist, language)
print("inital word", word)
word_length = len(word_array)
session['custom_success_flag'] = False
session['custom_message'] = ''
if "board" not in session:
session["board"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["board"].append(row_list)
row_list = []
if "score" not in session:
session["score"] = []
row_list = []
for i in range(num_tries):
for j in range(word_length):
row_list.append(None)
session["score"].append(row_list)
row_list = []
if "message" not in session:
session["message"] = 'Welcome, ' + session['user'] + '! Please make your first guess'
return render_template("game.html", game=session["board"], score=session["score"], message=session["message"],
status=session['status'], in_play = session['in_play'], initial = session['initial'],
end_game=session['end_game'], custom_check = session['custom_check'],
num_tries=num_tries, word_length=word_length,
language=session['language'], icon_one=icon_one, icon_two=icon_two,
icon_three=icon_three, icon_four=icon_four, icon_five=icon_five)
@app.route('/myword')
def custom_form():
global custom_message
global custom_success_flag
# session["message"] = 'Enter a word with no repeating letters. '
return render_template("myword.html", message = session['custom_message'],
custom_flag = session['custom_success_flag'])
@app.route('/myword', methods=['POST'])
def custom_input():
# global language
global word_length
global custom_word
# global custom_message
# global custom_success_flag
cust_language = request.form['custom_lang']
custom_word = request.form['custom_input']
while custom_input_check(custom_word) == False:
session['custom_message'] = 'Oops. Make sure you are guessing a {word_length}-letter word that contains no repeating letters.'
return redirect(url_for("custom_form"))
cust_wordpath = 'static/custom_words_' + cust_language
if cust_language == 'Telugu':
set_encoding = "utf-8"
else:
set_encoding = "ascii"
if session['language'] == 'English':
custom_word = custom_word.lower()
if not os.path.exists(cust_wordpath):
with open(cust_wordpath, mode='w', encoding = set_encoding, newline='') as wf:
next_index = 1
new_custom_entry = [next_index, cust_language, custom_word, word_length] ######eliminate lenght
writer = csv.writer(wf)
writer.writerow(new_custom_entry)
else:
with open(cust_wordpath, mode= 'r', encoding = set_encoding ) as rcf:
data = rcf.readlines()
print(len(data))
print ("checkpoint pre-last row")
next_index = len(data) + 1
with open(cust_wordpath, mode = 'a', encoding = set_encoding, newline='') as af:
custom_entry = [next_index, cust_language, custom_word, word_length] ######eliminate lenght
writer = csv.writer(af)
writer.writerow(custom_entry)
session['custom_success_flag'] = True
# session['custom_message'] = "/myword/" + cust_language + "/" + str(next_index)
session['custom_message'] = "http://127.0.0.1:5000/myword/" + cust_language + "/" + str(next_index)
return redirect(url_for("custom_form"))
@app.route('/', methods=['POST'])
@app.route('/myword/<lang>/<custom_id>', methods=['POST'])
def play(lang = None, custom_id = None):
global num_tries
global word_length
global word_index
global wordlist
global word
global word_array
global baseword_array
global guess_array
session['initial'] = False
session['custom_message'] = ''
session['custom_success_flag'] = False
if not wordlist:
print("if not wordlist checkpoint")
wordlist = create_wordlist()
word = choose_word(wordlist, session['language'])
print("checkpoint 1 word: ", word)
word_array = get_wordarray(word)
guess = request.form['guess'].lower()
if guess != 'yes':
global counter
global status
session['in_play'] = True
while session['status'] == True:
while input_check(guess) == False:
session["message"] = 'Oops. Make sure you are guessing a word with no repeating letters.'
return redirect(url_for("index"))
guess_array = get_wordarray(guess)
if session['language'] =="Telugu":
guessbaseword_array = get_basearray(guess)
#process guess
for x in range(len(guess_array)):
session["board"][session['counter']][x] = guess_array[x]
if guess_array[x] in word_array:
if x == word_array.index(guess_array[x]):
session["score"][session['counter']][x] = 1
else:
session["score"][session['counter']][x] = 2
elif session['language'] == "Telugu" and guessbaseword_array[x] in baseword_array:
if x == baseword_array.index(guessbaseword_array[x]):
session["score"][session['counter']][x] = 3
else:
session["score"][session['counter']][x] = 4
else:
session["score"][session['counter']][x] = 5
session["score"][session['counter']].sort()
if session["score"][session['counter']].count(1) == word_length :
if session['c_id'] == ' ':
session["message"] = 'Congratulations. You guessed the word of the day!'
else:
session["message"] = 'Congratulations. You guessed the word!'
session['status'] = False
session['c_id'] = ' '
session['in_play'] = False
session['end_game'] = True
elif session['counter'] == num_tries - 1:
session["message"] = f'Sorry, you did not guess the word in the allowed number of tries. The word was "{word}".'
session['status'] = False
session['c_id'] = ' '
session['in_play'] = False
session['end_game'] = True
else:
session['counter'] += 1
session["message"] = 'Guess another word'
return redirect(url_for("index"))
else:
session['counter'] = 0
word_index = 0
session['status'] = True
session['initial'] = True
session['end_game'] = False
session.pop('board', None)
session.pop('score', None)
session.pop('message', None)
if session['custom_check']:
session['custom_check'] = False
session['c_id'] = ' '
word_index = 0
wordlist.clear()
word = ''
if session['language'] != request.form['lang_toggle'] or word_length != request.form['c_length'] or num_tries != int(request.form['c_numattempts']):
session['language'] = request.form['lang_toggle']
if session['language'] == 'English':
word_length = 5;
else:
word_length = 4;
# word_length = int(request.form['c_length'])
num_tries = int(request.form['c_numattempts'])
word_index = 0
wordlist.clear()
else:
wordlist.clear()
return redirect(url_for("index"))
def create_wordlist():
global wordlist
# global language
global word_length
wordlist = []
if session['custom_check']:
cust_wordpath = 'static/custom_words_' + session['language']
if session['language'] == 'Telugu':
set_encoding = "utf-8"
else:
set_encoding = "ascii"
with open(cust_wordpath, mode='r', encoding=set_encoding) as read_custom:
rline = csv.reader(read_custom, delimiter =',')
for row in rline:
print(row)
if int(row[0]) == int(session['c_id']):
session['language'] = row[1]
wordlist.append(row[2])
word_length = int(row[3])
break
print("After custom_check:")
print("language: ", session['language'])
print("checkpoint after custom word added to wordlist")
print(wordlist)
print("word length: " + str(word_length))
else:
#####temp
input_file = "static/words_" + session['language'].lower() + ".csv"
if session['language'] == "Telugu":
#open input file
with open(input_file, 'r', encoding='utf-8') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
#read each row in CSV file and store values in variables
for row in readCSV:
if row[0] == date.today().isoformat():
print("Today is: " + date.today().isoformat())
wordlist.append(row[1])
else:
#open input file
with open(input_file, 'r') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
#read each row in CSV file and store values in variables
for row in readCSV:
if row[0] == date.today().isoformat():
print("Today is: " + date.today().isoformat())
wordlist.append(row[1])
#random.shuffle(wordlist)
print("Wordlist: ", wordlist)
return wordlist
def choose_word(wordlist, language):
global word_index
global word
global word_array
global baseword_array
# global used_words
# global custom_check
global word_length
word = wordlist[0]
word_array = get_wordarray(word)
session['word_array'] = word_array
word_length = len(word_array)
if session['language'] == "Telugu":
baseword_array = get_basearray(word)
session['base_array'] = baseword_array
print("basearray LLLLL")
print(baseword_array)
word_length = len(word_array)
word = "".join(word_array)
return word
#validate user input
def input_check(input):
flag = True
global guess_array
guess_array = get_wordarray(input)
if len(guess_array) != word_length:
flag = False
else:
for char in guess_array:
print("character count: ", char, guess_array.count(char))
if guess_array.count(char) > 1:
flag = False
return flag
if __name__ == "__main__":
app.run( )
| en | 0.498588 | # global custom_check # global c_id # global custom_success_flag # global initial # global language # session['initial'] = False # session['num_tries'] = num_tries # session["message"] = 'Enter a word with no repeating letters. ' # global language # global custom_message # global custom_success_flag ######eliminate lenght ######eliminate lenght # session['custom_message'] = "/myword/" + cust_language + "/" + str(next_index) #process guess # word_length = int(request.form['c_length']) # global language #####temp #open input file #read each row in CSV file and store values in variables #open input file #read each row in CSV file and store values in variables #random.shuffle(wordlist) # global used_words # global custom_check #validate user input | 2.62546 | 3 |
test/test_slice.py | codeclimate-testing/falcon | 115 | 6616410 | <reponame>codeclimate-testing/falcon
from testing_helpers import wrap
@wrap
def store_slice():
x = range(100)
x[10:20] = range(50, 60)
return x
def test_store_slice():
store_slice()
@wrap
def store_slice1():
x = range(100)
x[10:] = range(50, 60)
return x
def test_store_slice1():
store_slice1()
@wrap
def store_slice2():
x = range(100)
x[:10] = range(50, 60)
return x
def test_store_slice2():
store_slice2()
@wrap
def store_slice3():
x = range(100)
x[:] = range(50, 60)
return x
def test_store_slice3():
store_slice3()
@wrap
def load_slice0():
x = range(100)
y = x[10:20]
return y
def test_load_slice0():
load_slice0()
@wrap
def load_slice1():
x = range(100)
y = x[10:]
return y
def test_load_slice1():
load_slice1()
@wrap
def load_slice2():
x = range(100)
y = x[:10]
return y
def test_load_slice2():
load_slice2()
@wrap
def load_slice3():
x = range(100)
y = x[:]
return y
def test_load_slice3():
load_slice3()
@wrap
def load_slice4():
x = range(100)
y = x[1::-1]
return y
def test_load_slice4():
load_slice4()
| from testing_helpers import wrap
@wrap
def store_slice():
x = range(100)
x[10:20] = range(50, 60)
return x
def test_store_slice():
store_slice()
@wrap
def store_slice1():
x = range(100)
x[10:] = range(50, 60)
return x
def test_store_slice1():
store_slice1()
@wrap
def store_slice2():
x = range(100)
x[:10] = range(50, 60)
return x
def test_store_slice2():
store_slice2()
@wrap
def store_slice3():
x = range(100)
x[:] = range(50, 60)
return x
def test_store_slice3():
store_slice3()
@wrap
def load_slice0():
x = range(100)
y = x[10:20]
return y
def test_load_slice0():
load_slice0()
@wrap
def load_slice1():
x = range(100)
y = x[10:]
return y
def test_load_slice1():
load_slice1()
@wrap
def load_slice2():
x = range(100)
y = x[:10]
return y
def test_load_slice2():
load_slice2()
@wrap
def load_slice3():
x = range(100)
y = x[:]
return y
def test_load_slice3():
load_slice3()
@wrap
def load_slice4():
x = range(100)
y = x[1::-1]
return y
def test_load_slice4():
load_slice4() | none | 1 | 2.637151 | 3 | |
tests/controllers/test_auth_controller.py | tuhinpaul/flask-sample-project | 0 | 6616411 | from flask import Flask, Response
import json
from flask.testing import FlaskClient
from werkzeug.security import generate_password_hash
import pytest
from flaskr import create_app
from flaskr.database import Database
from flaskr.models.user import User
app: Flask = create_app()
class TestAuthController():
def setup_user_in_db(self):
# first insert an user (after deleting existing ones)
Database.db_session.query(User).delete()
Database.db_session.add(
User(username='user1001', password=generate_password_hash('<PASSWORD>#')))
Database.db_session.commit()
def test_login_first_get(self):
with app.test_client() as client:
resp: Response = client.get('/login')
assert resp.status_code == 200
def test_login_get_already_logged_in(self):
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
resp: Response = client.get('/login')
assert resp.status_code == 200
assert b'You are already logged in' in resp.data
def test_login_post_already_logged_in(self):
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
resp: Response = client.post('/login', data=dict(
username='some_other_username',
password='<PASSWORD>'
))
assert resp.status_code == 200
assert b'You are already logged in' in resp.data
def test_login_blank_username_blank_password(self):
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='',
password=''
))
assert resp.status_code == 200
assert b'Invalid or absent username' in resp.data
assert b'Invalid or absent password' in resp.data
def test_login_wrong_password(self):
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='user1001',
password='<PASSWORD>'
))
assert resp.status_code == 200
assert b'Authentication failed' in resp.data
def test_login_successful_login(self):
# create a user in the database:
self.setup_user_in_db()
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='user1001',
password='<PASSWORD>#'
))
assert resp.status_code == 302
# the username should be available in session now:
with client.session_transaction() as sess:
assert sess['username'] == 'user1001'
def test_login_invalid_method(self):
# create a user in the database:
self.setup_user_in_db()
with app.test_client() as client:
# remove any user in the session
with client.session_transaction() as sess:
if 'username' in sess:
sess.pop('username')
c2: FlaskClient = client
resp: Response = c2.patch('/login', as_tuple=False, data=dict(
any_key='any_value'
))
assert resp.status_code == 200
assert b'405 Method Not Allowed' in resp.data
def test_logout(self):
# set username in session
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
# logout
resp:Response = client.get('/logout')
assert resp.status_code == 302
# ensure username no longer exists in the session:
with client.session_transaction() as sess:
assert 'username' not in sess
def test_logout_without_login(self):
# set username in session
with app.test_client() as client:
with client.session_transaction() as sess:
if 'username' in sess:
sess.pop('username')
resp:Response = client.get('/logout')
assert resp.status_code == 200
assert b'401 Unauthorized' in resp.data
| from flask import Flask, Response
import json
from flask.testing import FlaskClient
from werkzeug.security import generate_password_hash
import pytest
from flaskr import create_app
from flaskr.database import Database
from flaskr.models.user import User
app: Flask = create_app()
class TestAuthController():
def setup_user_in_db(self):
# first insert an user (after deleting existing ones)
Database.db_session.query(User).delete()
Database.db_session.add(
User(username='user1001', password=generate_password_hash('<PASSWORD>#')))
Database.db_session.commit()
def test_login_first_get(self):
with app.test_client() as client:
resp: Response = client.get('/login')
assert resp.status_code == 200
def test_login_get_already_logged_in(self):
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
resp: Response = client.get('/login')
assert resp.status_code == 200
assert b'You are already logged in' in resp.data
def test_login_post_already_logged_in(self):
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
resp: Response = client.post('/login', data=dict(
username='some_other_username',
password='<PASSWORD>'
))
assert resp.status_code == 200
assert b'You are already logged in' in resp.data
def test_login_blank_username_blank_password(self):
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='',
password=''
))
assert resp.status_code == 200
assert b'Invalid or absent username' in resp.data
assert b'Invalid or absent password' in resp.data
def test_login_wrong_password(self):
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='user1001',
password='<PASSWORD>'
))
assert resp.status_code == 200
assert b'Authentication failed' in resp.data
def test_login_successful_login(self):
# create a user in the database:
self.setup_user_in_db()
with app.test_client() as client:
resp: Response = client.post('/login', data=dict(
username='user1001',
password='<PASSWORD>#'
))
assert resp.status_code == 302
# the username should be available in session now:
with client.session_transaction() as sess:
assert sess['username'] == 'user1001'
def test_login_invalid_method(self):
# create a user in the database:
self.setup_user_in_db()
with app.test_client() as client:
# remove any user in the session
with client.session_transaction() as sess:
if 'username' in sess:
sess.pop('username')
c2: FlaskClient = client
resp: Response = c2.patch('/login', as_tuple=False, data=dict(
any_key='any_value'
))
assert resp.status_code == 200
assert b'405 Method Not Allowed' in resp.data
def test_logout(self):
# set username in session
with app.test_client() as client:
with client.session_transaction() as sess:
sess['username'] = 'any_username'
# logout
resp:Response = client.get('/logout')
assert resp.status_code == 302
# ensure username no longer exists in the session:
with client.session_transaction() as sess:
assert 'username' not in sess
def test_logout_without_login(self):
# set username in session
with app.test_client() as client:
with client.session_transaction() as sess:
if 'username' in sess:
sess.pop('username')
resp:Response = client.get('/logout')
assert resp.status_code == 200
assert b'401 Unauthorized' in resp.data
| en | 0.687551 | # first insert an user (after deleting existing ones) #'))) # create a user in the database: #' # the username should be available in session now: # create a user in the database: # remove any user in the session # set username in session # logout # ensure username no longer exists in the session: # set username in session | 2.731767 | 3 |
google_problems/problem_84.py | loftwah/Daily-Coding-Problem | 129 | 6616412 | """This problem was asked by Google.
Yesterday you implemented a function that encodes a hexadecimal string into Base64.
Write a function to decode a Base64 string back to a hexadecimal string.
For example, the following string:
3q2+7w==
should produce:
deadbeef
""" | """This problem was asked by Google.
Yesterday you implemented a function that encodes a hexadecimal string into Base64.
Write a function to decode a Base64 string back to a hexadecimal string.
For example, the following string:
3q2+7w==
should produce:
deadbeef
""" | en | 0.823894 | This problem was asked by Google. Yesterday you implemented a function that encodes a hexadecimal string into Base64. Write a function to decode a Base64 string back to a hexadecimal string. For example, the following string: 3q2+7w== should produce: deadbeef | 2.630482 | 3 |
abhisek/Balanced_Brackets.py | abhisek-technicise/Python-Training | 1 | 6616413 | '''
Assignment 46 : Your task in this exercise is as follows:
Generate a string with N opening brackets ("[") and N closing brackets ("]"), in some arbitrary order.
Determine whether the generated string is balanced; that is, whether it consists entirely of pairs of opening/
closing brackets (in that order), none of which mis-nest.
Examples:
[] OK ][ NOT OK
[][] OK ][][ NOT OK
[[][]] OK []][[] NOT OK
'''
def isEmpty(STACK):
if len(STACK) == 0:
return True
def parChecker(symbolString):
stack = list() # Stack Data structure
balanced = True # Check for balanced parentheses
index = 0
while index < len(symbolString) and balanced:
symbol = symbolString[index]
if symbol == "[":
stack.append(symbol) # For open bracket '[' PUSH it
else:
if isEmpty(stack):
balanced = False # if no matching ']' for ']'
else:
stack.pop() # Else POP ']' for '['
index += 1
if balanced and isEmpty(stack): # Nothing to POP and balanced flag is true
return "OK"
else:
return "Not OK"
print("[]", parChecker('[]'))
print("[][]", parChecker('[][]'))
print("[[][]]", parChecker('[[][]]'))
print("][", parChecker(']['))
print("][][", parChecker('][]['))
print("[]][[]", parChecker('[]][[]')) | '''
Assignment 46 : Your task in this exercise is as follows:
Generate a string with N opening brackets ("[") and N closing brackets ("]"), in some arbitrary order.
Determine whether the generated string is balanced; that is, whether it consists entirely of pairs of opening/
closing brackets (in that order), none of which mis-nest.
Examples:
[] OK ][ NOT OK
[][] OK ][][ NOT OK
[[][]] OK []][[] NOT OK
'''
def isEmpty(STACK):
if len(STACK) == 0:
return True
def parChecker(symbolString):
stack = list() # Stack Data structure
balanced = True # Check for balanced parentheses
index = 0
while index < len(symbolString) and balanced:
symbol = symbolString[index]
if symbol == "[":
stack.append(symbol) # For open bracket '[' PUSH it
else:
if isEmpty(stack):
balanced = False # if no matching ']' for ']'
else:
stack.pop() # Else POP ']' for '['
index += 1
if balanced and isEmpty(stack): # Nothing to POP and balanced flag is true
return "OK"
else:
return "Not OK"
print("[]", parChecker('[]'))
print("[][]", parChecker('[][]'))
print("[[][]]", parChecker('[[][]]'))
print("][", parChecker(']['))
print("][][", parChecker('][]['))
print("[]][[]", parChecker('[]][[]')) | en | 0.822385 | Assignment 46 : Your task in this exercise is as follows: Generate a string with N opening brackets ("[") and N closing brackets ("]"), in some arbitrary order. Determine whether the generated string is balanced; that is, whether it consists entirely of pairs of opening/ closing brackets (in that order), none of which mis-nest. Examples: [] OK ][ NOT OK [][] OK ][][ NOT OK [[][]] OK []][[] NOT OK # Stack Data structure # Check for balanced parentheses # For open bracket '[' PUSH it # if no matching ']' for ']' # Else POP ']' for '[' # Nothing to POP and balanced flag is true | 4.066941 | 4 |
number/views.py | DenisDolmatov2020/lote | 0 | 6616414 | import random
from rest_framework.generics import ListAPIView, UpdateAPIView
from number.models import Number
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from number.serializers import NumberSerializer
from number.service import choose_winners
class NumberList(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NumberSerializer
def get_queryset(self):
return Number.objects.filter(user=self.request.user)
class NumberUpdateView(UpdateAPIView):
permission_classes = [IsAuthenticated]
def partial_update(self, request, *args, **kwargs):
numbers = Number.objects.select_related('lot__user').filter(lot_id=self.request.data['lot_id'])
lot_user_numbers = numbers.filter(user_id=request.user.id)
lot_numbers_free = numbers.filter(user_id=None)
if lot_numbers_free:
random_idx = random.randint(0, len(lot_numbers_free) - 1)
lot_number = lot_numbers_free[random_idx]
energy = lot_number.lot.energy * (2 ** len(lot_user_numbers))
if request.user.energy >= energy and request.user != lot_number.lot.user:
request.user.energy -= energy
request.user.save(update_fields=['energy'])
lot_number.user = request.user
lot_number.save(update_fields=['user'])
if len(lot_numbers_free) <= 1 and lot_number.lot.active:
choose_winners(lot_number.lot)
return Response(
status=status.HTTP_200_OK,
data=lot_number.num
)
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
| import random
from rest_framework.generics import ListAPIView, UpdateAPIView
from number.models import Number
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from number.serializers import NumberSerializer
from number.service import choose_winners
class NumberList(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NumberSerializer
def get_queryset(self):
return Number.objects.filter(user=self.request.user)
class NumberUpdateView(UpdateAPIView):
permission_classes = [IsAuthenticated]
def partial_update(self, request, *args, **kwargs):
numbers = Number.objects.select_related('lot__user').filter(lot_id=self.request.data['lot_id'])
lot_user_numbers = numbers.filter(user_id=request.user.id)
lot_numbers_free = numbers.filter(user_id=None)
if lot_numbers_free:
random_idx = random.randint(0, len(lot_numbers_free) - 1)
lot_number = lot_numbers_free[random_idx]
energy = lot_number.lot.energy * (2 ** len(lot_user_numbers))
if request.user.energy >= energy and request.user != lot_number.lot.user:
request.user.energy -= energy
request.user.save(update_fields=['energy'])
lot_number.user = request.user
lot_number.save(update_fields=['user'])
if len(lot_numbers_free) <= 1 and lot_number.lot.active:
choose_winners(lot_number.lot)
return Response(
status=status.HTTP_200_OK,
data=lot_number.num
)
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
| none | 1 | 2.086726 | 2 | |
backend/django_blog/views.py | Fowerus/drf-blog | 5 | 6616415 | <reponame>Fowerus/drf-blog<gh_stars>1-10
from django.shortcuts import render
import jwt
from django.conf import settings
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework import status
from rest_framework.response import Response
from rest_framework import generics
from Users.models import User
from Communities.models import Community
from Chats.models import Chats
def main(request):
return render(request, 'index.html')
class VerifyJWTUserAPIView(APIView):
def post(self,request):
current_user = User.objects.get(username = request.data.get('username'))
try:
if current_user and current_user.is_active:
token_decode = jwt.decode(request.data.get('token'),settings.SECRET_KEY, algorithms = ['HS256'])
if token_decode['id'] == current_user.id:
return Response({
'id':current_user.id,
'email':current_user.email,
'username':current_user.username,
'last_name':current_user.last_name,
'first_name':current_user.first_name
}, status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST)
class VerifyPostAdminAPIView(APIView):
def post(self, request):
try:
current_user = User.objects.get(id = request.data.get('user_id'))
current_com = Community.objects.get(id = request.data.get('com_id'))
if current_user.id == current_com.com_admins.filter(id = current_user.id).first().id:
return Response(status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST)
class VerifyChatMembershipAPIView(APIView):
def post(self, request, chat_id):
try:
current_chat = Chats.objects.get(id = chat_id)
current_user = User.objects.get(id = request.data['id'])
if current_user in current_chat.users.all():
return Response(status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST) | from django.shortcuts import render
import jwt
from django.conf import settings
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework import status
from rest_framework.response import Response
from rest_framework import generics
from Users.models import User
from Communities.models import Community
from Chats.models import Chats
def main(request):
return render(request, 'index.html')
class VerifyJWTUserAPIView(APIView):
def post(self,request):
current_user = User.objects.get(username = request.data.get('username'))
try:
if current_user and current_user.is_active:
token_decode = jwt.decode(request.data.get('token'),settings.SECRET_KEY, algorithms = ['HS256'])
if token_decode['id'] == current_user.id:
return Response({
'id':current_user.id,
'email':current_user.email,
'username':current_user.username,
'last_name':current_user.last_name,
'first_name':current_user.first_name
}, status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST)
class VerifyPostAdminAPIView(APIView):
def post(self, request):
try:
current_user = User.objects.get(id = request.data.get('user_id'))
current_com = Community.objects.get(id = request.data.get('com_id'))
if current_user.id == current_com.com_admins.filter(id = current_user.id).first().id:
return Response(status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST)
class VerifyChatMembershipAPIView(APIView):
def post(self, request, chat_id):
try:
current_chat = Chats.objects.get(id = chat_id)
current_user = User.objects.get(id = request.data['id'])
if current_user in current_chat.users.all():
return Response(status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
except:
return Response(status = status.HTTP_400_BAD_REQUEST) | none | 1 | 2.08773 | 2 | |
Dpark_Test/wordcount.py | Liangchengdeye/Dpark | 1 | 6616416 | <gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@file: wordcount.py
@time: 2018/6/5 18:10
@describe: 单词统计
"""
from dpark import DparkContext
ctx = DparkContext()
file = ctx.textFile("./words.txt")
words = file.flatMap(lambda x:x.split()).map(lambda x:(x,1))
wc = words.reduceByKey(lambda x,y:x+y).collectAsMap()
print (wc)
# 统计单词出现的个数
def word_count(file_path, word):
# 指定某个Mesos主机进行沟通
dpark = DparkContext()
# 将分布式文件,构造成文件RDD,每块大小为16m
f = dpark.textFile(file_path, splitSize=16 << 20)
# 用map()转变成新的RDD,再用filter过滤出更新的RDD,最后用count()处理返回结果
print(word, 'count:', f.map(lambda line: line.strip()).filter(lambda line: word in line).count())
word_count("./words.txt", "php") | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@file: wordcount.py
@time: 2018/6/5 18:10
@describe: 单词统计
"""
from dpark import DparkContext
ctx = DparkContext()
file = ctx.textFile("./words.txt")
words = file.flatMap(lambda x:x.split()).map(lambda x:(x,1))
wc = words.reduceByKey(lambda x,y:x+y).collectAsMap()
print (wc)
# 统计单词出现的个数
def word_count(file_path, word):
# 指定某个Mesos主机进行沟通
dpark = DparkContext()
# 将分布式文件,构造成文件RDD,每块大小为16m
f = dpark.textFile(file_path, splitSize=16 << 20)
# 用map()转变成新的RDD,再用filter过滤出更新的RDD,最后用count()处理返回结果
print(word, 'count:', f.map(lambda line: line.strip()).filter(lambda line: word in line).count())
word_count("./words.txt", "php") | zh | 0.443063 | #!/usr/bin/env python # encoding: utf-8 @version: v1.0 @author: W_H_J @license: Apache Licence @contact: <EMAIL> @site: @software: PyCharm @file: wordcount.py @time: 2018/6/5 18:10 @describe: 单词统计 # 统计单词出现的个数 # 指定某个Mesos主机进行沟通 # 将分布式文件,构造成文件RDD,每块大小为16m # 用map()转变成新的RDD,再用filter过滤出更新的RDD,最后用count()处理返回结果 | 3.018591 | 3 |
tools/create-typelib.py | mothran/binja-typelibs | 7 | 6616417 | <reponame>mothran/binja-typelibs<gh_stars>1-10
import os
import sys
from argparse import ArgumentParser
from binaryninja.platform import Platform
from binaryninja.architecture import Architecture
from binaryninja.typelibrary import TypeLibrary
from binaryninja.log import log_info, log_warn, log_error, log_debug, log_to_stdout, LogLevel
def main(args):
log_to_stdout(LogLevel.InfoLog)
if not os.path.exists(args.input_file):
log_warn(f"input file: {args.input_file} does not exist")
return 1
dir_path = os.path.dirname(os.path.realpath(args.output))
if not os.path.exists(dir_path):
log_warn(f"Output path directory {dir_path} does not exist")
return 1
try:
platform: Platform = Platform[args.platform]
except KeyError:
log_warn(f"'{args.platform}' is not supported binja platform")
return 1
with open(args.input_file) as fd:
type_data = fd.read()
if args.definitions:
prepend_str = ""
for defintion in args.definitions.split(","):
prepend_str += f"#define {defintion} 1\n"
type_data = "%s%s" % (prepend_str, type_data)
types_path = [os.path.dirname(os.path.realpath(args.input_file))]
type_res = platform.parse_types_from_source(type_data, filename=args.input_file, include_dirs=types_path)
cur_typelib: TypeLibrary = TypeLibrary.new(Architecture[platform.arch.name], args.name)
for name, type_obj in type_res.functions.items():
# log_info(f"Adding function {name}")
cur_typelib.add_named_object(name, type_obj)
for name, type_obj in type_res.types.items():
# log_info(f"Adding type {name}")
cur_typelib.add_named_type(name, type_obj)
cur_typelib.add_platform(platform)
if args.alt_names:
for name in args.alt_names.split(","):
cur_typelib.add_alternate_name(name)
if args.guid:
cur_typelib.guid = args.guid
cur_typelib.finalize()
log_info(f"Wrote type library to {args.output}")
cur_typelib.write_to_file(args.output)
return 0
if __name__ == "__main__":
parser = ArgumentParser(prog='Create Binary Ninja typelibrary from .c/.h file')
parser.add_argument('-i', '--input_file',
type=str,
default=None,
required=True,
help='Path to c-header file to parse')
parser.add_argument('-p', '--platform',
type=str,
default=None,
required=True,
help='Binary Ninja platform to use for parsing: ex "linux-x86_64"')
parser.add_argument('-o', '--output',
type=str,
default=None,
required=True,
help='Path to output file ex: /tmp/test.bntl')
parser.add_argument('-n', '--name',
type=str,
default=None,
required=True,
help='Name for typelibrary ex: "libc.so|kernel32.dll"')
parser.add_argument('-a', '--alt_names',
type=str,
default=None,
required=False,
help='Alternative names used for the typelibrary, comment seperated ex: "libc.so.1,libc.so.6"')
parser.add_argument('-g', '--guid',
type=str,
default=None,
required=False,
help='Guid to assign to the typelibrary: ex "6c873bf0-dd43-49df-8f74-d65376540758"')
parser.add_argument('-d', '--definitions',
type=str,
default=None,
required=False,
help='List of #defines to add to the c-file before processing, comment seperated: eg: "MY_DEF,SECOND_DEF"')
args = parser.parse_args()
sys.exit(main(args)) | import os
import sys
from argparse import ArgumentParser
from binaryninja.platform import Platform
from binaryninja.architecture import Architecture
from binaryninja.typelibrary import TypeLibrary
from binaryninja.log import log_info, log_warn, log_error, log_debug, log_to_stdout, LogLevel
def main(args):
log_to_stdout(LogLevel.InfoLog)
if not os.path.exists(args.input_file):
log_warn(f"input file: {args.input_file} does not exist")
return 1
dir_path = os.path.dirname(os.path.realpath(args.output))
if not os.path.exists(dir_path):
log_warn(f"Output path directory {dir_path} does not exist")
return 1
try:
platform: Platform = Platform[args.platform]
except KeyError:
log_warn(f"'{args.platform}' is not supported binja platform")
return 1
with open(args.input_file) as fd:
type_data = fd.read()
if args.definitions:
prepend_str = ""
for defintion in args.definitions.split(","):
prepend_str += f"#define {defintion} 1\n"
type_data = "%s%s" % (prepend_str, type_data)
types_path = [os.path.dirname(os.path.realpath(args.input_file))]
type_res = platform.parse_types_from_source(type_data, filename=args.input_file, include_dirs=types_path)
cur_typelib: TypeLibrary = TypeLibrary.new(Architecture[platform.arch.name], args.name)
for name, type_obj in type_res.functions.items():
# log_info(f"Adding function {name}")
cur_typelib.add_named_object(name, type_obj)
for name, type_obj in type_res.types.items():
# log_info(f"Adding type {name}")
cur_typelib.add_named_type(name, type_obj)
cur_typelib.add_platform(platform)
if args.alt_names:
for name in args.alt_names.split(","):
cur_typelib.add_alternate_name(name)
if args.guid:
cur_typelib.guid = args.guid
cur_typelib.finalize()
log_info(f"Wrote type library to {args.output}")
cur_typelib.write_to_file(args.output)
return 0
if __name__ == "__main__":
parser = ArgumentParser(prog='Create Binary Ninja typelibrary from .c/.h file')
parser.add_argument('-i', '--input_file',
type=str,
default=None,
required=True,
help='Path to c-header file to parse')
parser.add_argument('-p', '--platform',
type=str,
default=None,
required=True,
help='Binary Ninja platform to use for parsing: ex "linux-x86_64"')
parser.add_argument('-o', '--output',
type=str,
default=None,
required=True,
help='Path to output file ex: /tmp/test.bntl')
parser.add_argument('-n', '--name',
type=str,
default=None,
required=True,
help='Name for typelibrary ex: "libc.so|kernel32.dll"')
parser.add_argument('-a', '--alt_names',
type=str,
default=None,
required=False,
help='Alternative names used for the typelibrary, comment seperated ex: "libc.so.1,libc.so.6"')
parser.add_argument('-g', '--guid',
type=str,
default=None,
required=False,
help='Guid to assign to the typelibrary: ex "6c873bf0-dd43-49df-8f74-d65376540758"')
parser.add_argument('-d', '--definitions',
type=str,
default=None,
required=False,
help='List of #defines to add to the c-file before processing, comment seperated: eg: "MY_DEF,SECOND_DEF"')
args = parser.parse_args()
sys.exit(main(args)) | en | 0.398685 | # log_info(f"Adding function {name}") # log_info(f"Adding type {name}") #defines to add to the c-file before processing, comment seperated: eg: "MY_DEF,SECOND_DEF"') | 2.524837 | 3 |
complete/11 - 20/Problem13/main.py | this-jacob/project-euler | 0 | 6616418 | <reponame>this-jacob/project-euler<filename>complete/11 - 20/Problem13/main.py
def main():
#open the file
f = open("numbers.txt")
huge = 0
numbers = [0 for i in range(100)]
it = 0
for line in f:
numbers[it] = line
it += 1
for each in numbers:
huge += int(each)
print(str(huge)[0:10])
if __name__ == '__main__':
main()
| - 20/Problem13/main.py
def main():
#open the file
f = open("numbers.txt")
huge = 0
numbers = [0 for i in range(100)]
it = 0
for line in f:
numbers[it] = line
it += 1
for each in numbers:
huge += int(each)
print(str(huge)[0:10])
if __name__ == '__main__':
main() | en | 0.442762 | #open the file | 3.805004 | 4 |
twitter_elections/score_sentiment_Twitter.py | verycourt/Elections | 0 | 6616419 | csv_file = pd.DataFrame(pd.read_csv(fname, sep = ',', header = 0, index_col = False))
csv_file.to_json('twitter_sentiments_daily_15_until_22_april_raw.json', orient ='split')
df = pd.read_json('twitter_sentiments_daily_15_until_22_april_raw.json', orient='split')
df['neu'] = pd.to_numeric(df['neu'], errors='coerce')
df['neg'] = pd.to_numeric(df['neg'], errors='coerce')
df['pos'] = pd.to_numeric(df['pos'], errors='coerce')
df = df.fillna(0)
df['score'] = (df['pos']) / (df['neg'] + df['neu'] + df['pos'])
df = df.fillna(0)
df['score'] = df['score'] * df['count']
df_pivot = df.pivot_table(index='date',columns='candidat',values='score').fillna(0)
df_pivot.to_json('twitter_sentiments_daily_15_until_22_april.json', orient='split') | csv_file = pd.DataFrame(pd.read_csv(fname, sep = ',', header = 0, index_col = False))
csv_file.to_json('twitter_sentiments_daily_15_until_22_april_raw.json', orient ='split')
df = pd.read_json('twitter_sentiments_daily_15_until_22_april_raw.json', orient='split')
df['neu'] = pd.to_numeric(df['neu'], errors='coerce')
df['neg'] = pd.to_numeric(df['neg'], errors='coerce')
df['pos'] = pd.to_numeric(df['pos'], errors='coerce')
df = df.fillna(0)
df['score'] = (df['pos']) / (df['neg'] + df['neu'] + df['pos'])
df = df.fillna(0)
df['score'] = df['score'] * df['count']
df_pivot = df.pivot_table(index='date',columns='candidat',values='score').fillna(0)
df_pivot.to_json('twitter_sentiments_daily_15_until_22_april.json', orient='split') | none | 1 | 2.824578 | 3 | |
reset_psw.py | fjacob21/nhlplayoffs | 0 | 6616420 | #!/usr/bin/env python3
import json
import sys
import requests
def reset_psw(server, player, new_psw, root_psw):
url = 'http://' + server + '/nhlplayoffs/api/v2.0/players/' + player + '/reset'
headers = {'content-type': 'application/json'}
data = {'new_psw': new_psw, 'root_psw': root_psw}
r = requests.post(url, data=json.dumps(data), headers=headers)
if not r.ok:
print('Invalid request!!!!')
if r.json()['result']:
print('Reset successful')
else:
print('Invalid parameter')
if __name__ == '__main__':
if len(sys.argv) != 4:
print('reset_psw.py <player> <new_psw> <root_psw>')
exit(1)
player = sys.argv[1]
new_psw = sys.argv[2]
root_psw = sys.argv[3]
server = 'localhost:5000'
# server = 'nhlpool.roblab.net/'
reset_psw(server, player, new_psw, root_psw)
| #!/usr/bin/env python3
import json
import sys
import requests
def reset_psw(server, player, new_psw, root_psw):
url = 'http://' + server + '/nhlplayoffs/api/v2.0/players/' + player + '/reset'
headers = {'content-type': 'application/json'}
data = {'new_psw': new_psw, 'root_psw': root_psw}
r = requests.post(url, data=json.dumps(data), headers=headers)
if not r.ok:
print('Invalid request!!!!')
if r.json()['result']:
print('Reset successful')
else:
print('Invalid parameter')
if __name__ == '__main__':
if len(sys.argv) != 4:
print('reset_psw.py <player> <new_psw> <root_psw>')
exit(1)
player = sys.argv[1]
new_psw = sys.argv[2]
root_psw = sys.argv[3]
server = 'localhost:5000'
# server = 'nhlpool.roblab.net/'
reset_psw(server, player, new_psw, root_psw)
| en | 0.220358 | #!/usr/bin/env python3 # server = 'nhlpool.roblab.net/' | 2.542267 | 3 |
orastorage/model.py | taewanme/orastorage-py | 0 | 6616421 | <filename>orastorage/model.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class Identity:
"""
- 사용자 정보
- Account 정보를 이용하여 인증을 수행
"""
def __init__(self, identity_domain, user_id, password=None):
self._identity_domain = identity_domain
self._user_id = user_id
self._password = password
self._rest_endpoint = 'https://%s.storage.oraclecloud.com' % identity_domain
self._storage_user = 'Storage-%s:%s' % (identity_domain, user_id)
self._account = 'Storage-%s' % identity_domain
def __str__(self):
contents = ['Identity domain: %s' % self._identity_domain,
'user id: %s' % self._user_id,
'Is password saved?: %s' % (self._password is not None),
'REST endpoint: %s' % self._rest_endpoint,
'Storage user: %s' % self._storage_user,
'Account: %s' % self._account]
return '\n'.join(contents)
def get_rest_endpoint(self):
return self._rest_endpoint
def get_identity_domain(self):
return self._identity_domain
def get_password(self):
return self._password
def get_storage_user(self):
return self._storage_user
def get_user_id(self):
return self._user_id
def get_account(self):
return self._account
| <filename>orastorage/model.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class Identity:
"""
- 사용자 정보
- Account 정보를 이용하여 인증을 수행
"""
def __init__(self, identity_domain, user_id, password=None):
self._identity_domain = identity_domain
self._user_id = user_id
self._password = password
self._rest_endpoint = 'https://%s.storage.oraclecloud.com' % identity_domain
self._storage_user = 'Storage-%s:%s' % (identity_domain, user_id)
self._account = 'Storage-%s' % identity_domain
def __str__(self):
contents = ['Identity domain: %s' % self._identity_domain,
'user id: %s' % self._user_id,
'Is password saved?: %s' % (self._password is not None),
'REST endpoint: %s' % self._rest_endpoint,
'Storage user: %s' % self._storage_user,
'Account: %s' % self._account]
return '\n'.join(contents)
def get_rest_endpoint(self):
return self._rest_endpoint
def get_identity_domain(self):
return self._identity_domain
def get_password(self):
return self._password
def get_storage_user(self):
return self._storage_user
def get_user_id(self):
return self._user_id
def get_account(self):
return self._account
| ko | 0.999602 | # -*- coding: utf-8 -*- - 사용자 정보 - Account 정보를 이용하여 인증을 수행 | 2.382117 | 2 |
src/recipe_app/tests.py | OCHIENGDAVIS/recipe-api | 0 | 6616422 | import tempfile
import os
from PIL import Image
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag,Ingredient, Recipe
from recipe_app.serializers import TagSerializer, IngredientSerializer, RecipeSerializer, RecipeDetailSerializer
# Reversed list endpoints for the recipe app, shared by all test cases below.
# NOTE(review): "INGRIDENT_URL" is misspelled but referenced throughout this
# module, so it is left as-is here.
TAGS_URL = reverse('recipe_app:tag-list')
INGRIDENT_URL = reverse('recipe_app:ingredient-list')
RECIPE_URL = reverse('recipe_app:recipe-list')
def image_upload_url(recipe_id):
    """Build the image-upload URL for the recipe with the given primary key."""
    return reverse('recipe_app:recipe-upload-image', args=(recipe_id,))
def detail_url(recipe_id):
    """Build the detail URL for the recipe with the given primary key."""
    return reverse('recipe_app:recipe-detail', args=(recipe_id,))
def sample_tag(user, name='Main Course'):
    """Persist and return a tag owned by *user* (default name: 'Main Course')."""
    return Tag.objects.create(name=name, user=user)
def sample_ingredient(user, name='Cinnamon'):
    """Persist and return an ingredient owned by *user* (default name: 'Cinnamon')."""
    return Ingredient.objects.create(name=name, user=user)
def sample_recipe(user, **params):
    """Persist and return a recipe owned by *user*.

    Any keyword argument in *params* overrides the corresponding default
    field value below.
    """
    fields = {
        'title': 'Sample Recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicTagsAPITest(TestCase):
    """Verify that the tags endpoint rejects unauthenticated access."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An anonymous GET for the tag list is rejected with 401."""
        response = self.client.get(TAGS_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsAPITest(TestCase):
    """Exercise the tags API as an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            email='<EMAIL>', password='<PASSWORD>'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns every stored tag, ordered by name descending."""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')

        response = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only tags belonging to the authenticated user are returned."""
        other_user = get_user_model().objects.create_user('<EMAIL>', 'other')
        Tag.objects.create(user=other_user, name='Fruity')
        own_tag = Tag.objects.create(user=self.user, name='Comfort Food')

        response = self.client.get(TAGS_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_tag.name)

    def test_create_tag_succesfull(self):
        """POSTing a valid name creates the tag for the current user."""
        payload = {'name': 'test tag'}

        self.client.post(TAGS_URL, payload)

        self.assertTrue(
            Tag.objects.filter(user=self.user, name=payload['name']).exists()
        )

    def test_create_test_invalid(self):
        """POSTing an empty name is rejected with 400."""
        response = self.client.post(TAGS_URL, {'name': ''})

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class PublicIngredientAPITest(TestCase):
    """Tests for the ingredients API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An unauthenticated request for ingredients is rejected with 401."""
        response = self.client.get(INGRIDENT_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITest(TestCase):
    """Tests for the ingredients API as an authenticated user."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client.force_authenticate(self.user)

    def test_ingriedient_list(self):
        """Listing ingredients returns all of them, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name='Kale')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGRIDENT_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """Only ingredients belonging to the authenticated user are returned."""
        user2 = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
        res = self.client.get(INGRIDENT_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredients_successfull(self):
        """POSTing a valid name creates the ingredient for the current user."""
        # Fixed: local was misspelled "paylaod"; renamed for consistency with
        # the rest of this module.
        payload = {'name': 'cabbage'}
        self.client.post(INGRIDENT_URL, payload)
        exists = Ingredient.objects.filter(user=self.user, name=payload['name']).exists()
        self.assertTrue(exists)

    def test_ingredients_invalid(self):
        """POSTing an empty name is rejected with 400."""
        payload = {'name': ''}
        res = self.client.post(INGRIDENT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
class PublicRecipeAPITest(TestCase):
    """Tests for the recipe API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """An unauthenticated request for recipes is rejected with 401."""
        response = self.client.get(RECIPE_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITest(TestCase):
    """Tests for the recipe API as an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Listing recipes returns every recipe, newest id first."""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        response = self.client.get(RECIPE_URL)
        expected = RecipeSerializer(Recipe.objects.all().order_by('-id'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_recipes_limited_to_user(self):
        """Only the authenticated user's recipes are returned."""
        other_user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        sample_recipe(user=other_user)
        sample_recipe(user=self.user)
        response = self.client.get(RECIPE_URL)
        expected = RecipeSerializer(Recipe.objects.filter(user=self.user), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data, expected.data)

    def test_view_recipe_detail(self):
        """The detail endpoint returns the serialized recipe with its relations."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        response = self.client.get(detail_url(recipe.id))
        expected = RecipeDetailSerializer(recipe)
        self.assertEqual(response.data, expected.data)

    def test_create_basic_recipe(self):
        """POSTing the required fields creates a recipe."""
        payload = {
            'title':'chcoclate cheesecake',
            'time_minutes': 30,
            'price':5.00,
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        for field in payload:
            self.assertEqual(payload[field], getattr(recipe, field))

    def test_create_recipe_with_tags(self):
        """POSTing tag ids attaches those tags to the new recipe."""
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Dessert')
        payload = {
            'title':'Avocado lime cheesecake',
            'tags': [tag1.id, tag2.id],
            'time_minutes':60,
            'price':20.00
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        attached = Recipe.objects.get(id=response.data['id']).tags.all()
        self.assertEqual(attached.count(), 2)
        self.assertIn(tag1, attached)
        self.assertIn(tag2, attached)

    def test_create_recipe_with_ingredients(self):
        """POSTing ingredient ids attaches those ingredients to the new recipe."""
        ingredient1 = sample_ingredient(user=self.user, name='Prawns')
        ingredient2 = sample_ingredient(user=self.user, name='Ginger')
        payload = {
            'title': 'Thai prawn red curry',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes':20,
            'price': 7.00
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        attached = Recipe.objects.get(id=response.data['id']).ingredients.all()
        self.assertEqual(attached.count(), 2)
        self.assertIn(ingredient1, attached)
        self.assertIn(ingredient2, attached)

    def test_partial_update_recipe(self):
        """PATCH replaces only the submitted fields (title and tags here)."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {
            'title':'Chicken tikka',
            'tags':[new_tag.id]
        }
        self.client.patch(detail_url(recipe.id), payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        attached = recipe.tags.all()
        self.assertEqual(len(attached), 1)
        self.assertIn(new_tag, attached)

    def test_full_update_recipe(self):
        """PUT replaces the whole recipe; omitted relations are cleared."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spagheti carbonara',
            'time_minutes': 25,
            'price': 5.00,
        }
        self.client.put(detail_url(recipe.id), payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        self.assertEqual(len(recipe.tags.all()), 0)
class RecipeImageUploadTest(TestCase):
    """Image upload and recipe/tag/ingredient filtering tests (authenticated)."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # Delete any uploaded file so test runs do not accumulate media files.
        self.recipe.image.delete()

    def test_upload_image(self):
        """Uploading a valid JPEG stores the file and returns 200 with its URL."""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10,10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image':ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Uploading something that is not an image is rejected with 400."""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image':'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """Filtering by tag ids returns only recipes carrying those tags."""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan Curry')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and Chips')
        res = self.client.get(RECIPE_URL, {'tags':f'{tag1.id}, {tag2.id}'})
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # Fixed: compare serialized data. A serializer instance can never be
        # an element of res.data, so the old assertNotIn(serializer3, ...)
        # could not fail.
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredients(self):
        """Filtering by ingredient ids returns only recipes using them."""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on Toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta Cheeese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and Mushrooms')
        res = self.client.get(
            RECIPE_URL,
            {'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # Fixed: same serializer-vs-data bug as above.
        self.assertNotIn(serializer3.data, res.data)

    def test_retrieve_tags_assigned_to_recipes(self):
        """assigned_only=1 limits the tag list to tags used by a recipe."""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on Toast',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only':1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """assigned_only=1 limits the ingredient list to ones used by a recipe."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            price=10.00,
            time_minutes=5,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGRIDENT_URL, {'assigned_only': 1 })
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
| import tempfile
import os
from PIL import Image
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag,Ingredient, Recipe
from recipe_app.serializers import TagSerializer, IngredientSerializer, RecipeSerializer, RecipeDetailSerializer
# Endpoints under test, resolved from the recipe_app router.
TAGS_URL = reverse('recipe_app:tag-list')
# NOTE(review): constant name is misspelled ("INGRIDENT") but kept — it is
# referenced throughout this module.
INGRIDENT_URL = reverse('recipe_app:ingredient-list')
RECIPE_URL = reverse('recipe_app:recipe-list')
def image_upload_url(recipe_id):
    """Return the image-upload URL for the recipe with the given id."""
    return reverse('recipe_app:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
    """Return the detail URL for the recipe with the given id."""
    return reverse('recipe_app:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
    """Create and return a sample tag owned by ``user``."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient owned by ``user``."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample recipe.

    Any field passed in ``params`` overrides the built-in defaults.
    """
    fields = dict(title='Sample Recipe', time_minutes=10, price=5.00)
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicTagsAPITest(TestCase):
    """Tests for the tags API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An unauthenticated request for tags is rejected with 401."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsAPITest(TestCase):
    """Tests for the tags API as an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(email='<EMAIL>', password='<PASSWORD>')
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns every tag, ordered by name descending."""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        response = self.client.get(TAGS_URL)
        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only the authenticated user's tags are returned."""
        other_user = get_user_model().objects.create_user('<EMAIL>', 'other')
        Tag.objects.create(user=other_user, name='Fruity')
        own_tag = Tag.objects.create(user=self.user, name='Comfort Food')
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_tag.name)

    def test_create_tag_succesfull(self):
        """POSTing a valid name creates the tag for the current user."""
        payload = {'name':'test tag'}
        self.client.post(TAGS_URL, payload)
        self.assertTrue(
            Tag.objects.filter(user=self.user, name=payload['name']).exists()
        )

    def test_create_test_invalid(self):
        """POSTing an empty name is rejected with 400."""
        response = self.client.post(TAGS_URL, {'name': ''})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class PublicIngredientAPITest(TestCase):
    """Tests for the ingredients API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An unauthenticated request for ingredients is rejected with 401."""
        response = self.client.get(INGRIDENT_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITest(TestCase):
    """Tests for the ingredients API as an authenticated user."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client.force_authenticate(self.user)

    def test_ingriedient_list(self):
        """Listing ingredients returns all of them, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name='Kale')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGRIDENT_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """Only ingredients belonging to the authenticated user are returned."""
        user2 = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
        res = self.client.get(INGRIDENT_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredients_successfull(self):
        """POSTing a valid name creates the ingredient for the current user."""
        # Fixed: local was misspelled "paylaod"; renamed for consistency with
        # the rest of this module.
        payload = {'name': 'cabbage'}
        self.client.post(INGRIDENT_URL, payload)
        exists = Ingredient.objects.filter(user=self.user, name=payload['name']).exists()
        self.assertTrue(exists)

    def test_ingredients_invalid(self):
        """POSTing an empty name is rejected with 400."""
        payload = {'name': ''}
        res = self.client.post(INGRIDENT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
class PublicRecipeAPITest(TestCase):
    """Tests for the recipe API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """An unauthenticated request for recipes is rejected with 401."""
        response = self.client.get(RECIPE_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITest(TestCase):
    """Tests for the recipe API as an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Listing recipes returns every recipe, newest id first."""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        response = self.client.get(RECIPE_URL)
        expected = RecipeSerializer(Recipe.objects.all().order_by('-id'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_recipes_limited_to_user(self):
        """Only the authenticated user's recipes are returned."""
        other_user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        sample_recipe(user=other_user)
        sample_recipe(user=self.user)
        response = self.client.get(RECIPE_URL)
        expected = RecipeSerializer(Recipe.objects.filter(user=self.user), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data, expected.data)

    def test_view_recipe_detail(self):
        """The detail endpoint returns the serialized recipe with its relations."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        response = self.client.get(detail_url(recipe.id))
        expected = RecipeDetailSerializer(recipe)
        self.assertEqual(response.data, expected.data)

    def test_create_basic_recipe(self):
        """POSTing the required fields creates a recipe."""
        payload = {
            'title':'chcoclate cheesecake',
            'time_minutes': 30,
            'price':5.00,
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        for field in payload:
            self.assertEqual(payload[field], getattr(recipe, field))

    def test_create_recipe_with_tags(self):
        """POSTing tag ids attaches those tags to the new recipe."""
        tag1 = sample_tag(user=self.user, name='Vegan')
        tag2 = sample_tag(user=self.user, name='Dessert')
        payload = {
            'title':'Avocado lime cheesecake',
            'tags': [tag1.id, tag2.id],
            'time_minutes':60,
            'price':20.00
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        attached = Recipe.objects.get(id=response.data['id']).tags.all()
        self.assertEqual(attached.count(), 2)
        self.assertIn(tag1, attached)
        self.assertIn(tag2, attached)

    def test_create_recipe_with_ingredients(self):
        """POSTing ingredient ids attaches those ingredients to the new recipe."""
        ingredient1 = sample_ingredient(user=self.user, name='Prawns')
        ingredient2 = sample_ingredient(user=self.user, name='Ginger')
        payload = {
            'title': 'Thai prawn red curry',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes':20,
            'price': 7.00
        }
        response = self.client.post(RECIPE_URL, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        attached = Recipe.objects.get(id=response.data['id']).ingredients.all()
        self.assertEqual(attached.count(), 2)
        self.assertIn(ingredient1, attached)
        self.assertIn(ingredient2, attached)

    def test_partial_update_recipe(self):
        """PATCH replaces only the submitted fields (title and tags here)."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')
        payload = {
            'title':'Chicken tikka',
            'tags':[new_tag.id]
        }
        self.client.patch(detail_url(recipe.id), payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        attached = recipe.tags.all()
        self.assertEqual(len(attached), 1)
        self.assertIn(new_tag, attached)

    def test_full_update_recipe(self):
        """PUT replaces the whole recipe; omitted relations are cleared."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spagheti carbonara',
            'time_minutes': 25,
            'price': 5.00,
        }
        self.client.put(detail_url(recipe.id), payload)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        self.assertEqual(len(recipe.tags.all()), 0)
class RecipeImageUploadTest(TestCase):
    """Image upload and recipe/tag/ingredient filtering tests (authenticated)."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user('<EMAIL>', '<PASSWORD>')
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # Delete any uploaded file so test runs do not accumulate media files.
        self.recipe.image.delete()

    def test_upload_image(self):
        """Uploading a valid JPEG stores the file and returns 200 with its URL."""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10,10))
            img.save(ntf, format='JPEG')
            ntf.seek(0)
            res = self.client.post(url, {'image':ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Uploading something that is not an image is rejected with 400."""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image':'notimage'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_filter_recipes_by_tags(self):
        """Filtering by tag ids returns only recipes carrying those tags."""
        recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
        recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
        tag1 = sample_tag(user=self.user, name='Vegan Curry')
        tag2 = sample_tag(user=self.user, name='Vegetarian')
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Fish and Chips')
        res = self.client.get(RECIPE_URL, {'tags':f'{tag1.id}, {tag2.id}'})
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # Fixed: compare serialized data. A serializer instance can never be
        # an element of res.data, so the old assertNotIn(serializer3, ...)
        # could not fail.
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredients(self):
        """Filtering by ingredient ids returns only recipes using them."""
        recipe1 = sample_recipe(user=self.user, title='Posh beans on Toast')
        recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
        ingredient1 = sample_ingredient(user=self.user, name='Feta Cheeese')
        ingredient2 = sample_ingredient(user=self.user, name='Chicken')
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Steak and Mushrooms')
        res = self.client.get(
            RECIPE_URL,
            {'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        # Fixed: same serializer-vs-data bug as above.
        self.assertNotIn(serializer3.data, res.data)

    def test_retrieve_tags_assigned_to_recipes(self):
        """assigned_only=1 limits the tag list to tags used by a recipe."""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on Toast',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only':1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """assigned_only=1 limits the ingredient list to ones used by a recipe."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            price=10.00,
            time_minutes=5,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGRIDENT_URL, {'assigned_only': 1 })
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
| en | 0.795831 | Return url for recipe image upload Return recipe detail url Create and return a sample tag Create and return a sample ingredient Create and return a simple recipe Tests the publicly available tags API Test log in required for retrieving tags Test the authorised tags API Tests retrieving tags Tests that tags returned are fot the authenticated user Test creating a new tag Test creating a tag with an invalid name Tests the publicly available ingredients API Tests the login is required to access the endpoint Test private ingredients API Test retrieving a list of ingredients Test that only ingredients for the authenticated user get returned Tests creating a new ingredient Tests creating an invalid ingredients fails Test unauthenticated recipe API access Test than authentication ie required Test unauthorised recipe API access Tets getting a list of recipes Test getting recipes for user Test viewing a recipe detail Test creating a recipe Test creating a recipe with tags Test creating a recipe with ingredients Test Updating a recipe with patch Test updating a recipe with put Image upload tests Tests uploading an image to recipe Tests uploading a bad image Test returning recipes with specific tags Tests returning a recipe with specific ingredients Test fiiltering tags by those assigned to recipes Test filtering ingredients by those assigned o recipes | 2.445512 | 2 |
api/collector/data.py | lucasblazzi/insights-project | 0 | 6616423 | <gh_stars>0
import pymongo
import logging
import pandas as pd
# Shared MongoDB client; "mongo_db" is presumably the docker-compose service
# hostname — confirm against the deployment config.
mongo = pymongo.MongoClient("mongodb://admin:admin@mongo_db:27017/")
analytics_db = mongo["analytics"]
logger = logging.getLogger("app")
def get_data_store(collection_name):
    """Load all documents of an analytics collection into a DataFrame.

    - collection_name: name of the MongoDB collection to read.
    Returns a pandas DataFrame (documents without their ``_id`` fields),
    or None if the read fails (the error is logged).
    """
    try:
        collection = analytics_db[collection_name]
        data = collection.find({}, {'_id': False})
        # Log the collection name rather than the cursor's repr.
        logger.info("GETTING ANALYTIC %s", collection_name)
        return pd.DataFrame(data)
    except Exception:
        # logger.exception keeps the traceback; logger.error(e) dropped it.
        logger.exception("Failed to read collection %s", collection_name)
def clean_collection(collection_name):
    """Drop an analytics collection entirely.

    Returns a status dict on success, or None if the drop fails
    (the error is logged).
    """
    try:
        collection = analytics_db[collection_name]
        collection.drop()
        return {"status": f"Collection {collection_name} cleaned"}
    except Exception:
        # logger.exception keeps the traceback; logger.error(e) dropped it.
        logger.exception("Failed to drop collection %s", collection_name)
import logging
import pandas as pd
mongo = pymongo.MongoClient("mongodb://admin:admin@mongo_db:27017/")
analytics_db = mongo["analytics"]
logger = logging.getLogger("app")
def get_data_store(collection_name):
try:
collection = analytics_db[collection_name]
data = collection.find({}, {'_id': False})
logger.info(f"GETTING ANALYTIC {data}")
return pd.DataFrame(data)
except Exception as e:
logger.error(e)
def clean_collection(collection_name):
try:
collection = analytics_db[collection_name]
collection.drop()
return {"status": f"Collection {collection_name} cleaned"}
except Exception as e:
logger.error(e) | none | 1 | 2.640237 | 3 | |
manifold/serialPort.py | Ligcox/SUES_Birdiebot_gcxl | 0 | 6616424 | <filename>manifold/serialPort.py
import threading
from sender import *
from config import *
# Serial port object; PORTX/BPS/TIMEX are supplied by `config` (star import).
SER = serial.Serial(PORTX, BPS, timeout=TIMEX)
def send_LX(s):
    """
    Send data to the LX (LingXiao) flight controller over the serial port.

    - s: byte sequence (anything accepted by bytes()) to transmit.
    """
    print(s)
    s = bytes(s)
    ser = SER.write(s)  # write() returns the number of bytes written
    print("写总字节数:{}".format(ser))
def close_post():
    """Close the shared serial port."""
    SER.close()
class SerialThread(threading.Thread):
    """Listener thread that continuously parses frames arriving on the serial port.

    Frame layout (inferred from the offsets below — confirm against the
    protocol spec): 0xAA 0xAF <func> <len> <len payload bytes> <checksum>.
    Every complete frame is forwarded to LX_Receiver().
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.threadID = 0
        self.name = "串口监听器"
    def run(self):
        print ("开始线程:" + self.name)
        self.reception()
        print ("退出线程:" + self.name)
    def reception(self):
        """Read 256-byte chunks and scan each one for 0xAA/0xAF framed packets."""
        while True:
            LX_data = SER.read(256)
            dataEND = 0
            try:
                while True:
                    # Locate the next candidate frame head (0xAA).
                    dataHEAD = LX_data.index(170, dataEND)
                    d_addr = dataHEAD + 1
                    if LX_data[d_addr] == 175:  # 0xAF confirms a real frame
                        dataLEN = LX_data[d_addr+2]
                        dataEND = d_addr+4+dataLEN
                        info = LX_data[d_addr-1: dataEND]
                        LX_Receiver(info)
                    else:
                        # Fixed: resume the search AFTER the false head byte.
                        # Resuming at dataHEAD made index() return the same
                        # position forever, spinning this thread.
                        dataEND = dataHEAD + 1
            except Exception:
                # index() raises ValueError when no further 0xAA exists, and a
                # truncated frame raises IndexError; either ends this chunk.
                # Known limitation: a frame split across the 256-byte read
                # boundary is dropped.
                pass
| <filename>manifold/serialPort.py
import threading
from sender import *
from config import *
# Serial port object; PORTX/BPS/TIMEX are supplied by `config` (star import).
SER = serial.Serial(PORTX, BPS, timeout=TIMEX)
def send_LX(s):
    """
    Send data to the LX (LingXiao) flight controller over the serial port.

    - s: byte sequence (anything accepted by bytes()) to transmit.
    """
    print(s)
    s = bytes(s)
    ser = SER.write(s)  # write() returns the number of bytes written
    print("写总字节数:{}".format(ser))
def close_post():
    """Close the shared serial port."""
    SER.close()
class SerialThread(threading.Thread):
    """Listener thread that continuously parses frames arriving on the serial port.

    Frame layout (inferred from the offsets below — confirm against the
    protocol spec): 0xAA 0xAF <func> <len> <len payload bytes> <checksum>.
    Every complete frame is forwarded to LX_Receiver().
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.threadID = 0
        self.name = "串口监听器"
    def run(self):
        print ("开始线程:" + self.name)
        self.reception()
        print ("退出线程:" + self.name)
    def reception(self):
        """Read 256-byte chunks and scan each one for 0xAA/0xAF framed packets."""
        while True:
            LX_data = SER.read(256)
            dataEND = 0
            try:
                while True:
                    # Locate the next candidate frame head (0xAA).
                    dataHEAD = LX_data.index(170, dataEND)
                    d_addr = dataHEAD + 1
                    if LX_data[d_addr] == 175:  # 0xAF confirms a real frame
                        dataLEN = LX_data[d_addr+2]
                        dataEND = d_addr+4+dataLEN
                        info = LX_data[d_addr-1: dataEND]
                        LX_Receiver(info)
                    else:
                        # Fixed: resume the search AFTER the false head byte.
                        # Resuming at dataHEAD made index() return the same
                        # position forever, spinning this thread.
                        dataEND = dataHEAD + 1
            except Exception:
                # index() raises ValueError when no further 0xAA exists, and a
                # truncated frame raises IndexError; either ends this chunk.
                # Known limitation: a frame split across the 256-byte read
                # boundary is dropped.
                pass
| zh | 0.949489 | # 串口对象 发送数据至凌霄 串口对象监听器 # temp = [] # 后面完善吧, # 最后超出255的部分数据会丢失 | 2.740143 | 3 |
03-DataWranglingWithMongoDB/P02-WrangleOpenStreetMapData/consistency_audit.py | ccampguilhem/Udacity-DataAnalyst | 1 | 6616425 | """
Data consistency audit object in a form of a callback for SAX content handler.
This audit class checks existence of nodes and ways referenced by ways or relations. Instead of reporting each
inconsistency, the audit calculates proportion of ways and relations defined without the elements they refer to.
It is possible to request ids of missing nodes, ways and relations.
"""
class DataConsistencyAudit(object):
    """SAX callback that audits referential consistency of an OSM extract.

    Records which node/way/relation ids have been seen, then checks that each
    way and relation only references entities present in the dataset.  Rather
    than reporting every broken reference individually, it accumulates counts
    and exposes the ids of missing entities.

    Note: the former free-standing strings placed *before* each ``def`` were
    plain expression statements, not docstrings; they have been converted to
    real docstrings inside the methods.
    """

    def __init__(self):
        self._nonconformities = [ ]
        self._known_nodes = set()
        self._known_ways = set()
        self._known_relations = set()
        self._ways_ok = 0
        self._ways_ko = 0
        self._relations_ok = 0
        self._relations_ko = 0
        self._missing_nodes = set()
        self._missing_ways = set()
        self._missing_relations = set()

    def startEventCallback(self, stack, locator):
        """Record the id of a node/way/relation element when it is opened.

        - stack: stack of elements being read; the last entry is a tuple whose
          2nd item is the tag name and 3rd the attribute mapping.
        - locator: locator object from the SAX parser (unused here).
        """
        name = stack[-1][1]
        attrs = stack[-1][2]
        try:
            element_id = attrs["id"]
        except KeyError:
            element_id = None
        if name == "node":
            self._known_nodes.add(int(element_id))
        elif name == "way":
            self._known_ways.add(int(element_id))
        elif name == "relation":
            self._known_relations.add(int(element_id))

    def endEventCallback(self, name, children, locator):
        """Check a completed way/relation against the ids collected so far.

        - name: element name
        - children: element children as (tag, attributes) tuples
        - locator: locator object from the SAX parser (unused here)
        """
        if name == "way":
            error = False
            for child in children:
                if child[0] == "nd":
                    node_id = int(child[1]["ref"])
                    if node_id not in self._known_nodes:
                        self._missing_nodes.add(node_id)
                        error = True
                        break
            if error:
                self._ways_ko += 1
            else:
                self._ways_ok += 1
        elif name == "relation":
            error = False
            for child in children:
                if child[0] == "member":
                    member_type = child[1]["type"]
                    element_id = int(child[1]["ref"])
                    if member_type == "node":
                        if element_id not in self._known_nodes:
                            error = True
                            self._missing_nodes.add(element_id)
                            break
                    elif member_type == "way":
                        if element_id not in self._known_ways:
                            error = True
                            self._missing_ways.add(element_id)
                            break
                    elif member_type == "relation":
                        if element_id not in self._known_relations:
                            error = True
                            self._missing_relations.add(element_id)
                            break
            if error:
                self._relations_ko += 1
            else:
                self._relations_ok += 1

    def getNonconformities(self):
        """Return nonconformities as (audit type, description) tuples.

        Fixed: the list is now rebuilt on every call; previously each call
        appended the same messages again, so calling it twice duplicated
        every entry.
        """
        self._nonconformities = []
        if self._ways_ko > 0:
            count = self._ways_ko + self._ways_ok
            message = '{} ways refer to non-present entities out of {} ({:.1f}%)'.format(self._ways_ko,
                    count, 100. * self._ways_ko / float(count))
            self._nonconformities.append(('Consistency', message))
        if self._relations_ko > 0:
            count = self._relations_ko + self._relations_ok
            message = '{} relations refer to non-present entities out of {} ({:.1f}%)'.format(self._relations_ko,
                    count, 100. * self._relations_ko / float(count))
            self._nonconformities.append(('Consistency', message))
        return self._nonconformities

    def getMissingNodes(self):
        """Return the set of node ids referenced but never defined."""
        return self._missing_nodes

    def getMissingWays(self):
        """Return the set of way ids referenced but never defined."""
        return self._missing_ways

    def getMissingRelations(self):
        """Return the set of relation ids referenced but never defined."""
        return self._missing_relations
class DataConsistencyAudit(object):
    """Data consistency audit object in a form of a callback for SAX content handler.

    This audit class checks existence of nodes and ways referenced by ways or
    relations. Instead of reporting each inconsistency, the audit calculates
    the proportion of ways and relations defined without the elements they
    refer to. It is possible to request ids of missing nodes, ways and
    relations.
    """

    def __init__(self):
        """Constructor."""
        self._nonconformities = []
        # Ids of elements defined so far; references are checked against these.
        self._known_nodes = set()
        self._known_ways = set()
        self._known_relations = set()
        # Counters of consistent / inconsistent ways and relations.
        self._ways_ok = 0
        self._ways_ko = 0
        self._relations_ok = 0
        self._relations_ko = 0
        # Ids referenced by some way/relation but never defined in the dataset.
        self._missing_nodes = set()
        self._missing_ways = set()
        self._missing_relations = set()

    def startEventCallback(self, stack, locator):
        """Method called back when a start event is encountered.

        Records the id of every node/way/relation element so that later
        references can be validated.

        - stack: stack of elements being read; stack[-1] is (depth, name, attrs)
        - locator: locator object from SAX parser (unused)
        """
        name = stack[-1][1]
        attrs = stack[-1][2]
        try:
            element_id = attrs["id"]
        except KeyError:
            # An element without an id cannot be referenced later; recording
            # nothing here also avoids int(None) raising TypeError below.
            return
        if name == "node":
            self._known_nodes.add(int(element_id))
        elif name == "way":
            self._known_ways.add(int(element_id))
        elif name == "relation":
            self._known_relations.add(int(element_id))

    def endEventCallback(self, name, children, locator):
        """Method called back when an end event is encountered.

        Classifies each closed way/relation as consistent (all referenced
        elements already defined) or inconsistent (at least one reference to
        an unknown element; the first missing id is recorded).

        - name: element name
        - children: element children as (child_name, child_attrs, ...) tuples
        - locator: locator object from SAX parser (unused)
        """
        if name == "way":
            error = False
            for child in children:
                if child[0] == "nd":
                    node_id = int(child[1]["ref"])
                    if node_id not in self._known_nodes:
                        self._missing_nodes.add(node_id)
                        error = True
                        break
            if error:
                self._ways_ko += 1
            else:
                self._ways_ok += 1
        elif name == "relation":
            error = False
            # Map a member type to its (known ids, missing ids) sets; member
            # types other than these three are ignored, as before.
            targets = {
                "node": (self._known_nodes, self._missing_nodes),
                "way": (self._known_ways, self._missing_ways),
                "relation": (self._known_relations, self._missing_relations),
            }
            for child in children:
                if child[0] == "member":
                    member_type = child[1]["type"]
                    element_id = int(child[1]["ref"])
                    if member_type in targets:
                        known, missing = targets[member_type]
                        if element_id not in known:
                            missing.add(element_id)
                            error = True
                            break
            if error:
                self._relations_ko += 1
            else:
                self._relations_ok += 1

    def getNonconformities(self):
        """Return nonconformities.

        return: list of tuples (type of audit, nonconformity description)
        """
        # Rebuild the list on every call so that calling this method twice
        # does not append duplicate entries to self._nonconformities.
        self._nonconformities = []
        if self._ways_ko > 0:
            count = self._ways_ko + self._ways_ok
            message = '{} ways refer to non-present entities out of {} ({:.1f}%)'.format(
                self._ways_ko, count, 100. * self._ways_ko / float(count))
            self._nonconformities.append(('Consistency', message))
        if self._relations_ko > 0:
            count = self._relations_ko + self._relations_ok
            message = '{} relations refer to non-present entities out of {} ({:.1f}%)'.format(
                self._relations_ko, count, 100. * self._relations_ko / float(count))
            self._nonconformities.append(('Consistency', message))
        return self._nonconformities

    def getMissingNodes(self):
        """Return a list of all missing nodes in dataset.

        - return: set of nodes id
        """
        return self._missing_nodes

    def getMissingWays(self):
        """Return a list of all missing ways in dataset.

        - return: set of ways id
        """
        return self._missing_ways

    def getMissingRelations(self):
        """Return a list of all missing relations in dataset.

        - return: set of relations id
        """
        return self._missing_relations
| en | 0.820738 | Data consistency audit object in a form of a callback for SAX content handler. This audit class checks existence of nodes and ways referenced by ways or relations. Instead of reporting each inconsistency, the audit calculates proportion of ways and relations defined without the elements they refer to. It is possible to request ids of missing nodes, ways and relations. Constructor. Method called back when a start event is encountered. - stack: stack of elements being read - locator: locator object from SAX parser #node element #way element #relation element Method called back when an end event is encountered. - name: element name - children: element children - locator: locator object from SAX parser #way element #relation element Return nonconformities. return: list of tuple (type of audit, nonconformity description) Return a list of all missing nodes in dataset. - return: set of nodes id Return a list of all missing ways in dataset. - return: set of ways id Return a list of all missing relations in dataset. - return: set of relations id | 2.687932 | 3 |
idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py | aladshaw3/idaes-pse | 0 | 6616426 | <reponame>aladshaw3/idaes-pse
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Plate Heat Exchnager unit model.
Author: <NAME>
"""
import pytest
from pyomo.environ import (
check_optimal_termination,
ConcreteModel,
Param,
units as pyunits,
value,
)
from idaes.core import FlowsheetBlock
from idaes.models_extra.column_models.plate_heat_exchanger import (
PlateHeatExchanger as PHE,
)
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterBlock,
)
from idaes.models_extra.column_models.properties.MEA_solvent import (
configuration as aqueous_mea,
)
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core.solvers import get_solver
from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent
# -----------------------------------------------------------------------------
# Get default solver for testing
solver = get_solver()
@pytest.mark.unit
def test_config():
    """Build a 4-pass PHE on a fresh flowsheet and check its config length."""
    model = ConcreteModel()
    model.fs = FlowsheetBlock(default={"dynamic": False})
    model.fs.hotside_properties = GenericParameterBlock(default=aqueous_mea)
    model.fs.coldside_properties = GenericParameterBlock(default=aqueous_mea)
    phe_options = {
        "passes": 4,
        "channels_per_pass": 12,
        "number_of_divider_plates": 2,
        "hot_side": {"property_package": model.fs.hotside_properties},
        "cold_side": {"property_package": model.fs.coldside_properties},
    }
    model.fs.unit = PHE(default=phe_options)
    # The unit model is expected to expose exactly 7 config arguments.
    assert len(model.fs.unit.config) == 7
# -----------------------------------------------------------------------------
class TestPHE(object):
    """Integration tests for the PlateHeatExchanger unit model.

    The class-scoped ``phe`` fixture builds one 4-pass flowsheet that is
    reused by all tests: construction, unit consistency, degrees of freedom,
    initialization, solving, regression values and mass/energy balances.
    """

    @pytest.fixture(scope="class")
    def phe(self):
        """Build the shared PHE flowsheet with fixed inlet states and geometry."""
        m = ConcreteModel()
        m.fs = FlowsheetBlock(default={"dynamic": False})
        m.fs.hotside_properties = GenericParameterBlock(default=aqueous_mea)
        m.fs.coldside_properties = GenericParameterBlock(default=aqueous_mea)
        m.fs.unit = PHE(
            default={
                "passes": 4,
                "channels_per_pass": 12,
                "number_of_divider_plates": 2,
                "hot_side": {"property_package": m.fs.hotside_properties},
                "cold_side": {"property_package": m.fs.coldside_properties},
            }
        )
        # hot fluid
        m.fs.unit.hot_inlet.flow_mol[0].fix(60.54879)
        m.fs.unit.hot_inlet.temperature[0].fix(392.23)
        m.fs.unit.hot_inlet.pressure[0].fix(202650)
        m.fs.unit.hot_inlet.mole_frac_comp[0, "CO2"].fix(0.0158)
        m.fs.unit.hot_inlet.mole_frac_comp[0, "H2O"].fix(0.8747)
        m.fs.unit.hot_inlet.mole_frac_comp[0, "MEA"].fix(0.1095)
        # cold fluid
        m.fs.unit.cold_inlet.flow_mol[0].fix(63.01910)
        m.fs.unit.cold_inlet.temperature[0].fix(326.36)
        m.fs.unit.cold_inlet.pressure[0].fix(202650)
        m.fs.unit.cold_inlet.mole_frac_comp[0, "CO2"].fix(0.0414)
        m.fs.unit.cold_inlet.mole_frac_comp[0, "H2O"].fix(0.8509)
        m.fs.unit.cold_inlet.mole_frac_comp[0, "MEA"].fix(0.1077)
        # Fix unit geometry - default values should be correct
        m.fs.unit.plate_length.fix()
        m.fs.unit.plate_width.fix()
        m.fs.unit.plate_thickness.fix()
        m.fs.unit.plate_pact_length.fix()
        m.fs.unit.port_diameter.fix()
        m.fs.unit.plate_therm_cond.fix()
        m.fs.unit.area.fix()
        return m

    @pytest.mark.build
    @pytest.mark.unit
    def test_build(self, phe):
        """Check that all four ports, their variables and key Params exist."""
        assert hasattr(phe.fs.unit, "hot_inlet")
        assert len(phe.fs.unit.hot_inlet.vars) == 4
        assert hasattr(phe.fs.unit.hot_inlet, "flow_mol")
        assert hasattr(phe.fs.unit.hot_inlet, "mole_frac_comp")
        assert hasattr(phe.fs.unit.hot_inlet, "temperature")
        assert hasattr(phe.fs.unit.hot_inlet, "pressure")
        assert hasattr(phe.fs.unit, "hot_outlet")
        assert len(phe.fs.unit.hot_outlet.vars) == 4
        assert hasattr(phe.fs.unit.hot_outlet, "flow_mol")
        assert hasattr(phe.fs.unit.hot_outlet, "mole_frac_comp")
        assert hasattr(phe.fs.unit.hot_outlet, "temperature")
        assert hasattr(phe.fs.unit.hot_outlet, "pressure")
        assert hasattr(phe.fs.unit, "cold_inlet")
        assert len(phe.fs.unit.cold_inlet.vars) == 4
        assert hasattr(phe.fs.unit.cold_inlet, "flow_mol")
        assert hasattr(phe.fs.unit.cold_inlet, "mole_frac_comp")
        assert hasattr(phe.fs.unit.cold_inlet, "temperature")
        assert hasattr(phe.fs.unit.cold_inlet, "pressure")
        assert hasattr(phe.fs.unit, "cold_outlet")
        assert len(phe.fs.unit.cold_outlet.vars) == 4
        assert hasattr(phe.fs.unit.cold_outlet, "flow_mol")
        assert hasattr(phe.fs.unit.cold_outlet, "mole_frac_comp")
        assert hasattr(phe.fs.unit.cold_outlet, "temperature")
        assert hasattr(phe.fs.unit.cold_outlet, "pressure")
        assert hasattr(phe.fs.unit.cold_side, "deltaP")
        assert hasattr(phe.fs.unit.hot_side, "deltaP")
        assert isinstance(phe.fs.unit.number_of_passes, Param)
        assert isinstance(phe.fs.unit.channels_per_pass, Param)

    @pytest.mark.component
    def test_units(self, phe):
        """Reynolds numbers must be dimensionless; whole model unit-consistent."""
        assert_units_equivalent(phe.fs.unit.Re_hot[0], pyunits.dimensionless)
        assert_units_equivalent(phe.fs.unit.Re_cold[0], pyunits.dimensionless)
        assert_units_consistent(phe)

    @pytest.mark.unit
    def test_dof(self, phe):
        """The fully specified flowsheet must have zero degrees of freedom."""
        assert degrees_of_freedom(phe) == 0

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_initialize(self, phe):
        """Run the standard initialization routine with an estimated duty."""
        initialization_tester(
            phe, duty=(245000, pyunits.W), optarg={"bound_push": 1e-8, "mu_init": 1e-8}
        )

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, phe):
        """Solve the flowsheet and require an optimal termination status."""
        results = solver.solve(phe)
        # Check for optimal solution
        assert check_optimal_termination(results)

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, phe):
        """Compare the solved state against stored regression values."""
        # phe.fs.unit.display()
        assert pytest.approx(182244.65, rel=1e-5) == value(
            phe.fs.unit.hot_outlet.pressure[0]
        )
        assert pytest.approx(177366.53, rel=1e-5) == value(
            phe.fs.unit.cold_outlet.pressure[0]
        )
        assert pytest.approx(685.730, rel=1e-5) == value(phe.fs.unit.Re_hot[0])
        assert pytest.approx(196.018, rel=1e-5) == value(phe.fs.unit.Re_cold[0])
        assert pytest.approx(4.55441, rel=1e-5) == value(phe.fs.unit.Pr_hot[0])
        assert pytest.approx(15.0566, rel=1e-5) == value(phe.fs.unit.Pr_cold[0])
        assert pytest.approx(3691.35, rel=1e-5) == value(
            phe.fs.unit.heat_transfer_coefficient_hot_side[0]
        )
        assert pytest.approx(2610.81, rel=1e-5) == value(
            phe.fs.unit.heat_transfer_coefficient_cold_side[0]
        )
        assert pytest.approx(1170.34, rel=1e-5) == value(
            phe.fs.unit.heat_transfer_coefficient[0]
        )
        assert pytest.approx(24.4118, rel=1e-5) == value(phe.fs.unit.NTU[0])
        assert pytest.approx(0.971227, rel=1e-5) == value(phe.fs.unit.Cratio[0])
        assert pytest.approx(0.676902, rel=1e-5) == value(phe.fs.unit.effectiveness[0])
        assert pytest.approx(244327, rel=1e-5) == value(phe.fs.unit.heat_duty[0])
        assert pytest.approx(365.288, rel=1e-5) == value(
            phe.fs.unit.hot_outlet.temperature[0]
        )
        assert pytest.approx(356.952, rel=1e-5) == value(
            phe.fs.unit.cold_outlet.temperature[0]
        )

    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, phe):
        """Verify total/component mole balances and the overall energy balance."""
        # Mass conservation test
        assert (
            abs(
                value(
                    phe.fs.unit.hot_inlet.flow_mol[0]
                    - phe.fs.unit.hot_outlet.flow_mol[0]
                )
            )
            <= 1e-6
        )
        for j in phe.fs.hotside_properties.apparent_species_set:
            assert (
                abs(
                    value(
                        phe.fs.unit.hot_inlet.flow_mol[0]
                        * phe.fs.unit.hot_inlet.mole_frac_comp[0, j]
                        - phe.fs.unit.hot_outlet.flow_mol[0]
                        * phe.fs.unit.hot_outlet.mole_frac_comp[0, j]
                    )
                )
                <= 1e-6
            )
        assert (
            abs(
                value(
                    phe.fs.unit.cold_inlet.flow_mol[0]
                    - phe.fs.unit.cold_outlet.flow_mol[0]
                )
            )
            <= 1e-6
        )
        for j in phe.fs.coldside_properties.apparent_species_set:
            assert (
                abs(
                    value(
                        phe.fs.unit.cold_inlet.flow_mol[0]
                        * phe.fs.unit.cold_inlet.mole_frac_comp[0, j]
                        - phe.fs.unit.cold_outlet.flow_mol[0]
                        * phe.fs.unit.cold_outlet.mole_frac_comp[0, j]
                    )
                )
                <= 1e-6
            )
        # Energy conservation test
        assert (
            abs(
                value(
                    phe.fs.unit.hot_side.properties_in[0]._enthalpy_flow_term["Liq"]
                    + phe.fs.unit.cold_side.properties_in[0]._enthalpy_flow_term["Liq"]
                    - phe.fs.unit.hot_side.properties_out[0]._enthalpy_flow_term["Liq"]
                    - phe.fs.unit.cold_side.properties_out[0]._enthalpy_flow_term["Liq"]
                )
            )
            <= 1e-6
        )
# @pytest.mark.ui
# @pytest.mark.unit
# def test_report(self, phe):
# phe.fs.unit.report()
| #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Plate Heat Exchnager unit model.
Author: <NAME>
"""
import pytest
from pyomo.environ import (
check_optimal_termination,
ConcreteModel,
Param,
units as pyunits,
value,
)
from idaes.core import FlowsheetBlock
from idaes.models_extra.column_models.plate_heat_exchanger import (
PlateHeatExchanger as PHE,
)
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterBlock,
)
from idaes.models_extra.column_models.properties.MEA_solvent import (
configuration as aqueous_mea,
)
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core.solvers import get_solver
from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent
# -----------------------------------------------------------------------------
# Get default solver for testing
solver = get_solver()
@pytest.mark.unit
def test_config():
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.hotside_properties = GenericParameterBlock(default=aqueous_mea)
m.fs.coldside_properties = GenericParameterBlock(default=aqueous_mea)
m.fs.unit = PHE(
default={
"passes": 4,
"channels_per_pass": 12,
"number_of_divider_plates": 2,
"hot_side": {"property_package": m.fs.hotside_properties},
"cold_side": {"property_package": m.fs.coldside_properties},
}
)
# Check unit config arguments
assert len(m.fs.unit.config) == 7
# -----------------------------------------------------------------------------
class TestPHE(object):
@pytest.fixture(scope="class")
def phe(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.hotside_properties = GenericParameterBlock(default=aqueous_mea)
m.fs.coldside_properties = GenericParameterBlock(default=aqueous_mea)
m.fs.unit = PHE(
default={
"passes": 4,
"channels_per_pass": 12,
"number_of_divider_plates": 2,
"hot_side": {"property_package": m.fs.hotside_properties},
"cold_side": {"property_package": m.fs.coldside_properties},
}
)
# hot fluid
m.fs.unit.hot_inlet.flow_mol[0].fix(60.54879)
m.fs.unit.hot_inlet.temperature[0].fix(392.23)
m.fs.unit.hot_inlet.pressure[0].fix(202650)
m.fs.unit.hot_inlet.mole_frac_comp[0, "CO2"].fix(0.0158)
m.fs.unit.hot_inlet.mole_frac_comp[0, "H2O"].fix(0.8747)
m.fs.unit.hot_inlet.mole_frac_comp[0, "MEA"].fix(0.1095)
# cold fluid
m.fs.unit.cold_inlet.flow_mol[0].fix(63.01910)
m.fs.unit.cold_inlet.temperature[0].fix(326.36)
m.fs.unit.cold_inlet.pressure[0].fix(202650)
m.fs.unit.cold_inlet.mole_frac_comp[0, "CO2"].fix(0.0414)
m.fs.unit.cold_inlet.mole_frac_comp[0, "H2O"].fix(0.8509)
m.fs.unit.cold_inlet.mole_frac_comp[0, "MEA"].fix(0.1077)
# Fix unit geometry - default values should be correct
m.fs.unit.plate_length.fix()
m.fs.unit.plate_width.fix()
m.fs.unit.plate_thickness.fix()
m.fs.unit.plate_pact_length.fix()
m.fs.unit.port_diameter.fix()
m.fs.unit.plate_therm_cond.fix()
m.fs.unit.area.fix()
return m
@pytest.mark.build
@pytest.mark.unit
def test_build(self, phe):
assert hasattr(phe.fs.unit, "hot_inlet")
assert len(phe.fs.unit.hot_inlet.vars) == 4
assert hasattr(phe.fs.unit.hot_inlet, "flow_mol")
assert hasattr(phe.fs.unit.hot_inlet, "mole_frac_comp")
assert hasattr(phe.fs.unit.hot_inlet, "temperature")
assert hasattr(phe.fs.unit.hot_inlet, "pressure")
assert hasattr(phe.fs.unit, "hot_outlet")
assert len(phe.fs.unit.hot_outlet.vars) == 4
assert hasattr(phe.fs.unit.hot_outlet, "flow_mol")
assert hasattr(phe.fs.unit.hot_outlet, "mole_frac_comp")
assert hasattr(phe.fs.unit.hot_outlet, "temperature")
assert hasattr(phe.fs.unit.hot_outlet, "pressure")
assert hasattr(phe.fs.unit, "cold_inlet")
assert len(phe.fs.unit.cold_inlet.vars) == 4
assert hasattr(phe.fs.unit.cold_inlet, "flow_mol")
assert hasattr(phe.fs.unit.cold_inlet, "mole_frac_comp")
assert hasattr(phe.fs.unit.cold_inlet, "temperature")
assert hasattr(phe.fs.unit.cold_inlet, "pressure")
assert hasattr(phe.fs.unit, "cold_outlet")
assert len(phe.fs.unit.cold_outlet.vars) == 4
assert hasattr(phe.fs.unit.cold_outlet, "flow_mol")
assert hasattr(phe.fs.unit.cold_outlet, "mole_frac_comp")
assert hasattr(phe.fs.unit.cold_outlet, "temperature")
assert hasattr(phe.fs.unit.cold_outlet, "pressure")
assert hasattr(phe.fs.unit.cold_side, "deltaP")
assert hasattr(phe.fs.unit.hot_side, "deltaP")
assert isinstance(phe.fs.unit.number_of_passes, Param)
assert isinstance(phe.fs.unit.channels_per_pass, Param)
@pytest.mark.component
def test_units(self, phe):
assert_units_equivalent(phe.fs.unit.Re_hot[0], pyunits.dimensionless)
assert_units_equivalent(phe.fs.unit.Re_cold[0], pyunits.dimensionless)
assert_units_consistent(phe)
@pytest.mark.unit
def test_dof(self, phe):
assert degrees_of_freedom(phe) == 0
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, phe):
initialization_tester(
phe, duty=(245000, pyunits.W), optarg={"bound_push": 1e-8, "mu_init": 1e-8}
)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, phe):
results = solver.solve(phe)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, phe):
# phe.fs.unit.display()
assert pytest.approx(182244.65, rel=1e-5) == value(
phe.fs.unit.hot_outlet.pressure[0]
)
assert pytest.approx(177366.53, rel=1e-5) == value(
phe.fs.unit.cold_outlet.pressure[0]
)
assert pytest.approx(685.730, rel=1e-5) == value(phe.fs.unit.Re_hot[0])
assert pytest.approx(196.018, rel=1e-5) == value(phe.fs.unit.Re_cold[0])
assert pytest.approx(4.55441, rel=1e-5) == value(phe.fs.unit.Pr_hot[0])
assert pytest.approx(15.0566, rel=1e-5) == value(phe.fs.unit.Pr_cold[0])
assert pytest.approx(3691.35, rel=1e-5) == value(
phe.fs.unit.heat_transfer_coefficient_hot_side[0]
)
assert pytest.approx(2610.81, rel=1e-5) == value(
phe.fs.unit.heat_transfer_coefficient_cold_side[0]
)
assert pytest.approx(1170.34, rel=1e-5) == value(
phe.fs.unit.heat_transfer_coefficient[0]
)
assert pytest.approx(24.4118, rel=1e-5) == value(phe.fs.unit.NTU[0])
assert pytest.approx(0.971227, rel=1e-5) == value(phe.fs.unit.Cratio[0])
assert pytest.approx(0.676902, rel=1e-5) == value(phe.fs.unit.effectiveness[0])
assert pytest.approx(244327, rel=1e-5) == value(phe.fs.unit.heat_duty[0])
assert pytest.approx(365.288, rel=1e-5) == value(
phe.fs.unit.hot_outlet.temperature[0]
)
assert pytest.approx(356.952, rel=1e-5) == value(
phe.fs.unit.cold_outlet.temperature[0]
)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, phe):
# Mass conservation test
assert (
abs(
value(
phe.fs.unit.hot_inlet.flow_mol[0]
- phe.fs.unit.hot_outlet.flow_mol[0]
)
)
<= 1e-6
)
for j in phe.fs.hotside_properties.apparent_species_set:
assert (
abs(
value(
phe.fs.unit.hot_inlet.flow_mol[0]
* phe.fs.unit.hot_inlet.mole_frac_comp[0, j]
- phe.fs.unit.hot_outlet.flow_mol[0]
* phe.fs.unit.hot_outlet.mole_frac_comp[0, j]
)
)
<= 1e-6
)
assert (
abs(
value(
phe.fs.unit.cold_inlet.flow_mol[0]
- phe.fs.unit.cold_outlet.flow_mol[0]
)
)
<= 1e-6
)
for j in phe.fs.coldside_properties.apparent_species_set:
assert (
abs(
value(
phe.fs.unit.cold_inlet.flow_mol[0]
* phe.fs.unit.cold_inlet.mole_frac_comp[0, j]
- phe.fs.unit.cold_outlet.flow_mol[0]
* phe.fs.unit.cold_outlet.mole_frac_comp[0, j]
)
)
<= 1e-6
)
# Energy conservation test
assert (
abs(
value(
phe.fs.unit.hot_side.properties_in[0]._enthalpy_flow_term["Liq"]
+ phe.fs.unit.cold_side.properties_in[0]._enthalpy_flow_term["Liq"]
- phe.fs.unit.hot_side.properties_out[0]._enthalpy_flow_term["Liq"]
- phe.fs.unit.cold_side.properties_out[0]._enthalpy_flow_term["Liq"]
)
)
<= 1e-6
)
# @pytest.mark.ui
# @pytest.mark.unit
# def test_report(self, phe):
# phe.fs.unit.report() | en | 0.510026 | ################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# Tests for Plate Heat Exchnager unit model. Author: <NAME> # ----------------------------------------------------------------------------- # Get default solver for testing # Check unit config arguments # ----------------------------------------------------------------------------- # hot fluid # cold fluid # Fix unit geometry - default values should be correct # Check for optimal solution # phe.fs.unit.display() # Mass conservation test # Energy conservation test # @pytest.mark.ui # @pytest.mark.unit # def test_report(self, phe): # phe.fs.unit.report() | 1.741892 | 2 |
daiquiri/query/tests/test_views.py | agy-why/daiquiri | 14 | 6616427 | <reponame>agy-why/daiquiri
from django.test import TestCase
from test_generator.views import TestListViewMixin
from daiquiri.core.utils import setup_group
class QueryViewTestCase(TestCase):
    """Shared base for daiquiri query view tests: fixtures, users, setup."""

    # Databases the Django test runner must create for these views.
    databases = ('default', 'data', 'tap', 'oai')
    fixtures = (
        'auth.json',
        'metadata.json'
    )
    # (username, password) pairs exercised by the view mixins; a None
    # password marks the unauthenticated client.
    users = (
        ('admin', 'admin'),
        ('manager', 'manager'),
        ('user', 'user'),
        ('anonymous', None),
    )

    def setUp(self):
        """Ensure the query_manager group (and its permissions) exists."""
        setup_group('query_manager')
class QueryTests(TestListViewMixin, QueryViewTestCase):
    """List-view tests for the main query page (open to all logged-in users)."""

    url_names = {
        'list_view': 'query:query'
    }
    # Expected HTTP status per user role; 302 = redirect to login.
    status_map = {
        'list_view': {
            'admin': 200, 'manager': 200, 'user': 200, 'anonymous': 302
        }
    }
class ExamplesTests(TestListViewMixin, QueryViewTestCase):
    """List-view tests for the examples admin page (managers/admins only)."""

    url_names = {
        'list_view': 'query:examples'
    }
    # Expected HTTP status per user role; 403 = authenticated but forbidden,
    # 302 = redirect to login.
    status_map = {
        'list_view': {
            'admin': 200, 'manager': 200, 'user': 403, 'anonymous': 302
        }
    }
| from django.test import TestCase
from test_generator.views import TestListViewMixin
from daiquiri.core.utils import setup_group
class QueryViewTestCase(TestCase):
databases = ('default', 'data', 'tap', 'oai')
fixtures = (
'auth.json',
'metadata.json'
)
users = (
('admin', 'admin'),
('manager', 'manager'),
('user', 'user'),
('anonymous', None),
)
def setUp(self):
setup_group('query_manager')
class QueryTests(TestListViewMixin, QueryViewTestCase):
url_names = {
'list_view': 'query:query'
}
status_map = {
'list_view': {
'admin': 200, 'manager': 200, 'user': 200, 'anonymous': 302
}
}
class ExamplesTests(TestListViewMixin, QueryViewTestCase):
url_names = {
'list_view': 'query:examples'
}
status_map = {
'list_view': {
'admin': 200, 'manager': 200, 'user': 403, 'anonymous': 302
}
} | none | 1 | 2.076168 | 2 | |
Sensor.py | chumo/G.A.M.E. | 0 | 6616428 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 19 15:20:13 2015
@author: JMB
"""
def ReadLine(port, n_pixels=64):
    """Read one line of pixel values from the serial distance sensor.

    The Arduino sketch distance_sensor.ino sends one line of
    semicolon-separated pixel readings per request (typically 64 pixels
    per line; see the sketch for the exact count).

    :param port: serial port object exposing flushInput(), write() and
        readline() (e.g. a pyserial Serial instance).
    :param n_pixels: number of pixel fields to keep from the line
        (default 64, matching the original hard-coded behaviour).
    :return: list of the first ``n_pixels`` pixel values as strings.
    """
    port.flushInput()  # - Empty the buffer so we read a fresh line
    port.write(' '.encode())  # - A single space character triggers one line
    raw = port.readline()
    return raw.decode().split(';')[:n_pixels]
| # -*- coding: utf-8 -*-
"""
Created on Mon Jan 19 15:20:13 2015
@author: JMB
"""
def ReadLine(port):
'''Reads a line of pixels. In the Arduino file distance_sensor.ino
you can find how many pixels are red per line (tipically 64)
'''
port.flushInput() #- Empty the buffer
port.write(' '.encode()) #- Send a space character
aux = port.readline()
line = aux.decode().split(';')[0:64]
return line
| en | 0.816383 | # -*- coding: utf-8 -*- Created on Mon Jan 19 15:20:13 2015 @author: JMB Reads a line of pixels. In the Arduino file distance_sensor.ino you can find how many pixels are red per line (tipically 64) #- Empty the buffer #- Send a space character | 3.207109 | 3 |
convlab2/policy/mdrg/multiwoz/__init__.py | ljw23/ConvLab-2 | 339 | 6616429 | from convlab2.policy.mdrg.multiwoz.policy import MDRGWordPolicy | from convlab2.policy.mdrg.multiwoz.policy import MDRGWordPolicy | none | 1 | 1.088791 | 1 | |
hacker/challenges/crypto/didactic_xor_long_cipher.py | Tenebrar/codebase | 1 | 6616430 | from hacker.bytestreams import bytes_from_hex, skip, offset
from hacker.decoder import transform
value = '<KEY>' # noqa
def is_acceptable(c):
    """Return True if ``c`` could plausibly occur in the decrypted English text.

    Accepts ASCII letters (either case) and a small set of punctuation that
    is expected in ordinary prose.
    """
    lowered = c.lower()
    if 'a' <= lowered <= 'z':
        return True
    return c in ' .?!\'",'
def get_key_options(key_index, ciphertext):
    """Yield every key byte that decrypts all of its positions acceptably.

    ``key_index`` selects one position of the repeating 4-byte key; only
    every 4th ciphertext byte (starting at ``key_index``) is decrypted with
    the candidate byte, and the candidate is kept only if every resulting
    character passes ``is_acceptable``.
    """
    for candidate in range(256):
        stream = skip(offset(bytes_from_hex(ciphertext), key_index), 4)
        decoded = transform(stream, lambda byte: byte ^ candidate, chr, is_acceptable)
        if all(decoded):
            yield candidate
# Candidate key bytes for each of the four key positions.  Materialise the
# generators into lists: get_key_options returns generators, and a generator
# is exhausted after one pass, so the original nested loops silently skipped
# every key combination after the first iteration of the outer loop.
key_space = [list(get_key_options(j, value)) for j in range(4)]
for key1 in key_space[0]:
    for key2 in key_space[1]:
        for key3 in key_space[2]:
            for key4 in key_space[3]:
                key = [key1, key2, key3, key4]
                # Decrypt the whole ciphertext with this repeating 4-byte key
                # and print the candidate plaintext for manual inspection.
                chars = [chr(b ^ key[index % 4])
                         for index, b in enumerate(bytes_from_hex(value))]
                print(''.join(chars))
| from hacker.bytestreams import bytes_from_hex, skip, offset
from hacker.decoder import transform
value = '<KEY>' # noqa
def is_acceptable(c):
""" Returns whether a given character is considered potentially in the result string """
return 'a' <= c.lower() <= 'z' or c in ' .?!\'",'
def get_key_options(key_index, ciphertext):
""" Returns only the potential keys that map ALL the characters it would be used for to an acceptable one """
for key_option in range(256):
bytes_for_key = skip(offset(bytes_from_hex(ciphertext), key_index), 4)
if all(transform(bytes_for_key, lambda byte: byte ^ key_option, chr, is_acceptable)):
yield key_option
key_space = [get_key_options(j, value) for j in range(4)]
for key1 in key_space[0]:
for key2 in key_space[1]:
for key3 in key_space[2]:
for key4 in key_space[3]:
key = [key1, key2, key3, key4]
result = ''
for index, b in enumerate(bytes_from_hex(value)):
result += chr(b ^ key[index % 4])
print(result)
| en | 0.865785 | # noqa Returns whether a given character is considered potentially in the result string Returns only the potential keys that map ALL the characters it would be used for to an acceptable one | 3.241878 | 3 |
data_prep/old/01_prepare_data.py | vejvarm/speech_recognition_with_TF2_at_UCT_Prague | 0 | 6616431 | import os
from itertools import compress
import numpy as np
from pysndfx import AudioEffectsChain
from FeatureExtraction import FeatureExtractor
from DataOps import DataLoader, PDTSCLoader, OralLoader
from helpers import console_logger
LOGGER = console_logger(__name__, "DEBUG")
def get_file_paths(audio_folder, transcript_folder):
    """Pair up matching audio and transcript files from two folders.

    Both folder arguments must end with a path separator, because the
    returned paths are built by plain string concatenation.

    The directory listings are sorted before pairing: os.listdir() returns
    entries in arbitrary order, so without sorting the pairwise name check
    below could fail (or mispair files) depending on the filesystem.

    :param audio_folder: folder containing the audio files
    :param transcript_folder: folder containing the transcript files
    :return: list of (audio_path, transcript_path) tuples
    :raises AssertionError: if the sorted folders do not contain matching
        base file names at every position
    """
    audio_files = sorted(
        os.path.splitext(f) for f in os.listdir(audio_folder)
        if os.path.isfile(os.path.join(audio_folder, f)))
    transcript_files = sorted(
        os.path.splitext(f) for f in os.listdir(transcript_folder)
        if os.path.isfile(os.path.join(transcript_folder, f)))

    files = []
    for file1, file2 in zip(audio_files, transcript_files):
        err_message = "{} =/= {}".format(file1[0], file2[0])
        assert file1[0] == file2[0], err_message
        files.append((audio_folder + file1[0] + file1[1],
                      transcript_folder + file2[0] + file2[1]))
    return files
def get_file_names(files):
    """Return the bare file names (no directory, no extension) of the audio paths."""
    names = []
    for audio_path, _transcript_path in files:
        base = os.path.split(audio_path)[1]
        names.append(os.path.splitext(base)[0])
    return names
def prepare_data(files, save_folder, dataset="pdtsc", label_max_duration=10.0, speeds=(0.9, 1.0, 1.1),
feature_type="MFSC", bigrams=False, repeated=False, energy=True, deltas=(0, 0),
nbanks=40, filter_nan=True, sort=True):
cepstra_length_list = []
file_names = get_file_names(files)
for speed in speeds:
LOGGER.info(f"Create audio_transormer for speed {speed}")
audio_transformer = (AudioEffectsChain().speed(speed))
save_path = os.path.join(save_folder, f"{speed}/")
LOGGER.debug(f"Current save_path: {save_path}")
for i, file in enumerate(files):
if dataset == "pdtsc":
pdtsc = PDTSCLoader([file[0]], [file[1]], bigrams, repeated)
labels = pdtsc.transcripts_to_labels() # list of lists of 1D numpy arrays
labels = labels[0] # flatten label list
audio_list, fs = pdtsc.load_audio()
audio = audio_list[0]
fs = fs[0]
LOGGER.debug(f"Loaded PDTSC with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
elif dataset == "oral":
oral = OralLoader([file[0]], [file[1]], bigrams, repeated)
label_dict = oral.transcripts_to_labels(label_max_duration) # Dict['file_name':Tuple[sents_list, starts_list, ends_list]]
audio_dict, fs_dict = oral.load_audio() # Dicts['file_name']
labels = label_dict[file_names[i]]
audio = audio_dict[file_names[i]]
fs = fs_dict[file_names[i]]
LOGGER.debug(f"Loaded ORAL with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
full_save_path = os.path.join(save_path, file_names[i])
LOGGER.info(f"\tApplying SoX transormation on audio from {full_save_path}")
for ii in range(len(audio)):
LOGGER.debug(f"\t\t input.shape: {audio[ii].shape}")
audio[ii] = audio_transformer(audio[ii])
LOGGER.debug(f"\t\t output.shape: {audio[ii].shape}")
LOGGER.info(f"\tApplying FeatureExtractor on audio")
feat_ext = FeatureExtractor(fs, feature_type=feature_type, energy=energy, deltas=deltas, nbanks=nbanks)
cepstra = feat_ext.transform_data(audio) # list of 2D arrays
# filter out cepstra which are containing nan values
if filter_nan:
LOGGER.info(f"\tFiltering out NaN values")
# boolean list where False marks cepstra in which there is at least one nan value present
mask_nan = [not np.isnan(cepstrum).any() for cepstrum in cepstra]
# mask out cepstra and their corresponding labels with nan values
cepstra = list(compress(cepstra, mask_nan))
labels = list(compress(labels, mask_nan))
# SAVE Cepstra to files (features)
LOGGER.info(f"\tSaving cepstra to files")
FeatureExtractor.save_cepstra(cepstra, full_save_path, exist_ok=True)
LOGGER.debug(f"\t\tfull_save_path: {full_save_path}")
# SAVE Transcripts to files (labels)
LOGGER.info(f"\tSaving transcripts to files")
if dataset == 'pdtsc':
pdtsc.save_labels([labels], save_path, exist_ok=True)
elif dataset == 'oral':
label_dict[file_names[i]] = labels
oral.save_labels(label_dict, save_path, exist_ok=True)
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
LOGGER.info(f"\tChecking SAVE/LOAD consistency")
loaded_cepstra, loaded_cepstra_paths = FeatureExtractor.load_cepstra(full_save_path)
loaded_labels, loaded_label_paths = DataLoader.load_labels(full_save_path)
# flatten the lists
loaded_cepstra, loaded_cepstra_paths, loaded_labels, loaded_label_paths = (loaded_cepstra[0],
loaded_cepstra_paths[0],
loaded_labels[0],
loaded_label_paths[0])
for j in range(len(cepstra)):
if np.any(np.not_equal(cepstra[j], loaded_cepstra[j])):
raise UserWarning("Saved and loaded cepstra are not value consistent.")
if dataset == 'pdtsc':
if np.any(np.not_equal(labels[j], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
elif dataset == 'oral':
if np.any(np.not_equal(labels[j][0], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
# add (cepstrum_path, label_path, cepstrum_length) tuple into collective list for sorting
cepstra_length_list.append((loaded_cepstra_paths[j], loaded_label_paths[j], loaded_cepstra[j].shape[0]))
LOGGER.debug(f'files from {file_names[i]} transformed and saved into {os.path.abspath(save_path)}.')
# sort cepstra and labels by time length (number of frames)
if sort:
LOGGER.info(f"Sorting cepstra and labels by time length (number of frames)")
sort_indices = np.argsort([c[2] for c in cepstra_length_list]) # indices which sort the lists by cepstra length
cepstra_length_list = [cepstra_length_list[i] for i in sort_indices] # sort the cepstra list
num_digits = len(str(len(cepstra_length_list)))
for idx, file in enumerate(cepstra_length_list):
cepstrum_path, label_path, _ = file
os.rename(cepstrum_path, "{0}/cepstrum-{1:0{2}d}.npy".format(save_path, idx, num_digits))
os.rename(label_path, "{0}/transcript-{1:0{2}d}.npy".format(save_path, idx, num_digits))
subfolders = next(os.walk(save_path))[1]
for folder in subfolders:
try:
os.rmdir(os.path.join(save_path, folder))
except OSError:
LOGGER.warning("Folder {} is not empty! Can't delete.".format(os.path.join(save_path, folder)))
if __name__ == '__main__':
# extracting audiofiles, transforming into cepstra and saving to separate folders
debug = True
dataset = "pdtsc"
feature_type = "MFSC"
label_type = "unigram"
bigrams = True if label_type == "bigram" else False
repeated = False
energy = True
deltas = (2, 2)
nbanks = 40
filter_nan = True
sort = False
audio_folder = "b:/!DATASETS/raw_debug/audio/"
transcript_folder = "b:/!DATASETS/raw_debug/transcripts/"
save_folder = f'B:/!temp/{dataset.upper()}_{feature_type}_{label_type}_{nbanks}_banks_{"DEBUG" if debug else ""}/'
files = get_file_paths(audio_folder, transcript_folder)
prepare_data(files, save_folder, dataset=dataset, feature_type=feature_type,
bigrams=bigrams, repeated=repeated, energy=energy,
deltas=deltas, nbanks=nbanks, filter_nan=filter_nan, sort=sort)
| import os
from itertools import compress
import numpy as np
from pysndfx import AudioEffectsChain
from FeatureExtraction import FeatureExtractor
from DataOps import DataLoader, PDTSCLoader, OralLoader
from helpers import console_logger
LOGGER = console_logger(__name__, "DEBUG")
def get_file_paths(audio_folder, transcript_folder):
audio_files = [os.path.splitext(f) for f in os.listdir(audio_folder)
if os.path.isfile(os.path.join(audio_folder, f))]
transcript_files = [os.path.splitext(f) for f in os.listdir(transcript_folder)
if os.path.isfile(os.path.join(transcript_folder, f))]
files = []
for file1, file2 in zip(audio_files, transcript_files):
err_message = "{} =/= {}".format(file1[0], file2[0])
assert file1[0] == file2[0], err_message
files.append((audio_folder+file1[0]+file1[1], transcript_folder+file2[0]+file2[1]))
return files
def get_file_names(files):
return [os.path.splitext(os.path.split(file[0])[1])[0] for file in files]
def prepare_data(files, save_folder, dataset="pdtsc", label_max_duration=10.0, speeds=(0.9, 1.0, 1.1),
feature_type="MFSC", bigrams=False, repeated=False, energy=True, deltas=(0, 0),
nbanks=40, filter_nan=True, sort=True):
cepstra_length_list = []
file_names = get_file_names(files)
for speed in speeds:
LOGGER.info(f"Create audio_transormer for speed {speed}")
audio_transformer = (AudioEffectsChain().speed(speed))
save_path = os.path.join(save_folder, f"{speed}/")
LOGGER.debug(f"Current save_path: {save_path}")
for i, file in enumerate(files):
if dataset == "pdtsc":
pdtsc = PDTSCLoader([file[0]], [file[1]], bigrams, repeated)
labels = pdtsc.transcripts_to_labels() # list of lists of 1D numpy arrays
labels = labels[0] # flatten label list
audio_list, fs = pdtsc.load_audio()
audio = audio_list[0]
fs = fs[0]
LOGGER.debug(f"Loaded PDTSC with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
elif dataset == "oral":
oral = OralLoader([file[0]], [file[1]], bigrams, repeated)
label_dict = oral.transcripts_to_labels(label_max_duration) # Dict['file_name':Tuple[sents_list, starts_list, ends_list]]
audio_dict, fs_dict = oral.load_audio() # Dicts['file_name']
labels = label_dict[file_names[i]]
audio = audio_dict[file_names[i]]
fs = fs_dict[file_names[i]]
LOGGER.debug(f"Loaded ORAL with fs {fs} from:\n \t audio_path: {file[0]}\n \t transcript_path: {file[1]}")
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
full_save_path = os.path.join(save_path, file_names[i])
LOGGER.info(f"\tApplying SoX transormation on audio from {full_save_path}")
for ii in range(len(audio)):
LOGGER.debug(f"\t\t input.shape: {audio[ii].shape}")
audio[ii] = audio_transformer(audio[ii])
LOGGER.debug(f"\t\t output.shape: {audio[ii].shape}")
LOGGER.info(f"\tApplying FeatureExtractor on audio")
feat_ext = FeatureExtractor(fs, feature_type=feature_type, energy=energy, deltas=deltas, nbanks=nbanks)
cepstra = feat_ext.transform_data(audio) # list of 2D arrays
# filter out cepstra which are containing nan values
if filter_nan:
LOGGER.info(f"\tFiltering out NaN values")
# boolean list where False marks cepstra in which there is at least one nan value present
mask_nan = [not np.isnan(cepstrum).any() for cepstrum in cepstra]
# mask out cepstra and their corresponding labels with nan values
cepstra = list(compress(cepstra, mask_nan))
labels = list(compress(labels, mask_nan))
# SAVE Cepstra to files (features)
LOGGER.info(f"\tSaving cepstra to files")
FeatureExtractor.save_cepstra(cepstra, full_save_path, exist_ok=True)
LOGGER.debug(f"\t\tfull_save_path: {full_save_path}")
# SAVE Transcripts to files (labels)
LOGGER.info(f"\tSaving transcripts to files")
if dataset == 'pdtsc':
pdtsc.save_labels([labels], save_path, exist_ok=True)
elif dataset == 'oral':
label_dict[file_names[i]] = labels
oral.save_labels(label_dict, save_path, exist_ok=True)
else:
raise ValueError("'dataset' argument must be either 'pdtsc' or 'oral'")
LOGGER.info(f"\tChecking SAVE/LOAD consistency")
loaded_cepstra, loaded_cepstra_paths = FeatureExtractor.load_cepstra(full_save_path)
loaded_labels, loaded_label_paths = DataLoader.load_labels(full_save_path)
# flatten the lists
loaded_cepstra, loaded_cepstra_paths, loaded_labels, loaded_label_paths = (loaded_cepstra[0],
loaded_cepstra_paths[0],
loaded_labels[0],
loaded_label_paths[0])
for j in range(len(cepstra)):
if np.any(np.not_equal(cepstra[j], loaded_cepstra[j])):
raise UserWarning("Saved and loaded cepstra are not value consistent.")
if dataset == 'pdtsc':
if np.any(np.not_equal(labels[j], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
elif dataset == 'oral':
if np.any(np.not_equal(labels[j][0], loaded_labels[j])):
raise UserWarning("Saved and loaded labels are not value consistent.")
# add (cepstrum_path, label_path, cepstrum_length) tuple into collective list for sorting
cepstra_length_list.append((loaded_cepstra_paths[j], loaded_label_paths[j], loaded_cepstra[j].shape[0]))
LOGGER.debug(f'files from {file_names[i]} transformed and saved into {os.path.abspath(save_path)}.')
# sort cepstra and labels by time length (number of frames)
if sort:
LOGGER.info(f"Sorting cepstra and labels by time length (number of frames)")
sort_indices = np.argsort([c[2] for c in cepstra_length_list]) # indices which sort the lists by cepstra length
cepstra_length_list = [cepstra_length_list[i] for i in sort_indices] # sort the cepstra list
num_digits = len(str(len(cepstra_length_list)))
for idx, file in enumerate(cepstra_length_list):
cepstrum_path, label_path, _ = file
os.rename(cepstrum_path, "{0}/cepstrum-{1:0{2}d}.npy".format(save_path, idx, num_digits))
os.rename(label_path, "{0}/transcript-{1:0{2}d}.npy".format(save_path, idx, num_digits))
subfolders = next(os.walk(save_path))[1]
for folder in subfolders:
try:
os.rmdir(os.path.join(save_path, folder))
except OSError:
LOGGER.warning("Folder {} is not empty! Can't delete.".format(os.path.join(save_path, folder)))
if __name__ == '__main__':
# extracting audiofiles, transforming into cepstra and saving to separate folders
debug = True
dataset = "pdtsc"
feature_type = "MFSC"
label_type = "unigram"
bigrams = True if label_type == "bigram" else False
repeated = False
energy = True
deltas = (2, 2)
nbanks = 40
filter_nan = True
sort = False
audio_folder = "b:/!DATASETS/raw_debug/audio/"
transcript_folder = "b:/!DATASETS/raw_debug/transcripts/"
save_folder = f'B:/!temp/{dataset.upper()}_{feature_type}_{label_type}_{nbanks}_banks_{"DEBUG" if debug else ""}/'
files = get_file_paths(audio_folder, transcript_folder)
prepare_data(files, save_folder, dataset=dataset, feature_type=feature_type,
bigrams=bigrams, repeated=repeated, energy=energy,
deltas=deltas, nbanks=nbanks, filter_nan=filter_nan, sort=sort)
| en | 0.712687 | # list of lists of 1D numpy arrays # flatten label list # Dict['file_name':Tuple[sents_list, starts_list, ends_list]] # Dicts['file_name'] # list of 2D arrays # filter out cepstra which are containing nan values # boolean list where False marks cepstra in which there is at least one nan value present # mask out cepstra and their corresponding labels with nan values # SAVE Cepstra to files (features) # SAVE Transcripts to files (labels) # flatten the lists # add (cepstrum_path, label_path, cepstrum_length) tuple into collective list for sorting # sort cepstra and labels by time length (number of frames) # indices which sort the lists by cepstra length # sort the cepstra list # extracting audiofiles, transforming into cepstra and saving to separate folders | 2.291805 | 2 |
amazon/python/palindrome_pairs_336.py | Xiaoyu-Xing/algorithms | 0 | 6616432 | <reponame>Xiaoyu-Xing/algorithms
# First method: by using slicing
# Time complexity of this solution is O(n * w^2) n being length of the list,
# w being the average word length. It is not better or worse than O(n^2),
# if the average word length is very long this solution is very slow,
# but with very long list and every word is very short this is a much better solution.
def is_palindrome(s):
left = 0
right = len(s) - 1
while left < right:
if s[left] != s[right]:
return False
left += 1
right -= 1
return True
def palindromePairs(words):
solution = set()
words_dict = {}
for i, word in enumerate(words):
words_dict[word] = i
# print(words_dict)
for word, i in words_dict.items():
for j in range(len(word) + 1):
prefix = word[:j]
surfix = word[j:]
back_pre = prefix[::-1]
back_sur = surfix[::-1]
if is_palindrome(prefix) and back_sur in words_dict:
k = words_dict[back_sur]
if i != k:
solution.add((k, i))
if is_palindrome(surfix) and back_pre in words_dict:
k = words_dict[back_pre]
if i != k:
solution.add((i, k))
# print(solution)
return [list(i) for i in solution]
print(palindromePairs(["abcd", "dcba", "lls", "s", "sssll"]))
print(palindromePairs(["bat", "tab", "cat"]))
# Trie version: use dictionary as trie node, and special key as termination of word
# Trie: https://www.youtube.com/watch?v=AXjmTQ8LEoI
# python trie: https://fizzbuzzed.com/top-interview-questions-5/
# Not correct yet.
class TrieNode:
def __init__(self):
self.next = {}
self.end = -1
self.next_palindrom = set()
class Solution:
def palindromePairs(self, words):
root = TrieNode()
for i, word in enumerate(words):
# print(word)
trie = root
for j in range(len(word) - 1, -1, -1):
char = word[j]
if char not in trie.next:
trie.next[char] = TrieNode()
if j > 0 and self.is_palindrome(word[:j]):
trie.next_palindrom.add(i)
if j != 0:
trie = trie.next[char]
trie.end = i
# print(root.next, root.end, root.next_palindrom, root.next['a'])
solution = set()
for i, word in enumerate(words):
trie = root
if word == '':
for k, new_word in enumerate(words):
if i != k:
solution.add((i, k))
solution.add((k, i))
for j in range(len(word)):
if word[j] not in trie.next:
break
if trie.end != -1 and trie.end != i and self.is_palindrome(word[j + 1:]):
solution.add((trie.end, i))
if j == len(word) - 1 and trie.next_palindrom:
for each in trie.next_palindrom:
solution.add((i, each))
trie = trie.next[word[j]]
# print(word, solution)
return [list(i) for i in solution]
def is_palindrome(self, word):
return word == word[::-1]
palindrome_pair_builder = Solution()
print(palindrome_pair_builder.palindromePairs(
["abcd", "dcba", "lls", "s", "sssll"]))
print(palindrome_pair_builder.palindromePairs(
["bat", "tab", "cat"]))
print(palindrome_pair_builder.palindromePairs(
['a', '']))
# Other's method:
class Trie:
def __init__(self):
# letter -> next trie node.
self.paths = defaultdict(Trie)
# If a word ends at this node, then this will be a positive value
# that indicates the location of the word in the input list.
self.wordEndIndex = -1
# Stores all words that are palindromes from this node to end of word.
# e.g. if we are on a path 'a','c' and word "babca" exists in this trie
# (words are added in reverse), then "acbab"'s index will be in this
# list since "bab" is a palindrome.
self.palindromesBelow = []
# Adds a word to the trie - the word will be added in
# reverse (e.g. adding abcd adds the path d,c,b,a,$index) to the trie.
# word - string the word to be added
# index - int index of the word in the list, used as word identifier.
def addWord(self, word, index):
trie = self
for j, char in enumerate(reversed(word)):
if isPalindrome(word[0:len(word) - j]):
trie.palindromesBelow.append(index)
trie = trie.paths[char]
trie.wordEndIndex = index
def makeTrie(words):
trie = Trie()
for i, word in enumerate(words):
trie.addWord(word, i)
return trie
# Takes the trie, a word, and its index in the word array
# and returns the index of every word that could be appended
# to it to form a palindrome.
def getPalindromesForWord(trie, word, index):
# Walk trie. Every time we find a word ending,
# we need to check if we could make a palindrome.
# Once we get to the end of the word, we must check
# all endings below for palindromes (they are already
# stored in 'palindromesBelow').
output = []
while word:
if trie.wordEndIndex >= 0:
if isPalindrome(word):
output.append(trie.wordEndIndex)
if not word[0] in trie.paths:
return output
trie = trie.paths[word[0]]
word = word[1:]
if trie.wordEndIndex >= 0:
output.append(trie.wordEndIndex)
output.extend(trie.palindromesBelow)
return output
def palindromePairs(words):
trie = makeTrie(words)
output = []
for i, word in enumerate(words):
candidates = getPalindromesForWord(trie, word, i)
output.extend([[i, c] for c in candidates if i != c])
return output
| # First method: by using slicing
# Time complexity of this solution is O(n * w^2) n being length of the list,
# w being the average word length. It is not better or worse than O(n^2),
# if the average word length is very long this solution is very slow,
# but with very long list and every word is very short this is a much better solution.
def is_palindrome(s):
left = 0
right = len(s) - 1
while left < right:
if s[left] != s[right]:
return False
left += 1
right -= 1
return True
def palindromePairs(words):
solution = set()
words_dict = {}
for i, word in enumerate(words):
words_dict[word] = i
# print(words_dict)
for word, i in words_dict.items():
for j in range(len(word) + 1):
prefix = word[:j]
surfix = word[j:]
back_pre = prefix[::-1]
back_sur = surfix[::-1]
if is_palindrome(prefix) and back_sur in words_dict:
k = words_dict[back_sur]
if i != k:
solution.add((k, i))
if is_palindrome(surfix) and back_pre in words_dict:
k = words_dict[back_pre]
if i != k:
solution.add((i, k))
# print(solution)
return [list(i) for i in solution]
print(palindromePairs(["abcd", "dcba", "lls", "s", "sssll"]))
print(palindromePairs(["bat", "tab", "cat"]))
# Trie version: use dictionary as trie node, and special key as termination of word
# Trie: https://www.youtube.com/watch?v=AXjmTQ8LEoI
# python trie: https://fizzbuzzed.com/top-interview-questions-5/
# Not correct yet.
class TrieNode:
def __init__(self):
self.next = {}
self.end = -1
self.next_palindrom = set()
class Solution:
def palindromePairs(self, words):
root = TrieNode()
for i, word in enumerate(words):
# print(word)
trie = root
for j in range(len(word) - 1, -1, -1):
char = word[j]
if char not in trie.next:
trie.next[char] = TrieNode()
if j > 0 and self.is_palindrome(word[:j]):
trie.next_palindrom.add(i)
if j != 0:
trie = trie.next[char]
trie.end = i
# print(root.next, root.end, root.next_palindrom, root.next['a'])
solution = set()
for i, word in enumerate(words):
trie = root
if word == '':
for k, new_word in enumerate(words):
if i != k:
solution.add((i, k))
solution.add((k, i))
for j in range(len(word)):
if word[j] not in trie.next:
break
if trie.end != -1 and trie.end != i and self.is_palindrome(word[j + 1:]):
solution.add((trie.end, i))
if j == len(word) - 1 and trie.next_palindrom:
for each in trie.next_palindrom:
solution.add((i, each))
trie = trie.next[word[j]]
# print(word, solution)
return [list(i) for i in solution]
def is_palindrome(self, word):
return word == word[::-1]
palindrome_pair_builder = Solution()
print(palindrome_pair_builder.palindromePairs(
["abcd", "dcba", "lls", "s", "sssll"]))
print(palindrome_pair_builder.palindromePairs(
["bat", "tab", "cat"]))
print(palindrome_pair_builder.palindromePairs(
['a', '']))
# Other's method:
class Trie:
def __init__(self):
# letter -> next trie node.
self.paths = defaultdict(Trie)
# If a word ends at this node, then this will be a positive value
# that indicates the location of the word in the input list.
self.wordEndIndex = -1
# Stores all words that are palindromes from this node to end of word.
# e.g. if we are on a path 'a','c' and word "babca" exists in this trie
# (words are added in reverse), then "acbab"'s index will be in this
# list since "bab" is a palindrome.
self.palindromesBelow = []
# Adds a word to the trie - the word will be added in
# reverse (e.g. adding abcd adds the path d,c,b,a,$index) to the trie.
# word - string the word to be added
# index - int index of the word in the list, used as word identifier.
def addWord(self, word, index):
trie = self
for j, char in enumerate(reversed(word)):
if isPalindrome(word[0:len(word) - j]):
trie.palindromesBelow.append(index)
trie = trie.paths[char]
trie.wordEndIndex = index
def makeTrie(words):
trie = Trie()
for i, word in enumerate(words):
trie.addWord(word, i)
return trie
# Takes the trie, a word, and its index in the word array
# and returns the index of every word that could be appended
# to it to form a palindrome.
def getPalindromesForWord(trie, word, index):
# Walk trie. Every time we find a word ending,
# we need to check if we could make a palindrome.
# Once we get to the end of the word, we must check
# all endings below for palindromes (they are already
# stored in 'palindromesBelow').
output = []
while word:
if trie.wordEndIndex >= 0:
if isPalindrome(word):
output.append(trie.wordEndIndex)
if not word[0] in trie.paths:
return output
trie = trie.paths[word[0]]
word = word[1:]
if trie.wordEndIndex >= 0:
output.append(trie.wordEndIndex)
output.extend(trie.palindromesBelow)
return output
def palindromePairs(words):
trie = makeTrie(words)
output = []
for i, word in enumerate(words):
candidates = getPalindromesForWord(trie, word, i)
output.extend([[i, c] for c in candidates if i != c])
return output | en | 0.85856 | # First method: by using slicing # Time complexity of this solution is O(n * w^2) n being length of the list, # w being the average word length. It is not better or worse than O(n^2), # if the average word length is very long this solution is very slow, # but with very long list and every word is very short this is a much better solution. # print(words_dict) # print(solution) # Trie version: use dictionary as trie node, and special key as termination of word # Trie: https://www.youtube.com/watch?v=AXjmTQ8LEoI # python trie: https://fizzbuzzed.com/top-interview-questions-5/ # Not correct yet. # print(word) # print(root.next, root.end, root.next_palindrom, root.next['a']) # print(word, solution) # Other's method: # letter -> next trie node. # If a word ends at this node, then this will be a positive value # that indicates the location of the word in the input list. # Stores all words that are palindromes from this node to end of word. # e.g. if we are on a path 'a','c' and word "babca" exists in this trie # (words are added in reverse), then "acbab"'s index will be in this # list since "bab" is a palindrome. # Adds a word to the trie - the word will be added in # reverse (e.g. adding abcd adds the path d,c,b,a,$index) to the trie. # word - string the word to be added # index - int index of the word in the list, used as word identifier. # Takes the trie, a word, and its index in the word array # and returns the index of every word that could be appended # to it to form a palindrome. # Walk trie. Every time we find a word ending, # we need to check if we could make a palindrome. # Once we get to the end of the word, we must check # all endings below for palindromes (they are already # stored in 'palindromesBelow'). | 3.576466 | 4 |
lang_id.py | jlibovicky/asses-multilingual-bert | 1 | 6616433 | <filename>lang_id.py
#!/usr/bin/env python
# coding: utf-8
"""Train language ID with BERT."""
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import (
text_data_generator, batch_generator, get_repr_from_layer, load_bert)
logging.basicConfig(level=logging.INFO)
def lng_data_generator(path, lng2idx, epochs=1):
for _ in range(epochs):
with open(path, 'r', encoding='utf-8') as f_lang:
for line in f_lang:
lng = line.strip()
lng_id = lng2idx[lng]
yield torch.tensor(lng_id)
def get_centroids(
device, model, data, languages, labels, layer, tokenizer, mean_pool=False):
"""Get language centeroids based on labels."""
labels = torch.cat(labels).to(device)
text_repr = torch.cat([
get_repr_from_layer(model, d.to(device), layer,
tokenizer.pad_token_id, mean_pool=mean_pool)
for d in data])
centroids = torch.zeros((len(languages), text_repr.size(1)))
for i, _ in enumerate(languages):
centroids[i] = text_repr[labels == i].mean(0)
return centroids
def load_and_batch_data(txt, lng, tokenizer, lng2idx, batch_size=32, epochs=1):
text_batches = batch_generator(
text_data_generator(
txt, tokenizer, epochs=epochs, max_len=110),
size=batch_size, tokenizer=tokenizer, padding=True)
lng_batches = batch_generator(
lng_data_generator(lng, lng2idx, epochs=epochs),
size=batch_size, tokenizer=None, padding=False)
return zip(text_batches, lng_batches)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"bert_model", type=str, help="Variant of pre-trained model.")
parser.add_argument(
"layer", type=int,
help="Layer from of layer from which the representation is taken.")
parser.add_argument(
"languages", type=str,
help="File with a list of languages.")
parser.add_argument(
"train_data_txt", type=str, help="Training sentences.")
parser.add_argument(
"train_data_lng", type=str,
help="Language codes for training sentences.")
parser.add_argument(
"val_data_txt", type=str, help="Validation sentences.")
parser.add_argument(
"val_data_lng", type=str,
help="Language codes for validation sentences.")
parser.add_argument(
"test_data_txt", type=str, help="Test sentences.")
parser.add_argument(
"test_data_lng", type=str, help="Language codes for test sentences.")
parser.add_argument(
"--hidden", default=None, type=int,
help="Size of the hidden classification layer.")
parser.add_argument("--num-threads", type=int, default=4)
parser.add_argument(
"--save-model", type=str, help="Path where to save the best model.")
parser.add_argument(
"--save-centroids", type=str, help="Path to save language centroids.")
parser.add_argument(
"--test-output", type=str, default=None,
help="Output for example classification.")
parser.add_argument(
"--skip-tokenization", default=False, action="store_true",
help="Only split on spaces, skip wordpieces.")
parser.add_argument(
"--mean-pool", default=False, action="store_true",
help="If true, use mean-pooling instead of [CLS] vecotr.")
parser.add_argument(
"--center-lng", default=False, action="store_true",
help="Center languages to be around coordinate origin.")
args = parser.parse_args()
with open(args.languages) as f_lang:
languages = [line.strip() for line in f_lang]
lng2idx = {lng: i for i, lng in enumerate(languages)}
torch.set_num_threads(args.num_threads)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer, model, model_dim, _ = load_bert(
args.bert_model, device)
if args.layer < -1:
print("Layer index cannot be negative.")
exit(1)
num_layers = None
if hasattr(model.config, "num_hidden_layers"):
num_layers = model.config.num_hidden_layers
if hasattr(model.config, "n_layers"):
num_layers = model.config.n_layers
if args.layer >= num_layers:
print(f"Model only has {num_layers} layers, {args.layer} is too much.")
exit(1)
train_batches = load_and_batch_data(
args.train_data_txt, args.train_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1000)
print("Train data iterator initialized.")
centroids = None
if args.center_lng:
print("Estimating language centroids.")
with torch.no_grad():
texts, labels = [], []
for _, (txt, lab) in zip(range(100), train_batches):
texts.append(txt)
labels.append(lab)
centroids = get_centroids(
device, model, texts, languages, labels,
args.layer, tokenizer, mean_pool=args.mean_pool)
centroids = centroids.to(device)
if args.save_centroids:
torch.save(centroids.cpu(), args.save_centroids)
print("Loading validation data.")
val_batches_raw = list(load_and_batch_data(
args.val_data_txt, args.val_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1))
print("Validation data loaded in memory, pre-computing BERT.")
val_batches = []
with torch.no_grad():
for tokens, lng in val_batches_raw:
bert_features = get_repr_from_layer(
model, tokens.to(device), args.layer,
tokenizer.pad_token_id, args.mean_pool).cpu()
val_batches.append((bert_features, lng))
print("Loading test data.")
test_batches_raw = list(load_and_batch_data(
args.test_data_txt, args.test_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1))
print("Test data loaded in memory, pre-computing BERT.")
test_batches = []
with torch.no_grad():
for tokens, lng in test_batches_raw:
bert_features = get_repr_from_layer(
model, tokens.to(device), args.layer,
tokenizer.pad_token_id, args.mean_pool).cpu()
test_batches.append((bert_features, lng))
print()
test_accuracies = []
all_test_outputs = []
trained_models = []
for exp_no in range(5):
print(f"Starting experiment no {exp_no + 1}")
print(f"------------------------------------")
if args.hidden is None:
classifier = nn.Linear(model_dim, len(languages))
else:
classifier = nn.Sequential(
nn.Linear(model_dim, args.hidden),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(args.hidden, len(languages)))
classifier = classifier.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=1e-3)
def evaluate(data_batches):
classifier.eval()
with torch.no_grad():
running_val_loss = 0.
running_val_acc = 0.
val_count = 0
outputs = []
for bert_features, lng in data_batches:
bert_features, lng = (
bert_features.to(device), lng.to(device))
batch_size = bert_features.size(0)
if centroids is not None:
bert_features = bert_features - centroids[lng]
prediction = classifier(bert_features)
batch_loss = criterion(prediction, lng)
predicted_lng = prediction.max(-1)[1]
batch_accuracy = torch.sum((predicted_lng == lng).float())
running_val_loss += (
batch_size * batch_loss.cpu().numpy().tolist())
running_val_acc += batch_accuracy.cpu().numpy().tolist()
val_count += batch_size
outputs.extend(predicted_lng.cpu().numpy().tolist())
val_loss = running_val_loss / val_count
accuracy = running_val_acc / val_count
return val_loss, accuracy, outputs
best_accuracy = 0.0
no_improvement = 0
learning_rate_decreased = 0
learning_rate = 1e-3
for i, (sentences, lng) in enumerate(train_batches):
try:
classifier.train()
optimizer.zero_grad()
sentences, lng = sentences.to(device), lng.to(device)
bert_features = get_repr_from_layer(
model, sentences, args.layer, tokenizer.pad_token_id,
mean_pool=args.mean_pool)
if centroids is not None:
with torch.no_grad():
bert_features = bert_features - centroids[lng]
prediction = classifier(bert_features)
loss = criterion(prediction, lng)
loss.backward()
optimizer.step()
if i % 10 == 9:
print(f"loss: {loss.cpu().detach().numpy().tolist():5g}")
if i % 50 == 49:
print()
val_loss, accuracy, _ = evaluate(val_batches)
print("Validation: "
f"loss: {val_loss:5g}, "
f"accuracy: {accuracy:5g}")
if accuracy > best_accuracy:
best_accuracy = accuracy
no_improvement = 0
else:
no_improvement += 1
if no_improvement >= 5:
if learning_rate_decreased >= 5:
print(
"Learning rate decreased five times, ending.")
break
learning_rate /= 2
print(f"Decreasing learning rate to {learning_rate}.")
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
learning_rate_decreased += 1
no_improvement = 0
print()
except KeyboardInterrupt:
break
model.eval()
test_loss, test_accuracy, test_outputs = evaluate(test_batches)
print()
print("Testing:")
print(f"test loss: {test_loss:5g}, "
f"test accuracy: {test_accuracy:5g}")
test_accuracies.append(test_accuracy)
this_test_outputs = []
for lng_prediction in test_outputs:
this_test_outputs.append(languages[lng_prediction])
all_test_outputs.append(this_test_outputs)
trained_models.append(classifier.cpu())
print()
print("===============================================")
print("All experiments done.")
print("===============================================")
print(f"Mean test accuracy {np.mean(test_accuracies)}")
print(f"Mean test stdev {np.std(test_accuracies)}")
best_exp_id = np.argmax(test_accuracies)
print(f"Best test accuracy {max(test_accuracies)}")
if args.save_model:
torch.save(trained_models[best_exp_id], args.save_model)
if args.test_output is not None:
with open(args.test_output, 'w') as f_out:
for prediction in all_test_outputs[best_exp_id]:
print(prediction, file=f_out)
if __name__ == "__main__":
main()
| <filename>lang_id.py
#!/usr/bin/env python
# coding: utf-8
"""Train language ID with BERT."""
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import (
text_data_generator, batch_generator, get_repr_from_layer, load_bert)
logging.basicConfig(level=logging.INFO)
def lng_data_generator(path, lng2idx, epochs=1):
    """Yield one 0-dim language-id tensor per line of *path*.

    The file is re-read from the beginning for each of *epochs* passes.
    Each line is stripped and looked up in the ``lng2idx`` mapping; a
    missing code raises ``KeyError``.
    """
    for _epoch in range(epochs):
        with open(path, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                yield torch.tensor(lng2idx[raw_line.strip()])
def get_centroids(
        device, model, data, languages, labels, layer, tokenizer, mean_pool=False):
    """Compute one centroid vector per language from labelled batches.

    Args:
        device: Torch device on which representations are computed.
        model: Pre-trained model passed through to ``get_repr_from_layer``.
        data: List of token-id batch tensors to embed.
        languages: List of language codes; centroid ``i`` belongs to
            ``languages[i]``.
        labels: List of 1-D tensors of language indices, aligned with ``data``.
        layer: Index of the model layer the representation is taken from.
        tokenizer: Tokenizer providing ``pad_token_id``.
        mean_pool: If True, mean-pool token vectors instead of using [CLS].

    Returns:
        Tensor of shape ``(len(languages), hidden_dim)`` (allocated on CPU).
    """
    # Flatten the per-batch label tensors into one vector on the target device.
    labels = torch.cat(labels).to(device)
    text_repr = torch.cat([
        get_repr_from_layer(model, d.to(device), layer,
                            tokenizer.pad_token_id, mean_pool=mean_pool)
        for d in data])
    centroids = torch.zeros((len(languages), text_repr.size(1)))
    for i, _ in enumerate(languages):
        # Mean of every sentence vector labelled with language i.
        # NOTE(review): a language with no samples yields a NaN row — confirm
        # callers always sample every language.
        centroids[i] = text_repr[labels == i].mean(0)
    return centroids
def load_and_batch_data(txt, lng, tokenizer, lng2idx, batch_size=32, epochs=1):
    """Pair padded sentence batches with their language-label batches."""
    sentence_stream = text_data_generator(
        txt, tokenizer, epochs=epochs, max_len=110)
    label_stream = lng_data_generator(lng, lng2idx, epochs=epochs)
    padded_sentences = batch_generator(
        sentence_stream, size=batch_size, tokenizer=tokenizer, padding=True)
    label_batches = batch_generator(
        label_stream, size=batch_size, tokenizer=None, padding=False)
    return zip(padded_sentences, label_batches)
def main():
    """Train and evaluate a language-ID classifier on frozen BERT features.

    Runs five independent experiments (fresh classifier each time), reports
    mean/stdev test accuracy, and optionally saves the best classifier, the
    language centroids and the per-sentence test predictions.
    """
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument(
        "bert_model", type=str, help="Variant of pre-trained model.")
    parser.add_argument(
        "layer", type=int,
        help="Layer from of layer from which the representation is taken.")
    parser.add_argument(
        "languages", type=str,
        help="File with a list of languages.")
    parser.add_argument(
        "train_data_txt", type=str, help="Training sentences.")
    parser.add_argument(
        "train_data_lng", type=str,
        help="Language codes for training sentences.")
    parser.add_argument(
        "val_data_txt", type=str, help="Validation sentences.")
    parser.add_argument(
        "val_data_lng", type=str,
        help="Language codes for validation sentences.")
    parser.add_argument(
        "test_data_txt", type=str, help="Test sentences.")
    parser.add_argument(
        "test_data_lng", type=str, help="Language codes for test sentences.")
    parser.add_argument(
        "--hidden", default=None, type=int,
        help="Size of the hidden classification layer.")
    parser.add_argument("--num-threads", type=int, default=4)
    parser.add_argument(
        "--save-model", type=str, help="Path where to save the best model.")
    parser.add_argument(
        "--save-centroids", type=str, help="Path to save language centroids.")
    parser.add_argument(
        "--test-output", type=str, default=None,
        help="Output for example classification.")
    parser.add_argument(
        "--skip-tokenization", default=False, action="store_true",
        help="Only split on spaces, skip wordpieces.")
    parser.add_argument(
        "--mean-pool", default=False, action="store_true",
        help="If true, use mean-pooling instead of [CLS] vecotr.")
    parser.add_argument(
        "--center-lng", default=False, action="store_true",
        help="Center languages to be around coordinate origin.")
    args = parser.parse_args()
    # One language code per line; the line index doubles as the class id.
    with open(args.languages) as f_lang:
        languages = [line.strip() for line in f_lang]
    lng2idx = {lng: i for i, lng in enumerate(languages)}
    torch.set_num_threads(args.num_threads)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer, model, model_dim, _ = load_bert(
        args.bert_model, device)
    # Layer -1 is accepted (presumably the embedding layer) — TODO confirm.
    if args.layer < -1:
        print("Layer index cannot be negative.")
        exit(1)
    # Different HuggingFace configs name the layer count differently.
    num_layers = None
    if hasattr(model.config, "num_hidden_layers"):
        num_layers = model.config.num_hidden_layers
    if hasattr(model.config, "n_layers"):
        num_layers = model.config.n_layers
    if args.layer >= num_layers:
        print(f"Model only has {num_layers} layers, {args.layer} is too much.")
        exit(1)
    # Effectively endless training stream: 1000 passes over the training data.
    train_batches = load_and_batch_data(
        args.train_data_txt, args.train_data_lng, tokenizer,
        lng2idx, batch_size=32, epochs=1000)
    print("Train data iterator initialized.")
    centroids = None
    if args.center_lng:
        print("Estimating language centroids.")
        with torch.no_grad():
            # Estimate centroids from the first 100 training batches only.
            texts, labels = [], []
            for _, (txt, lab) in zip(range(100), train_batches):
                texts.append(txt)
                labels.append(lab)
            centroids = get_centroids(
                device, model, texts, languages, labels,
                args.layer, tokenizer, mean_pool=args.mean_pool)
            centroids = centroids.to(device)
        if args.save_centroids:
            torch.save(centroids.cpu(), args.save_centroids)
    # BERT stays frozen, so validation/test features can be pre-computed once.
    print("Loading validation data.")
    val_batches_raw = list(load_and_batch_data(
        args.val_data_txt, args.val_data_lng, tokenizer,
        lng2idx, batch_size=32, epochs=1))
    print("Validation data loaded in memory, pre-computing BERT.")
    val_batches = []
    with torch.no_grad():
        for tokens, lng in val_batches_raw:
            bert_features = get_repr_from_layer(
                model, tokens.to(device), args.layer,
                tokenizer.pad_token_id, args.mean_pool).cpu()
            val_batches.append((bert_features, lng))
    print("Loading test data.")
    test_batches_raw = list(load_and_batch_data(
        args.test_data_txt, args.test_data_lng, tokenizer,
        lng2idx, batch_size=32, epochs=1))
    print("Test data loaded in memory, pre-computing BERT.")
    test_batches = []
    with torch.no_grad():
        for tokens, lng in test_batches_raw:
            bert_features = get_repr_from_layer(
                model, tokens.to(device), args.layer,
                tokenizer.pad_token_id, args.mean_pool).cpu()
            test_batches.append((bert_features, lng))
    print()
    test_accuracies = []
    all_test_outputs = []
    trained_models = []
    # Five independent runs, each with a freshly initialized classifier.
    for exp_no in range(5):
        print(f"Starting experiment no {exp_no + 1}")
        print(f"------------------------------------")
        if args.hidden is None:
            classifier = nn.Linear(model_dim, len(languages))
        else:
            classifier = nn.Sequential(
                nn.Linear(model_dim, args.hidden),
                nn.ReLU(),
                nn.Dropout(0.1),
                nn.Linear(args.hidden, len(languages)))
        classifier = classifier.to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(classifier.parameters(), lr=1e-3)
        def evaluate(data_batches):
            """Return (mean loss, accuracy, predicted ids) over data_batches."""
            classifier.eval()
            with torch.no_grad():
                running_val_loss = 0.
                running_val_acc = 0.
                val_count = 0
                outputs = []
                for bert_features, lng in data_batches:
                    bert_features, lng = (
                        bert_features.to(device), lng.to(device))
                    batch_size = bert_features.size(0)
                    if centroids is not None:
                        bert_features = bert_features - centroids[lng]
                    prediction = classifier(bert_features)
                    batch_loss = criterion(prediction, lng)
                    predicted_lng = prediction.max(-1)[1]
                    batch_accuracy = torch.sum((predicted_lng == lng).float())
                    running_val_loss += (
                        batch_size * batch_loss.cpu().numpy().tolist())
                    running_val_acc += batch_accuracy.cpu().numpy().tolist()
                    val_count += batch_size
                    outputs.extend(predicted_lng.cpu().numpy().tolist())
            val_loss = running_val_loss / val_count
            accuracy = running_val_acc / val_count
            return val_loss, accuracy, outputs
        best_accuracy = 0.0
        no_improvement = 0
        learning_rate_decreased = 0
        learning_rate = 1e-3
        for i, (sentences, lng) in enumerate(train_batches):
            try:
                classifier.train()
                optimizer.zero_grad()
                sentences, lng = sentences.to(device), lng.to(device)
                bert_features = get_repr_from_layer(
                    model, sentences, args.layer, tokenizer.pad_token_id,
                    mean_pool=args.mean_pool)
                if centroids is not None:
                    with torch.no_grad():
                        bert_features = bert_features - centroids[lng]
                prediction = classifier(bert_features)
                loss = criterion(prediction, lng)
                loss.backward()
                optimizer.step()
                if i % 10 == 9:
                    print(f"loss: {loss.cpu().detach().numpy().tolist():5g}")
                # Every 50 batches: validate; halve the LR after 5 stagnant
                # checks, and stop after the 5th LR decrease.
                if i % 50 == 49:
                    print()
                    val_loss, accuracy, _ = evaluate(val_batches)
                    print("Validation: "
                          f"loss: {val_loss:5g}, "
                          f"accuracy: {accuracy:5g}")
                    if accuracy > best_accuracy:
                        best_accuracy = accuracy
                        no_improvement = 0
                    else:
                        no_improvement += 1
                        if no_improvement >= 5:
                            if learning_rate_decreased >= 5:
                                print(
                                    "Learning rate decreased five times, ending.")
                                break
                            learning_rate /= 2
                            print(f"Decreasing learning rate to {learning_rate}.")
                            for param_group in optimizer.param_groups:
                                param_group['lr'] = learning_rate
                            learning_rate_decreased += 1
                            no_improvement = 0
                    print()
            except KeyboardInterrupt:
                break
        # NOTE(review): this puts the (already frozen) BERT into eval mode;
        # the classifier's train/eval switching happens inside evaluate().
        model.eval()
        test_loss, test_accuracy, test_outputs = evaluate(test_batches)
        print()
        print("Testing:")
        print(f"test loss: {test_loss:5g}, "
              f"test accuracy: {test_accuracy:5g}")
        test_accuracies.append(test_accuracy)
        # Map predicted class ids back to language codes.
        this_test_outputs = []
        for lng_prediction in test_outputs:
            this_test_outputs.append(languages[lng_prediction])
        all_test_outputs.append(this_test_outputs)
        trained_models.append(classifier.cpu())
        print()
    # Aggregate over the five runs and keep the best classifier.
    print("===============================================")
    print("All experiments done.")
    print("===============================================")
    print(f"Mean test accuracy {np.mean(test_accuracies)}")
    print(f"Mean test stdev {np.std(test_accuracies)}")
    best_exp_id = np.argmax(test_accuracies)
    print(f"Best test accuracy {max(test_accuracies)}")
    if args.save_model:
        torch.save(trained_models[best_exp_id], args.save_model)
    if args.test_output is not None:
        with open(args.test_output, 'w') as f_out:
            for prediction in all_test_outputs[best_exp_id]:
                print(prediction, file=f_out)
if __name__ == "__main__":
main()
| en | 0.762865 | #!/usr/bin/env python # coding: utf-8 Train language ID with BERT. Get language centeroids based on labels. | 2.646733 | 3 |
en/047/python/main.py | franciscogomes2020/exercises | 0 | 6616434 | <gh_stars>0
# Create a program that displays on screen all the even numbers that are in the range between 1 and 50.
| # Create a program that displays on screen all the even numbers that are in the range between 1 and 50. | en | 0.960951 | # Create a program that displays on screen all the even numbers that are in the range between 1 and 50. | 2.375157 | 2 |
tests/test_dijkstra.py | Luke-Poeppel/decitala | 6 | 6616435 | import os
import numpy as np
import pytest
from decitala.fragment import GreekFoot
from decitala.hash_table import GreekFootHashTable, DecitalaHashTable
from decitala.search import rolling_hash_search, path_finder
from decitala.path_finding import dijkstra, path_finding_utils
here = os.path.abspath(os.path.dirname(__file__))
s1_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_1.xml"
s3_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_3.xml"
s4_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_4.xml"
bach_fp = os.path.dirname(here) + "/tests/static/bwv67.7.mxl"
@pytest.fixture
def s1_fragments():
	"""Greek-foot matches in Shuffled Transcription 1 (no subdivision)."""
	return rolling_hash_search(
		filepath=s1_fp,
		table=GreekFootHashTable(),
		part_num=0,
		allow_subdivision=False
	)
@pytest.fixture
def s3_fragments():
	"""Greek-foot matches in Shuffled Transcription 3."""
	return rolling_hash_search(
		filepath=s3_fp,
		table=GreekFootHashTable(),
		part_num=0
	)
@pytest.fixture
def s4_fragments():
	"""Greek-foot matches in Shuffled Transcription 4."""
	return rolling_hash_search(
		filepath=s4_fp,
		table=GreekFootHashTable(),
		part_num=0
	)
def test_dijkstra_path_1(s1_fragments):
	"""Best path through S1 under a weighted 3D cost function."""
	source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(
		data=s1_fragments,
		cost_function_class=path_finding_utils.CostFunction3D(0.8, 0.1, 0.1)
	)
	path_ids = dijkstra.generate_path(best_pred, source, target)
	found = sorted(
		(f for f in s1_fragments if f.id_ in path_ids),
		key=lambda f: f.onset_range[0]
	)
	# Alternating Peon_IV / Iamb pattern covering the opening three beats.
	assert {f.fragment for f in found} == {GreekFoot("Peon_IV"), GreekFoot("Iamb")}
	assert [f.onset_range for f in found] == [
		(0.0, 0.625),
		(0.875, 1.25),
		(1.25, 1.875),
		(1.875, 2.375),
		(2.375, 3.0)
	]
def test_dijkstra_path_2(s3_fragments):
	"""Best path through S3 with the default cost function."""
	source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=s3_fragments)
	path_ids = dijkstra.generate_path(best_pred, source, target)
	found = sorted(
		(f for f in s3_fragments if f.id_ in path_ids),
		key=lambda f: f.onset_range[0]
	)
	assert [f.onset_range for f in found] == [(0.0, 0.5), (0.5, 1.25)]
	assert {f.fragment for f in found} == {GreekFoot("Anapest"), GreekFoot("Choriamb")}
def test_dijkstra_path_3(s4_fragments):
	"""Best path through S4 consists of a single Peon_IV."""
	source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=s4_fragments)
	path_ids = dijkstra.generate_path(best_pred, source, target)
	found = sorted(
		(f for f in s4_fragments if f.id_ in path_ids),
		key=lambda f: f.onset_range[0]
	)
	assert len(found) == 1
	assert found[0].fragment == GreekFoot("Peon_IV")
def test_dijkstra_best_source_and_sink():
	"""With exact decitala matches on BWV 67.7, source and sink coincide."""
	exact_matches = rolling_hash_search(
		filepath=bach_fp,
		table=DecitalaHashTable(exact=True),
		part_num=0,
	)
	source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=exact_matches)
	assert source == target
import numpy as np
import pytest
from decitala.fragment import GreekFoot
from decitala.hash_table import GreekFootHashTable, DecitalaHashTable
from decitala.search import rolling_hash_search, path_finder
from decitala.path_finding import dijkstra, path_finding_utils
here = os.path.abspath(os.path.dirname(__file__))
s1_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_1.xml"
s3_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_3.xml"
s4_fp = os.path.dirname(here) + "/tests/static/Shuffled_Transcription_4.xml"
bach_fp = os.path.dirname(here) + "/tests/static/bwv67.7.mxl"
@pytest.fixture
def s1_fragments():
return rolling_hash_search(
filepath=s1_fp,
part_num=0,
table=GreekFootHashTable(),
allow_subdivision=False
)
@pytest.fixture
def s3_fragments():
return rolling_hash_search(
filepath=s3_fp,
part_num=0,
table=GreekFootHashTable()
)
@pytest.fixture
def s4_fragments():
return rolling_hash_search(
filepath=s4_fp,
part_num=0,
table=GreekFootHashTable()
)
def test_dijkstra_path_1(s1_fragments):
source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(
data=s1_fragments,
cost_function_class=path_finding_utils.CostFunction3D(0.8, 0.1, 0.1)
)
best_path = dijkstra.generate_path(
best_pred,
source,
target
)
path_frags = sorted([x for x in s1_fragments if x.id_ in best_path], key=lambda x: x.onset_range[0])
expected_fragments = [
GreekFoot("Peon_IV"),
GreekFoot("Iamb"),
GreekFoot("Peon_IV"),
GreekFoot("Iamb"),
GreekFoot("Peon_IV"),
]
expected_onset_ranges = [
(0.0, 0.625),
(0.875, 1.25),
(1.25, 1.875),
(1.875, 2.375),
(2.375, 3.0)
]
assert set(x.fragment for x in path_frags) == set(expected_fragments)
assert [x.onset_range for x in path_frags] == expected_onset_ranges
def test_dijkstra_path_2(s3_fragments):
expected_fragments = [GreekFoot("Anapest"), GreekFoot("Choriamb")]
expected_onset_ranges = [(0.0, 0.5), (0.5, 1.25)]
source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=s3_fragments)
best_path = dijkstra.generate_path(
best_pred,
source,
target
)
path_frags = sorted([x for x in s3_fragments if x.id_ in best_path], key=lambda x: x.onset_range[0])
assert [x.onset_range for x in path_frags] == expected_onset_ranges
assert set(x.fragment for x in path_frags) == set(expected_fragments)
def test_dijkstra_path_3(s4_fragments):
expected_fragment = GreekFoot("Peon_IV")
source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=s4_fragments)
best_path = dijkstra.generate_path(
best_pred,
source,
target
)
path_frags = sorted([x for x in s4_fragments if x.id_ in best_path], key=lambda x: x.onset_range[0])
assert len(path_frags) == 1
assert path_frags[0].fragment == expected_fragment
def test_dijkstra_best_source_and_sink():
exact_bach_frags = rolling_hash_search(
filepath=bach_fp,
part_num=0,
table=DecitalaHashTable(exact=True),
)
source, target, best_pred = dijkstra.dijkstra_best_source_and_sink(data=exact_bach_frags)
assert source == target | none | 1 | 2.030968 | 2 | |
tests/modules/test_caffeine.py | kunalshetye/bumblebee-status | 5 | 6616436 | <reponame>kunalshetye/bumblebee-status<filename>tests/modules/test_caffeine.py
# pylint: disable=C0103,C0111
import json
import unittest
import mock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.caffeine import Module
class TestCaffeineModule(unittest.TestCase):
    """Tests for bumblebee's caffeine module (screensaver inhibitor)."""

    def setUp(self):
        mocks.setup_test(self, Module)
        # Canned `xset q` output: a zero timeout means the screensaver is
        # disabled, i.e. caffeine is currently activated.
        self.xset_active = " timeout: 0 cycle: 123"
        self.xset_inactive = " timeout: 600 cycle: 123"

    def tearDown(self):
        mocks.teardown_test(self)

    def test_text(self):
        """The widget renders no text of its own (state/icon only)."""
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(self.module.caffeine(self.anyWidget), "")

    def test_active(self):
        """A zero xset timeout reports the 'activated' state."""
        self.popen.mock.communicate.return_value = (self.xset_active, None)
        self.assertNotIn("deactivated", self.module.state(self.anyWidget))
        self.assertIn("activated", self.module.state(self.anyWidget))

    def test_inactive(self):
        """A non-zero timeout — or unparseable output — reports 'deactivated'."""
        self.popen.mock.communicate.return_value = (self.xset_inactive, None)
        self.assertIn("deactivated", self.module.state(self.anyWidget))
        self.popen.mock.communicate.return_value = ("no text", None)
        self.assertIn("deactivated", self.module.state(self.anyWidget))

    def test_toggle(self):
        """A left click flips the screensaver state and notifies the user."""
        self.popen.mock.communicate.return_value = (self.xset_active, None)
        mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
        self.popen.assert_call("xset s default")
        self.popen.assert_call("notify-send \"Out of coffee\"")
        self.popen.mock.communicate.return_value = (self.xset_inactive, None)
        mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
        self.popen.assert_call("xset s off")
        self.popen.assert_call("notify-send \"Consuming caffeine\"")
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| # pylint: disable=C0103,C0111
import json
import unittest
import mock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.caffeine import Module
class TestCaffeineModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self.xset_active = " timeout: 0 cycle: 123"
self.xset_inactive = " timeout: 600 cycle: 123"
def tearDown(self):
mocks.teardown_test(self)
def test_text(self):
self.assertEquals(self.module.caffeine(self.anyWidget), "")
def test_active(self):
self.popen.mock.communicate.return_value = (self.xset_active, None)
self.assertTrue(not "deactivated" in self.module.state(self.anyWidget))
self.assertTrue("activated" in self.module.state(self.anyWidget))
def test_inactive(self):
self.popen.mock.communicate.return_value = (self.xset_inactive, None)
self.assertTrue("deactivated" in self.module.state(self.anyWidget))
self.popen.mock.communicate.return_value = ("no text", None)
self.assertTrue("deactivated" in self.module.state(self.anyWidget))
def test_toggle(self):
self.popen.mock.communicate.return_value = (self.xset_active, None)
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("xset s default")
self.popen.assert_call("notify-send \"Out of coffee\"")
self.popen.mock.communicate.return_value = (self.xset_inactive, None)
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("xset s off")
self.popen.assert_call("notify-send \"Consuming caffeine\"")
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 | de | 0.176571 | # pylint: disable=C0103,C0111 # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 | 2.281999 | 2 |
tbl-maker/tmp/thread.py | robdelacruz/boneyard | 0 | 6616437 | <reponame>robdelacruz/boneyard
import threading

n_times = 0


def on_timer():
    """Print a marker and re-arm the timer until it has fired three times."""
    global n_times
    print("on_timer()")
    n_times += 1
    if n_times < 3:
        # Re-schedule ourselves; the chain stops after the third firing.
        t = threading.Timer(2.0, on_timer)
        t.start()


# Kick off the first two-second timer.
t = threading.Timer(2.0, on_timer)
t.start()
| import threading
n_times = 0
def on_timer():
global n_times
print("on_timer()")
n_times += 1
if n_times >= 3:
return
t = threading.Timer(2.0, on_timer)
t.start()
t = threading.Timer(2.0, on_timer)
t.start() | none | 1 | 3.611065 | 4 | |
CellProfiler/tests/modules/test_classifyobjects.py | aidotse/Team-rahma.ai | 0 | 6616438 | import numpy
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT, COLTYPE_INTEGER
import cellprofiler.modules.classifyobjects
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
# Shared fixture names used by make_workspace and the tests below.
OBJECTS_NAME = "myobjects"
MEASUREMENT_NAME_1 = "Measurement1"
MEASUREMENT_NAME_2 = "Measurement2"
IMAGE_NAME = "image"
def make_workspace(labels, contrast_choice, measurement1=None, measurement2=None):
    """Build a CellProfiler workspace wired up with a ClassifyObjects module.

    Args:
        labels: Integer label matrix defining the object set.
        contrast_choice: ClassifyObjects contrast mode (single vs. two
            measurements); controls which settings are populated below.
        measurement1: Optional per-object values stored as MEASUREMENT_NAME_1.
        measurement2: Optional per-object values stored as MEASUREMENT_NAME_2;
            providing it also adds a second single-measurement group.

    Returns:
        Tuple of (workspace, module).
    """
    object_set = cellprofiler_core.object.ObjectSet()
    objects = cellprofiler_core.object.Objects()
    objects.segmented = labels
    object_set.add_objects(objects, OBJECTS_NAME)
    measurements = cellprofiler_core.measurement.Measurements()
    module = cellprofiler.modules.classifyobjects.ClassifyObjects()
    m_names = []
    if measurement1 is not None:
        measurements.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME_1, measurement1)
        m_names.append(MEASUREMENT_NAME_1)
    if measurement2 is not None:
        measurements.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME_2, measurement2)
        module.add_single_measurement()
        m_names.append(MEASUREMENT_NAME_2)
    image_set_list = cellprofiler_core.image.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    module.contrast_choice.value = contrast_choice
    if (
        module.contrast_choice
        == cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT
    ):
        # One settings group per provided measurement.
        for i, m in enumerate(m_names):
            group = module.single_measurements[i]
            group.object_name.value = OBJECTS_NAME
            group.measurement.value = m
            group.image_name.value = IMAGE_NAME
    else:
        module.object_name.value = OBJECTS_NAME
        module.image_name.value = IMAGE_NAME
        module.first_measurement.value = MEASUREMENT_NAME_1
        module.second_measurement.value = MEASUREMENT_NAME_2
    module.set_module_num(1)
    pipeline = cellprofiler_core.pipeline.Pipeline()
    def callback(caller, event):
        # Fail the test immediately if the pipeline reports a run exception.
        assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
    pipeline.add_listener(callback)
    pipeline.add_module(module)
    workspace = cellprofiler_core.workspace.Workspace(
        pipeline, module, image_set, object_set, measurements, image_set_list
    )
    return workspace, module
def test_classify_single_none():
    """Single-measurement mode must tolerate an image with no objects."""
    empty_labels = numpy.zeros((10, 10), int)
    empty_measurement = numpy.zeros((0,), float)
    workspace, module = make_workspace(
        empty_labels,
        cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT,
        empty_measurement,
    )
    module.run(workspace)
    bin_features = [
        "Classify_Measurement1_Bin_%d" % bin_idx for bin_idx in (1, 2, 3)
    ]
    for feature in bin_features:
        values = workspace.measurements.get_current_measurement(
            OBJECTS_NAME, feature
        )
        assert len(values) == 0
def test_classify_single_even():
    """Evenly-spaced bins: per-object bin flags, image stats, and metadata.

    Four objects with measurements (0.5, 0, 1, 0.1) are classified with one
    even bin between 0.2 and 0.7, plus an under-range and over-range bin.
    """
    m = numpy.array((0.5, 0, 1, 0.1))
    labels = numpy.zeros((20, 10), int)
    labels[2:5, 3:7] = 1
    labels[12:15, 1:4] = 2
    labels[6:11, 5:9] = 3
    labels[16:19, 5:9] = 4
    workspace, module = make_workspace(
        labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
    )
    module.single_measurements[
        0
    ].bin_choice.value = cellprofiler.modules.classifyobjects.BC_EVEN
    module.single_measurements[0].low_threshold.value = 0.2
    module.single_measurements[0].high_threshold.value = 0.7
    module.single_measurements[0].bin_count.value = 1
    module.single_measurements[0].wants_low_bin.value = True
    module.single_measurements[0].wants_high_bin.value = True
    module.single_measurements[0].wants_images.value = True
    # Bin 1 = below 0.2, bin 2 = [0.2, 0.7), bin 3 = above 0.7.
    expected_obj = dict(
        Classify_Measurement1_Bin_1=(0, 1, 0, 1),
        Classify_Measurement1_Bin_2=(1, 0, 0, 0),
        Classify_Measurement1_Bin_3=(0, 0, 1, 0),
    )
    expected_img = dict(
        Classify_Measurement1_Bin_1_NumObjectsPerBin=2,
        Classify_Measurement1_Bin_2_NumObjectsPerBin=1,
        Classify_Measurement1_Bin_3_NumObjectsPerBin=1,
        Classify_Measurement1_Bin_1_PctObjectsPerBin=50.0,
        Classify_Measurement1_Bin_2_PctObjectsPerBin=25.0,
        Classify_Measurement1_Bin_3_PctObjectsPerBin=25.0,
    )
    module.run(workspace)
    for measurement, expected_values in list(expected_obj.items()):
        values = workspace.measurements.get_current_measurement(
            OBJECTS_NAME, measurement
        )
        assert len(values) == 4
        assert numpy.all(values == numpy.array(expected_values))
    for measurement, expected_values in list(expected_img.items()):
        values = workspace.measurements.get_current_measurement(
            "Image", measurement
        )
        assert values == expected_values
    # Output image: background black; objects in the same bin share a color.
    image = workspace.image_set.get_image(IMAGE_NAME)
    pixel_data = image.pixel_data
    assert numpy.all(pixel_data[labels == 0, :] == 0)
    colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5))]
    for i, color in enumerate(colors + [colors[1]]):
        assert numpy.all(pixel_data[labels == i + 1, :] == color)
    columns = module.get_measurement_columns(None)
    assert len(columns) == 9
    assert len(set([column[1] for column in columns])) == 9  # no duplicates
    for column in columns:
        if column[0] != OBJECTS_NAME:  # Must be image
            assert column[0] == "Image"
            assert column[1] in list(expected_img.keys())
            # BUGFIX: the ternary must be parenthesized so the comparison
            # applies to both branches; previously the float branch asserted
            # the truthy string COLTYPE_FLOAT and always passed.
            assert column[2] == (
                COLTYPE_INTEGER
                if column[1].endswith(
                    cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
                )
                else COLTYPE_FLOAT
            )
        else:
            assert column[0] == OBJECTS_NAME
            assert column[1] in list(expected_obj.keys())
            assert column[2] == COLTYPE_INTEGER
    categories = module.get_categories(None, "Image")
    assert len(categories) == 1
    assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
    names = module.get_measurements(None, "Image", "foo")
    assert len(names) == 0
    categories = module.get_categories(None, OBJECTS_NAME)
    assert len(categories) == 1
    assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
    names = module.get_measurements(None, OBJECTS_NAME, "foo")
    assert len(names) == 0
    names = module.get_measurements(
        None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 0
    names = module.get_measurements(
        None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 3
    assert len(set(names)) == 3
    assert all(
        [
            "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
            in list(expected_obj.keys())
            for name in names
        ]
    )
    names = module.get_measurements(
        None,
        "Image",
        cellprofiler.modules.classifyobjects.M_CATEGORY,
    )
    assert len(names) == 6
    assert len(set(names)) == 6
    assert all(
        [
            "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
            in list(expected_img.keys())
            for name in names
        ]
    )
def test_classify_single_custom():
m = numpy.array((0.5, 0, 1, 0.1))
labels = numpy.zeros((20, 10), int)
labels[2:5, 3:7] = 1
labels[12:15, 1:4] = 2
labels[6:11, 5:9] = 3
labels[16:19, 5:9] = 4
workspace, module = make_workspace(
labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
)
module.single_measurements[
0
].bin_choice.value = cellprofiler.modules.classifyobjects.BC_CUSTOM
module.single_measurements[0].custom_thresholds.value = ".2,.7"
module.single_measurements[0].bin_count.value = 14 # should ignore
module.single_measurements[0].wants_custom_names.value = True
module.single_measurements[0].wants_low_bin.value = True
module.single_measurements[0].wants_high_bin.value = True
module.single_measurements[0].bin_names.value = "Three,Blind,Mice"
module.single_measurements[0].wants_images.value = True
expected_img = dict(
Classify_Three_NumObjectsPerBin=2,
Classify_Three_PctObjectsPerBin=50.0,
Classify_Blind_NumObjectsPerBin=1,
Classify_Blind_PctObjectsPerBin=25.0,
Classify_Mice_NumObjectsPerBin=1,
Classify_Mice_PctObjectsPerBin=25.0,
)
expected_obj = dict(
Classify_Three=(0, 1, 0, 1),
Classify_Blind=(1, 0, 0, 0),
Classify_Mice=(0, 0, 1, 0),
)
module.run(workspace)
for measurement, expected_values in list(expected_obj.items()):
values = workspace.measurements.get_current_measurement(
OBJECTS_NAME, measurement
)
assert len(values) == 4
assert numpy.all(values == numpy.array(expected_values))
for measurement, expected_values in list(expected_img.items()):
values = workspace.measurements.get_current_measurement(
"Image", measurement
)
assert values == expected_values
image = workspace.image_set.get_image(IMAGE_NAME)
pixel_data = image.pixel_data
assert numpy.all(pixel_data[labels == 0, :] == 0)
colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5))]
for i, color in enumerate(colors + [colors[1]]):
assert numpy.all(pixel_data[labels == i + 1, :] == color)
columns = module.get_measurement_columns(None)
assert len(columns) == 9
assert len(set([column[1] for column in columns])) == 9 # no duplicates
for column in columns:
if column[0] != OBJECTS_NAME: # Must be image
assert column[0] == "Image"
assert column[1] in list(expected_img.keys())
assert (
column[2] == COLTYPE_INTEGER
if column[1].endswith(
cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
)
else COLTYPE_FLOAT
)
else:
assert column[0] == OBJECTS_NAME
assert column[1] in list(expected_obj.keys())
assert column[2] == COLTYPE_INTEGER
categories = module.get_categories(None, "Image")
assert len(categories) == 1
categories = module.get_categories(None, OBJECTS_NAME)
assert len(categories) == 1
assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
names = module.get_measurements(None, OBJECTS_NAME, "foo")
assert len(names) == 0
names = module.get_measurements(
None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
)
assert len(names) == 0
names = module.get_measurements(
None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
)
assert len(names) == 3
assert len(set(names)) == 3
assert all(
[
"_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
in list(expected_obj.keys())
for name in names
]
)
names = module.get_measurements(
None,
"Image",
cellprofiler.modules.classifyobjects.M_CATEGORY,
)
assert len(names) == 6
assert len(set(names)) == 6
assert all(
[
"_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
in list(expected_img.keys())
for name in names
]
)
def test_last_is_nan():
    """Regression test for issue #1553.

    An object whose measurement is NaN must be classified into none of the
    bins, and running the module must not raise when the last object's
    measurement is NaN (or when the last object has no measurement at all).
    """
    for leave_last_out in (False, True):
        # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
        m = numpy.array((0.5, 0, 1, numpy.nan))
        if leave_last_out:
            # Also exercise fewer measurements than objects.
            m = m[:-1]
        labels = numpy.zeros((20, 10), int)
        labels[2:5, 3:7] = 1
        labels[12:15, 1:4] = 2
        labels[6:11, 5:9] = 3
        labels[16:19, 5:9] = 4
        workspace, module = make_workspace(
            labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
        )
        module.single_measurements[
            0
        ].bin_choice.value = cellprofiler.modules.classifyobjects.BC_CUSTOM
        module.single_measurements[0].custom_thresholds.value = ".2,.7"
        module.single_measurements[0].bin_count.value = 14  # should ignore
        module.single_measurements[0].wants_custom_names.value = True
        module.single_measurements[0].wants_low_bin.value = True
        module.single_measurements[0].wants_high_bin.value = True
        module.single_measurements[0].bin_names.value = "Three,Blind,Mice"
        module.single_measurements[0].wants_images.value = True
        # The NaN object (object 4) is counted in no bin, so each bin holds
        # exactly one of the remaining three objects.
        expected_img = dict(
            Classify_Three_NumObjectsPerBin=1,
            Classify_Three_PctObjectsPerBin=25.0,
            Classify_Blind_NumObjectsPerBin=1,
            Classify_Blind_PctObjectsPerBin=25.0,
            Classify_Mice_NumObjectsPerBin=1,
            Classify_Mice_PctObjectsPerBin=25.0,
        )
        expected_obj = dict(
            Classify_Three=(0, 1, 0, 0),
            Classify_Blind=(1, 0, 0, 0),
            Classify_Mice=(0, 0, 1, 0),
        )
        module.run(workspace)
        for measurement, expected_values in expected_obj.items():
            values = workspace.measurements.get_current_measurement(
                OBJECTS_NAME, measurement
            )
            assert len(values) == 4
            assert numpy.all(values == numpy.array(expected_values))
        for measurement, expected_values in expected_img.items():
            values = workspace.measurements.get_current_measurement(
                "Image", measurement
            )
            assert values == expected_values
        image = workspace.image_set.get_image(IMAGE_NAME)
        pixel_data = image.pixel_data
        assert numpy.all(pixel_data[labels == 0, :] == 0)
        colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5), (16, 5))]
        # The extra iteration checks labels == 5, which is empty, so the
        # assertion is vacuously true there.
        for i, color in enumerate(colors + [colors[1]]):
            assert numpy.all(pixel_data[labels == i + 1, :] == color)
def test_two_none():
    """Two-measurement mode must yield empty classifications with no objects."""
    workspace, module = make_workspace(
        numpy.zeros((10, 10), int),
        cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
        numpy.zeros((0,), float),
        numpy.zeros((0,), float),
    )
    module.run(workspace)
    # Every low/high quadrant feature should exist but hold zero values.
    levels = ("low", "high")
    for first_level in levels:
        for second_level in levels:
            feature = "Classify_Measurement1_%s_Measurement2_%s" % (
                first_level,
                second_level,
            )
            values = workspace.measurements.get_current_measurement(
                OBJECTS_NAME, feature
            )
            assert len(values) == 0
def test_two():
    """Sweep every combination of threshold methods (mean/median/custom) and
    custom vs. default quadrant names for the two-measurement mode, checking
    per-object and per-image measurements, metadata, and the rendered image.
    """
    numpy.random.seed(0)
    labels = numpy.zeros((10, 20), int)
    index = 1
    for i_min, i_max in ((1, 4), (6, 9)):
        for j_min, j_max in ((2, 6), (8, 11), (13, 18)):
            labels[i_min:i_max, j_min:j_max] = index
            index += 1
    num_labels = index - 1
    # Exponentially spaced values ensure mean, median and the custom cutoffs
    # each split the objects differently.
    exps = numpy.exp(numpy.arange(numpy.max(labels)))
    m1 = numpy.random.permutation(exps)
    m2 = numpy.random.permutation(exps)
    for wants_custom_names in (False, True):
        for tm1 in (
            cellprofiler.modules.classifyobjects.TM_MEAN,
            cellprofiler.modules.classifyobjects.TM_MEDIAN,
            cellprofiler.modules.classifyobjects.TM_CUSTOM,
        ):
            for tm2 in (
                cellprofiler.modules.classifyobjects.TM_MEAN,
                cellprofiler.modules.classifyobjects.TM_MEDIAN,
                cellprofiler.modules.classifyobjects.TM_CUSTOM,
            ):
                workspace, module = make_workspace(
                    labels,
                    cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
                    m1,
                    m2,
                )
                assert isinstance(
                    module, cellprofiler.modules.classifyobjects.ClassifyObjects
                )
                module.first_threshold_method.value = tm1
                module.first_threshold.value = 8
                module.second_threshold_method.value = tm2
                module.second_threshold.value = 70
                module.wants_image.value = True
                def cutoff(method, custom_cutoff):
                    # Reproduce the cutoff the module should be using.
                    if method == cellprofiler.modules.classifyobjects.TM_MEAN:
                        return numpy.mean(exps)
                    elif method == cellprofiler.modules.classifyobjects.TM_MEDIAN:
                        return numpy.median(exps)
                    else:
                        return custom_cutoff
                c1 = cutoff(tm1, module.first_threshold.value)
                c2 = cutoff(tm2, module.second_threshold.value)
                m1_over = m1 >= c1
                m2_over = m2 >= c2
                if wants_custom_names:
                    f_names = ("TL", "TR", "BL", "BR")
                    module.wants_custom_names.value = True
                    module.low_low_custom_name.value = f_names[0]
                    module.low_high_custom_name.value = f_names[1]
                    module.high_low_custom_name.value = f_names[2]
                    module.high_high_custom_name.value = f_names[3]
                else:
                    f_names = (
                        "Measurement1_low_Measurement2_low",
                        "Measurement1_low_Measurement2_high",
                        "Measurement1_high_Measurement2_low",
                        "Measurement1_high_Measurement2_high",
                    )
                m_names = [
                    "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
                    for name in f_names
                ]
                module.run(workspace)
                columns = module.get_measurement_columns(None)
                for column in columns:
                    if column[0] != OBJECTS_NAME:  # Must be image
                        assert column[0] == "Image"
                        # Counts are integers, percentages are floats.  (The
                        # previous "assert a == b if cond else c" form made
                        # the else branch vacuously assert a non-empty string.)
                        expected_type = (
                            COLTYPE_INTEGER
                            if column[1].endswith(
                                cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
                            )
                            else COLTYPE_FLOAT
                        )
                        assert column[2] == expected_type
                    else:
                        assert column[0] == OBJECTS_NAME
                        assert column[2] == COLTYPE_INTEGER
                assert len(columns) == 12
                assert (
                    len(set([column[1] for column in columns])) == 12
                )  # no duplicates
                categories = module.get_categories(
                    None, "Image"
                )
                assert len(categories) == 1
                categories = module.get_categories(None, OBJECTS_NAME)
                assert len(categories) == 1
                assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
                names = module.get_measurements(None, OBJECTS_NAME, "foo")
                assert len(names) == 0
                names = module.get_measurements(
                    None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
                )
                assert len(names) == 0
                names = module.get_measurements(
                    None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
                )
                assert len(names) == 4
                # Quadrant membership computed independently of the module.
                for m_name, expected in zip(
                    m_names,
                    (
                        (~m1_over) & (~m2_over),
                        (~m1_over) & m2_over,
                        m1_over & ~m2_over,
                        m1_over & m2_over,
                    ),
                ):
                    m = workspace.measurements.get_current_measurement(
                        "Image",
                        "_".join(
                            (m_name, cellprofiler.modules.classifyobjects.F_NUM_PER_BIN)
                        ),
                    )
                    assert m == expected.astype(int).sum()
                    m = workspace.measurements.get_current_measurement(
                        "Image",
                        "_".join(
                            (m_name, cellprofiler.modules.classifyobjects.F_PCT_PER_BIN)
                        ),
                    )
                    assert m == 100.0 * float(expected.astype(int).sum()) / num_labels
                    m = workspace.measurements.get_current_measurement(
                        OBJECTS_NAME, m_name
                    )
                    assert numpy.all(m == expected.astype(int))
                    assert m_name in [column[1] for column in columns]
                    assert m_name in [
                        "_".join(
                            (cellprofiler.modules.classifyobjects.M_CATEGORY, name)
                        )
                        for name in names
                    ]
                image = workspace.image_set.get_image(IMAGE_NAME).pixel_data
                assert numpy.all(image[labels == 0, :] == 0)
                # All objects in one quadrant must share a single color.
                colors = image[(labels > 0) & (m[labels - 1] == 1), :]
                if colors.shape[0] > 0:
                    assert all(
                        [numpy.all(colors[:, i] == colors[0, i]) for i in range(3)]
                    )
def test_nans():
    """NaN in either measurement must put an object in no quadrant.

    Runs twice: once with measurements for every object and once with the
    last object's measurements left out entirely.
    """
    labels = numpy.zeros((10, 15), int)
    labels[3:5, 3:5] = 1
    labels[6:8, 3:5] = 3
    labels[3:5, 6:8] = 4
    labels[6:8, 6:8] = 5
    labels[3:5, 10:12] = 2
    # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
    m1 = numpy.array((1, 2, numpy.nan, 1, numpy.nan))
    m2 = numpy.array((1, 2, 1, numpy.nan, numpy.nan))
    for leave_last_out in (False, True):
        end = numpy.max(labels) - 1 if leave_last_out else numpy.max(labels)
        workspace, module = make_workspace(
            labels,
            cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
            m1[:end],
            m2[:end],
        )
        assert isinstance(module, cellprofiler.modules.classifyobjects.ClassifyObjects)
        module.first_threshold_method.value = (
            cellprofiler.modules.classifyobjects.TM_MEAN
        )
        module.first_threshold.value = 2
        module.second_threshold_method.value = (
            cellprofiler.modules.classifyobjects.TM_MEAN
        )
        module.second_threshold.value = 2
        module.wants_image.value = True
        module.wants_custom_names.value = False
        module.run(workspace)
        f_names = (
            "Measurement1_low_Measurement2_low",
            "Measurement1_low_Measurement2_high",
            "Measurement1_high_Measurement2_low",
            "Measurement1_high_Measurement2_high",
        )
        m_names = [
            "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
            for name in f_names
        ]
        m = workspace.measurements
        # Only objects 1 and 2 have two non-NaN measurements; every other
        # object lands in no quadrant.
        for m_name, expected in zip(
            m_names,
            [
                numpy.array((1, 0, 0, 0, 0)),
                numpy.array((0, 0, 0, 0, 0)),
                numpy.array((0, 0, 0, 0, 0)),
                numpy.array((0, 1, 0, 0, 0)),
            ],
        ):
            values = m[OBJECTS_NAME, m_name]
            numpy.testing.assert_array_equal(values, expected)
def test_nan_offset_by_1():
    """Regression test of issue 1636.

    Object 1, whose two measurements are both valid, must map to the last
    (high/high) color index even though object 2's first measurement is NaN.
    """
    labels = numpy.zeros((10, 15), int)
    labels[3:5, 3:5] = 1
    labels[6:8, 3:5] = 2
    # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
    m1 = numpy.array((4, numpy.nan))
    m2 = numpy.array((4, 4))
    workspace, module = make_workspace(
        labels, cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS, m1, m2
    )
    assert isinstance(module, cellprofiler.modules.classifyobjects.ClassifyObjects)
    module.first_threshold_method.value = cellprofiler.modules.classifyobjects.TM_MEAN
    module.first_threshold.value = 2
    module.second_threshold_method.value = cellprofiler.modules.classifyobjects.TM_MEAN
    module.second_threshold.value = 2
    module.wants_image.value = True
    module.wants_custom_names.value = False
    module.run(workspace)
    image = workspace.image_set.get_image(IMAGE_NAME).pixel_data
    colors = module.get_colors(4)
    # Invert the coloring: map every pixel back to the index of its color.
    reverse = numpy.zeros(image.shape[:2], int)
    for idx, color in enumerate(colors):
        reverse[numpy.all(image == color[numpy.newaxis, numpy.newaxis, :3], 2)] = idx
    assert numpy.all(reverse[labels == 1] == 4)
import numpy
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT, COLTYPE_INTEGER
import cellprofiler.modules.classifyobjects
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
# Object, measurement, and image names shared by all of the tests below.
OBJECTS_NAME = "myobjects"
MEASUREMENT_NAME_1 = "Measurement1"
MEASUREMENT_NAME_2 = "Measurement2"
IMAGE_NAME = "image"
def make_workspace(labels, contrast_choice, measurement1=None, measurement2=None):
    """Build a minimal workspace around a ClassifyObjects module.

    labels - integer label matrix defining the segmented objects
    contrast_choice - BY_SINGLE_MEASUREMENT or BY_TWO_MEASUREMENTS
    measurement1, measurement2 - optional per-object measurement arrays,
        recorded under MEASUREMENT_NAME_1 / MEASUREMENT_NAME_2

    Returns (workspace, module).
    """
    object_set = cellprofiler_core.object.ObjectSet()
    objects = cellprofiler_core.object.Objects()
    objects.segmented = labels
    object_set.add_objects(objects, OBJECTS_NAME)
    measurements = cellprofiler_core.measurement.Measurements()
    module = cellprofiler.modules.classifyobjects.ClassifyObjects()
    m_names = []
    if measurement1 is not None:
        measurements.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME_1, measurement1)
        m_names.append(MEASUREMENT_NAME_1)
    if measurement2 is not None:
        # A second measurement needs a second single-measurement group.
        measurements.add_measurement(OBJECTS_NAME, MEASUREMENT_NAME_2, measurement2)
        module.add_single_measurement()
        m_names.append(MEASUREMENT_NAME_2)
    image_set_list = cellprofiler_core.image.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    module.contrast_choice.value = contrast_choice
    if (
        module.contrast_choice
        == cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT
    ):
        # One settings group per supplied measurement.
        for i, m in enumerate(m_names):
            group = module.single_measurements[i]
            group.object_name.value = OBJECTS_NAME
            group.measurement.value = m
            group.image_name.value = IMAGE_NAME
    else:
        module.object_name.value = OBJECTS_NAME
        module.image_name.value = IMAGE_NAME
        module.first_measurement.value = MEASUREMENT_NAME_1
        module.second_measurement.value = MEASUREMENT_NAME_2
    module.set_module_num(1)
    pipeline = cellprofiler_core.pipeline.Pipeline()
    def callback(caller, event):
        # Fail the test if the pipeline reports a run exception.
        assert not isinstance(event, cellprofiler_core.pipeline.event.RunException)
    pipeline.add_listener(callback)
    pipeline.add_module(module)
    workspace = cellprofiler_core.workspace.Workspace(
        pipeline, module, image_set, object_set, measurements, image_set_list
    )
    return workspace, module
def test_classify_single_none():
    """Single-measurement mode must run cleanly when there are no objects."""
    workspace, module = make_workspace(
        numpy.zeros((10, 10), int),
        cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT,
        numpy.zeros((0,), float),
    )
    module.run(workspace)
    # Each bin feature should exist but contain no per-object values.
    expected_features = (
        "Classify_Measurement1_Bin_1",
        "Classify_Measurement1_Bin_2",
        "Classify_Measurement1_Bin_3",
    )
    for feature in expected_features:
        values = workspace.measurements.get_current_measurement(
            OBJECTS_NAME, feature
        )
        assert len(values) == 0
def test_classify_single_even():
    """Single measurement with evenly spaced bins: check per-object and
    per-image classifications, the rendered image, and all metadata."""
    m = numpy.array((0.5, 0, 1, 0.1))
    labels = numpy.zeros((20, 10), int)
    labels[2:5, 3:7] = 1
    labels[12:15, 1:4] = 2
    labels[6:11, 5:9] = 3
    labels[16:19, 5:9] = 4
    workspace, module = make_workspace(
        labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
    )
    module.single_measurements[
        0
    ].bin_choice.value = cellprofiler.modules.classifyobjects.BC_EVEN
    module.single_measurements[0].low_threshold.value = 0.2
    module.single_measurements[0].high_threshold.value = 0.7
    module.single_measurements[0].bin_count.value = 1
    module.single_measurements[0].wants_low_bin.value = True
    module.single_measurements[0].wants_high_bin.value = True
    module.single_measurements[0].wants_images.value = True
    expected_obj = dict(
        Classify_Measurement1_Bin_1=(0, 1, 0, 1),
        Classify_Measurement1_Bin_2=(1, 0, 0, 0),
        Classify_Measurement1_Bin_3=(0, 0, 1, 0),
    )
    expected_img = dict(
        Classify_Measurement1_Bin_1_NumObjectsPerBin=2,
        Classify_Measurement1_Bin_2_NumObjectsPerBin=1,
        Classify_Measurement1_Bin_3_NumObjectsPerBin=1,
        Classify_Measurement1_Bin_1_PctObjectsPerBin=50.0,
        Classify_Measurement1_Bin_2_PctObjectsPerBin=25.0,
        Classify_Measurement1_Bin_3_PctObjectsPerBin=25.0,
    )
    module.run(workspace)
    for measurement, expected_values in expected_obj.items():
        values = workspace.measurements.get_current_measurement(
            OBJECTS_NAME, measurement
        )
        assert len(values) == 4
        assert numpy.all(values == numpy.array(expected_values))
    for measurement, expected_values in expected_img.items():
        values = workspace.measurements.get_current_measurement(
            "Image", measurement
        )
        assert values == expected_values
    image = workspace.image_set.get_image(IMAGE_NAME)
    pixel_data = image.pixel_data
    assert numpy.all(pixel_data[labels == 0, :] == 0)
    # Objects 2 and 4 share a bin, so object 4 must reuse object 2's color.
    colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5))]
    for i, color in enumerate(colors + [colors[1]]):
        assert numpy.all(pixel_data[labels == i + 1, :] == color)
    columns = module.get_measurement_columns(None)
    assert len(columns) == 9
    assert len(set([column[1] for column in columns])) == 9  # no duplicates
    for column in columns:
        if column[0] != OBJECTS_NAME:  # Must be image
            assert column[0] == "Image"
            assert column[1] in expected_img
            # Counts are integers, percentages are floats.  (The previous
            # "assert a == b if cond else c" form made the else branch
            # vacuously assert a non-empty string.)
            expected_type = (
                COLTYPE_INTEGER
                if column[1].endswith(
                    cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
                )
                else COLTYPE_FLOAT
            )
            assert column[2] == expected_type
        else:
            assert column[0] == OBJECTS_NAME
            assert column[1] in expected_obj
            assert column[2] == COLTYPE_INTEGER
    categories = module.get_categories(None, "Image")
    assert len(categories) == 1
    assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
    names = module.get_measurements(None, "Image", "foo")
    assert len(names) == 0
    categories = module.get_categories(None, OBJECTS_NAME)
    assert len(categories) == 1
    assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
    names = module.get_measurements(None, OBJECTS_NAME, "foo")
    assert len(names) == 0
    names = module.get_measurements(
        None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 0
    names = module.get_measurements(
        None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 3
    assert len(set(names)) == 3
    assert all(
        "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
        in expected_obj
        for name in names
    )
    names = module.get_measurements(
        None,
        "Image",
        cellprofiler.modules.classifyobjects.M_CATEGORY,
    )
    assert len(names) == 6
    assert len(set(names)) == 6
    assert all(
        "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
        in expected_img
        for name in names
    )
def test_classify_single_custom():
    """Single measurement with custom thresholds and custom bin names:
    check per-object and per-image classifications and all metadata."""
    m = numpy.array((0.5, 0, 1, 0.1))
    labels = numpy.zeros((20, 10), int)
    labels[2:5, 3:7] = 1
    labels[12:15, 1:4] = 2
    labels[6:11, 5:9] = 3
    labels[16:19, 5:9] = 4
    workspace, module = make_workspace(
        labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
    )
    module.single_measurements[
        0
    ].bin_choice.value = cellprofiler.modules.classifyobjects.BC_CUSTOM
    module.single_measurements[0].custom_thresholds.value = ".2,.7"
    module.single_measurements[0].bin_count.value = 14  # should ignore
    module.single_measurements[0].wants_custom_names.value = True
    module.single_measurements[0].wants_low_bin.value = True
    module.single_measurements[0].wants_high_bin.value = True
    module.single_measurements[0].bin_names.value = "Three,Blind,Mice"
    module.single_measurements[0].wants_images.value = True
    expected_img = dict(
        Classify_Three_NumObjectsPerBin=2,
        Classify_Three_PctObjectsPerBin=50.0,
        Classify_Blind_NumObjectsPerBin=1,
        Classify_Blind_PctObjectsPerBin=25.0,
        Classify_Mice_NumObjectsPerBin=1,
        Classify_Mice_PctObjectsPerBin=25.0,
    )
    expected_obj = dict(
        Classify_Three=(0, 1, 0, 1),
        Classify_Blind=(1, 0, 0, 0),
        Classify_Mice=(0, 0, 1, 0),
    )
    module.run(workspace)
    for measurement, expected_values in expected_obj.items():
        values = workspace.measurements.get_current_measurement(
            OBJECTS_NAME, measurement
        )
        assert len(values) == 4
        assert numpy.all(values == numpy.array(expected_values))
    for measurement, expected_values in expected_img.items():
        values = workspace.measurements.get_current_measurement(
            "Image", measurement
        )
        assert values == expected_values
    image = workspace.image_set.get_image(IMAGE_NAME)
    pixel_data = image.pixel_data
    assert numpy.all(pixel_data[labels == 0, :] == 0)
    # Objects 2 and 4 share a bin, so object 4 must reuse object 2's color.
    colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5))]
    for i, color in enumerate(colors + [colors[1]]):
        assert numpy.all(pixel_data[labels == i + 1, :] == color)
    columns = module.get_measurement_columns(None)
    assert len(columns) == 9
    assert len(set([column[1] for column in columns])) == 9  # no duplicates
    for column in columns:
        if column[0] != OBJECTS_NAME:  # Must be image
            assert column[0] == "Image"
            assert column[1] in expected_img
            # Counts are integers, percentages are floats.  (The previous
            # "assert a == b if cond else c" form made the else branch
            # vacuously assert a non-empty string.)
            expected_type = (
                COLTYPE_INTEGER
                if column[1].endswith(
                    cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
                )
                else COLTYPE_FLOAT
            )
            assert column[2] == expected_type
        else:
            assert column[0] == OBJECTS_NAME
            assert column[1] in expected_obj
            assert column[2] == COLTYPE_INTEGER
    categories = module.get_categories(None, "Image")
    assert len(categories) == 1
    categories = module.get_categories(None, OBJECTS_NAME)
    assert len(categories) == 1
    assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
    names = module.get_measurements(None, OBJECTS_NAME, "foo")
    assert len(names) == 0
    names = module.get_measurements(
        None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 0
    names = module.get_measurements(
        None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
    )
    assert len(names) == 3
    assert len(set(names)) == 3
    assert all(
        "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
        in expected_obj
        for name in names
    )
    names = module.get_measurements(
        None,
        "Image",
        cellprofiler.modules.classifyobjects.M_CATEGORY,
    )
    assert len(names) == 6
    assert len(set(names)) == 6
    assert all(
        "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
        in expected_img
        for name in names
    )
def test_last_is_nan():
    """Regression test for issue #1553.

    An object whose measurement is NaN must be classified into none of the
    bins, and running the module must not raise when the last object's
    measurement is NaN (or when the last object has no measurement at all).
    """
    for leave_last_out in (False, True):
        # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
        m = numpy.array((0.5, 0, 1, numpy.nan))
        if leave_last_out:
            # Also exercise fewer measurements than objects.
            m = m[:-1]
        labels = numpy.zeros((20, 10), int)
        labels[2:5, 3:7] = 1
        labels[12:15, 1:4] = 2
        labels[6:11, 5:9] = 3
        labels[16:19, 5:9] = 4
        workspace, module = make_workspace(
            labels, cellprofiler.modules.classifyobjects.BY_SINGLE_MEASUREMENT, m
        )
        module.single_measurements[
            0
        ].bin_choice.value = cellprofiler.modules.classifyobjects.BC_CUSTOM
        module.single_measurements[0].custom_thresholds.value = ".2,.7"
        module.single_measurements[0].bin_count.value = 14  # should ignore
        module.single_measurements[0].wants_custom_names.value = True
        module.single_measurements[0].wants_low_bin.value = True
        module.single_measurements[0].wants_high_bin.value = True
        module.single_measurements[0].bin_names.value = "Three,Blind,Mice"
        module.single_measurements[0].wants_images.value = True
        # The NaN object (object 4) is counted in no bin, so each bin holds
        # exactly one of the remaining three objects.
        expected_img = dict(
            Classify_Three_NumObjectsPerBin=1,
            Classify_Three_PctObjectsPerBin=25.0,
            Classify_Blind_NumObjectsPerBin=1,
            Classify_Blind_PctObjectsPerBin=25.0,
            Classify_Mice_NumObjectsPerBin=1,
            Classify_Mice_PctObjectsPerBin=25.0,
        )
        expected_obj = dict(
            Classify_Three=(0, 1, 0, 0),
            Classify_Blind=(1, 0, 0, 0),
            Classify_Mice=(0, 0, 1, 0),
        )
        module.run(workspace)
        for measurement, expected_values in expected_obj.items():
            values = workspace.measurements.get_current_measurement(
                OBJECTS_NAME, measurement
            )
            assert len(values) == 4
            assert numpy.all(values == numpy.array(expected_values))
        for measurement, expected_values in expected_img.items():
            values = workspace.measurements.get_current_measurement(
                "Image", measurement
            )
            assert values == expected_values
        image = workspace.image_set.get_image(IMAGE_NAME)
        pixel_data = image.pixel_data
        assert numpy.all(pixel_data[labels == 0, :] == 0)
        colors = [pixel_data[x, y, :] for x, y in ((2, 3), (12, 1), (6, 5), (16, 5))]
        # The extra iteration checks labels == 5, which is empty, so the
        # assertion is vacuously true there.
        for i, color in enumerate(colors + [colors[1]]):
            assert numpy.all(pixel_data[labels == i + 1, :] == color)
def test_two_none():
    """Two-measurement mode must yield empty classifications with no objects."""
    workspace, module = make_workspace(
        numpy.zeros((10, 10), int),
        cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
        numpy.zeros((0,), float),
        numpy.zeros((0,), float),
    )
    module.run(workspace)
    # Every low/high quadrant feature should exist but hold zero values.
    levels = ("low", "high")
    for first_level in levels:
        for second_level in levels:
            feature = "Classify_Measurement1_%s_Measurement2_%s" % (
                first_level,
                second_level,
            )
            values = workspace.measurements.get_current_measurement(
                OBJECTS_NAME, feature
            )
            assert len(values) == 0
def test_two():
    """Sweep every combination of threshold methods (mean/median/custom) and
    custom vs. default quadrant names for the two-measurement mode, checking
    per-object and per-image measurements, metadata, and the rendered image.
    """
    numpy.random.seed(0)
    labels = numpy.zeros((10, 20), int)
    index = 1
    for i_min, i_max in ((1, 4), (6, 9)):
        for j_min, j_max in ((2, 6), (8, 11), (13, 18)):
            labels[i_min:i_max, j_min:j_max] = index
            index += 1
    num_labels = index - 1
    # Exponentially spaced values ensure mean, median and the custom cutoffs
    # each split the objects differently.
    exps = numpy.exp(numpy.arange(numpy.max(labels)))
    m1 = numpy.random.permutation(exps)
    m2 = numpy.random.permutation(exps)
    for wants_custom_names in (False, True):
        for tm1 in (
            cellprofiler.modules.classifyobjects.TM_MEAN,
            cellprofiler.modules.classifyobjects.TM_MEDIAN,
            cellprofiler.modules.classifyobjects.TM_CUSTOM,
        ):
            for tm2 in (
                cellprofiler.modules.classifyobjects.TM_MEAN,
                cellprofiler.modules.classifyobjects.TM_MEDIAN,
                cellprofiler.modules.classifyobjects.TM_CUSTOM,
            ):
                workspace, module = make_workspace(
                    labels,
                    cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
                    m1,
                    m2,
                )
                assert isinstance(
                    module, cellprofiler.modules.classifyobjects.ClassifyObjects
                )
                module.first_threshold_method.value = tm1
                module.first_threshold.value = 8
                module.second_threshold_method.value = tm2
                module.second_threshold.value = 70
                module.wants_image.value = True
                def cutoff(method, custom_cutoff):
                    # Reproduce the cutoff the module should be using.
                    if method == cellprofiler.modules.classifyobjects.TM_MEAN:
                        return numpy.mean(exps)
                    elif method == cellprofiler.modules.classifyobjects.TM_MEDIAN:
                        return numpy.median(exps)
                    else:
                        return custom_cutoff
                c1 = cutoff(tm1, module.first_threshold.value)
                c2 = cutoff(tm2, module.second_threshold.value)
                m1_over = m1 >= c1
                m2_over = m2 >= c2
                if wants_custom_names:
                    f_names = ("TL", "TR", "BL", "BR")
                    module.wants_custom_names.value = True
                    module.low_low_custom_name.value = f_names[0]
                    module.low_high_custom_name.value = f_names[1]
                    module.high_low_custom_name.value = f_names[2]
                    module.high_high_custom_name.value = f_names[3]
                else:
                    f_names = (
                        "Measurement1_low_Measurement2_low",
                        "Measurement1_low_Measurement2_high",
                        "Measurement1_high_Measurement2_low",
                        "Measurement1_high_Measurement2_high",
                    )
                m_names = [
                    "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
                    for name in f_names
                ]
                module.run(workspace)
                columns = module.get_measurement_columns(None)
                for column in columns:
                    if column[0] != OBJECTS_NAME:  # Must be image
                        assert column[0] == "Image"
                        # Counts are integers, percentages are floats.  (The
                        # previous "assert a == b if cond else c" form made
                        # the else branch vacuously assert a non-empty string.)
                        expected_type = (
                            COLTYPE_INTEGER
                            if column[1].endswith(
                                cellprofiler.modules.classifyobjects.F_NUM_PER_BIN
                            )
                            else COLTYPE_FLOAT
                        )
                        assert column[2] == expected_type
                    else:
                        assert column[0] == OBJECTS_NAME
                        assert column[2] == COLTYPE_INTEGER
                assert len(columns) == 12
                assert (
                    len(set([column[1] for column in columns])) == 12
                )  # no duplicates
                categories = module.get_categories(
                    None, "Image"
                )
                assert len(categories) == 1
                categories = module.get_categories(None, OBJECTS_NAME)
                assert len(categories) == 1
                assert categories[0] == cellprofiler.modules.classifyobjects.M_CATEGORY
                names = module.get_measurements(None, OBJECTS_NAME, "foo")
                assert len(names) == 0
                names = module.get_measurements(
                    None, "foo", cellprofiler.modules.classifyobjects.M_CATEGORY
                )
                assert len(names) == 0
                names = module.get_measurements(
                    None, OBJECTS_NAME, cellprofiler.modules.classifyobjects.M_CATEGORY
                )
                assert len(names) == 4
                # Quadrant membership computed independently of the module.
                for m_name, expected in zip(
                    m_names,
                    (
                        (~m1_over) & (~m2_over),
                        (~m1_over) & m2_over,
                        m1_over & ~m2_over,
                        m1_over & m2_over,
                    ),
                ):
                    m = workspace.measurements.get_current_measurement(
                        "Image",
                        "_".join(
                            (m_name, cellprofiler.modules.classifyobjects.F_NUM_PER_BIN)
                        ),
                    )
                    assert m == expected.astype(int).sum()
                    m = workspace.measurements.get_current_measurement(
                        "Image",
                        "_".join(
                            (m_name, cellprofiler.modules.classifyobjects.F_PCT_PER_BIN)
                        ),
                    )
                    assert m == 100.0 * float(expected.astype(int).sum()) / num_labels
                    m = workspace.measurements.get_current_measurement(
                        OBJECTS_NAME, m_name
                    )
                    assert numpy.all(m == expected.astype(int))
                    assert m_name in [column[1] for column in columns]
                    assert m_name in [
                        "_".join(
                            (cellprofiler.modules.classifyobjects.M_CATEGORY, name)
                        )
                        for name in names
                    ]
                image = workspace.image_set.get_image(IMAGE_NAME).pixel_data
                assert numpy.all(image[labels == 0, :] == 0)
                # All objects in one quadrant must share a single color.
                colors = image[(labels > 0) & (m[labels - 1] == 1), :]
                if colors.shape[0] > 0:
                    assert all(
                        [numpy.all(colors[:, i] == colors[0, i]) for i in range(3)]
                    )
def test_nans():
    """NaN in either measurement must put an object in no quadrant.

    Runs twice: once with measurements for every object and once with the
    last object's measurements left out entirely.
    """
    labels = numpy.zeros((10, 15), int)
    labels[3:5, 3:5] = 1
    labels[6:8, 3:5] = 3
    labels[3:5, 6:8] = 4
    labels[6:8, 6:8] = 5
    labels[3:5, 10:12] = 2
    # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
    m1 = numpy.array((1, 2, numpy.nan, 1, numpy.nan))
    m2 = numpy.array((1, 2, 1, numpy.nan, numpy.nan))
    for leave_last_out in (False, True):
        end = numpy.max(labels) - 1 if leave_last_out else numpy.max(labels)
        workspace, module = make_workspace(
            labels,
            cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS,
            m1[:end],
            m2[:end],
        )
        assert isinstance(module, cellprofiler.modules.classifyobjects.ClassifyObjects)
        module.first_threshold_method.value = (
            cellprofiler.modules.classifyobjects.TM_MEAN
        )
        module.first_threshold.value = 2
        module.second_threshold_method.value = (
            cellprofiler.modules.classifyobjects.TM_MEAN
        )
        module.second_threshold.value = 2
        module.wants_image.value = True
        module.wants_custom_names.value = False
        module.run(workspace)
        f_names = (
            "Measurement1_low_Measurement2_low",
            "Measurement1_low_Measurement2_high",
            "Measurement1_high_Measurement2_low",
            "Measurement1_high_Measurement2_high",
        )
        m_names = [
            "_".join((cellprofiler.modules.classifyobjects.M_CATEGORY, name))
            for name in f_names
        ]
        m = workspace.measurements
        # Only objects 1 and 2 have two non-NaN measurements; every other
        # object lands in no quadrant.
        for m_name, expected in zip(
            m_names,
            [
                numpy.array((1, 0, 0, 0, 0)),
                numpy.array((0, 0, 0, 0, 0)),
                numpy.array((0, 0, 0, 0, 0)),
                numpy.array((0, 1, 0, 0, 0)),
            ],
        ):
            values = m[OBJECTS_NAME, m_name]
            numpy.testing.assert_array_equal(values, expected)
def test_nan_offset_by_1():
    """Regression test of issue 1636.

    Object 1, whose two measurements are both valid, must map to the last
    (high/high) color index even though object 2's first measurement is NaN.
    """
    labels = numpy.zeros((10, 15), int)
    labels[3:5, 3:5] = 1
    labels[6:8, 3:5] = 2
    # numpy.nan replaces the numpy.NaN alias, which was removed in NumPy 2.0.
    m1 = numpy.array((4, numpy.nan))
    m2 = numpy.array((4, 4))
    workspace, module = make_workspace(
        labels, cellprofiler.modules.classifyobjects.BY_TWO_MEASUREMENTS, m1, m2
    )
    assert isinstance(module, cellprofiler.modules.classifyobjects.ClassifyObjects)
    module.first_threshold_method.value = cellprofiler.modules.classifyobjects.TM_MEAN
    module.first_threshold.value = 2
    module.second_threshold_method.value = cellprofiler.modules.classifyobjects.TM_MEAN
    module.second_threshold.value = 2
    module.wants_image.value = True
    module.wants_custom_names.value = False
    module.run(workspace)
    image = workspace.image_set.get_image(IMAGE_NAME).pixel_data
    colors = module.get_colors(4)
    # Invert the coloring: map every pixel back to the index of its color.
    reverse = numpy.zeros(image.shape[:2], int)
    for idx, color in enumerate(colors):
        reverse[numpy.all(image == color[numpy.newaxis, numpy.newaxis, :3], 2)] = idx
    assert numpy.all(reverse[labels == 1] == 4)
Scripts/pextract_schism_xyz.py | wzhengui/pylibs | 5 | 6616439 | <reponame>wzhengui/pylibs
#!/usr/bin/env python3
'''
Extract SCHISM variable values at (x,y,z) from station.bp.
1). work for both uncombined and combined SCHISM outputs
2). can extract multiple variables at the same time
3). can work in interactive or batch mode
4). output in ACSII or *npz format
'''
from pylib import *
import time
#-----------------------------------------------------------------------------
#Input
#-----------------------------------------------------------------------------
run='/sciclone/data10/wangzg/fabm_dev/RUN12' #run dir containing outputs
stacks=[1,146] #stacks of schout_*.nc
sname='RUN12/cosine' #name for results
svars=['elev','salt','temp','COS_1'] #SCHISM variables to be extracted
rvars=['elev','salt','temp','NO3'] #rename variable names
bpfile='/sciclone/data10/wangzg/fabm_dev/RUN12/station.bp' #file name of station.bp
icmb=0 #icmb=0: work on uncombined; icmb=1: work on combined schout_*.nc
ifs=1 #ifs=1: depth relative to surface; ifs=0: fixed depth (z coordiante)
fmt=0 #fmt=0: output as *.npz format; fmt=1: output as ASCII
#optional
grid='/sciclone/data10/wangzg/fabm_dev/RUN12/grid.npz' #saved grid info, to speed up; use hgrid.gr3 and vgrid.in if not exist
igather=1 #igather=1: save data on each rank,then combine; igather=0: use MPI
#resource requst
walltime='00:10:00'
qnode='x5672'; nnode=2; ppn=8 #hurricane, ppn=8
#qnode='bora'; nnode=2; ppn=20 #bora, ppn=20
#qnode='vortex'; nnode=2; ppn=12 #vortex, ppn=12
#qnode='femto'; nnode=2; ppn=12 #femto,ppn=32
#qnode='potomac'; nnode=4; ppn=8 #ches, ppn=12
#qnode='james'; nnode=5; ppn=20 #james, ppn=20
#qnode='skylake'; nnode=2; ppn=36 #viz3,skylake, ppn=36
#qnode='haswell'; nnode=2; ppn=2 #viz3,haswell, ppn=24,or 28
#qnode='frontera'; nnode=1; ppn=56 #frontera, ppn=56
qname='flex' #partition name (needed for frontera)
jname='Rd_{}'.format(os.path.basename(run)) #job name
ibatch=1; scrout='screen.out'; bdir=os.path.abspath(os.path.curdir)
#-----------------------------------------------------------------------------
#on front node: 1). submit jobs first (qsub), 2) running parallel jobs (mpirun)
#-----------------------------------------------------------------------------
if ibatch==0: os.environ['job_on_node']='1'; os.environ['bdir']=bdir #run locally
if os.getenv('job_on_node')==None:
if os.getenv('param')==None: fmt=0; bcode=sys.argv[0]
if os.getenv('param')!=None: fmt=1; bdir,bcode=os.getenv('param').split(); os.chdir(bdir)
scode=get_hpc_command(bcode,bdir,jname,qnode,nnode,ppn,walltime,scrout,fmt=fmt,qname=qname)
print(scode); os.system(scode); os._exit(0)
#-----------------------------------------------------------------------------
#on computation node
#-----------------------------------------------------------------------------
bdir=os.getenv('bdir'); os.chdir(bdir) #enter working dir
comm=MPI.COMM_WORLD; nproc=comm.Get_size(); myrank=comm.Get_rank()
if myrank==0: t0=time.time()
#-----------------------------------------------------------------------------
#do MPI work on each core
#-----------------------------------------------------------------------------
nproc=max(min(nproc,int(diff(stacks))),1)
if myrank==0:
sdir=os.path.dirname(sname)
if (not os.path.exists(sdir)) and sdir!='': os.mkdir(sdir)
#-----------------------------------------------------------------------------
#compute grid and bpfile information
#-----------------------------------------------------------------------------
#read grid information
t00=time.time()
if os.path.exists(grid):
gd=loadz(grid).hgrid; vd=loadz(grid).vgrid
else:
gd=read_schism_hgrid('{}/hgrid.gr3'.format(run))
vd=read_schism_vgrid('{}/vgrid.in'.format(run))
#compute area coordinate for stations
bp=read_schism_bpfile(bpfile)
bp.ie,bp.ip,bp.acor=gd.compute_acor(c_[bp.x,bp.y]); #bp.ne,bp.np=gd.ne,gd.np
bp.dp=gd.dp[bp.ip]; bp.dp0=(bp.dp*bp.acor).sum(axis=1)
if vd.ivcor==1: bp.sigma=vd.sigma[bp.ip]; bp.kbp=vd.kbp[bp.ip]; vd.sigma=None
#check pts inside grid
sindn=nonzero(bp.ie==-1)[0]
if len(sindn)!=0: sys.exit('pts outside of domain: {}'.format(c_[bp.x[sindn],bp.y[sindn]]))
dt00=time.time()-t00; print('finish reading grid info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
#read subdomain info
if icmb==0:
t00=time.time()
subs=gd.read_prop('{}/outputs/global_to_local.prop'.format(run)).astype('int')[bp.ie]
isub=unique(subs); sbps=[]; sindes=[]
for i, isubi in enumerate(isub):
sinde=nonzero(subs==isubi)[0] #elem index of stations
#build the iegl and ipgl
T=read_schism_local_to_global('{}/outputs/local_to_global_{}'.format(run,srank(isubi,run)))
iegl=dict(zip(T.ielg,arange(T.ne))); ipgl=dict(zip(T.iplg,arange(T.np)))
#compute subdomain ie,ip and acor,dp,z,sigma,kbp
sbp=zdata(); #sbp.ne,sbp.np=T.ne,T.np
sbp.ie=array([iegl[k] for k in bp.ie[sinde]])
sbp.ip=array([[ipgl[k] for k in n ] for n in bp.ip[sinde]])
sbp.acor=bp.acor[sinde]; sbp.dp=bp.dp[sinde]; sbp.z=bp.z[sinde]; sbp.nsta=len(sinde)
if vd.ivcor==1: sbp.sigma=bp.sigma[sinde]; sbp.kbp=bp.kbp[sinde]
sbps.append(sbp); sindes.extend(sinde)
sinds=argsort(array(sindes)) #indices to sort station order
dt00=time.time()-t00; print('finish reading subdomain info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
else:
isub=[None]; sbps=[bp]; sinds=arange(bp.nsta)
#-----------------------------------------------------------------------------
#extract data on each processor
#-----------------------------------------------------------------------------
#distribute jobs
istacks=[i for i in arange(stacks[0],stacks[1]+1) if i%nproc==myrank]
#initilize data capsule
S=zdata(); S.time=[]; #S.bp=bp
for i in svars: exec('S.{}=[]'.format(i))
#extract (x,y,z) value for each stack and each subdomain
for n,istack in enumerate(istacks):
t00=time.time(); Si=zdata()
for m in svars: exec('Si.{}=[]'.format(m))
for m,isubi in enumerate(isub):
#open schout_*.nc
if icmb==0: fname='{}/outputs/schout_{}_{}.nc'.format(run,srank(isubi,run),istack)
if icmb==1: fname='{}/outputs/schout_{}.nc'.format(run,istack)
if (not os.path.exists(fname)) and icmb==0: sys.exit('not exist: {}'.format(fname))
C=ReadNC(fname,1); sbp=sbps[m]
#read time
mti=array(C.variables['time'][:])/86400; nt=len(mti);
if m==0: S.time.extend(mti)
#extract elevation -> compute zcor -> vertical interploate
eis=[]; k1s=[]; k2s=[]; rats=[]
for i in arange(nt):
eii=array(C.variables['elev'][i][sbp.ip]) if ('elev' in C.variables) else 0*sbp.dp
ei=(eii*sbp.acor).sum(axis=1); eis.append(ei)
if len(svars)==1 and svars[0]=='elev': continue
#compute zcor
zii=[]; kbpii=[]
for k in arange(3):
if vd.ivcor==1: ziii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],sigma=sbp.sigma[:,k,:],kbp=sbp.kbp[:,k],method=1)
if vd.ivcor==2: ziii,kbpiii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],method=1,ifix=1); kbpii.append(kbpiii)
zii.append(ziii)
zi=(array(zii)*sbp.acor.T[...,None]).sum(axis=0).T
if vd.ivcor==2: sbp.kbp=array(kbpii).T.astype('int')
#station depth
mzi=sbp.z.copy()
if ifs==1: mzi=-mzi+ei
#interpolation in the vertical
k1=ones(sbp.nsta)*nan; k2=ones(sbp.nsta)*nan; rat=ones(sbp.nsta)*nan
fp=mzi<=zi[0]; k1[fp]=0; k2[fp]=0; rat[fp]=0 #bottom
fp=mzi>=zi[-1]; k1[fp]=(vd.nvrt-1); k2[fp]=(vd.nvrt-1); rat[fp]=1 #surface
for k in arange(vd.nvrt-1):
fp=(mzi>=zi[k])*(mzi<zi[k+1])
k1[fp]=k; k2[fp]=k+1
rat[fp]=(mzi[fp]-zi[k][fp])/(zi[k+1][fp]-zi[k][fp])
if sum(isnan(r_[k1,k2,rat]))!=0: sys.exit('check vertical interpolation')
k1s.append(k1); k2s.append(k2); rats.append(rat)
eis=array(eis); k1s=array(k1s).astype('int'); k2s=array(k2s).astype('int'); rats=array(rats)
if len(svars)==1 and svars[0]=='elev': Si.elev.extend(array(eis).T); continue
#compute (x,y,z) for each variables
Sii=zdata()
for mm, svar in enumerate(svars):
exec('Sii.{}=[]'.format(svar))
ndim=C.variables[svar].ndim; dim=C.variables[svar].shape; dimname=C.variables[svar].dimensions
data=[]
for i in arange(nt):
k1=k1s[i]; k2=k2s[i]; rat=rats[i]
#get variable values
if ('nSCHISM_hgrid_node' in dimname):
trii=array(C.variables[svar][i][sbp.ip])
elif ('nSCHISM_hgrid_face' in dimname):
trii=array(C.variables[svar][i][sbp.ie])
else:
sys.exit('unknown variable format: {},{}'.format(svar,dim))
#extend values in the bottom: dim[2] is nvrt
if ('nSCHISM_vgrid_layers' in dimname):
sindp=arange(sbp.nsta)
if ('nSCHISM_hgrid_node' in dimname):
for nn in arange(3):
kbp=sbp.kbp[:,nn]; btri=trii[sindp,nn,kbp]
for k in arange(vd.nvrt):
fp=k<kbp
trii[sindp[fp],nn,k]=btri[fp]
elif ('nSCHISM_hgrid_face' in dimname):
kbe=sbp.kbp.max(axis=1); btri=trii[sindp,kbe]
for k in arange(vd.nvrt):
fp=k<kbe
trii[sindp[fp],k]=btri[fp]
else:
sys.exit('unknown variable format: {},{}'.format(svar,dim))
#horizontal interp
if ('nSCHISM_hgrid_node' in dimname):
if ndim==2: tri=(trii*sbp.acor).sum(axis=1)
if ndim==3: tri=(trii*sbp.acor[...,None]).sum(axis=1)
if ndim==4: tri=(trii*sbp.acor[...,None,None]).sum(axis=1); rat=rat[:,None]
else:
tri=trii
#vertical interp
if ('nSCHISM_vgrid_layers' in dimname):
datai=(tri[sindp,k1]*(1-rat)+tri[sindp,k2]*rat)
else:
datai=tri
data.append(datai)
#save result from each variables
exec('ds=[1,0,*arange(2,{}-1)]; Sii.{}.extend(array(data).transpose(ds))'.format(ndim,svar))
#save result form subdomain
for i in svars: exec('Si.{}.extend(Sii.{})'.format(i,i))
#combine istack results
for i in svars: exec('ds=[1,0,*arange(2,array(Si.{}).ndim)]; S.{}.extend(array(Si.{})[sinds].transpose(ds))'.format(i,i,i))
dt00=time.time()-t00; print('finish reading stack={}; time={:0.2f}s, myrank={}'.format(istack,dt00,myrank)); sys.stdout.flush()
S.time=array(S.time); ['S.{}=array(S.{}).astype("float32")'.format(i,i) for i in svars]
#-----------------------------------------------------------------------------
#combine results from all ranks
#-----------------------------------------------------------------------------
if igather==1 and myrank<nproc: savez('{}_{}'.format(sname,myrank),S)
comm.Barrier()
if igather==0: sdata=comm.gather(S,root=0)
if igather==1 and myrank==0: sdata=[loadz('{}_{}.npz'.format(sname,i)) for i in arange(nproc)]
if myrank==0:
S=zdata(); S.time=[]; S.bp=bp
for i in rvars: exec('S.{}=[]'.format(i))
for i in arange(nproc):
Si=sdata[i]; S.time.extend(Si.time)
for m,[svar,rvar] in enumerate(zip(svars,rvars)): exec('S.{}.extend(Si.{})'.format(rvar,svar))
#save data
S.time=array(S.time); sind=argsort(S.time); S.time=S.time[sind]
for i in rvars: exec('ds=[1,0,*arange(2,array(S.{}).ndim)]; S.{}=array(S.{})[sind].transpose(ds)'.format(i,i,i))
if fmt==0:
savez('{}'.format(sname),S)
else:
#write out ASCII file
for i in rvars: exec('ds=[1,*arange(2,array(S.{}).ndim),0]; S.{}=array(S.{}).transpose(ds)'.format(i,i,i))
fid=open('{}.dat'.format(sname),'w+')
for i,ti in enumerate(S.time):
datai=[]
for rvar in rvars: exec('datai.extend(S.{}[{}].ravel())'.format(rvar,i))
fid.write(('{:12.6f}'+' {:10.6f}'*len(datai)+'\n').format(ti,*datai))
fid.close()
if igather==1: [os.remove('{}_{}.npz'.format(sname,i)) for i in arange(nproc)] #clean
#-----------------------------------------------------------------------------
#finish MPI jobs
#-----------------------------------------------------------------------------
comm.Barrier()
if myrank==0: dt=time.time()-t0; print('total time used: {} s'.format(dt)); sys.stdout.flush()
sys.exit(0) if qnode in ['bora'] else os._exit(0)
| #!/usr/bin/env python3
'''
Extract SCHISM variable values at (x,y,z) from station.bp.
1). work for both uncombined and combined SCHISM outputs
2). can extract multiple variables at the same time
3). can work in interactive or batch mode
4). output in ACSII or *npz format
'''
from pylib import *
import time
#-----------------------------------------------------------------------------
#Input
#-----------------------------------------------------------------------------
run='/sciclone/data10/wangzg/fabm_dev/RUN12' #run dir containing outputs
stacks=[1,146] #stacks of schout_*.nc
sname='RUN12/cosine' #name for results
svars=['elev','salt','temp','COS_1'] #SCHISM variables to be extracted
rvars=['elev','salt','temp','NO3'] #rename variable names
bpfile='/sciclone/data10/wangzg/fabm_dev/RUN12/station.bp' #file name of station.bp
icmb=0 #icmb=0: work on uncombined; icmb=1: work on combined schout_*.nc
ifs=1 #ifs=1: depth relative to surface; ifs=0: fixed depth (z coordiante)
fmt=0 #fmt=0: output as *.npz format; fmt=1: output as ASCII
#optional
grid='/sciclone/data10/wangzg/fabm_dev/RUN12/grid.npz' #saved grid info, to speed up; use hgrid.gr3 and vgrid.in if not exist
igather=1 #igather=1: save data on each rank,then combine; igather=0: use MPI
#resource requst
walltime='00:10:00'
qnode='x5672'; nnode=2; ppn=8 #hurricane, ppn=8
#qnode='bora'; nnode=2; ppn=20 #bora, ppn=20
#qnode='vortex'; nnode=2; ppn=12 #vortex, ppn=12
#qnode='femto'; nnode=2; ppn=12 #femto,ppn=32
#qnode='potomac'; nnode=4; ppn=8 #ches, ppn=12
#qnode='james'; nnode=5; ppn=20 #james, ppn=20
#qnode='skylake'; nnode=2; ppn=36 #viz3,skylake, ppn=36
#qnode='haswell'; nnode=2; ppn=2 #viz3,haswell, ppn=24,or 28
#qnode='frontera'; nnode=1; ppn=56 #frontera, ppn=56
qname='flex' #partition name (needed for frontera)
jname='Rd_{}'.format(os.path.basename(run)) #job name
ibatch=1; scrout='screen.out'; bdir=os.path.abspath(os.path.curdir)
#-----------------------------------------------------------------------------
#on front node: 1). submit jobs first (qsub), 2) running parallel jobs (mpirun)
#-----------------------------------------------------------------------------
if ibatch==0: os.environ['job_on_node']='1'; os.environ['bdir']=bdir #run locally
if os.getenv('job_on_node')==None:
if os.getenv('param')==None: fmt=0; bcode=sys.argv[0]
if os.getenv('param')!=None: fmt=1; bdir,bcode=os.getenv('param').split(); os.chdir(bdir)
scode=get_hpc_command(bcode,bdir,jname,qnode,nnode,ppn,walltime,scrout,fmt=fmt,qname=qname)
print(scode); os.system(scode); os._exit(0)
#-----------------------------------------------------------------------------
#on computation node
#-----------------------------------------------------------------------------
bdir=os.getenv('bdir'); os.chdir(bdir) #enter working dir
comm=MPI.COMM_WORLD; nproc=comm.Get_size(); myrank=comm.Get_rank()
if myrank==0: t0=time.time()
#-----------------------------------------------------------------------------
#do MPI work on each core
#-----------------------------------------------------------------------------
nproc=max(min(nproc,int(diff(stacks))),1)
if myrank==0:
sdir=os.path.dirname(sname)
if (not os.path.exists(sdir)) and sdir!='': os.mkdir(sdir)
#-----------------------------------------------------------------------------
#compute grid and bpfile information
#-----------------------------------------------------------------------------
#read grid information
t00=time.time()
if os.path.exists(grid):
gd=loadz(grid).hgrid; vd=loadz(grid).vgrid
else:
gd=read_schism_hgrid('{}/hgrid.gr3'.format(run))
vd=read_schism_vgrid('{}/vgrid.in'.format(run))
#compute area coordinate for stations
bp=read_schism_bpfile(bpfile)
bp.ie,bp.ip,bp.acor=gd.compute_acor(c_[bp.x,bp.y]); #bp.ne,bp.np=gd.ne,gd.np
bp.dp=gd.dp[bp.ip]; bp.dp0=(bp.dp*bp.acor).sum(axis=1)
if vd.ivcor==1: bp.sigma=vd.sigma[bp.ip]; bp.kbp=vd.kbp[bp.ip]; vd.sigma=None
#check pts inside grid
sindn=nonzero(bp.ie==-1)[0]
if len(sindn)!=0: sys.exit('pts outside of domain: {}'.format(c_[bp.x[sindn],bp.y[sindn]]))
dt00=time.time()-t00; print('finish reading grid info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
#read subdomain info
if icmb==0:
t00=time.time()
subs=gd.read_prop('{}/outputs/global_to_local.prop'.format(run)).astype('int')[bp.ie]
isub=unique(subs); sbps=[]; sindes=[]
for i, isubi in enumerate(isub):
sinde=nonzero(subs==isubi)[0] #elem index of stations
#build the iegl and ipgl
T=read_schism_local_to_global('{}/outputs/local_to_global_{}'.format(run,srank(isubi,run)))
iegl=dict(zip(T.ielg,arange(T.ne))); ipgl=dict(zip(T.iplg,arange(T.np)))
#compute subdomain ie,ip and acor,dp,z,sigma,kbp
sbp=zdata(); #sbp.ne,sbp.np=T.ne,T.np
sbp.ie=array([iegl[k] for k in bp.ie[sinde]])
sbp.ip=array([[ipgl[k] for k in n ] for n in bp.ip[sinde]])
sbp.acor=bp.acor[sinde]; sbp.dp=bp.dp[sinde]; sbp.z=bp.z[sinde]; sbp.nsta=len(sinde)
if vd.ivcor==1: sbp.sigma=bp.sigma[sinde]; sbp.kbp=bp.kbp[sinde]
sbps.append(sbp); sindes.extend(sinde)
sinds=argsort(array(sindes)) #indices to sort station order
dt00=time.time()-t00; print('finish reading subdomain info: time={:0.2f}s, myrank={}'.format(dt00,myrank)); sys.stdout.flush()
else:
isub=[None]; sbps=[bp]; sinds=arange(bp.nsta)
#-----------------------------------------------------------------------------
#extract data on each processor
#-----------------------------------------------------------------------------
#distribute jobs
istacks=[i for i in arange(stacks[0],stacks[1]+1) if i%nproc==myrank]
#initilize data capsule
S=zdata(); S.time=[]; #S.bp=bp
for i in svars: exec('S.{}=[]'.format(i))
#extract (x,y,z) value for each stack and each subdomain
for n,istack in enumerate(istacks):
t00=time.time(); Si=zdata()
for m in svars: exec('Si.{}=[]'.format(m))
for m,isubi in enumerate(isub):
#open schout_*.nc
if icmb==0: fname='{}/outputs/schout_{}_{}.nc'.format(run,srank(isubi,run),istack)
if icmb==1: fname='{}/outputs/schout_{}.nc'.format(run,istack)
if (not os.path.exists(fname)) and icmb==0: sys.exit('not exist: {}'.format(fname))
C=ReadNC(fname,1); sbp=sbps[m]
#read time
mti=array(C.variables['time'][:])/86400; nt=len(mti);
if m==0: S.time.extend(mti)
#extract elevation -> compute zcor -> vertical interploate
eis=[]; k1s=[]; k2s=[]; rats=[]
for i in arange(nt):
eii=array(C.variables['elev'][i][sbp.ip]) if ('elev' in C.variables) else 0*sbp.dp
ei=(eii*sbp.acor).sum(axis=1); eis.append(ei)
if len(svars)==1 and svars[0]=='elev': continue
#compute zcor
zii=[]; kbpii=[]
for k in arange(3):
if vd.ivcor==1: ziii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],sigma=sbp.sigma[:,k,:],kbp=sbp.kbp[:,k],method=1)
if vd.ivcor==2: ziii,kbpiii=vd.compute_zcor(sbp.dp[:,k],eii[:,k],method=1,ifix=1); kbpii.append(kbpiii)
zii.append(ziii)
zi=(array(zii)*sbp.acor.T[...,None]).sum(axis=0).T
if vd.ivcor==2: sbp.kbp=array(kbpii).T.astype('int')
#station depth
mzi=sbp.z.copy()
if ifs==1: mzi=-mzi+ei
#interpolation in the vertical
k1=ones(sbp.nsta)*nan; k2=ones(sbp.nsta)*nan; rat=ones(sbp.nsta)*nan
fp=mzi<=zi[0]; k1[fp]=0; k2[fp]=0; rat[fp]=0 #bottom
fp=mzi>=zi[-1]; k1[fp]=(vd.nvrt-1); k2[fp]=(vd.nvrt-1); rat[fp]=1 #surface
for k in arange(vd.nvrt-1):
fp=(mzi>=zi[k])*(mzi<zi[k+1])
k1[fp]=k; k2[fp]=k+1
rat[fp]=(mzi[fp]-zi[k][fp])/(zi[k+1][fp]-zi[k][fp])
if sum(isnan(r_[k1,k2,rat]))!=0: sys.exit('check vertical interpolation')
k1s.append(k1); k2s.append(k2); rats.append(rat)
eis=array(eis); k1s=array(k1s).astype('int'); k2s=array(k2s).astype('int'); rats=array(rats)
if len(svars)==1 and svars[0]=='elev': Si.elev.extend(array(eis).T); continue
#compute (x,y,z) for each variables
Sii=zdata()
for mm, svar in enumerate(svars):
exec('Sii.{}=[]'.format(svar))
ndim=C.variables[svar].ndim; dim=C.variables[svar].shape; dimname=C.variables[svar].dimensions
data=[]
for i in arange(nt):
k1=k1s[i]; k2=k2s[i]; rat=rats[i]
#get variable values
if ('nSCHISM_hgrid_node' in dimname):
trii=array(C.variables[svar][i][sbp.ip])
elif ('nSCHISM_hgrid_face' in dimname):
trii=array(C.variables[svar][i][sbp.ie])
else:
sys.exit('unknown variable format: {},{}'.format(svar,dim))
#extend values in the bottom: dim[2] is nvrt
if ('nSCHISM_vgrid_layers' in dimname):
sindp=arange(sbp.nsta)
if ('nSCHISM_hgrid_node' in dimname):
for nn in arange(3):
kbp=sbp.kbp[:,nn]; btri=trii[sindp,nn,kbp]
for k in arange(vd.nvrt):
fp=k<kbp
trii[sindp[fp],nn,k]=btri[fp]
elif ('nSCHISM_hgrid_face' in dimname):
kbe=sbp.kbp.max(axis=1); btri=trii[sindp,kbe]
for k in arange(vd.nvrt):
fp=k<kbe
trii[sindp[fp],k]=btri[fp]
else:
sys.exit('unknown variable format: {},{}'.format(svar,dim))
#horizontal interp
if ('nSCHISM_hgrid_node' in dimname):
if ndim==2: tri=(trii*sbp.acor).sum(axis=1)
if ndim==3: tri=(trii*sbp.acor[...,None]).sum(axis=1)
if ndim==4: tri=(trii*sbp.acor[...,None,None]).sum(axis=1); rat=rat[:,None]
else:
tri=trii
#vertical interp
if ('nSCHISM_vgrid_layers' in dimname):
datai=(tri[sindp,k1]*(1-rat)+tri[sindp,k2]*rat)
else:
datai=tri
data.append(datai)
#save result from each variables
exec('ds=[1,0,*arange(2,{}-1)]; Sii.{}.extend(array(data).transpose(ds))'.format(ndim,svar))
#save result form subdomain
for i in svars: exec('Si.{}.extend(Sii.{})'.format(i,i))
#combine istack results
for i in svars: exec('ds=[1,0,*arange(2,array(Si.{}).ndim)]; S.{}.extend(array(Si.{})[sinds].transpose(ds))'.format(i,i,i))
dt00=time.time()-t00; print('finish reading stack={}; time={:0.2f}s, myrank={}'.format(istack,dt00,myrank)); sys.stdout.flush()
S.time=array(S.time); ['S.{}=array(S.{}).astype("float32")'.format(i,i) for i in svars]
#-----------------------------------------------------------------------------
#combine results from all ranks
#-----------------------------------------------------------------------------
if igather==1 and myrank<nproc: savez('{}_{}'.format(sname,myrank),S)
comm.Barrier()
if igather==0: sdata=comm.gather(S,root=0)
if igather==1 and myrank==0: sdata=[loadz('{}_{}.npz'.format(sname,i)) for i in arange(nproc)]
if myrank==0:
S=zdata(); S.time=[]; S.bp=bp
for i in rvars: exec('S.{}=[]'.format(i))
for i in arange(nproc):
Si=sdata[i]; S.time.extend(Si.time)
for m,[svar,rvar] in enumerate(zip(svars,rvars)): exec('S.{}.extend(Si.{})'.format(rvar,svar))
#save data
S.time=array(S.time); sind=argsort(S.time); S.time=S.time[sind]
for i in rvars: exec('ds=[1,0,*arange(2,array(S.{}).ndim)]; S.{}=array(S.{})[sind].transpose(ds)'.format(i,i,i))
if fmt==0:
savez('{}'.format(sname),S)
else:
#write out ASCII file
for i in rvars: exec('ds=[1,*arange(2,array(S.{}).ndim),0]; S.{}=array(S.{}).transpose(ds)'.format(i,i,i))
fid=open('{}.dat'.format(sname),'w+')
for i,ti in enumerate(S.time):
datai=[]
for rvar in rvars: exec('datai.extend(S.{}[{}].ravel())'.format(rvar,i))
fid.write(('{:12.6f}'+' {:10.6f}'*len(datai)+'\n').format(ti,*datai))
fid.close()
if igather==1: [os.remove('{}_{}.npz'.format(sname,i)) for i in arange(nproc)] #clean
#-----------------------------------------------------------------------------
#finish MPI jobs
#-----------------------------------------------------------------------------
comm.Barrier()
if myrank==0: dt=time.time()-t0; print('total time used: {} s'.format(dt)); sys.stdout.flush()
sys.exit(0) if qnode in ['bora'] else os._exit(0) | en | 0.243904 | #!/usr/bin/env python3 Extract SCHISM variable values at (x,y,z) from station.bp. 1). work for both uncombined and combined SCHISM outputs 2). can extract multiple variables at the same time 3). can work in interactive or batch mode 4). output in ACSII or *npz format #----------------------------------------------------------------------------- #Input #----------------------------------------------------------------------------- #run dir containing outputs #stacks of schout_*.nc #name for results #SCHISM variables to be extracted #rename variable names #file name of station.bp #icmb=0: work on uncombined; icmb=1: work on combined schout_*.nc #ifs=1: depth relative to surface; ifs=0: fixed depth (z coordiante) #fmt=0: output as *.npz format; fmt=1: output as ASCII #optional #saved grid info, to speed up; use hgrid.gr3 and vgrid.in if not exist #igather=1: save data on each rank,then combine; igather=0: use MPI #resource requst #hurricane, ppn=8 #qnode='bora'; nnode=2; ppn=20 #bora, ppn=20 #qnode='vortex'; nnode=2; ppn=12 #vortex, ppn=12 #qnode='femto'; nnode=2; ppn=12 #femto,ppn=32 #qnode='potomac'; nnode=4; ppn=8 #ches, ppn=12 #qnode='james'; nnode=5; ppn=20 #james, ppn=20 #qnode='skylake'; nnode=2; ppn=36 #viz3,skylake, ppn=36 #qnode='haswell'; nnode=2; ppn=2 #viz3,haswell, ppn=24,or 28 #qnode='frontera'; nnode=1; ppn=56 #frontera, ppn=56 #partition name (needed for frontera) #job name #----------------------------------------------------------------------------- #on front node: 1). 
submit jobs first (qsub), 2) running parallel jobs (mpirun) #----------------------------------------------------------------------------- #run locally #----------------------------------------------------------------------------- #on computation node #----------------------------------------------------------------------------- #enter working dir #----------------------------------------------------------------------------- #do MPI work on each core #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- #compute grid and bpfile information #----------------------------------------------------------------------------- #read grid information #compute area coordinate for stations #bp.ne,bp.np=gd.ne,gd.np #check pts inside grid #read subdomain info #elem index of stations #build the iegl and ipgl #compute subdomain ie,ip and acor,dp,z,sigma,kbp #sbp.ne,sbp.np=T.ne,T.np #indices to sort station order #----------------------------------------------------------------------------- #extract data on each processor #----------------------------------------------------------------------------- #distribute jobs #initilize data capsule #S.bp=bp #extract (x,y,z) value for each stack and each subdomain #open schout_*.nc #read time #extract elevation -> compute zcor -> vertical interploate #compute zcor #station depth #interpolation in the vertical #bottom #surface #compute (x,y,z) for each variables #get variable values #extend values in the bottom: dim[2] is nvrt #horizontal interp #vertical interp #save result from each variables #save result form subdomain #combine istack results #----------------------------------------------------------------------------- #combine results from all ranks #----------------------------------------------------------------------------- #save data #write out ASCII file #clean 
#----------------------------------------------------------------------------- #finish MPI jobs #----------------------------------------------------------------------------- | 2.274819 | 2 |
src/napari_pssr/widget.py | pattonw/pssr-napari | 0 | 6616440 | <filename>src/napari_pssr/widget.py
from magicgui import magic_factory
import napari
@magic_factory
def train_pssr_widget(
    model: str,
    high_res: napari.layers.Image,
    bluring: float,
    noise: float,
    downsampling: float,
):
    """Stub magicgui widget for training a PSSR model.

    Currently a placeholder: the body does nothing.

    Parameters
    ----------
    model : str
        Model identifier or path (presumably -- confirm once implemented).
    high_res : napari.layers.Image
        High-resolution input image layer.
    bluring : float
        Blur amount (NOTE(review): presumably a typo for "blurring"; renaming
        would change the widget's parameter name, so it is kept as-is).
    noise : float
        Noise amount (assumed -- confirm when the body is implemented).
    downsampling : float
        Downsampling factor (assumed -- confirm when the body is implemented).
    """
    pass
@magic_factory
def predict_pssr_widget(
    model: str,
    raw: napari.layers.Image,
):
    """Stub magicgui widget for running PSSR prediction on a raw image layer.

    Currently a placeholder: the body does nothing.
    """
    pass
| <filename>src/napari_pssr/widget.py
from magicgui import magic_factory
import napari
@magic_factory
def train_pssr_widget(
    model: str,
    high_res: napari.layers.Image,
    bluring: float,
    noise: float,
    downsampling: float,
):
    """Stub magicgui widget for training a PSSR model.

    Currently a placeholder: the body does nothing.

    Parameters
    ----------
    model : str
        Model identifier or path (presumably -- confirm once implemented).
    high_res : napari.layers.Image
        High-resolution input image layer.
    bluring : float
        Blur amount (NOTE(review): presumably a typo for "blurring"; renaming
        would change the widget's parameter name, so it is kept as-is).
    noise : float
        Noise amount (assumed -- confirm when the body is implemented).
    downsampling : float
        Downsampling factor (assumed -- confirm when the body is implemented).
    """
    pass
@magic_factory
def predict_pssr_widget(
    model: str,
    raw: napari.layers.Image,
):
    """Stub magicgui widget for running PSSR prediction on a raw image layer.

    Currently a placeholder: the body does nothing.
    """
    pass
| none | 1 | 1.453112 | 1 | |
wormpose/pose/distance_metrics.py | AntonioCCosta/wormpose | 29 | 6616441 | """
Contains function to calculate distances between worm poses, either represented as angles or as skeletons
"""
import math
import numpy as np
def angle_distance(theta_a: np.ndarray, theta_b: np.ndarray) -> float:
    """Mean absolute angular difference between two arrays of angles.

    The raw difference is re-wrapped through arctan2(sin, cos), so the
    periodicity of angles is respected (e.g. 0 and 2*pi compare as equal).
    """
    delta = theta_a - theta_b
    wrapped = np.arctan2(np.sin(delta), np.cos(delta))
    return np.abs(wrapped).mean()
def _head_tail_diff(skel):
return skel[-1][0] - skel[0][0], skel[-1][1] - skel[0][1]
def _cos_similarity(a, b):
def _norm(x):
return math.sqrt(x[0] * x[0] + x[1] * x[1])
return (a[0] * b[0] + a[1] * b[1]) / (_norm(a) * _norm(b))
def skeleton_distance(skel_a: np.ndarray, skel_b: np.ndarray) -> float:
    """Similarity of two skeletons: the cosine of the angle between their
    head-to-tail vectors (1.0 means identical overall orientation)."""
    vec_a = _head_tail_diff(skel_a)
    vec_b = _head_tail_diff(skel_b)
    return _cos_similarity(vec_a, vec_b)
| """
Contains function to calculate distances between worm poses, either represented as angles or as skeletons
"""
import math
import numpy as np
def angle_distance(theta_a: np.ndarray, theta_b: np.ndarray) -> float:
    """Mean absolute angular difference between two arrays of angles.

    The raw difference is re-wrapped through arctan2(sin, cos), so the
    periodicity of angles is respected (e.g. 0 and 2*pi compare as equal).
    """
    delta = theta_a - theta_b
    wrapped = np.arctan2(np.sin(delta), np.cos(delta))
    return np.abs(wrapped).mean()
def _head_tail_diff(skel):
return skel[-1][0] - skel[0][0], skel[-1][1] - skel[0][1]
def _cos_similarity(a, b):
def _norm(x):
return math.sqrt(x[0] * x[0] + x[1] * x[1])
return (a[0] * b[0] + a[1] * b[1]) / (_norm(a) * _norm(b))
def skeleton_distance(skel_a: np.ndarray, skel_b: np.ndarray) -> float:
    """Similarity of two skeletons: the cosine of the angle between their
    head-to-tail vectors (1.0 means identical overall orientation)."""
    vec_a = _head_tail_diff(skel_a)
    vec_b = _head_tail_diff(skel_b)
    return _cos_similarity(vec_a, vec_b)
| en | 0.926618 | Contains function to calculate distances between worm poses, either represented as angles or as skeletons Angle distance that takes into account the periodicity of angles Cosine similarity between the two head to tail vectors of the input skeletons | 3.40913 | 3 |
tutorial/examples/external_css_and_js/dash-meta-tags.py | blozano824/dash-docs | 1 | 6616442 | <filename>tutorial/examples/external_css_and_js/dash-meta-tags.py
import dash
import dash_html_components as html
# <meta> tags for Dash to inject into the <head> of the generated index.html.
_META_TAGS = [
    # Page description, picked up by search engines / link previews.
    {
        'name': 'description',
        'content': 'My description'
    },
    # Ask Internet Explorer to render with its most recent engine.
    {
        'http-equiv': 'X-UA-Compatible',
        'content': 'IE=edge'
    }
]

app = dash.Dash(meta_tags=_META_TAGS)

# Minimal page body so the app has something to render.
app.layout = html.Div('Simple Dash App')

if __name__ == '__main__':
    # Debug mode enables hot reload and the in-browser error overlay.
    app.run_server(debug=True)
| <filename>tutorial/examples/external_css_and_js/dash-meta-tags.py
import dash
import dash_html_components as html
# Build the Dash app, passing custom <meta> tags that Dash injects into the
# <head> of the generated index.html.
app = dash.Dash(meta_tags=[
    # Page description, picked up by search engines / link previews.
    {
        'name': 'description',
        'content': 'My description'
    },
    # Ask Internet Explorer to render with its most recent engine.
    {
        'http-equiv': 'X-UA-Compatible',
        'content': 'IE=edge'
    }
])
# Minimal page body so the app has something to render.
app.layout = html.Div('Simple Dash App')
if __name__ == '__main__':
    # Debug mode enables hot reload and the in-browser error overlay.
    app.run_server(debug=True)
| none | 1 | 2.444714 | 2 | |
fmuser/modify_user.py | sudaning/Fmuser | 2 | 6616443 | <reponame>sudaning/Fmuser<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
import xml.etree.ElementTree as ET
except ImportError:
import xml.etree.cElementTree as ET
import HTMLParser
# Beautify an XML tree: make indentation consistent (translated from the
# original Chinese comment).
def pretty_xml(elem, indent = " ", newline = "\n", null_str_keep = True, level = 0):
    """Recursively normalise the whitespace of *elem* in place for pretty
    printing, and return *elem*.

    Each nesting level is indented with one more copy of *indent*, separated
    by *newline*.  When *null_str_keep* is True, text/tail strings that
    already contain newlines keep their blank-line count instead of being
    collapsed to a single newline.  *level* is the current nesting depth
    (internal; callers normally leave it at 0).
    """
    #print(level, len(elem), elem.text, elem.tail)
    i = newline + level * indent  # whitespace aligned with this element's level
    if len(elem):
        # Element has children: its text becomes the indentation leading into
        # the first child.
        if not elem.text or not elem.text.strip():
            elem.text = i + indent
        for e in elem:
            pretty_xml(e, indent, newline, null_str_keep, level + 1)
            # A child's tail positions the next sibling (or the closing tag).
            if not e.tail or not e.tail.strip():
                e.tail = (e.tail.count(newline) * newline + level * indent) if null_str_keep and e.tail else i
    if level and (not elem.tail or not elem.tail.strip()):
        # Non-root element: align whatever follows it with its own level.
        elem.tail = (elem.tail.count(newline) * newline + level * indent) if null_str_keep and elem.tail else i
    if not len(elem) and elem.text:
        # Leaf element: trim decorative whitespace around its text content.
        elem.text = elem.text.strip()
    return elem
# Tree builder that preserves <!-- comments --> while parsing, so that
# rewriting a file does not silently drop them.
# NOTE(review): ET.XMLTreeBuilder is the legacy (Python 2 era) alias of
# ET.XMLParser; this file targets Python 2 (see the HTMLParser import above).
class commentTreeBuilder(ET.XMLTreeBuilder):
    def __init__ (self, html = 0, target = None):
        ET.XMLTreeBuilder.__init__(self, html, target)
        # Route expat comment events into our handler.
        self._parser.CommentHandler = self.handle_comment
    def handle_comment(self, data):
        # Emit the comment text as an ET.Comment node so it survives a
        # parse/write round-trip.
        self._target.start(ET.Comment, {})
        self._target.data(data)
        self._target.end(ET.Comment)
class Muser:
    '''Batch editor for FreeSWITCH user-directory XML files.

    Scans a directory of per-user XML files (default: the FreeSWITCH
    conf/directory tree) and, for every file whose <user id="..."> matches,
    rewrites one attribute chosen by a configurable rule -- by default the
    'value' of <param name="password">.

    Expected file layout:

    <include>
        <user id="075577010001">
            <params>
                <param name="password" value="<PASSWORD>"/>
                <param name="vm-password" value="<PASSWORD>"/>
            </params>
            <variables>
                <variable name="toll_allow" value="domestic,international,local"/>
                <variable name="accountcode" value="075577010001"/>
                <variable name="user_context" value="sipp.33e9.com"/>
                <variable name="effective_caller_id_name" value="18688717887"/>
                <variable name="effective_caller_id_number" value="18688717887"/>
                <variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
                <variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
                <!-- <variable name="callgroup" value="techsupport"/> -->
            </variables>
        </user>
    </include>
    '''
    def __init__(self, user_dir=r'/usr/local/freeswitch/conf/directory', include_sub_dir=True, exclude_dir=[], debug=False):
        # NOTE(review): exclude_dir=[] is a mutable default argument; it is
        # stored but never consulted anywhere in this class.
        self.__user_dir = user_dir  # root directory holding the user XML files
        self.__include_sub_dir = include_sub_dir  # False -> only the top-level directory is processed
        self.__exclude_dir = exclude_dir  # unused -- see note above
        # Rewrite rule: 'key' is an XPath fragment (joined with "./" in
        # __modfiy_xml, yielding ".//params/param[...]" -- a descendant
        # search); 'value' is the new text for that node's 'value' attribute.
        self.__modify_rule = {'key':r'/params/param[@name="password"]', 'value':''}
        self.__debug = debug  # True -> print every file inspected
    def __enter__(self):
        # Context-manager support: "with Muser(...) as m: ...".
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Returns False (or None) in every case, so exceptions raised inside
        # the with-block are never suppressed.
        if exc_tb:
            return False
        else:
            self.__del__()
    def __del__(self):
        # No resources to release.
        pass
    def set_modify_rule(self, key=r'/params/param[@name="password"]', value=''):
        # Replace the rewrite rule; returns self so calls can be chained.
        self.__modify_rule = {'key':key, 'value':value}
        return self
    def __modfiy_xml(self, file_path, numbers=[]):
        # Rewrite a single XML file in place if its <user id="..."> is listed
        # in *numbers* (an empty list matches every user).
        # NOTE(review): the method name looks like a typo of "_modify_xml";
        # it is kept because run() calls it under this spelling.
        # NOTE(review): numbers=[] is a mutable default argument.
        tree = ET.parse(file_path, parser = commentTreeBuilder())  # parser keeps <!-- comments -->
        include_node = tree.getroot() # the <include> root node
        if self.__debug:
            print("TARGET %s" % file_path)
        user_node = include_node.find('user')
        if user_node is not None:
            id = user_node.attrib['id']  # NOTE(review): shadows the builtin id()
            if id in numbers or len(numbers) == 0:
                is_modify = False
                key_xpath = "./" + self.__modify_rule.get('key', '')
                value = self.__modify_rule.get('value', '')
                for node in include_node.findall(key_xpath):
                    origion_value = node.get('value')
                    node.set('value', value)
                    is_modify = True
                    print("MODIFY NODE %s ATTR 'value' FROM %s TO %s IN FILE %s" % (key_xpath, origion_value, value, file_path))
                    break  # only the first matching node is changed
                if is_modify:
                    tree.write(file_path)
                    # Re-read the file, undo the HTML/XML entity escaping
                    # (apparently introduced by tree.write()), and write it
                    # back.  Original Chinese comment: "read the file
                    # contents, replace the HTML formatting, rewrite".
                    with open(file_path, "r+") as f:
                        txt = HTMLParser.HTMLParser().unescape(f.read())
                        f.seek(0)
                        f.truncate()
                        f.write(txt)
    def run(self, numbers=[]):
        '''Walk user_dir and rewrite every matching user file.

        numbers = [] means the id filter is disabled, i.e. every user
        matches (translated from the original Chinese docstring).
        '''
        for root, dirs, files in os.walk(self.__user_dir):
            # Process every xml file in the current directory (suffix test:
            # any file name ending in "xml" matches).
            for file in files:
                if file[-3:] != "xml":
                    continue
                self.__modfiy_xml(os.path.join(root, file), numbers)
            else:
                # for-else: runs once the inner loop completes (it never
                # breaks).  After the first directory is handled, stop the
                # walk unless subdirectories were requested (translated from
                # the original Chinese comment).
                if not self.__include_sub_dir:
                    break
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
import xml.etree.ElementTree as ET
except ImportError:
import xml.etree.cElementTree as ET
import HTMLParser
# Beautify an XML tree: make indentation consistent (translated from the
# original Chinese comment).
def pretty_xml(elem, indent = " ", newline = "\n", null_str_keep = True, level = 0):
    """Recursively normalise the whitespace of *elem* in place for pretty
    printing, and return *elem*.

    Each nesting level is indented with one more copy of *indent*, separated
    by *newline*.  When *null_str_keep* is True, text/tail strings that
    already contain newlines keep their blank-line count instead of being
    collapsed to a single newline.  *level* is the current nesting depth
    (internal; callers normally leave it at 0).
    """
    #print(level, len(elem), elem.text, elem.tail)
    i = newline + level * indent  # whitespace aligned with this element's level
    if len(elem):
        # Element has children: its text becomes the indentation leading into
        # the first child.
        if not elem.text or not elem.text.strip():
            elem.text = i + indent
        for e in elem:
            pretty_xml(e, indent, newline, null_str_keep, level + 1)
            # A child's tail positions the next sibling (or the closing tag).
            if not e.tail or not e.tail.strip():
                e.tail = (e.tail.count(newline) * newline + level * indent) if null_str_keep and e.tail else i
    if level and (not elem.tail or not elem.tail.strip()):
        # Non-root element: align whatever follows it with its own level.
        elem.tail = (elem.tail.count(newline) * newline + level * indent) if null_str_keep and elem.tail else i
    if not len(elem) and elem.text:
        # Leaf element: trim decorative whitespace around its text content.
        elem.text = elem.text.strip()
    return elem
# Tree builder that preserves <!-- comments --> while parsing, so that
# rewriting a file does not silently drop them.
# NOTE(review): ET.XMLTreeBuilder is the legacy (Python 2 era) alias of
# ET.XMLParser; this file targets Python 2 (see the HTMLParser import above).
class commentTreeBuilder(ET.XMLTreeBuilder):
    def __init__ (self, html = 0, target = None):
        ET.XMLTreeBuilder.__init__(self, html, target)
        # Route expat comment events into our handler.
        self._parser.CommentHandler = self.handle_comment
    def handle_comment(self, data):
        # Emit the comment text as an ET.Comment node so it survives a
        # parse/write round-trip.
        self._target.start(ET.Comment, {})
        self._target.data(data)
        self._target.end(ET.Comment)
class Muser:
    '''Batch-edit FreeSWITCH user-directory XML files in place.

    Each file is expected to look like::

        <include>
          <user id="075577010001">
            <params>
              <param name="password" value="..."/>
              <param name="vm-password" value="..."/>
            </params>
            <variables>
              <variable name="accountcode" value="075577010001"/>
              ...
            </variables>
          </user>
        </include>

    By default the ``value`` attribute of ``<param name="password">`` is
    rewritten; use :meth:`set_modify_rule` to target a different node/value.
    '''
    def __init__(self, user_dir=r'/usr/local/freeswitch/conf/directory', include_sub_dir=True, exclude_dir=None, debug=False):
        self.__user_dir = user_dir
        self.__include_sub_dir = include_sub_dir
        # BUGFIX: avoid the shared mutable default argument ([]).
        # NOTE(review): __exclude_dir is stored but never consulted yet.
        self.__exclude_dir = exclude_dir if exclude_dir is not None else []
        self.__modify_rule = {'key':r'/params/param[@name="password"]', 'value':''}
        self.__debug = debug
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        if exc_tb:
            return False  # propagate the exception
        else:
            self.__del__()
    def __del__(self):
        pass  # no resources to release
    def set_modify_rule(self, key=r'/params/param[@name="password"]', value=''):
        '''Set the XPath (relative to <include>) and the new attribute value.

        Returns self so calls can be chained.
        '''
        self.__modify_rule = {'key':key, 'value':value}
        return self
    def __modfiy_xml(self, file_path, numbers=None):
        # (sic: the name typo is kept — renaming would change the mangled name.)
        '''Rewrite the configured attribute in one XML file.

        Only files whose <user id="..."> is listed in `numbers` are touched;
        an empty / None `numbers` means "all users".
        '''
        if numbers is None:
            numbers = []
        tree = ET.parse(file_path, parser = commentTreeBuilder())
        include_node = tree.getroot()  # the <include> root element
        if self.__debug:
            print("TARGET %s" % file_path)
        user_node = include_node.find('user')
        if user_node is not None:
            user_id = user_node.attrib['id']
            if user_id in numbers or len(numbers) == 0:
                is_modify = False
                key_xpath = "./" + self.__modify_rule.get('key', '')
                value = self.__modify_rule.get('value', '')
                for node in include_node.findall(key_xpath):
                    origion_value = node.get('value')
                    node.set('value', value)
                    is_modify = True
                    print("MODIFY NODE %s ATTR 'value' FROM %s TO %s IN FILE %s" % (key_xpath, origion_value, value, file_path))
                    break  # only the first matching node is modified
                if is_modify:
                    tree.write(file_path)
                    # ElementTree escapes entities on write; read the file back
                    # and unescape so the original HTML-style formatting keeps.
                    with open(file_path, "r+") as f:
                        txt = HTMLParser.HTMLParser().unescape(f.read())
                        f.seek(0)
                        f.truncate()
                        f.write(txt)
    def run(self, numbers=None):
        '''Walk the user directory and patch every ``*.xml`` file found.

        numbers: list of user ids to restrict the edit to; [] / None means
        "do not filter by id".
        '''
        if numbers is None:
            numbers = []
        for root, dirs, files in os.walk(self.__user_dir):
            for file_name in files:
                # Only process XML files in the current directory.
                if file_name[-3:] != "xml":
                    continue
                self.__modfiy_xml(os.path.join(root, file_name), numbers)
            # os.walk yields the top directory first, so stopping here after
            # one iteration means "do not descend into sub-directories".
            # (The original used a no-op for/else for the same effect.)
            if not self.__include_sub_dir:
                break
emoji-export/misskey-emoji-export.py | CyberRex0/misskey-scripts | 5 | 6616444 | try:
import psycopg2
import psycopg2.extras
except ImportError:
print("You need to install psycopg2 from pip")
exit()
import requests
import time
import argparse
import datetime
import os
print('Misskey Emoji Export Tool v1.0')
print('(C)2022 CyberRex\n')
parser = argparse.ArgumentParser(description='Misskey Emoji Export Tool')
parser.add_argument('-s', '--host', help='hostname of database server', required=True)
parser.add_argument('-u', '--username', help='username to login database', required=True)
parser.add_argument('-p', '--password', help='password to login database', required=True)
parser.add_argument('-d', '--database', help='name of database', required=True)
parser.add_argument('--include-remote', help='include remote emoji', default=False, action='store_true')
parser.add_argument('--port', type=int, help='port of database server (optional, Default: 5432)', default=5432, required=False)
args = parser.parse_args()
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; MisskeyDBEmojiExportTool)'
print(f'Connecting to {args.username}@{args.host}:{args.port}...' , end='')
try:
db = psycopg2.connect(database=args.database, user=args.username, password=args.password, host=args.host, port=args.port or 5432)
except Exception as e:
print('Failed!')
print(str(e))
db.commit()
print('OK\n')
with db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
cur.execute('SELECT * FROM "public"."emoji" ' + ('WHERE "host" IS NULL' if not args.include_remote else ''))
r = [dict(x) for x in cur.fetchall()]
outdir = 'emoji_export_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.mkdir(outdir)
for emoji in r:
print(f'Downloading {emoji["name"]}...', end='')
name = os.path.splitext(emoji['url'])
if len(name) == 1:
print('Skipping (no file extension)')
continue
fname = f'{emoji["name"]}{name[1]}'
r = requests.get(emoji['url'], headers={'User-Agent': USER_AGENT})
if r.status_code != 200:
print(f'Failed! ({r.status_code})')
continue
ftype = r.headers.get('Content-Type')
if name[1] == '':
if ftype == 'image/png':
fname = f'{emoji["name"]}.png'
elif ftype == 'image/jpeg' or ftype == 'image/jpg':
fname = f'{emoji["name"]}.jpg'
elif ftype == 'image/svg+xml':
fname = f'{emoji["name"]}.svg'
elif ftype == 'image/webp':
fname = f'{emoji["name"]}.webp'
elif ftype == 'image/bmp':
fname = f'{emoji["name"]}.bmp'
elif ftype == 'image/gif':
fname = f'{emoji["name"]}.gif'
with open(f'{outdir}/{fname}', 'wb') as f:
f.write(r.content)
print('OK')
db.close()
print('ALL DONE!')
| try:
import psycopg2
import psycopg2.extras
except ImportError:
print("You need to install psycopg2 from pip")
exit()
import requests
import time
import argparse
import datetime
import os

print('Misskey Emoji Export Tool v1.0')
print('(C)2022 CyberRex\n')

parser = argparse.ArgumentParser(description='Misskey Emoji Export Tool')
parser.add_argument('-s', '--host', help='hostname of database server', required=True)
parser.add_argument('-u', '--username', help='username to login database', required=True)
parser.add_argument('-p', '--password', help='password to login database', required=True)
parser.add_argument('-d', '--database', help='name of database', required=True)
parser.add_argument('--include-remote', help='include remote emoji', default=False, action='store_true')
parser.add_argument('--port', type=int, help='port of database server (optional, Default: 5432)', default=5432, required=False)
args = parser.parse_args()

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; MisskeyDBEmojiExportTool)'

# Fallback extension per Content-Type, used when the emoji URL has none.
CONTENT_TYPE_EXT = {
    'image/png': '.png',
    'image/jpeg': '.jpg',
    'image/jpg': '.jpg',
    'image/svg+xml': '.svg',
    'image/webp': '.webp',
    'image/bmp': '.bmp',
    'image/gif': '.gif',
}

print(f'Connecting to {args.username}@{args.host}:{args.port}...' , end='')
try:
    db = psycopg2.connect(database=args.database, user=args.username, password=args.password, host=args.host, port=args.port or 5432)
except Exception as e:
    print('Failed!')
    print(str(e))
    # BUGFIX: previously execution fell through after a failed connection and
    # crashed with a NameError on the undefined `db` (also dropped the
    # pointless db.commit() that sat before any query).
    exit(1)
print('OK\n')

# Fetch all local emoji rows (remote ones too when --include-remote is set).
with db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
    cur.execute('SELECT * FROM "public"."emoji" ' + ('WHERE "host" IS NULL' if not args.include_remote else ''))
    emojis = [dict(x) for x in cur.fetchall()]

outdir = 'emoji_export_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.mkdir(outdir)

for emoji in emojis:
    print(f'Downloading {emoji["name"]}...', end='')
    ext = os.path.splitext(emoji['url'])[1]
    fname = f'{emoji["name"]}{ext}'
    resp = requests.get(emoji['url'], headers={'User-Agent': USER_AGENT})
    if resp.status_code != 200:
        print(f'Failed! ({resp.status_code})')
        continue
    if ext == '':
        # URL carries no extension: guess one from the Content-Type header.
        # Unknown types keep the extension-less file name, as before.
        guessed = CONTENT_TYPE_EXT.get(resp.headers.get('Content-Type'))
        if guessed:
            fname = f'{emoji["name"]}{guessed}'
    with open(f'{outdir}/{fname}', 'wb') as f:
        f.write(resp.content)
    print('OK')

db.close()
print('ALL DONE!')
| none | 1 | 2.603087 | 3 | |
mayaSDK/flux/core.py | FXTD-ODYSSEY/vscode-mayapy | 20 | 6616445 | <filename>mayaSDK/flux/core.py
"""
# DEV_FLAG
# import flux.ui.core
# import flux.utils
"""
from flux.ui.core import *
from flux.utils import endProgressBar
from flux.utils import getMayaWidget
from flux.utils import getQtWidgetAtPos
from flux.undo import undoChunk
from flux.utils import getColourFromLabel
from flux.utils import UTF8Wrapper
from flux.utils import getFuncFullName
from flux.utils import registerStringResources
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin as dockableMixin
from flux.utils import loadStringResources
from flux.utils import getFluxString
from flux.utils import startProgressBar
from flux.utils import printCallStack
from flux.utils import str_res as res
from flux.utils import str_res
from flux.utils import importJSONFile
from flux.utils import getReadFileName
from flux.utils import getWriteFileName
from flux.utils import getWidgetNameAtPos
from flux.utils import createColorIcon
from flux.utils import exportJSONFile
from flux.utils import getStringResource
from flux.utils import csv_reader
from flux.utils import applyRotations
from flux.utils import mayaViewport
from flux.utils import csv_writer
from flux.utils import mayaWindow
from flux.utils import stepProgressBar
# Colour constants. NOTE(review): the empty lists look like auto-generated
# stub placeholders (this file lives under mayaSDK and merely re-exports
# flux.utils for IDE completion) — the real values are presumably supplied by
# the actual flux package at runtime; confirm before relying on them here.
kWhite = []
kLabelColour = []
kPurple = []
kBlue = []
kColours = []
kRed = []
kGreen = []
kGrey = []
kOrange = []
kYellow = []
# Dotted name of the wrapped module; matches the `from flux.utils import ...`
# re-exports above.
moduleIdentifier = 'flux.utils'
| <filename>mayaSDK/flux/core.py
"""
# DEV_FLAG
# import flux.ui.core
# import flux.utils
"""
from flux.ui.core import *
from flux.utils import endProgressBar
from flux.utils import getMayaWidget
from flux.utils import getQtWidgetAtPos
from flux.undo import undoChunk
from flux.utils import getColourFromLabel
from flux.utils import UTF8Wrapper
from flux.utils import getFuncFullName
from flux.utils import registerStringResources
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin as dockableMixin
from flux.utils import loadStringResources
from flux.utils import getFluxString
from flux.utils import startProgressBar
from flux.utils import printCallStack
from flux.utils import str_res as res
from flux.utils import str_res
from flux.utils import importJSONFile
from flux.utils import getReadFileName
from flux.utils import getWriteFileName
from flux.utils import getWidgetNameAtPos
from flux.utils import createColorIcon
from flux.utils import exportJSONFile
from flux.utils import getStringResource
from flux.utils import csv_reader
from flux.utils import applyRotations
from flux.utils import mayaViewport
from flux.utils import csv_writer
from flux.utils import mayaWindow
from flux.utils import stepProgressBar
kWhite = []
kLabelColour = []
kPurple = []
kBlue = []
kColours = []
kRed = []
kGreen = []
kGrey = []
kOrange = []
kYellow = []
moduleIdentifier = 'flux.utils'
| en | 0.180305 | # DEV_FLAG # import flux.ui.core # import flux.utils | 1.3868 | 1 |
calculator_simple/main.py | jdpalmquist/python_school | 0 | 6616446 | #
# Simple interactive four-function calculator.
run = True
while run:
    print("Welcome to the simple python calculator!")
    print(" ")
    number1 = int(input("Please enter the first number:"))
    number2 = int(input("Please enter the second number:"))
    # Re-prompt until a valid menu choice (1-4) is entered.
    # BUGFIX: the original gate `operation >= 1 or operation <= 4` was always
    # True, and the loop condition `operation <= 0` let invalid choices > 4
    # print "try again" without actually re-prompting.
    operation = 0
    while not 1 <= operation <= 4:
        print("Now choose a mathematical operation to perform on them: ")
        print("1 - Add")
        print("2 - Subtract")
        print("3 - Multiply")
        print("4 - Divide")
        operation = int(input("Choice (1-4): "))
        if operation == 1:
            print(number1, "+", number2, "=", str(number1 + number2))
        elif operation == 2:
            print(number1, "-", number2, "=", str(number1 - number2))
        elif operation == 3:
            print(number1, "x", number2, "=", str(number1 * number2))
        elif operation == 4:
            if number2 != 0:
                print(number1, "/", number2, "=", str(number1 / number2))
            else:
                print("Divide by Zero error!")
        else:
            print("Invalid choice, try again!")
    keep_going = input("Would you like to do another calculation? (y/n):")
    if keep_going != 'y' and keep_going != 'Y':
        run = False
| #
run = True
while run:
print("Welcome to the simple python calculator!")
print(" ")
number1 = int(input("Please enter the first number:"))
number2 = int(input("Please enter the second number:"))
operation = 0
while operation <= 0:
print("Now choose a mathematical operation to perform on them: ")
print("1 - Add")
print("2 - Subtract")
print("3 - Multiply")
print("4 - Divide")
operation = int(input("Choice (1-4): "))
if operation >= 1 or operation <= 4:
if operation == 1:
print(number1, "+", number2, "=", str(number1 + number2))
elif operation == 2:
print(number1, "-", number2, "=", str(number1 - number2))
elif operation == 3:
print(number1, "x", number2, "=", str(number1 * number2))
elif operation == 4:
if number2 != 0:
print(number1, "/", number2, "=", str(number1 / number2))
else:
print("Divide by Zero error!")
else:
print("Invalid choice, try again!")
keep_going = input("Would you like to do another calculation? (y/n):")
if keep_going != 'y' and keep_going != 'Y':
run = False
| none | 1 | 4.109721 | 4 | |
create_discordIDs.py | johpro/virtual-conference | 3 | 6616447 | import os
import sys
import pickle
# Validate required inputs before touching sys.argv / the file system.
if "DATA_FOLDER" not in os.environ:
    print("You must set $DATA_FOLDER to a folder which contains the working data of this tool.")
    sys.exit(1)
if len(sys.argv) != 4:
    print("Usage: {} ServerID SyncChannelID RoleChannelID".format(sys.argv[0]))
    # BUGFIX: previously execution fell through after printing the usage line
    # and crashed with an IndexError on sys.argv below.
    sys.exit(1)

discordIDs = {"Server": int(sys.argv[1]),
              "SyncChannel": int(sys.argv[2]),
              "RoleChannel": int(sys.argv[3])}

# Persist the IDs; `with` guarantees the file is closed even if dump fails.
with open(os.environ["DATA_FOLDER"] + "/discordIDs.dat", "wb") as f:
    pickle.dump(discordIDs, f)
| import os
import sys
import pickle
if(not "DATA_FOLDER" in os.environ):
print("You must set $DATA_FOLDER to a folder which contains the working data of this tool.")
sys.exit(1)
if(len(sys.argv) != 4):
print("Usage: {} ServerID SyncChannelID RoleChannelID".format(sys.argv[0]))
discordIDs = {"Server": int(sys.argv[1]),
"SyncChannel": int(sys.argv[2]),
"RoleChannel": int(sys.argv[3])}
f = open(os.environ["DATA_FOLDER"] + "/discordIDs.dat", "wb")
pickle.dump(discordIDs, f)
f.close()
| none | 1 | 2.363476 | 2 | |
player.py | Herbsi/pathfinder | 0 | 6616448 | import helpers
from json_serialization import json_class
@json_class
class Player:
    """The player character: stats, gold and an inventory of items.

    Items are either *passive* (their stat bonus applies while the item is
    held — see addItem/remove) or active (consumed via :meth:`use`).
    Arbitrary keyword overrides are accepted so instances can be re-hydrated
    from serialized JSON (see the ``json_class`` decorator).
    """
    # TODO make child of Inventory_Holder
    def __init__(self, **player):
        self.name = ""
        self.health = 100
        self.attack = 0
        self.defense = 0
        self.speed = 0
        self.gold = 100
        self.inventory = []
        self.__dict__.update(player)

    @property
    def isdead(self):
        """True once health has dropped below 1."""
        return self.health < 1

    @property
    def isalive(self):
        return not self.isdead

    def _adjust_attribute(self, item, sign):
        """Apply (+1) or undo (-1) an item's stat bonus on this player."""
        new_value = getattr(self, item.influenced_attribute) + sign * item.amount
        setattr(self, item.influenced_attribute, new_value)

    def print_stats(self):
        """Print the character sheet to stdout."""
        print("Name: {}".format(self.name))
        print("Attributes:")
        print()
        print(" * Attack: {}".format(self.attack))
        print(" * Defense: {}".format(self.defense))
        print(" * Speed: {}".format(self.speed))
        print()

    def createNewCharacter(self):
        """Interactively create a character; loops until stats are confirmed."""
        while True:
            print("Welcome to P0 Dungeon Quest character creator!")
            self.name = input("Enter your name: ")
            if self.__assigned_points():
                break

    def __assigned_points(self):
        """Ask for attack/defense/speed (<= 100 points total).

        Returns True when the user confirms the entered stats, False to
        restart character creation.
        """
        while True:
            print("You have 100 points to assign to your character.")
            print(
                "Start now to assign those Points to your characters attack, defense and speed."
            )
            for stat in ["Attack", "Defense", "Speed"]:
                new_value = helpers.validInput(
                    "{}: ".format(stat),
                    "Please input a positive integer.",
                    lambda s: s > 0,
                    cast=int,
                )
                setattr(self, stat.lower(), new_value)
            if self.attack + self.defense + self.speed <= 100:
                break
            print(
                "Sorry, it seems like you spent more than 100 ability points on your character... Try that again!"
            )
        print()
        print("Before you store your character please confirm your stats!")
        self.print_stats()
        answer = input("Is this correct? (Y/N) ")
        while answer.lower() not in ["y", "n"]:
            answer = input("Please enter Y/y for yes or N/n for no! ")
        if answer.lower() == "n":
            return False
        return True

    def listInventory(self):
        """Interactive inventory menu: list items, then use/drop/quit."""
        while True:
            if not self.inventory:
                print("Your inventory is empty.")
                return
            print("Welcome to your inventory {}!".format(self.name))
            print("These are your items:")
            print()
            for item in self.inventory:
                print(" * {0.name:<20} ({0.effect})".format(item))
            print()
            print("Type 'quit' or the name of the item you want to use/drop:")
            user_input = input("> ")
            if user_input == "quit":
                return
            try:
                user_item = self.getItemByName(user_input)
                print(
                    "Do you want to 'use' or 'drop' {}? Else 'quit'.".format(
                        user_item.name
                    )
                )
                user_input = input("> ")
                if user_input == "use":
                    self.use(user_item)
                    return
                elif user_input == "drop":
                    self.drop(user_item)
                    return
                else:
                    print("Nothing done.")
                    return
            except KeyError:
                # Unknown item name: show the error and re-display the menu.
                print("Item does not exist.")

    def use(self, item):
        """Consume an active item, applying its bonus permanently."""
        # Consistency fix: truthiness test like remove()/addItem() instead of
        # the stricter `is True` the original used only here.
        if item.passive_effect:
            print("You cannot use this item.")
        else:
            self._adjust_attribute(item, +1)
            self.remove(item)
            print("You used {0.name}.".format(item))
            print(
                "It increased your {0.influenced_attribute} by {0.amount}.".format(item)
            )
            print(
                "You now have {} {}.".format(
                    getattr(self, item.influenced_attribute), item.influenced_attribute
                )
            )

    def drop(self, item):
        """Discard an item (passive bonuses are undone by remove())."""
        print("You dropped {}.".format(item.name))
        self.remove(item)

    def remove(self, item):
        """Take an item out of the inventory, undoing any passive bonus."""
        if item.passive_effect:
            self._adjust_attribute(item, -1)
        self.inventory.remove(item)

    def addItem(self, item):
        """Put an item into the inventory, applying any passive bonus."""
        if item.passive_effect:
            self._adjust_attribute(item, +1)
        self.inventory.append(item)

    def getItemByName(self, name):
        """Return the first inventory item called `name`; raise KeyError if absent."""
        for item in self.inventory:
            if item.name == name:
                return item
        raise KeyError("Invalid Item!")

    def die(self):
        """Drop the whole inventory (and passive bonuses) on death."""
        while self.inventory:
            self.remove(self.inventory[0])

    def revive(self):
        """Restore the player to full health."""
        self.health = 100
| import helpers
from json_serialization import json_class
@json_class
class Player:
# TODO make child of Inventory_Holder
def __init__(self, **player):
self.name = ""
self.health = 100
self.attack = 0
self.defense = 0
self.speed = 0
self.gold = 100
self.inventory = []
self.__dict__.update(player)
@property
def isdead(self):
return self.health < 1
@property
def isalive(self):
return not self.isdead
def print_stats(self):
print("Name: {}".format(self.name))
print("Attributes:")
print()
print(" * Attack: {}".format(self.attack))
print(" * Defense: {}".format(self.defense))
print(" * Speed: {}".format(self.speed))
print()
def createNewCharacter(self):
while True:
print("Welcome to P0 Dungeon Quest character creator!")
self.name = input("Enter your name: ")
if self.__assigned_points():
break
def __assigned_points(self):
while True:
print("You have 100 points to assign to your character.")
print(
"Start now to assign those Points to your characters attack, defense and speed."
)
for stat in ["Attack", "Defense", "Speed"]:
new_value = helpers.validInput(
"{}: ".format(stat),
"Please input a positive integer.",
lambda s: s > 0,
cast=int,
)
setattr(self, stat.lower(), new_value)
if self.attack + self.defense + self.speed <= 100:
break
print(
"Sorry, it seems like you spent more than 100 ability points on your character... Try that again!"
)
print()
print("Before you store your character please confirm your stats!")
self.print_stats()
answer = input("Is this correct? (Y/N) ")
while answer.lower() not in ["y", "n"]:
answer = input("Please enter Y/y for yes or N/n for no! ")
if answer.lower() == "n":
return False
return True
def listInventory(self):
while True:
if not self.inventory:
print("Your inventory is empty.")
return
print("Welcome to your inventory {}!".format(self.name))
print("These are your items:")
print()
for item in self.inventory:
print(" * {0.name:<20} ({0.effect})".format(item))
print()
print("Type 'quit' or the name of the item you want to use/drop:")
user_input = input("> ")
if user_input == "quit":
return
try:
user_item = self.getItemByName(user_input)
print(
"Do you want to 'use' or 'drop' {}? Else 'quit'.".format(
user_item.name
)
)
user_input = input("> ")
if user_input == "use":
self.use(user_item)
return
elif user_input == "drop":
self.drop(user_item)
return
else:
print("Nothing done.")
return
except KeyError:
print("Item does not exist.")
def use(self, item):
if item.passive_effect is True:
print("You cannot use this item.")
else:
new_value = getattr(self, item.influenced_attribute) + item.amount
setattr(self, item.influenced_attribute, new_value)
self.remove(item)
print("You used {0.name}.".format(item))
print(
"It increased your {0.influenced_attribute} by {0.amount}.".format(item)
)
print(
"You now have {} {}.".format(
getattr(self, item.influenced_attribute), item.influenced_attribute
)
)
def drop(self, item):
print("You dropped {}.".format(item.name))
self.remove(item)
def remove(self, item):
if item.passive_effect:
new_value = getattr(self, item.influenced_attribute) - item.amount
setattr(self, item.influenced_attribute, new_value)
self.inventory.remove(item)
def addItem(self, item):
if item.passive_effect:
new_value = getattr(self, item.influenced_attribute) + item.amount
setattr(self, item.influenced_attribute, new_value)
self.inventory.append(item)
def getItemByName(self, name):
for item in self.inventory:
if item.name == name:
return item
raise KeyError("Invalid Item!")
def die(self):
while self.inventory:
self.remove(self.inventory[0])
def revive(self):
self.health = 100
| en | 0.620992 | # TODO make child of Inventory_Holder | 3.66785 | 4 |
src/dataset_processing_scripts/CBIS-DDSM-bounding-box.py | Adamouization/Breast-Cancer-Detection-Mammogram-Deep-Learning-Publication | 1 | 6616449 | import os
import pandas as pd
from pathlib import Path
import pydicom
from skimage.measure import label, regionprops
import tensorflow as tf
import tensorflow_io as tfio
import numpy as np
def main() -> None:
    """
    Initial dataset pre-processing for the CBIS-DDSM dataset to generate the bounding box ground truth for images
    :return: None
    """
    csv_path = "../data/CBIS-DDSM-mask/final_mask_training.csv"
    as_df = pd.read_csv(csv_path)
    # Output is opened in append mode; each record is written as "\n" + row,
    # matching the original format (leading newline per row). The file-name
    # typo "groud" is kept deliberately — downstream tooling may rely on it.
    # `with` guarantees the handle is closed even if a mask fails to process.
    with open("../data/CBIS-DDSM-mask/bbox_groud_truth.txt", "a") as f:
        for i in range(as_df.shape[0]):
            # Bounding box of the tumour in the ground-truth mask image.
            minr, minc, maxr, maxc = get_bbox_of_mask(as_df["mask_img_path"][i])
            # Row format: <img_path>,<minr>,<minc>,<maxr>,<maxc>,<class=0>
            row = ",".join(
                [as_df["img_path"][i], str(minr), str(minc), str(maxr), str(maxc), "0"]
            )
            f.write("\n")
            f.write(row)
def get_bbox_of_mask(mask_path):
    # Process input ground truth mask and generate bounding box dimensions of tumours.
    # Returns (min_row, min_col, max_row, max_col) of the first region found
    # after padding/resizing the DICOM mask to 1024x640.
    image_bytes = tf.io.read_file(mask_path)
    image = tfio.image.decode_dicom_image(image_bytes, color_dim = True, dtype=tf.uint16)
    # NOTE(review): resize_with_pad interpolates pixel values; on a binary mask
    # this can produce intermediate values that regionprops treats as distinct
    # labels — confirm the masks stay binary after resizing.
    image = tf.image.resize_with_pad(image, 1024, 640)
    array = np.array(image)
    array = array[0,:,:,0].astype(int)  # drop the batch and channel dimensions
    regions = regionprops(array)
    # Assumes the mask contains at least one region; raises IndexError otherwise.
    region = regions[0]
    minr, minc, maxr, maxc = region.bbox
    return minr, minc, maxr, maxc
if __name__ == '__main__':
main()
| import os
import pandas as pd
from pathlib import Path
import pydicom
from skimage.measure import label, regionprops
import tensorflow as tf
import tensorflow_io as tfio
import numpy as np
def main() -> None:
"""
Initial dataset pre-processing for the CBIS-DDSM dataset to generate the bounding box ground truth for images
:return: None
"""
csv_path = "../data/CBIS-DDSM-mask/final_mask_training.csv"
as_df = pd.read_csv(csv_path)
f = open("../data/CBIS-DDSM-mask/bbox_groud_truth.txt", "a")
for i in range(as_df.shape[0]):
string = as_df["img_path"][i]
# Get bounding box
minr, minc, maxr, maxc = get_bbox_of_mask(as_df["mask_img_path"][i])
string += "," + str(minr)
string += "," + str(minc)
string += "," + str(maxr)
string += "," + str(maxc)
string += ",0"
f.write("\n")
f.write(string)
f.close()
def get_bbox_of_mask(mask_path):
# Process input ground truth mask and generate bounding box dimensions of tumours
image_bytes = tf.io.read_file(mask_path)
image = tfio.image.decode_dicom_image(image_bytes, color_dim = True, dtype=tf.uint16)
image = tf.image.resize_with_pad(image, 1024, 640)
array = np.array(image)
array = array[0,:,:,0].astype(int)
regions = regionprops(array)
region = regions[0]
minr, minc, maxr, maxc = region.bbox
return minr, minc, maxr, maxc
if __name__ == '__main__':
main()
| en | 0.711966 | Initial dataset pre-processing for the CBIS-DDSM dataset to generate the bounding box ground truth for images :return: None # Get bounding box # Process input ground truth mask and generate bounding box dimensions of tumours | 2.256166 | 2 |
DQM/BeamMonitor/python/BeamSpotDipServer_cff.py | malbouis/cmssw | 852 | 6616450 | <reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
beamSpotDipServer = DQMEDAnalyzer("BeamSpotDipServer",
  monitorName = cms.untracked.string("BeamSpotDipServer"),
  # behaviour flags
  verbose = cms.untracked.bool(False),
  testing = cms.untracked.bool(False),
  # DIP publication topics (beam spot / luminous region / primary vertices)
  subjectCMS = cms.untracked.string("dip/CMS/Tracker/BeamSpot"),
  subjectLHC = cms.untracked.string("dip/CMS/LHC/LuminousRegion"),
  subjectPV = cms.untracked.string("dip/CMS/Tracker/PrimaryVertices"),
  # read the beam-fit results from the NFS files configured below
  readFromNFS = cms.untracked.bool(True),
  # source of the online DCS record
  dcsRecordInputTag = cms.InputTag ( "onlineMetaDataDigis" ),
  # beam-fit result files written by the beam monitor DQM client
  sourceFile = cms.untracked.string(
    "/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt"),
  sourceFile1 = cms.untracked.string(
    "/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults_TkStatus.txt"),
  # NOTE(review): presumably timeouts in lumisections — confirm the semantics
  # in the BeamSpotDipServer plugin implementation
  timeoutLS = cms.untracked.vint32(1,2)
)
| import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
beamSpotDipServer = DQMEDAnalyzer("BeamSpotDipServer",
monitorName = cms.untracked.string("BeamSpotDipServer"),
#
verbose = cms.untracked.bool(False),
testing = cms.untracked.bool(False),
#
subjectCMS = cms.untracked.string("dip/CMS/Tracker/BeamSpot"),
subjectLHC = cms.untracked.string("dip/CMS/LHC/LuminousRegion"),
subjectPV = cms.untracked.string("dip/CMS/Tracker/PrimaryVertices"),
#
readFromNFS = cms.untracked.bool(True),
#
dcsRecordInputTag = cms.InputTag ( "onlineMetaDataDigis" ),
#
sourceFile = cms.untracked.string(
"/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt"),
sourceFile1 = cms.untracked.string(
"/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults_TkStatus.txt"),
#
timeoutLS = cms.untracked.vint32(1,2)
) | en | 0.487337 | # # # # # # | 1.358685 | 1 |