index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,800 | cc6956aead8a3d326134cf7fd6105dc74a6daeef | import sys, time, os, heapq, threading
# command line arguments
history = sys.argv[1]         # file of historical bids, one integer per line
control_input = sys.argv[2]   # file that is tailed for control commands
control_output = sys.argv[3]  # NOTE(review): never used below -- confirm intent
bid_stream_1 = sys.argv[4]    # NOTE(review): never used below -- confirm intent
# Max-heap emulated with Python's min-heap by storing negated values.
bid_heap = []
heapq.heapify(bid_heap)
history_file = open(history)
for line in history_file:
    heapq.heappush(bid_heap, -1*int(line.strip()))
history_file.close()
""" taken from stack overflow"""
#Set the filename and open the file
file = open(control_input,'r')
#Find the size of the file and move to the end (tail -f style follow)
st_results = os.stat(control_input)
st_size = st_results[6]  # index 6 of os.stat() result is st_size
file.seek(st_size)
while 1:
    where = file.tell()
    line = file.readline()
    if not line:
        # no new data yet: wait, then rewind to the same offset and retry
        time.sleep(1)
        file.seek(where)
    else:
        line.strip()  # NOTE(review): return value discarded -- this strip() has no effect
        # print line, # already has newline
        input_command = line.split()
        print input_command
        try:
            # print input_command[0]
            total_bids = int(input_command[1])
            bids_requested = int(input_command[2])
            if input_command[0] == 'top':
                print "len(bid_heap)"
                print len(bid_heap)
                if len(bid_heap) == total_bids:
                    # heap holds negatives, so nsmallest == the largest bids
                    print heapq.nsmallest(bids_requested, bid_heap)
                else:
                    print "old data"
        except:  # NOTE(review): bare except silently swallows malformed commands
            pass
# print bid_heap |
24,801 | 018282b6d07d8a468fb5c31cd1d21a3314c6e9d0 | """
Author: Jan Kowal
Description:
Firebase class that has all functionality to upload and download data from the broker
"""
# imports
import pyrebase
from datetime import datetime
class Firebase:
#Firebase DB config
_config = {
"apiKey": "<apiKey>",
"authDomain": "<authDomain>",
"databaseURL": "<databaseURL>",
"projectId": "<projectId>",
"storageBucket": "<storageBucket>",
"messagingSenderId": "<messagingSenderId>",
"serviceAccount": "<path_to_file>"
}
# login credentials as private access vars
_login = "<login>"
_password = "<password>"
    # Push a partial update of path/value pairs to the realtime database.
    def update_data(self, data):
        self._db.update(data)
    # Clear the rental fields of book `id`, marking it as returned.
    def return_the_book(self, id):
        self._db.update({
            "books/%s/rented_by" % id: "",
            "books/%s/rented_date" % id: "",
        })
    # Fetch the whole database snapshot as a plain dict.
    def get_data(self):
        return self._db.get().val()
    # Record which user is currently in front of the camera.
    # NOTE(review): assumes users/<face_id> stores the user's e-mail -- confirm schema.
    def set_active_user(self, face_id):
        email = self.get_data()['users'][str(face_id)]
        self.update_data({
            "login/current_email" : email,
            "login/last_seen" : datetime.now().strftime("%d-%m-%Y %H:%M:%S")
        })
    # Register a new user with the Firebase e-mail/password auth backend.
    def create_user(self, email, password):
        self._auth.create_user_with_email_and_password(email, password)
#Constructor method
def __init__(self):
# initialize firebase instance
self._firebase = pyrebase.initialize_app(self._config)
self._auth = self._firebase.auth()
self._user = self._auth.sign_in_with_email_and_password(self._login, self._password)
self._db = self._firebase.database() |
24,802 | aa75316732355e588c2ddd62658b60694e91ce7d | class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
# Create a hashmap
# for each word, transform them into alphabet increasing order form
# then add into hashmap
hashmap = {}
for word in strs:
w = sorted(word)
w = "".join(w)
if w in hashmap:
hashmap[w].append(word)
else:
hashmap[w] = [word]
output = []
for w in hashmap:
output.append(hashmap[w])
return output
|
24,803 | 1524e2003abcd9912d5d0d4750600b7a66f68992 | #!/usr/bin/env python
# Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy
import awkward
import uproot_methods
from uproot_methods import *
import inspect
class Test(unittest.TestCase):
def runTest(self):
pass
def test_issue10(self):
p4 = TLorentzVectorArray.from_ptetaphim(awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]))
assert p4.mass.tolist() == [[1.0]]
assert p4[0].mass.tolist() == [1.0]
assert p4[0][0].mass == 1.0
assert p4[0][0]._to_cartesian().mass == 0.9999999999999999
assert type(p4.mass) is awkward.JaggedArray
assert type(p4.x) is awkward.JaggedArray
p3 = TVector3Array.from_cylindrical(awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]))
assert p3.rho.tolist() == [[1.0]]
assert p3[0].rho.tolist() == [1.0]
assert p3[0][0].rho == 1.0
assert type(p3.rho) is awkward.JaggedArray
assert type(p3.x) is awkward.JaggedArray
p2 = TVector2Array.from_polar(awkward.JaggedArray.fromiter([[1.0]]), awkward.JaggedArray.fromiter([[1.0]]))
assert p2.rho.tolist() == [[1.0]]
assert p2[0].rho.tolist() == [1.0]
assert p2[0][0].rho == 1.0
assert type(p2.rho) is awkward.JaggedArray
assert type(p2.x) is awkward.JaggedArray
def test_issue39(self):
counts = [2,2,2]
mask = [True, False, True]
pt = awkward.JaggedArray.fromcounts(counts, [42.71, 31.46, 58.72, 30.19, 47.75, 10.83])
eta = awkward.JaggedArray.fromcounts(counts, [0.54, 1.57, -2.33, -1.22, -2.03, -0.37])
phi = awkward.JaggedArray.fromcounts(counts, [-2.13, 0.65, 2.74, 0.36, 2.87, -0.47])
pt = pt[mask]
eta = eta[mask]
phi = phi[mask]
electrons = uproot_methods.TLorentzVectorArray.from_ptetaphim(pt, eta, phi, 0.000511)
|
24,804 | a2729e131ebcdb0928185e953a93350a6a8daddb | # coding: utf-8
#$ header class Point(public)
#$ header method __init__(Point, double [:])
#$ header method __del__(Point)
#$ header method translate(Point, double [:])
class Point(object):
    """Point wrapping a coordinate array; see the pyccel `#$ header` directives above."""
    def __init__(self, x):
        self.x = x  # x: double[:] coordinate array (per the pyccel header)
    def __del__(self):
        pass
    def translate(self, a):
        # shift every coordinate by the corresponding entry of `a`
        self.x = self.x + a
# Driver code. NOTE(review): `ones`, `zeros` and `double` are presumably
# provided by the pyccel runtime -- they are not plain-Python builtins; confirm.
x = ones(3, double)
p = Point (x)
a = zeros(3, double)
a[0] = 3
p.translate(a)
print(p.x)
b = p.x[0]
b = p.x[0] + 1.0
b = 2 * p.x[0] + 1.0
b = 2 * ( p.x[0] + 1.0 )
print(b)
p.x[1] = 2.0
del p
|
24,805 | 06ad7cbb53e039a6419ae0878f3e0b5a18ba84c9 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 21:19:41 2018
@author: t.pawlowski
"""
# String formatting and numeric types - LAB
name = 'Chris'
age = 17
daysInYear = 365
'''
message = name+' is '+str(age)+' years old, so is about '+str(age*daysInYear)+' days old'
print(message)
message1 = '%s is %d years old, so is about %d days old'
print(message1 % (name,age,daysInYear*age))
message2 = '{0:s} is {1:d} years old, so is about {2:d} days old'
print(message2.format(name,age,age*daysInYear))
'''
a = 1234567890
b = 12345
# str.format demo: integer (floor) division `//` and remainder `%`
message9 = '{0:d} divided by {1:d} is {2:d} and rest is {3:d}'
print(message9.format(a,b,a//b,a%b))
#zajebicie zaliczone |
24,806 | 38a55ce873d943bab3617e54594fc55a897968c7 | #!/usr/bin/python
def main():
    """Read n and a list; report whether one swap or one reversal sorts it.

    Python 2 script (raw_input / xrange / print statements).
    """
    n = input()  # NOTE(review): read but never used -- length comes from the data itself
    l = map(int, raw_input().split())
    s = sorted(l)
    left, right = None, None
    # first index where the list disagrees with its sorted version
    for i in xrange(len(l)):
        if l[i] != s[i]:
            left = i
            break
    # last index where the list disagrees with its sorted version
    for j in xrange(len(l)):
        ind = len(l)-1-j
        if l[ind] != s[ind]:
            right = ind
            break
    # NOTE(review): if the input is already sorted, left/right remain None and
    # the slices below raise TypeError -- confirm the input is never pre-sorted.
    swap = l[left+1:right] == s[left+1:right]
    reverse = l[left:right+1][::-1] == s[left:right+1]
    if swap or reverse: # swap
        print 'yes'
        if swap:
            print 'swap ' + str(left+1) + ' ' + str(right+1)
        elif reverse:
            print 'reverse ' + str(left+1) + ' ' + str(right+1)
    else:
        print 'no'
if __name__ == '__main__':
    main()
|
24,807 | 36336c9afc3471b25a48127f349b96da9bb53962 | from django import forms
from caritas_app import models
class ResponsavelForm(forms.ModelForm):
class Meta:
model = models.Responsavel
fields = (
'nome_responsavel',
'rg_responsavel',
'nis_responsavel',
'cpf_responsavel',
'nascimento_responsavel',
'rua_responsavel',
'numero_responsavel',
'bairro_responsavel',
'telefone_responsavel',
'email_responsavel')
class InstrutorForm(forms.ModelForm):
class Meta:
model = models.Instrutor
fields = (
'nome_instrutor',
'especialidade_instrutor',
'rg_instrutor',
'cpf_instrutor',
'nascimento_instrutor',
'rua_instrutor',
'numero_instrutor',
'bairro_instrutor',
'cidade_instrutor',
'telefone_instrutor',
'email_instrutor')
class AtendidoForm(forms.ModelForm):
class Meta:
model = models.Atendido
fields = (
'nome_atendido',
'ra_atendido',
'nis_atendido',
'nascimento_atendido',
'rua_atendido',
'numero_atendido',
'bairro_atendido',
'cidade_atendido',
'telefone_atendido',
'email_atendido',
'cod_responsavel'
)
def __init__(self, *args, **kwargs):
super(AtendidoForm, self).__init__(*args, **kwargs)
class OficinaForm(forms.ModelForm):
class Meta:
model = models.Oficina
fields = (
'nome_oficina',
'descricao_oficina',
'objetivo_oficina',
'publico_oficina'
)
class EspacoForm(forms.ModelForm):
class Meta:
model = models.Espaco
fields = (
'nome_espaco',
'lugares_espaco'
)
class PeriodoForm(forms.ModelForm):
class Meta:
model = models.Periodo
fields = (
'dia_periodo',
'periodo_periodo',
'horario_periodo'
)
class AtividadeForm(forms.ModelForm):
class Meta:
model = models.Atividade
fields = (
'cod_instrutor',
'cod_oficina'
)
class TurmaForm(forms.ModelForm):
class Meta:
model = models.Turma
fields = (
'nome_turma',
'cod_atividade'
)
class Responsavel_AtividadeForm(forms.ModelForm):
class Meta:
model = models.Responsavel_Atividade
fields = (
'cod_responsavel',
'cod_atividade'
)
class Periodo_AtividadeForm(forms.ModelForm):
class Meta:
model = models.Periodo_Atividade
fields = (
'cod_periodo',
'cod_atividade'
)
class Espaco_AtividadeForm(forms.ModelForm):
class Meta:
model = models.Espaco_Atividade
fields = (
'cod_espaco',
'cod_atividade'
)
class Turma_AtendidoForm(forms.ModelForm):
class Meta:
model = models.Turma_Atendido
fields = (
'cod_atendido',
'cod_turma'
)
class ChamadaForm(forms.ModelForm):
class Meta:
model = models.Chamada
fields = (
'data_chamada',
'cod_turma',
'atendidos_presentes'
)
class Turma_Atendido_ChamadaForm(forms.ModelForm):
class Meta:
model = models.Turma_Atendido_Chamada
fields = (
'cod_turma_atendido',
'cod_chamada',
'presente_turma_atendido_chamada'
)
|
24,808 | f1532e9761a7a305a459c08a531ef88d09e1dc68 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 20 11:42:31 2014
the Gaussian feature based on the DGPLVM and mutual information
@author: mountain
"""
from GPLVM_test import DGPLVM
import numpy as np
from scg import SCG
class GS_ftr_model:
def __init__(self,Y_tar,cls_tar,Y_src,cls_src,beta,dim=20,delta=1e-1):
#beta is the coefficent of the mutual information
#delta is the para in X_prior
#dim is the dimension of the latent variable X
self.Y_tar=Y_tar
self.cls_tar=cls_tar
self.Y_src=Y_src
self.cls_src=cls_src
self.beta=beta
self.DGPLVM_tar=DGPLVM(Y_tar,dim,cls_tar,delta)
self.DGPLVM_src=DGPLVM(Y_src,dim,cls_src,delta)
self.Y=np.concatenate((self.Y_tar, self.Y_src), axis=0)
self.cls=np.concatenate((self.cls_tar,self.cls_src))
self.DGPLVM_all=DGPLVM(self.Y,dim,self.cls,delta)
def poster_hyper(self):
self.poster_tar=np.exp(self.DGPLVM_tar.GP.marginal())+np.exp(self.DGPLVM_tar.GP.hyper_prior())
self.poster_all=np.exp(self.DGPLVM_all.GP.marginal())+np.exp(self.DGPLVM_all.GP.hyper_prior())
def ll_hyper(self,params=None):
self.DGPLVM_tar.GP.set_params(params)
self.DGPLVM_all.GP.set_params(params)
self.poster_hyper()
return self.DGPLVM_tar.GP.ll(params=params)*(1-self.beta*self.poster_tar)+\
self.beta*self.poster_all*(self.DGPLVM_all.GP.ll(params=params)-self.DGPLVM_src.GP.ll(params=params))
def ll_hyper_grad(self,params=None):
self.DGPLVM_tar.GP.set_params(params)
self.DGPLVM_all.GP.set_params(params)
self.poster_hyper()
return -(self.beta*(-self.DGPLVM_tar.GP.ll(params)+1)*self.poster_tar-1)*self.DGPLVM_tar.GP.ll_grad(params)\
-self.beta*self.poster_all*self.DGPLVM_src.GP.ll_grad(params)\
-self.beta*(self.DGPLVM_all.GP.ll(params)-self.DGPLVM_src.GP.ll(params)-1)*self.DGPLVM_all.GP.ll_grad(params)*self.poster_all
def optimise_GP_kernel(self,iters=1000):
"""Optimise the marginal likelihood. work with the log of beta - fmin works better that way. """
new_params=SCG(self.ll_hyper,self.ll_hyper_grad,np.hstack((self.DGPLVM_tar.GP.kernel.get_params(), np.log(self.DGPLVM_tar.GP.beta))),maxiters=iters,display=True,func_flg=0)
#gtol=1e-10,epsilon=1e-10,
# new_params = fmin_cg(self.ll,np.hstack((self.kernel.get_params(), np.log(self.beta))),fprime=self.ll_grad,maxiter=iters,gtol=1e-10,disp=False)
self.DGPLVM_src.GP.set_params(new_params)
self.DGPLVM_tar.GP.set_params(new_params)
self.DGPLVM_all.GP.set_params(new_params)
def poster_data(self,data_type):
if data_type=="tar":
self.poster_data_tar=np.exp(self.DGPLVM_tar.GP.marginal())+np.exp(self.DGPLVM_tar.x_prior())
elif data_type=="all":
self.poster_data_all=np.exp(self.DGPLVM_all.GP.marginal())+np.exp(self.DGPLVM_all.x_prior())
def ll(self,xx,i,xx_l,data_type):
if data_type=="tar":
self.DGPLVM_tar.GP.X[i]=xx
self.DGPLVM_tar.GP.update()
self.poster_data("tar")
self.DGPLVM_all.GP.X[i]=xx
self.DGPLVM_all.GP.update()
self.poster_data("all")
return self.DGPLVM_tar.ll(xx,i,xx_l,0)*(1-self.beta*self.poster_data_tar)+\
self.beta*self.poster_data_all*(self.DGPLVM_all.ll(xx,i,xx_l,0)+self.DGPLVM_src.GP.marginal_value+self.DGPLVM_src.x_prior_value)
elif data_type=="src":
self.DGPLVM_src.GP.X[i]=xx
self.DGPLVM_src.GP.update()
self.DGPLVM_all.GP.X[i+self.DGPLVM_tar.N]=xx
self.DGPLVM_all.GP.update()
self.poster_data("all")
# return (-self.DGPLVM_tar.GP.marginal()-self.DGPLVM_tar.x_prior())*(1-self.beta*self.poster_data_tar)+\
# self.beta*self.poster_data_all*(self.DGPLVM_all.ll(xx,i+self.DGPLVM_tar.N,xx_l)-self.DGPLVM_src.ll(xx,i,xx_l))
return self.beta*self.poster_data_all*(self.DGPLVM_all.ll(xx,i+self.DGPLVM_tar.N,xx_l,0)-self.DGPLVM_src.ll(xx,i,xx_l))
def ll_grad(self,xx,i,xx_l,data_type):
if data_type=="tar":
self.DGPLVM_tar.GP.X[i]=xx
self.DGPLVM_tar.GP.update()
self.poster_data("tar")
self.DGPLVM_all.GP.X[i]=xx
self.DGPLVM_all.GP.update()
self.poster_data("all")
return -(self.beta*(-self.DGPLVM_tar.ll(xx,i,xx_l,0)+1)*self.poster_data_tar-1)*self.DGPLVM_tar.ll_grad(xx,i,xx_l,0)\
-self.beta*(self.DGPLVM_all.ll(xx,i,xx_l,0)-1+self.DGPLVM_src.GP.marginal_value+self.DGPLVM_src.x_prior_value)*self.poster_data_all*self.DGPLVM_all.ll_grad(xx,i,xx_l,0)
if data_type=="src":
self.DGPLVM_all.GP.X[i+self.DGPLVM_tar.N]=xx
self.DGPLVM_all.GP.update()
self.poster_data("all")
#self.poster_data("tar")
#return (-self.DGPLVM_tar.GP.marginal()-self.DGPLVM_tar.x_prior())*(1-self.beta*self.poster_data_tar)-self.beta*self.poster_data_all*self.DGPLVM_src.ll_grad(xx,i,xx_l)\
#-self.beta*(self.DGPLVM_all.ll(xx,i+self.DGPLVM_tar.N,xx_l)-self.DGPLVM_src.ll(xx,i,xx_l)-1)*self.poster_data_all*self.DGPLVM_all.ll_grad(xx,i+self.DGPLVM_tar.N,xx_l)
return -self.beta*self.poster_data_all*self.DGPLVM_src.ll_grad(xx,i,xx_l)\
-self.beta*(self.DGPLVM_all.ll(xx,i+self.DGPLVM_tar.N,xx_l,0)-self.DGPLVM_src.ll(xx,i,xx_l)-1)*self.poster_data_all*self.DGPLVM_all.ll_grad(xx,i+self.DGPLVM_tar.N,xx_l,0)
def optimise_latents(self):
xtemp=np.zeros(self.DGPLVM_tar.GP.X.shape)
xtemp_src=np.zeros(self.DGPLVM_src.GP.X.shape)
self.DGPLVM_src.GP.marginal()
self.DGPLVM_src.x_prior()
for i,yy in enumerate(self.DGPLVM_tar.GP.Y):
original_x = self.DGPLVM_tar.GP.X[i].copy()
xx_l=self.DGPLVM_tar.cls[i]
#xopt = optimize.fmin_cg(self.ll,self.GP.X[i],fprime=self.ll_grad,gtol=1e-10,disp=True,args=(i,xx_l))
xopt=SCG(self.ll,self.ll_grad,self.DGPLVM_tar.GP.X[i],optargs=(i,xx_l,"tar"),display=False)
self.DGPLVM_tar.GP.X[i] = original_x
xtemp[i] = xopt
for i,yy in enumerate(self.DGPLVM_src.GP.Y):
original_x = self.DGPLVM_src.GP.X[i].copy()
xx_l=self.DGPLVM_src.cls[i]
#xopt = optimize.fmin_cg(self.ll,self.GP.X[i],fprime=self.ll_grad,gtol=1e-10,disp=True,args=(i,xx_l))
xopt=SCG(self.ll,self.ll_grad,self.DGPLVM_src.GP.X[i],optargs=(i,xx_l,"src"),display=False)
self.DGPLVM_src.GP.X[i] = original_x
xtemp_src[i] = xopt
self.DGPLVM_tar.GP.X=xtemp.copy()
self.DGPLVM_src.GP.X=xtemp_src.copy()
def learn(self,niters):
for i in range(niters):
self.optimise_latents()
self.optimise_GP_kernel()
def predict(self,ynew,nhidden=5,mlp_alpha=2):
return self.DGPLVM_tar.predict(ynew,nhidden,mlp_alpha)
|
24,809 | 59bcbacdae9cf17732a792f5985a2441283bb402 | import argparse
import csv
import os.path
from lib import db
def is_valid_file(parser, arg):
    """Argparse type-callback: return `arg` when it names an existing path.

    Otherwise abort argument parsing via `parser.error()` (which raises
    SystemExit), so the trailing return is never reached in that case.
    """
    if os.path.exists(arg):
        return arg
    parser.error("The file %s does not exist!" % arg)
    return arg
# CLI: import raffle entries from a CSV file into the raffle database.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', dest='filename', required=True,
                    help="Input CSV file",
                    type=lambda x: is_valid_file(parser, x))
parser.add_argument('-s', '--shortcode', dest='shortcode', required=True,
                    help='Raffle shortcode', type=str)
parser.add_argument('-c', '--column', dest='column', required=False,
                    default='name', help='Column to use for identifier', type=str)
if __name__ == "__main__":
    args = parser.parse_args()
    entries = []  # NOTE(review): never used -- candidate for removal
    with open(args.filename, 'r') as f:
        reader = csv.DictReader(f)
        # register each row's identifier column for the given raffle
        for row in reader:
            db.register_for_raffle(
                args.shortcode,
                row[args.column]
            )
|
24,810 | 8a0c6ed6a7eefa5bb2f2d926cab0b47bc592dcd3 | """
This file will include the main functionality of the program.
It should display a menu of actions to the user and invoke the relevant function
"""
import Logic
import datetime
def prompt():
    """Print the action menu and return the user's choice exactly as typed."""
    menu = (
        "\nWhat would you like to do?",
        "1- Add employee manually",
        "2- Add employee from file",
        "3- Delete employee manually",
        "4- Delete employee from file",
        "5- Mark attendance",
        "6- Generate attendance report of an employee",
        "7- Generate monthly attendance report for all employees",
        "8- Generate late attendance report",
    )
    for entry in menu:
        print(entry)
    return input("Please select an action by typing its corresponding number: ")
def main():
    """Dispatch a single menu action chosen interactively by the user."""
    user_action = prompt()
    if user_action == '1':
        # gather employee details interactively
        name = input("Employee's name: ")
        phone = input("Employee's phone number: ")
        birth = input("Employee's year of birth: ")
        uid = input("Employee's ID: ")
        age = datetime.date.today().year - int(birth)
        # Create an Employee instance and call add_employee()
        new_emp = Logic.Employee(uid, name, phone, age)
        new_emp.add_employee()
    elif user_action == '2':
        file_path = input("Please enter the path of the file with employees to add: ")
        Logic.add_from_file(file_path)
    elif user_action == '3':
        uid_to_delete = input("Please select the UID of the user you'd like to delete: ")
        Logic.delete_emp(uid_to_delete)
    elif user_action == '4':
        file_path = input("Please enter the path of the file with employees to delete: ")
        Logic.delete_from_file(file_path)
    elif user_action == '5':
        pass  # TODO: mark attendance (not implemented)
    elif user_action == '6':
        pass  # TODO: per-employee attendance report (not implemented)
    elif user_action == '7':
        pass  # TODO: monthly attendance report (not implemented)
    elif user_action == '8':
        pass  # TODO: late attendance report (not implemented)
if __name__ == '__main__':
    main()
|
24,811 | 3c61241dbef0351a93e0ab85de3226906c230e9e | # -*- coding: utf-8 -*-
import logging
from collections import Counter
import re
from morph_analyzer import IgnoredToken, AnnotatedToken, WORD_WHITESPACE, WORD_PARAGRAPH, \
POS_ENDING, composable_pos_set
class Whitespace:
    """Marker token for a run of whitespace between real tokens."""
    def __init__(self):
        self.space_number = 1  # all whitespace runs collapse to one space
        self.text = ' ' * self.space_number
    def jsonify(self):
        return {'class': WORD_WHITESPACE}
    def __repr__(self):
        return '_'
class Paragraph:
    """Marker token for a paragraph break."""
    def __init__(self):
        pass
    def jsonify(self):
        return {'class': WORD_PARAGRAPH}
    def __repr__(self):
        return '_P'
# Matches runs of word characters, apostrophes included (e.g. "don't").
re_word_counter = re.compile(r"[\w']+", re.UNICODE)

def get_word_number(text):
    """Return the number of *distinct* word tokens occurring in `text`."""
    return len(set(re_word_counter.findall(text)))

def is_composable(pos):
    """True when `pos` belongs to the composable part-of-speech set."""
    return pos in composable_pos_set
class KTokenizer:
    """Korean tokenizer facade wrapping either the Twitter or the Mecab analyzer."""
    TWITTER = 1
    MECAB = 2
    def __init__(self, tokenizer=TWITTER):
        self.debug_mode = False
        self.lookedup_words = {}
        if tokenizer == KTokenizer.TWITTER:
            from twitter import TwitterAnalyzer
            self.tokenizer = TwitterAnalyzer(lambda x: x)
        elif tokenizer == KTokenizer.MECAB:
            from mecab_analyzer import MecabAnalyzer
            self.tokenizer = MecabAnalyzer()
        else:
            # BUG FIX: the exception was created but never raised, silently
            # leaving the instance without any tokenizer attribute.
            raise RuntimeError('Unknown tokenizer specified: ' + str(tokenizer))
    def parse(self, text):
        """Tokenize `text`, re-inserting whitespace markers between tokens.

        On analyzer failure, logs the error and returns the whole text as a
        single IgnoredToken (unless debug_mode, which re-raises).
        """
        self.lookedup_words = {}
        try:
            tokens = self.tokenizer.parse(text)
        except Exception as e:
            logging.exception(e)
            logging.error('Error on parsing text: ' + text)
            if self.debug_mode:
                raise
            return [IgnoredToken(text)]
        out = []
        current_pos = 0
        for index, token in enumerate(tokens):
            skipped_chars = 0
            # advance through source characters skipped by the analyzer
            while text[current_pos][0] != token.text[0]:
                current_pos += 1
                skipped_chars += 1
            if skipped_chars:
                out.append(Whitespace())  # convert all ws symbols to a space
            out.append(token)
            current_pos += len(token.text)
        return self.merge_tokens(out)
    def process_token(self, cur_token, prev_token, result_tokens):
        """Merge `cur_token` into `prev_token` when possible.

        Otherwise flush `prev_token` to `result_tokens`. Returns the token
        that remains pending for the next merge step.
        """
        if isinstance(prev_token, AnnotatedToken) and prev_token.pos in composable_pos_set:
            # a composable word absorbs a following grammatical ending
            if isinstance(cur_token, AnnotatedToken) and cur_token.pos == POS_ENDING:
                prev_token.add_decomposed(cur_token)
                return prev_token
            else:
                result_tokens.append(prev_token)
                return cur_token
        elif isinstance(prev_token, IgnoredToken):
            # consecutive ignored/whitespace tokens collapse into one
            if isinstance(cur_token, IgnoredToken) or \
                    isinstance(cur_token, Whitespace):
                prev_token.text += cur_token.text
                return prev_token
            else:
                result_tokens.append(prev_token)
                return cur_token
        else:
            result_tokens.append(prev_token)
            return cur_token
    def merge_tokens(self, tokens):
        """Collapse adjacent mergeable tokens (endings, ignored runs)."""
        if len(tokens) == 0:
            return tokens
        result_tokens = []
        prev_token = tokens[0]
        for cur_token in tokens[1:]:
            prev_token = self.process_token(cur_token, prev_token, result_tokens)
        result_tokens.append(prev_token)
        return result_tokens
def tokenize(ktokenizer, line_generator):
    """Tokenize every line produced by `line_generator`.

    Returns a 4-tuple: (jsonified token list, glossary set of dictionary
    forms, total word count, unique word count). A Paragraph marker is
    inserted after each line; the trailing one is dropped.
    """
    text_objs = []
    glossary = set()
    total_words = 0
    for line in line_generator():
        line_objs = ktokenizer.parse(line)
        # lookedup_words = ktokenizer.get_lookedup_words()
        for obj in line_objs:
            if isinstance(obj, AnnotatedToken):
                glossary.add(obj.dictionary_form)
        total_words += get_word_number(line)
        text_objs.extend(line_objs)
        text_objs.append(Paragraph())
    if len(text_objs):
        text_objs.pop()  # drop the trailing paragraph marker
    unique_words = len(glossary)
    return [obj.jsonify() for obj in text_objs], glossary, total_words, unique_words
if __name__ == '__main__':
pass
|
24,812 | 8f1eb03be22c8c5378bbd2bc36a009e3eeeb21ba | from setuptools import find_packages, setup
# Package metadata for the pytorch tutorial series.
setup(
    name='Intro_to_pytorch',
    packages=find_packages(),
    version='0.1.0',
    # typo fixes: "beginer" -> "beginner", "masater" -> "master"
    description='This is a series of beginner level tutorials to master pytorch.',
    author='Taofiq-Bakare',
    license='MIT',
)
|
24,813 | e3d84cf7b347dd937629f434f73f5afeba8dd912 | print("nhập 1 số")
n = int(input())
# flag == True means "n is prime"
flag = True
if n < 2:
    flag = False
elif n == 2:
    flag = True
elif n % 2 == 0:
    flag = False
else:
    # trial division over the odd candidates below n
    for i in range(3, n, 2):
        if n % i == 0:
            flag = False
            break  # a divisor was found; no need to keep testing
# BUG FIX: the original `if`/`else` lines were missing their trailing colons
# (SyntaxError) and the print statements were not indented under them.
if flag:
    print(n, "là snt")
else:
    print(n, "không là snt")
24,814 | e3c644184bf9b60f03465273d9f507d12e26f47c | class BinaryTree:
    '''
    A binary tree represented with nested Python lists:
    [root_value, left_subtree, right_subtree], where [] means "no subtree".
    '''
    def init_tree(self, root_value):
        # fresh tree with empty left/right children
        return [root_value, [], []]
    def inserLeft(self, binary_tree, left_value):
        temp_left = binary_tree.pop(1)  # detach the current left subtree first
        if len(temp_left) > 1:  # a left subtree exists: left_value becomes its new root, old subtree becomes left_value's left child
            binary_tree.insert(1, [left_value, temp_left, []])
        else:
            binary_tree.insert(1, [left_value, [], []])
        return binary_tree
    def insertRight(self, binary_tree, right_value):
        temp_right = binary_tree.pop(2)  # detach the current right subtree first
        if len(temp_right) > 1:  # a right subtree exists: right_value becomes its new root, old subtree becomes right_value's right child
            binary_tree.insert(2, [right_value, [], temp_right])
        else:
            binary_tree.insert(2, [right_value, [], []])
        return binary_tree
    def getRootValue(self, binary_tree):
        return binary_tree[0]
    def setRootValue(self, binary_tree, new_value):
        binary_tree[0] = new_value
    def getLeftChid(self, binary_tree):
        # NOTE(review): method name is missing the "l" of "Child" -- kept for callers
        return binary_tree[1]
    def getRightChild(self, binary_tree):
        return binary_tree[2]
# new_tree_object = BinaryTree()
# binary_tree = new_tree_object.init_tree(3) # 相当于定义了一个全局变量
# print(binary_tree)
# new_tree_object.inserLeft(binary_tree, 5)
# new_tree_object.inserLeft(binary_tree, 9)
# new_tree_object.insertRight(binary_tree, 4)
# new_tree_object.insertRight(binary_tree, 8)
# print(binary_tree)
#
# left_sub_tree = new_tree_object.getLeftChid(binary_tree)
# print(left_sub_tree)
# new_tree_object.setRootValue(left_sub_tree, 0, 1)
# print(binary_tree)
#
# new_tree_object.inserLeft(left_sub_tree, 2)
# print(binary_tree) |
24,815 | 5ea5044c07eb079451f44d9d37901f9de864486f | # -*- coding: utf-8 -*-
"""
test_rule_parsing
"""
import unittest
import orjson as json
from vulyk.blueprints.gamification.core.parsing import (JsonRuleParser,
RuleParsingException)
from vulyk.blueprints.gamification.core.rules import (ProjectRule, Rule,
RuleValidationException)
from ..base import BaseTest
class TestJsonRulesParsing(BaseTest):
def test_parse_ok(self):
image = 'base64 image'
name = 'achvm1'
descr = 'the very best acvmnt'
bonus = 100
tasks = 20
days = 21
weekend = True
adjacent = False
parsee = {
'badge': image,
'name': name,
'description': descr,
'bonus': bonus,
'tasks_number': tasks,
'days_number': days,
'is_weekend': weekend,
'is_adjacent': adjacent
}
string = json.dumps(parsee)
rule = Rule(
rule_id=str(hash(name)),
badge=image,
name=name,
description=descr,
bonus=bonus,
tasks_number=tasks,
days_number=days,
is_weekend=weekend,
is_adjacent=adjacent)
self.assertEqual(rule, JsonRuleParser.parse(string))
def test_parse_project_rule_ok(self):
project = 'fake_task'
image = 'base64 image'
name = 'achvm1'
descr = 'the very best acvmnt'
bonus = 100
tasks = 20
days = 21
weekend = True
adjacent = False
parsee = {
'task_type_name': project,
'badge': image,
'name': name,
'description': descr,
'bonus': bonus,
'tasks_number': tasks,
'days_number': days,
'is_weekend': weekend,
'is_adjacent': adjacent
}
string = json.dumps(parsee)
rule = ProjectRule(rule_id=str(hash(name + project)),
task_type_name=project,
badge=image,
name=name,
description=descr,
bonus=bonus,
tasks_number=tasks,
days_number=days,
is_weekend=weekend,
is_adjacent=adjacent)
self.assertEqual(rule, JsonRuleParser.parse(string))
def test_parse_limits_tasks(self):
image = 'base64 image'
name = 'achvm1'
descr = 'the very best acvmnt'
bonus = 100
tasks = 20
days = 21
weekend = True
adjacent = False
rule = Rule(
rule_id=str(hash(name)),
badge=image,
name=name,
description=descr,
bonus=bonus,
tasks_number=tasks,
days_number=days,
is_weekend=weekend,
is_adjacent=adjacent)
self.assertEqual(rule.limit, tasks)
def test_parse_limits_days(self):
image = 'base64 image'
name = 'achvm1'
descr = 'the very best acvmnt'
bonus = 100
tasks = 0
days = 21
weekend = True
adjacent = False
rule = Rule(
rule_id=str(hash(name)),
badge=image,
name=name,
description=descr,
bonus=bonus,
tasks_number=tasks,
days_number=days,
is_weekend=weekend,
is_adjacent=adjacent)
self.assertEqual(rule.limit, days)
def test_parse_hash(self):
image = 'base64 image'
name = 'achvm1'
descr = 'the very best acvmnt'
bonus = 100
tasks = 20
days = 21
weekend = True
adjacent = False
parsee = {
'badge': image,
'name': name,
'description': descr,
'bonus': bonus,
'tasks_number': tasks,
'days_number': days,
'is_weekend': weekend,
'is_adjacent': adjacent
}
string = json.dumps(parsee)
self.assertEqual(JsonRuleParser.parse(string).id,
str(hash(name)))
def test_fail_non_json(self):
string = "<xml></xml>"
self.assertRaises(RuleParsingException,
lambda: JsonRuleParser.parse(string))
def test_fail_malformed_json(self):
string = '{"1": , "2": "2"}'
self.assertRaises(RuleParsingException,
lambda: JsonRuleParser.parse(string))
def test_fail_incomplete_json(self):
image = 'base64 image'
# let's omit name
descr = 'the very best acvmnt'
bonus = 100
tasks = 20
days = 21
weekend = True
adjacent = True
parsee = {
'badge': image,
'description': descr,
'bonus': bonus,
'tasks_number': tasks,
'days_number': days,
'is_weekend': weekend,
'is_adjacent': adjacent
}
string = json.dumps(parsee)
self.assertRaises(RuleParsingException,
lambda: JsonRuleParser.parse(string))
def test_fail_adjacent_weekend_tasks(self):
parsee = {
'badge': 'base64 image',
'name': 'achvm1',
'description': 'the very best acvmnt',
'bonus': 100,
'tasks_number': 20,
'days_number': 7,
'is_weekend': True,
'is_adjacent': True
}
string = json.dumps(parsee)
self.assertRaises(RuleValidationException,
lambda: JsonRuleParser.parse(string))
def test_fail_adjacent_days_tasks(self):
parsee = {
'badge': 'base64 image',
'name': 'achvm1',
'description': 'the very best acvmnt',
'bonus': 100,
'tasks_number': 20,
'days_number': 7,
'is_weekend': False,
'is_adjacent': True
}
string = json.dumps(parsee)
self.assertRaises(RuleValidationException,
lambda: JsonRuleParser.parse(string))
def test_fail_zero_adjacent_weekends_tasks(self):
parsee = {
'badge': 'base64 image',
'name': 'achvm1',
'description': 'the very best acvmnt',
'bonus': 100,
'tasks_number': 20,
'days_number': 0,
'is_weekend': True,
'is_adjacent': True
}
string = json.dumps(parsee)
self.assertRaises(RuleValidationException,
lambda: JsonRuleParser.parse(string))
def test_fail_zero_adjacent_days_tasks(self):
parsee = {
'badge': 'base64 image',
'name': 'achvm1',
'description': 'the very best acvmnt',
'bonus': 100,
'tasks_number': 20,
'days_number': 0,
'is_weekend': False,
'is_adjacent': True
}
string = json.dumps(parsee)
self.assertRaises(RuleValidationException,
lambda: JsonRuleParser.parse(string))
def test_fail_zero_days_zero_tasks(self):
parsee = {
'badge': 'base64 image',
'name': 'achvm1',
'description': 'the very best acvmnt',
'bonus': 100,
'tasks_number': 0,
'days_number': 0,
'is_weekend': False,
'is_adjacent': False
}
string = json.dumps(parsee)
self.assertRaises(RuleValidationException,
lambda: JsonRuleParser.parse(string))
if __name__ == '__main__':
unittest.main()
|
24,816 | 2b1fdc351fd508b19677911f3ddee7513899d80d |
import datetime
# Day-count milestone offsets (10k/20k/25k/30k days alive).
onetd = datetime.timedelta(days=10000)
twotd = datetime.timedelta(days=20000)
two5td = datetime.timedelta(days=25000)
threetd = datetime.timedelta(days=30000)
fredrikbirth = datetime.date(1962,4,15)
print("Fredrik")
# "dagar" is Swedish for "days"
print(" 10000 dagar ->", fredrikbirth + onetd)
print(" 20000 dagar ->", fredrikbirth + twotd)
print(" 25000 dagar ->", fredrikbirth + two5td)
print(" 30000 dagar ->", fredrikbirth + threetd)
print('-' * 20)
|
24,817 | e27e35b1819c2d74b334a0f355455fb1b3a32622 | #!/usr/bin/env python
from __future__ import print_function
# Read a dotted-quad IP address and print its four octets in 12-wide columns.
# NOTE(review): under Python 2 (suggested by the __future__ import) input()
# evaluates the text -- raw_input() would be the safe choice there; confirm target.
my_var = input("enter an IP address")
my_var = my_var.split(".")
print("{:<12} {:<12} {:<12} {:<12}".format(*my_var))
|
24,818 | 599b265609cdb755db9aab2dea44642db1cac24a | # python3.7
"""Collects all augmentation pipelines."""
from .no_aug import NoAug
from .ada_aug import AdaAug
__all__ = ['build_aug']
_AUGMENTATIONS = {
'NoAug': NoAug,
'AdaAug': AdaAug
}
def build_aug(aug_type, **kwargs):
    """Builds a differentiable augmentation pipeline based on its class type.

    Args:
        aug_type: Case-sensitive class type of the augmentation to build.
        **kwargs: Extra keyword arguments forwarded to the constructor.

    Returns:
        An instance of the requested augmentation pipeline.

    Raises:
        ValueError: If the `aug_type` is not supported.
    """
    if aug_type in _AUGMENTATIONS:
        return _AUGMENTATIONS[aug_type](**kwargs)
    raise ValueError(f'Invalid augmentation type: `{aug_type}`!\n'
                     f'Types allowed: {list(_AUGMENTATIONS)}.')
|
24,819 | 6cefec556a00f03edd7df96101163d844facb25e | #
# Enthought product code
#
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
""" General utility methods for the web version """
import socket
def get_free_port():
    """
    Return a TCP port number that is currently free.

    An unbound socket is bound to port 0 so the OS assigns an unused port;
    the chosen port is read back with getsockname() and the socket is closed
    immediately.  The caller should bind to the returned port right away,
    before another process can grab it.
    """
    probe = socket.socket()
    probe.bind(('', 0))                  # port 0 => OS picks any free port
    assigned = probe.getsockname()[1]    # read back the assigned port
    probe.close()                        # release it for the caller to use
    return assigned
def start_web_app(template, context, port=8000):
    """
    Start a web app at the given port for serving the jigna view for the given
    template and context.

    Blocking: this starts the tornado IOLoop and does not return until the
    loop is stopped externally.
    """
    # Imported lazily so tornado/jigna are only required when actually serving.
    from tornado.ioloop import IOLoop
    from jigna.web_app import WebApp
    ioloop = IOLoop.instance()
    app = WebApp(template=template, context=context)
    app.listen(port)
    print 'Starting the web app on port %s ...' % port
    ioloop.start()
|
24,820 | b90f0400394cdceff8f7a431c33e7a01d9bea255 | import unittest
from migrations.test_base import OperationTestBase
from django.db import NotSupportedError, connection
from django.db.migrations.state import ProjectState
from django.db.models import Index
from django.test import modify_settings, override_settings
from django.test.utils import CaptureQueriesContext
from . import PostgreSQLTestCase
try:
from django.contrib.postgres.operations import (
AddIndexConcurrently, CreateExtension, RemoveIndexConcurrently,
)
from django.contrib.postgres.indexes import BrinIndex, BTreeIndex
except ImportError:
pass
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
@modify_settings(INSTALLED_APPS={'append': 'migrations'})
class AddIndexConcurrentlyTests(OperationTestBase):
    """Tests for the django.contrib.postgres AddIndexConcurrently operation."""
    app_label = 'test_add_concurrently'
    def test_requires_atomic_false(self):
        """The operation must refuse to run inside an atomic migration."""
        project_state = self.set_up_test_model(self.app_label)
        new_state = project_state.clone()
        operation = AddIndexConcurrently(
            'Pony',
            Index(fields=['pink'], name='pony_pink_idx'),
        )
        msg = (
            'The AddIndexConcurrently operation cannot be executed inside '
            'a transaction (set atomic = False on the migration).'
        )
        with self.assertRaisesMessage(NotSupportedError, msg):
            # atomic=True simulates a migration that did not set atomic = False.
            with connection.schema_editor(atomic=True) as editor:
                operation.database_forwards(self.app_label, editor, project_state, new_state)
    def test_add(self):
        """Forwards adds the index, backwards removes it; deconstruct() round-trips."""
        project_state = self.set_up_test_model(self.app_label, index=False)
        table_name = '%s_pony' % self.app_label
        index = Index(fields=['pink'], name='pony_pink_idx')
        new_state = project_state.clone()
        operation = AddIndexConcurrently('Pony', index)
        self.assertEqual(
            operation.describe(),
            'Concurrently create index pony_pink_idx on field(s) pink of '
            'model Pony'
        )
        operation.state_forwards(self.app_label, new_state)
        self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 1)
        self.assertIndexNotExists(table_name, ['pink'])
        # Add index.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertIndexExists(table_name, ['pink'])
        # Reversal.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertIndexNotExists(table_name, ['pink'])
        # Deconstruction.
        name, args, kwargs = operation.deconstruct()
        self.assertEqual(name, 'AddIndexConcurrently')
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'model_name': 'Pony', 'index': index})
    def test_add_other_index_type(self):
        """Non-default index classes (BrinIndex) are created with their type."""
        project_state = self.set_up_test_model(self.app_label, index=False)
        table_name = '%s_pony' % self.app_label
        new_state = project_state.clone()
        operation = AddIndexConcurrently(
            'Pony',
            BrinIndex(fields=['pink'], name='pony_pink_brin_idx'),
        )
        self.assertIndexNotExists(table_name, ['pink'])
        # Add index.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertIndexExists(table_name, ['pink'], index_type='brin')
        # Reversal.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertIndexNotExists(table_name, ['pink'])
    def test_add_with_options(self):
        """Index storage parameters (fillfactor) survive concurrent creation."""
        project_state = self.set_up_test_model(self.app_label, index=False)
        table_name = '%s_pony' % self.app_label
        new_state = project_state.clone()
        index = BTreeIndex(fields=['pink'], name='pony_pink_btree_idx', fillfactor=70)
        operation = AddIndexConcurrently('Pony', index)
        self.assertIndexNotExists(table_name, ['pink'])
        # Add index.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertIndexExists(table_name, ['pink'], index_type='btree')
        # Reversal.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertIndexNotExists(table_name, ['pink'])
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
@modify_settings(INSTALLED_APPS={'append': 'migrations'})
class RemoveIndexConcurrentlyTests(OperationTestBase):
    """Tests for the django.contrib.postgres RemoveIndexConcurrently operation."""
    app_label = 'test_rm_concurrently'
    def test_requires_atomic_false(self):
        """The operation must refuse to run inside an atomic migration."""
        project_state = self.set_up_test_model(self.app_label, index=True)
        new_state = project_state.clone()
        operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx')
        msg = (
            'The RemoveIndexConcurrently operation cannot be executed inside '
            'a transaction (set atomic = False on the migration).'
        )
        with self.assertRaisesMessage(NotSupportedError, msg):
            with connection.schema_editor(atomic=True) as editor:
                operation.database_forwards(self.app_label, editor, project_state, new_state)
    def test_remove(self):
        """Forwards drops the index, backwards recreates it; deconstruct() round-trips."""
        project_state = self.set_up_test_model(self.app_label, index=True)
        table_name = '%s_pony' % self.app_label
        self.assertTableExists(table_name)
        new_state = project_state.clone()
        operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx')
        self.assertEqual(
            operation.describe(),
            'Concurrently remove index pony_pink_idx from Pony',
        )
        operation.state_forwards(self.app_label, new_state)
        self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 0)
        self.assertIndexExists(table_name, ['pink'])
        # Remove index.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertIndexNotExists(table_name, ['pink'])
        # Reversal.
        with connection.schema_editor(atomic=False) as editor:
            operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertIndexExists(table_name, ['pink'])
        # Deconstruction.
        name, args, kwargs = operation.deconstruct()
        self.assertEqual(name, 'RemoveIndexConcurrently')
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'model_name': 'Pony', 'name': 'pony_pink_idx'})
class NoExtensionRouter():
    """Database router that disallows all migrations; used to check that
    CreateExtension respects allow_migrate()."""
    def allow_migrate(self, db, app_label, **hints):
        # Reject every migration on every database.
        return False
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
class CreateExtensionTests(PostgreSQLTestCase):
    """Tests that CreateExtension honours database routers."""
    app_label = 'test_allow_create_extention'
    @override_settings(DATABASE_ROUTERS=[NoExtensionRouter()])
    def test_no_allow_migrate(self):
        """With a router forbidding migrations, no SQL at all is issued."""
        operation = CreateExtension('uuid-ossp')
        project_state = ProjectState()
        new_state = project_state.clone()
        # Don't create an extension.
        with CaptureQueriesContext(connection) as captured_queries:
            with connection.schema_editor(atomic=False) as editor:
                operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertEqual(len(captured_queries), 0)
        # Reversal.
        with CaptureQueriesContext(connection) as captured_queries:
            with connection.schema_editor(atomic=False) as editor:
                operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertEqual(len(captured_queries), 0)
    def test_allow_migrate(self):
        """With the default router, forwards issues CREATE EXTENSION and
        backwards issues DROP EXTENSION."""
        operation = CreateExtension('uuid-ossp')
        project_state = ProjectState()
        new_state = project_state.clone()
        # Create an extension.
        with CaptureQueriesContext(connection) as captured_queries:
            with connection.schema_editor(atomic=False) as editor:
                operation.database_forwards(self.app_label, editor, project_state, new_state)
        self.assertIn('CREATE EXTENSION', captured_queries[0]['sql'])
        # Reversal.
        with CaptureQueriesContext(connection) as captured_queries:
            with connection.schema_editor(atomic=False) as editor:
                operation.database_backwards(self.app_label, editor, new_state, project_state)
        self.assertIn('DROP EXTENSION', captured_queries[0]['sql'])
|
24,821 | 97106e1502ca3f1000e8e281ae63968a22fab7f6 | list = ['a','b','c','b','a']
list1 = list
list1.reverse()
if list == list1:
print 'Its a palindrome string'
else:
print 'Its not a palindrome string'
|
24,822 | 9437881acbeed2df364224b9e77cb4f61c471497 | import os
# Aggregate correctness/throughput/latency results from ptest/<nserver>/*
# into performance.csv, one row per server count:
#   nserver, read_throughput, write_throughput, read_latency, write_latency
# Files are opened with `with` so every handle is closed (the original
# leaked one file handle per result file and the CSV handle itself).
with open('performance.csv', 'w') as out:
    for nserver in os.listdir("ptest"):
        wt = 0.0      # summed write throughput (qps)
        rt = 0.0      # summed read throughput (qps)
        wl = 0.0      # summed write latency (ms)
        rl = 0.0      # summed read latency (ms)
        correct = True
        files = os.listdir("ptest/" + nserver + "/")
        for fname in files:
            with open("ptest/" + nserver + "/" + fname) as f:
                for line in f:
                    print("{}\n\n".format(line))
                    tokens = line.split(':')
                    if tokens[0] == "Correctness":
                        # 1000-of-1000 operations must have been correct.
                        correct = int(tokens[1].split("of")[0].strip()) == 1000
                    elif tokens[0] == "Throughput":
                        if "write" in tokens[1]:
                            wt += float(tokens[1].split("qps")[0].strip())
                        elif "read" in tokens[1]:
                            rt += float(tokens[1].split("qps")[0].strip())
                    elif tokens[0] == "Latency":
                        if "write" in tokens[1]:
                            wl += float(tokens[1].split("ms")[0].strip())
                        elif "read" in tokens[1]:
                            rl += float(tokens[1].split("ms")[0].strip())
        # Average latencies over the number of result files; guard against an
        # empty directory (the original crashed with a NameError there).
        if files:
            rl = rl / float(len(files))
            wl = wl / float(len(files))
        out.write("%s,%.6f,%.6f,%.6f,%.6f\n" % (nserver, rt, wt, rl, wl))
|
24,823 | b45616882b382ca8bbaf266fd0d333f6af94ab85 | #! -*- coding: utf-8 -*-
import datetime
import markdown
from django.db import models
from django.contrib.auth.models import User
from tagging.fields import TagField
from tagging.models import Tag
class Works(models.Model):
    """A user-authored work; the Markdown in `content` is rendered into
    `content_html` automatically on every save."""
    title = models.CharField(u'题目', max_length=255)
    content = models.TextField(u'内容')  # raw Markdown source
    content_html = models.TextField(editable=False)  # derived: rendered HTML
    creater = models.ForeignKey(User)
    pub_date = models.DateTimeField(auto_now_add=True)  # set once at creation
    updated_date = models.DateTimeField(auto_now=True)  # refreshed each save
    tags = TagField(u'标签', blank=True, help_text=u"标签间请用空格分隔")
    class Meta:
        ordering = ['-pub_date']  # newest first
    def __unicode__(self):
        return "Works %s" % self.title
    @models.permalink
    def get_absolute_url(self):
        """Reverse the 'ws_page' URL for this work."""
        return ('ws_page', (), {
            'ws_pk': self.pk
        })
    def save(self, *args, **kwargs):
        """Render the Markdown body to HTML before the normal save."""
        self.content_html = markdown.markdown(self.content)
        super(Works, self).save(*args, **kwargs)
    def _get_tags(self):
        # Tags are stored via django-tagging, not on the model row itself.
        return Tag.objects.get_for_object(self)
    def _set_tags(self, tags):
        return Tag.objects.update_tags(self, tags)
    # Read/write access to this work's tags through django-tagging.
    obj_tags = property(_get_tags, _set_tags)
|
24,824 | fd571fd8a7512238b796337be648754989bd8826 | # (1) Prompt the user to input an integer between 0 and 155, a float, a character, and a string, storing each into separate vari# ables. Then, output those four values on a single line separated by a space. (Submit for 2 points).
# Read one value of each type, echo the four values on a single
# space-separated line (as the assignment above requires), then show the
# tuples and the character's Unicode code point.
userInt = int(input('Enter integer (0 - 155): \n'))
userFloat = float(input("Enter a float number such as 2.0: \n"))
userChar = input("Enter a single character such as 'g'. We will later convert that to Unicode: \n")
userString = input("Enter a string such as \"the dog runs fast\": \n")
userUni = ord(userChar)
myCombo = (userInt, userFloat, userChar, userString)
reverseCombo = (userString, userChar, userFloat, userInt)
# The assignment asks for the four values on one line separated by spaces;
# printing the tuple alone shows parentheses and commas instead.
print(userInt, userFloat, userChar, userString)
print(myCombo)
print(reverseCombo)
print(userUni)
|
24,825 | 60abb78a3a484e5b6296969e409a7805335d2bde | # -*- coding: utf-8 -*-
from aiohttp_baseapi.data_providers.model import ModelDataProvider
from apps.demo.models import Book, Author
class BooksDataProvider(ModelDataProvider):
    """Data provider exposing the Book model through the generic provider."""
    model = Book
class AuthorsDataProvider(ModelDataProvider):
    """Data provider exposing the Author model through the generic provider."""
    model = Author
|
24,826 | fc3788659679edc86c0c96b1ac0c76a6b471c4e5 | class Solution:
def factorTarget(self, nums, target):
#target = x1 + x2
#x2 = target - x1
num_val = {}
for i, num in enumerate(nums):
num_val[num]=i
for num in nums:
##Find factors
##Check hashtable for factors
##Return index (numval_[num]) of item using hash table
def twoSum(self, nums: List[int], target: int) -> List[int]:
factors = self.factorTarget(nums, target)
if factors is not None:
return factors
return None
|
24,827 | b610bbe1f447a0202fcb79e7cfacf1e8ea967970 | #CONSTANTS
BSTARTROW = 0
WSTARTROW = 5
WHITE = 'w'
BLACK = 'b'
EMPTY = '-'
WIDTH = 5
HEIGHT = 6
WIN_FOR = WHITE
LOS_FOR = BLACK
WIN_VALUE = 100
class pos:
    """A simple mutable 2-D board coordinate."""

    def __init__(self, x, y):
        """
        Create a new position.
        :param x: an x position.
        :param y: a y position.
        """
        self.x = x
        self.y = y

    def __repr__(self):
        """
        Display position in user friendly manner.
        :return: User friendly representation, e.g. "(1,2)".
        """
        return "(%s,%s)" % (self.x, self.y)

    def get(self):
        """
        Return the position as an (x, y) tuple.
        :return: Position in form of a tuple (x,y).
        """
        return (self.x, self.y)
class Pawn:
    """A colored pawn sitting at a board position."""

    def __init__(self, x, y, color):
        """Create a pawn of `color` at board coordinate (x, y)."""
        self.pos = pos(x, y)
        self.color = color

    def __repr__(self):
        """
        Display the pawn in a user friendly manner, e.g. "((1,2),w)".
        """
        return "(%s,%s)" % (self.pos, self.color)

    def isColor(self, color):
        """
        Return True when this pawn has the given color (BLACK or WHITE).
        """
        return self.color == color

    def move(self, x, y):
        """
        Relocate the pawn to (x, y), mutating its position in place.
        """
        self.pos.x = x
        self.pos.y = y
class Board:
    # Represent the gameState pawned
    # Minimax assumes objects that respond to the following methods:
    #    __str__(): return a unique string describing the state of the game (for use in hash table)
    #    isTerminal(): checks if the game is at a terminal state
    #    successors(): returns a list of all legal game states that extend this one by one move
    #       in this version, the list consists of a move,state pair
    #    isMinNode(): returns True if the node represents a state in which Min is to move
    #    isMaxNode(): returns True if the node represents a state in which Max is to move
    def __init__(self,state,player=WHITE):
        """
        Create a new object
        :param state: a description of the board for the current state
        :param player: whose turn it isto play in the current state
        :return:
        """
        if(state==None):
            # Fresh game: empty board with each side's pawns on its start row.
            self.gameState = dict()
            for x in range(0,WIDTH):
                for y in range(0,HEIGHT):
                    self.gameState[x,y] = EMPTY
            for x in range(0,WIDTH):
                self.gameState[x,BSTARTROW] = BLACK#Blacks starting row
                self.gameState[x,WSTARTROW] = WHITE#Whites starting row
        else:
            self.gameState = state
        self.whoseTurn = player
        self.cachedWin = False # set to True in winFor() once a winner is found
        self.cachedWinner = None
    def __repr__(self):
        """
        Used for debugging and displaying in user friendly manner.
        :return: User friendly string of the current board state.
        """
        s = ""
        for y in range(0,HEIGHT):
            temp=""
            for x in range(0,WIDTH):
                temp = temp+ str(self.gameState[x,y])
            s += temp+"\n"
        return s
    def __str__(self):
        """
        Translate the board description into a string. Used for a hash table.
        :return: A string that describes the board in the current state.
        """
        s=""
        for y in range(0,HEIGHT):
            for x in range(0,WIDTH):
                s+=str(self.gameState[x,y])
        return s
    def getPawn(self,x,y):
        """
        Gives a pawn on the position x,y or returns empty if none exists
        :param x: x coordinate on board
        :param y: y coordinate on board
        :return: Pawn on the coordinate x,y or None if EMPTY
        """
        if(self.gameState[x,y]==EMPTY):
            return
        return Pawn(x,y,self.gameState[x,y])
    def isMinNode(self):
        """ *** needed for search ***
        :return: True if it's Min's turn to play
        """
        return self.whoseTurn==LOS_FOR
    def isMaxNode(self):
        """ *** needed for search ***
        :return: True if it's Max's turn to play
        """
        return self.whoseTurn==WIN_FOR
    def isTerminal(self):
        """ *** needed for search ***
        :param node: a game tree node with stored game state
        :return: a boolean indicating if node is terminal
        """
        return self.winFor(WHITE) or self.winFor(BLACK) or (len(self.successors()) == 0)
    def winFor(self,player):
        """
        Check if it's a win for player.
        :param player: either BLACK or WHITE
        :return: True if player has a pawn on its end row
        """
        if(self.cachedWin == False):
            won = False
            if(player==WHITE):
                # White wins by reaching row 0 (black's home row).
                for x in range(0,WIDTH):
                    if(self.gameState[x,0]==WHITE):
                        won = True
            elif(player==BLACK):
                # Black wins by reaching the bottom row.
                for x in range(0,WIDTH):
                    if(self.gameState[x,HEIGHT-1]==BLACK):
                        won = True
            if(len(self.successors()) == 0):#IF there are no available moves for both players
                bCount = self.count(BLACK) #check who has the most pawns
                # BUG FIX: this previously recounted BLACK, so the two counts
                # were always equal and the pawn-majority tie-break never ran.
                wCount = self.count(WHITE)
                # NOTE(review): whichever count is larger, the *querying*
                # player is cached as winner -- confirm this is intended.
                if(bCount>wCount):
                    self.cachedWin = True
                    self.cachedWinner = player
                    return True
                if(wCount>bCount):
                    self.cachedWin = True
                    self.cachedWinner = player
                    return True
            if(won):
                self.cachedWin = True
                self.cachedWinner = player
                return True
            else:
                return False
        else:
            return player == self.cachedWinner
    #Used to decide who to win for
    def utility(self):
        """ *** needed for search ***
        :return: +WIN_VALUE if win for WHITE, -WIN_VALUE for win for BLACK, 0 for draw
        """
        if(self.winFor(WIN_FOR)):
            return WIN_VALUE
        elif(self.winFor(BLACK)):
            return -WIN_VALUE
        else:
            return 0
    def togglePlayer(self,p):
        """
        :param p: either 'b' or 'w'
        :return: other players symbol
        """
        if(p==WHITE):
            return BLACK
        else:
            return WHITE
    def intPlayer(self,p):
        """
        :param p: either 'b' or 'w'
        :return: the player's forward direction along the y axis
                 (-1 for white moving up, +1 for black moving down)
        """
        if(p==WHITE):
            return -1
        else:
            return 1
    def count(self,color):
        """
        Gets the number pawns on the board of a color.
        :param color: Color we are comparing to search for.
        :return: Number of color pawns on the board.
        """
        count = 0
        for y in range(0,HEIGHT):
            for x in range(0,WIDTH):
                if(self.gameState[x,y]==color):
                    count+=1
        return count
    #MOVEMENTS
    def inBounds(self,pos):
        """
        Tells if a position is in the game bounds
        :param pos: position to be evauluated is tuple (x,y)
        :return: true if the position is within the bounds
                 false otherwise
        """
        return ((pos.x<WIDTH) & (pos.x>=0) & (pos.y<HEIGHT) & (pos.y>=0))
    def successors(self):
        """ *** needed for search ***
        :return: list of moves available to whoseTurn it is
        """
        return [self.move(p,m) for (p,m) in self.openMoves() if (p.color==self.whoseTurn)]
    def movePos(self,p,intMove):
        """
        Takes a pawn and returns it's relative move position
        :param p: A Pawn on the board
        :param intMove: The type of move wanted
                0: move forward relative to player
                -1: attack left relative to player
                1: attack right relative to player
        :return: A pos with the desired move relative to the pawn's position
        """
        return pos(p.pos.x-(intMove*self.intPlayer(p.color)),p.pos.y+self.intPlayer(p.color))
    def legalMove(self,p,intMove):
        """
        Tells if a move is legal
        :param p: Pawn to be moved
        :param intMove: The type of move wanted
                0: move forward relative to player
                -1: attack left relative to player
                1: attack right relative to player
        :return: True if the move is legal
        """
        mPos = self.movePos(p,intMove)#board position of move
        if(self.inBounds(mPos)!=True):#Can't make move out of board bounds
            return False
        if(intMove==0):#to move forward the node must be empty
            return (self.gameState[mPos.get()] == EMPTY)
        else:#to attack the node must have an enemy
            return (self.gameState[mPos.get()] == self.togglePlayer(p.color))
    def openMoves(self):
        """ *** needed for search ***
        Gets all legal available moves including those for the oppenent
        :return: list of (pawn, intMove) pairs
        """
        arr = []
        for y in range(0,HEIGHT):
            for x in range(0,WIDTH):
                t = self.getPawn(x,y)
                if(t!=None):
                    # intMove in {-1, 0, 1}: attack left, forward, attack right.
                    for z in range(-1,2):
                        if(self.legalMove(t,z)):
                            arr.append((t,z))
        return arr
    def move(self,p,intMove):
        """ *** needed for search ***
        Create a new board state with the given move
        :param p: The pawn,of type pawn, to move
        :param intMove: What type of move it was
                0: move forward relative to player
                -1: attack left relative to player
                1: attack right relative to player
        :return: a ((pawn, intMove), Board) pair; the Board is a copy of the
                 current state with the move applied and the turn toggled
        """
        gs = self.gameState.copy() #copy Board
        gs[p.pos.get()] = EMPTY #put position it was at as empty
        gs[self.movePos(p,intMove).get()] = p.color #set new position as filled
        return ((p,intMove),Board(gs,self.togglePlayer(self.whoseTurn)))
    #alpha-beta
    #def
|
24,828 | 20e5905172223d989aa623d06bfcfdd360435cfe | import requests
import json
from requests.auth import HTTPBasicAuth
# OpenDaylight controller connection settings.
serverIP = '10.55.17.20'
port = '8080'
container = 'default'  # controller container name used in all REST paths
user = 'admin'
password = 'admin'
def find_subnet(subnets, subnetName):
    """Return the entry whose 'subnet' field equals subnetName, else None.

    :param subnets: iterable of dicts, each with a 'subnet' key (CIDR string).
    :param subnetName: the CIDR string to look for.

    The original body also rebuilt the "all subnets" URL here; that value was
    never used, so those dead statements were removed.
    """
    for subnet in subnets:
        if subnet['subnet'] == subnetName:
            return subnet
    return None
def add_subnet(name, subnet):
    """Create a subnet gateway on the controller via its REST API.

    :param name: gateway name used in the URL path.
    :param subnet: gateway address in CIDR form, e.g. '172.17.0.0/24'.
    :return: the JSON payload string on success, None on HTTP error.
    """
    # NOTE(review): controller address is hard-coded here instead of using the
    # serverIP/port module constants -- confirm and unify.
    url = 'http://10.55.17.20:8080/controller/nb/v2/subnet/' + container + '/subnet/' + name
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    payload = {
        "name" : name,
        "subnet" : subnet
    }
    try:
        r = requests.post(url, data=json.dumps(payload), headers = headers, auth=(user, password))
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        print e
        return None
    else:
        print "subnet added"
        return json.dumps(payload)
# Query the controller for all configured subnets, check whether the
# 10.2.2.254/24 gateway already exists, then add a test subnet.
allSubnets = '/controller/nb/v2/subnet/' + container + '/subnet/all'
url = 'http://' + serverIP + ':' + port + allSubnets
subnetquery = '10.2.2.254/24'
r = requests.get(url, auth=(user, password))
r.raise_for_status()
# 'subnetConfig' holds the list of configured subnet dicts.
result = find_subnet(r.json()['subnetConfig'], subnetquery)
print result
print "Adding Subnet"
add_subnet('test3', '172.17.0.0/24')
24,829 | 2bfd4a7d54685de3438ada325e1cb8a5757a2ca1 | # A. Вывести на экран эти цены через запятую в одну строку, цена должна отображаться в виде <r> руб <kk> коп
# (например «5 руб 04 коп»). Подумать, как из цены получить рубли и копейки, как добавить нули, если, например,
# получилось 7 копеек или 0 копеек (должно быть 07 коп или 00 коп).
print()
print('A.')
prices = [54.34, 43.06, 123.7, 2500, 2.55, 560.8, 783.44, 1145.99, 4208.21, 2224.5, 43.01, 12.12, 1478.23, 984.30]
new_prices = []
for price in prices:
price_split = str(price).split('.')
if len(price_split) == 1:
rub = price_split[0]
penny = 00
new_prices.append(f'{rub} руб {penny:02d} коп')
else:
rub = price_split[0]
penny = int(price_split[1])
new_prices.append(f'{rub} руб {penny:02d} коп')
print(", ".join(new_prices))
# B. Print the prices sorted ascending without creating a new list (and
# prove the list object is the same one after sorting).
print()
print('B.')
# id() before and after shows the in-place sort mutates the same object.
print(id(prices))
prices.sort()
print(prices)
print(id(prices))
# C. Build a new list containing the same prices sorted in descending order.
print()
print('C.')
# sorted(..., reverse=True) replaces the equivalent but roundabout
# list(reversed(sorted(...))) and produces the identical list.
print(sorted(prices, reverse=True))

# D. Print the prices of the five most expensive items.  (Ascending output
# with minimal code would simply be `sorted(prices)[-5:]`.)
print()
print('D.')
print(sorted(prices, reverse=True)[:5])
24,830 | 30aaaf163488084fa972438696ad76092ce02fd5 | import random
import os
def read_file():
    """Return the list of candidate words from ./files/data.txt (one per line,
    whitespace stripped)."""
    with open("./files/data.txt", 'r', encoding="utf-8") as source:
        return [entry.strip() for entry in source]
def get_word():
    """Pick one word at random from the data file."""
    return random.choice(read_file())
def remove_accents(word):
    """Replace the lowercase Spanish accented vowels with their unaccented
    equivalents (uppercase accented vowels are left unchanged, as before)."""
    return word.translate(str.maketrans("áéíóú", "aeiou"))
def main():
    """Run the hangman-style guessing loop until the word is fully revealed."""
    word = get_word()
    # Guesses are compared against a lowercased, accent-free version so that
    # typing 'e' also reveals 'é' in the original word.
    word_normalize = word.lower()
    word_normalize = remove_accents(word_normalize)
    guess = ["_"] * len(word)
    # Revealed characters are copied from `word`, so the joined guess
    # eventually equals the original word exactly.
    while "".join(guess) != word:
        #Clean the terminal
        # NOTE(review): "cls" is Windows-only; POSIX would need "clear".
        os.system("cls")
        # Print text
        print("¡Adivina la palabra!")
        print(f'{" ".join(guess).capitalize()}')
        # input
        letter = input("Ingresa una letra: ")
        letter = letter.lower()
        letter = remove_accents(letter)
        #Changing the correct letters
        if letter in word_normalize:
            # Every position where the normalized letter occurs.
            index = tuple(idx for idx, x in enumerate(word_normalize) if x==letter)
            for i in index:
                guess[i] = word[i]
    os.system("cls")
    #Win
    print(f"Ganaste la palabra era: {word.capitalize()}")
24,831 | 0e1ca7ff647c6d8531356827263baf38e51aaad1 | '''
Create the clustered genres similar groups and write them to a file
'''
import os
import sys
import re
import codecs
import json
from datetime import datetime, date, timedelta
import scipy.sparse as sp
import numpy as np
from multiprocessing import Pool
import similar_artists_api as sa
from multiprocessing.pool import ThreadPool
import concurrent.futures
import loggingmodule
from itertools import repeat
# Force UTF-8 as the process default codec (Python 2 only: reload() restores
# sys.setdefaultencoding, which site.py normally deletes).
reload(sys)
sys.setdefaultencoding('utf8')
# Module-level cache: matrix row index -> song metadata fields, populated by
# getmatrixdata_genres().
songs_map = {}
'''
Reads the genresmatrix json and creates the genres_dict
'''
def get_genres_matrix():
    """Load genresmatrix.json and return the inverted mapping.

    The file maps genre name -> index; the returned dict maps index -> name
    (i.e. every (key, value) pair is swapped).
    """
    with codecs.open('genresmatrix.json', 'r') as f:
        name_to_index = json.load(f)
    return {v: k for k, v in name_to_index.items()}
'''
split the sparse matrix into parts
'''
def split_sparse(mat, row_divs=None, col_divs=None):
    """Split a sparse matrix into a 2-D grid of sparse sub-matrices.

    :param mat: any scipy sparse matrix supporting 2-D slicing (csr/csc/...).
    :param row_divs: list of N row boundaries -> N+1 block-rows.
    :param col_divs: list of N column boundaries -> N+1 block-cols.
    :return: a (len(row_divs)+1) x (len(col_divs)+1) numpy object array whose
             [i, j] entry is the corresponding sub-matrix of `mat`.

    The mutable-list default arguments of the original were replaced with
    None, and the loop variable no longer shadows the `re` module import.
    """
    # Pad with None so the first/last slices are open-ended ([:d0] ... [dN:]).
    row_bounds = [None] + list(row_divs or []) + [None]
    col_bounds = [None] + list(col_divs or []) + [None]
    mat_of_mats = np.empty((len(row_bounds) - 1, len(col_bounds) - 1), dtype=type(mat))
    for i, (r0, r1) in enumerate(zip(row_bounds[:-1], row_bounds[1:])):
        for j, (c0, c1) in enumerate(zip(col_bounds[:-1], col_bounds[1:])):
            mat_of_mats[i, j] = mat[r0:r1, c0:c1]
    return mat_of_mats
'''
Read the songs to genres files and create the sparse matrix.
'''
def getmatrixdata_genres(filename,row_max_genres,col_max_genres):
t1=datetime.now()
fileopen = codecs.open(filename,"r","utf-8")
lines = fileopen.readlines()
lines = filter(lambda x: x.replace('\n','') != '',lines)
lines = map(lambda x:x.replace('\n',''),lines)
for line in lines:
if(line != ''):
#print line
words = line.split(':;')
curr_row = int(words[7])
curr_col = int(words[8])
if(curr_row not in songs_map):
songs_map[curr_row] = [words[0],words[1],words[2],words[3],words[4],words[5].split('-')[0],words[6]]
if(row_max_genres < curr_row):
row_max_genres = curr_row
column_list_genres.append(curr_col)
data_list_genres.append(1)
rows_list_genres.append(curr_row)
t3=datetime.now()
songsgenresmatrix = sp.coo_matrix((data_list_genres, (rows_list_genres, column_list_genres)), shape=(row_max_genres+1, col_max_genres+1))
print 'reading time'
print str(t3-t1)
return songsgenresmatrix
'''
Reads the songs to genres matrix and creates the combinedgenresmatrix which is read by the function generateCombinedMatrix.
'''
def getCombinedgenres():
try:
genres_dict = get_genres_matrix()
songs_list = {}
unique_genres = {}
fileopen = codecs.open('remapped_songs_file_big.txt',"r","utf-8")
lines = fileopen.readlines()
lines = filter(lambda x: x.replace('\n','') != '',lines)
lines = map(lambda x:x.replace('\n',''),lines)
remapped_lines = []
print 'remapping line'
t1=datetime.now()
for line in lines:
words = line.split(':;')
line = line.replace(':;'+words[8],':;'+str(genres_dict[words[6].lower()]))
if(words[1] not in songs_list):
songs_list[words[1]] = [words[6].lower()]
else:
songs_list[words[1]].append(words[6].lower())
remapped_lines.append(line)
fileopen.close()
t2=datetime.now()
print (t2-t1)
filewrite = codecs.open('remapped_artist_sample.txt',"w","utf-8")
for line in remapped_lines:
filewrite.write(line)
filewrite.write('\n')
print 'Counting starts'
combined_songs_list = {}
for song in songs_list:
current_genres = songs_list[song]
current_genres = sorted(current_genres)
combinedgenrestring = '@'.join(current_genres)
combinedgenrestring = combinedgenrestring.lower()
if(combinedgenrestring not in unique_genres):
unique_genres[combinedgenrestring] = [song]
else:
unique_genres[combinedgenrestring].append(song)
with codecs.open('combinedgenresmatrix.json', 'w') as f:
json.dump(unique_genres,f)
t3=datetime.now()
print (t3-t2)
except Exception as e:
logger_matrix.exception(e)
'''
Creates the similar matrix for genres.
'''
def cosine_similarity_genres(genresongsmatrix):
    """Return the genre x genre cosine-similarity matrix.

    Transposes the songs x genres matrix to genres x songs, L2-normalises
    each row, then multiplies by its transpose so entry (i, j) is the cosine
    similarity between genre i's and genre j's song vectors.
    """
    try:
        t3=datetime.now()
        G1 = genresongsmatrix.transpose().tocsr()
        # Squared L2 norm of each row (genre vector).
        row_sums_genres = ((G1.multiply(G1)).sum(axis=1))
        #print G1.todens()
        rows_sums_sqrt_genres = np.array(np.sqrt(row_sums_genres))[:,0]
        row_indices, col_indices = G1.nonzero()
        #print col_indices
        #print tempG1.data[2]
        # Normalise every stored value by its row's norm.
        G1.data = G1.data/rows_sums_sqrt_genres[row_indices]
        #print tempG1.todense()
        G2 = G1.transpose()
        cosinesimilaritygenre = G1*G2
        print cosinesimilaritygenre.shape
        return cosinesimilaritygenre
    except Exception as e:
        logger_matrix.exception(e)
        #logger_matrix.exception(e)
'''
Change the values of missing genres value into the sparse matrix in the current block
'''
def charePartialMatrix(curr_block):
    """Fill missing genre-similarity values for one block of rows.

    :param curr_block: tuple of (matrix block, "total" mask block, "diff"
        mask block, block index), each of the first three being a 1-element
        array holding a sparse matrix (as produced by split_sparse).
    :return: [block index, csr matrix with the filled-in values] so callers
        can reassemble blocks in order.

    For every row, each column flagged in the diff mask gets the average
    similarity (from the global `cosinesimilaritygenre`) of that column's
    genre to the row's other present genres.
    NOTE(review): appends coordinates directly to the COO arrays with
    np.append per value -- correct but O(n) per append; confirm acceptable.
    """
    try:
        global cosinesimilaritygenre
        curr_index = curr_block[3]
        d1 = curr_block[0][0]
        dd3 = curr_block[1][0]
        dd4 = curr_block[2][0]
        dd3_rows = dd3.shape[0]
        d1_coo_matrix = d1.tocoo()
        # presumably maps the block-local row back to a global row; unused below.
        current_stratindex = curr_index*1000
        for i in range(0,dd3_rows):
            totalindices = dd3.getrow(i).indices
            changeindices = dd4.getrow(i).indices
            '''if(i == 0):
                print d1.getrow(i)
                print d1.getrow(1)
                print totalindices
                print changeindices'''
            d6 = cosinesimilaritygenre[dd4.getrow(i).indices]
            #d5 = cosinesimilaritygenre[d3.getrow(1).indices]
            if(len(totalindices) == 1):
                continue
            # Mean similarity to the other genres (subtract 1 for self-similarity).
            d7 = (d6[:,totalindices].sum(axis=1) - 1)/(len(totalindices) - 1)
            count = len(changeindices)
            for j in range(0,count):
                val = d7[j,0]
                col = changeindices[j]
                '''if(col == 31):
                    print 'change'
                    print val'''
                #d1[i,col] = val
                d1_coo_matrix.row = np.append(d1_coo_matrix.row,i)
                d1_coo_matrix.col = np.append(d1_coo_matrix.col,col)
                d1_coo_matrix.data = np.append(d1_coo_matrix.data,val)
        d1_csr_matrix = d1_coo_matrix.tocsr()
        #print d1_csr_matrix.getrow(0)
        #print d1_coo_matrix.data
        #print 'done'
        return [curr_index,d1_csr_matrix]
    except Exception as e:
        logger_matrix.exception(e)
'''
Add the missing values for the current row
'''
def changematrix(songscombinedgenresmatrix,curr_combined_row,cr):
    """Fill missing genre values across the matrix, then return songs similar
    to row `cr` by cosine similarity (> 0.8).

    :param songscombinedgenresmatrix: sparse songs x genres matrix.
    :param curr_combined_row: 1-row sparse matrix for the query song.
    :param cr: row index of the query song within the matrix.
    :return: sparse 1 x n row of similarities thresholded at 0.8.

    The fill step is parallelised: the matrix is split into 1000-row blocks
    (split_sparse), each block is processed by charePartialMatrix in a thread
    pool, and the results are re-stacked in block order.
    """
    try:
        global cosinesimilaritygenre
        #global songscombinedgenresmatrix
        count = songscombinedgenresmatrix.shape[0]
        # Stack the query row `count` times to compare against every song.
        repeat_mat = [curr_combined_row[0],]*count
        repeat_mat_sp = sp.coo_matrix(repeat_mat)
        orig = songscombinedgenresmatrix.tocsr()
        d1_coo_matrix = songscombinedgenresmatrix.tocoo()
        d1 = songscombinedgenresmatrix.tocsr()
        d2 = repeat_mat_sp.tocsr()
        # d3: union mask of genres present in either row or query;
        # d4: genres in the query but missing from the row.
        d3 = d1 + d2
        d4 = d2 - d1
        d3.data = d3.data/d3.data
        dt4 = d4.tocoo()
        #print dt4.col
        count = len(dt4.row)
        print count
        # Keep only strictly-positive differences (query-only genres).
        for i in range(0,count):
            if(dt4.data[i] < 0):
                dt4.data[i] = 0
        dt4 = dt4.tocsr()
        dt4.eliminate_zeros()
        d3_rows = d3.shape[0]
        d3_cols = d3.shape[1]
        t3=datetime.now()
        split_indices = range(0,d3_rows,1000)
        blocks_matrix = split_sparse(d1,split_indices,[])
        blocks_total = split_sparse(d3,split_indices,[])
        blocks_diff = split_sparse(dt4,split_indices,[])
        args_list = zip(blocks_matrix,blocks_total,blocks_diff,range(0,len(blocks_total)))
        #print blocks_total[1][0]
        print 'start here'
        #ret = charePartialMatrix((blocks_matrix[1],blocks_total[1],blocks_diff[1],0))
        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
            ret = executor.map(charePartialMatrix,args_list)
        '''g =Pool(processes=int(5))
        ret = g.map(charePartialMatrix,zip(blocks_matrix,blocks_total,blocks_diff,range(0,len(blocks_total))))
        g.close()
        g.join()'''
        print 'done'
        # Reassemble the blocks in their original order.
        sorted_list = sorted(ret, key = lambda x: int(x[0]))
        temp_test = sorted_list[0][1]
        for i in sorted_list[1:]:
            temp_test = sp.vstack([temp_test,i[1]])
        #temp_test = ret[1]
        temp_test.eliminate_zeros()
        #print orig.getrow(0)
        t4=datetime.now()
        print str(t4-t3)
        print d1.shape
        d1.eliminate_zeros()
        print curr_combined_row.transpose().shape
        #d8 = d1*curr_combined_row.transpose()
        #cosine similarity
        row_sums = ((temp_test.multiply(temp_test)).sum(axis=1))
        #calculating the sqrt of the sums
        rows_sums_sqrt = np.array(np.sqrt(row_sums))[:,0]
        #divide and get the norms
        row_indices, col_indices = temp_test.nonzero()
        #rows_sums_sqrt
        temp_test.data = temp_test.data/rows_sums_sqrt[row_indices]
        tempA2 = temp_test.transpose().tocsc()
        print tempA2.shape
        curr_row = temp_test.getrow(cr)
        d8 = curr_row * tempA2
        # Threshold: keep only similarities above 0.8.
        d8.data *= d8.data>0.8
        d8.eliminate_zeros()
        #oldcosine similarity added fr testing
        '''
        row_sums = ((orig.multiply(orig)).sum(axis=1))
        #calculating the sqrt of the sums
        rows_sums_sqrt = np.array(np.sqrt(row_sums))[:,0]
        #divide and get the norms
        row_indices, col_indices = orig.nonzero()
        #rows_sums_sqrt
        orig.data = orig.data/rows_sums_sqrt[row_indices]
        tempA3 = orig.transpose().tocsc()
        curr_row = orig.getrow(cr)
        d9 = curr_row * tempA3
        d9.data *= d9.data>0.85
        d9.eliminate_zeros()
        '''
        return d8
    except Exception as e:
        logger_matrix.exception(e)
def similarsongsoriginal(tempA4):
    """Compute all-pairs cosine similarity for `tempA4` and write XMLs.

    L2-normalises every row, publishes the transposed matrix via the
    module-global `tempA2` (read by the `similarsongs` workers), then maps
    `similarsongs` over row blocks on a multiprocessing Pool.
    Exceptions are logged and swallowed.
    """
    try:
        global tempA2
        tempA1 = tempA4.tocsr()
        # Row-wise L2 norms, then scale every stored value by its row norm.
        row_sums = ((tempA1.multiply(tempA1)).sum(axis=1))
        #calculating the sqrt of the sums
        rows_sums_sqrt = np.array(np.sqrt(row_sums))[:,0]
        #divide and get the norms
        row_indices, col_indices = tempA1.nonzero()
        tempA1.data = tempA1.data/rows_sums_sqrt[row_indices]
        row_max = tempA1.shape[0]
        tempA2 = tempA1.transpose().tocsc()
        #change this to correct sparse matrix manipulations
        #break the matrix into peices
        #if(tempA1.shape[0]<100):
        # block_indices = range(1,tempA1.shape[0])
        #else:
        # First block covers rows 0..99, the rest 300 rows each.
        block_indices = range(100,tempA1.shape[0],300)
        #print block_indices
        #logger_matrix.exception(' '.join(str(block_indices)))
        #function returns the blocks of the main matrix
        split_mat = split_sparse(tempA1,block_indices,[])
        index = 0
        block_indices = [0]+ block_indices + [row_max]
        #foreach block returned calculate the cosine similarity
        print row_max
        #similarsongs((split_mat[5],block_indices,5))
        #similarsongs((split_mat[6],block_indices,6))
        p =Pool(processes=int(25))
        p.map(similarsongs,zip(split_mat,repeat(block_indices),range(0,len(block_indices))),100)
        p.close()
        p.join()
    except Exception as e:
        logger_matrix.exception(e)
def similarsongs((split,block_indices,index)):
    """Worker: similarity for one row block, then write one XML per row.

    NOTE: uses Python-2-only tuple parameter unpacking in the signature.
    `split[0]` is a normalised row block; multiplying by the global
    normalised transpose `tempA2` yields cosine similarities.  Rows with
    similarity >= 0.89 are kept and written via writeClusteredGenresxmls.
    """
    try:
        #multiplying in blocks of matrix
        t1=datetime.now()
        cosinesimilaritysong = split[0]*tempA2
        row_indices = np.split(cosinesimilaritysong.indices, cosinesimilaritysong.indptr[1:-1])
        #logger_matrix.exception('writing the artists files')
        # Pair each global row id in this block with its local index.
        indices = zip(range(block_indices[index],block_indices[index+1]),range(block_indices[index+1]-block_indices[index]))
        #print cosinesimilarityartist
        t2=datetime.now()
        print 'multiplication time ' + str(t2 - t1)
        #print indices
        #print split[0].todense()
        #songname,youtubeId,artistId,artistName,popularity,year,genre
        curr_xmls = {}
        for (song_index,sim_mat_index) in indices:
            #print song_index,sim_mat_index
            simi_genre = cosinesimilaritysong.getrow(sim_mat_index)
            # Zero out similarities below the 0.89 cut-off.
            simi_genre.data *= simi_genre.data>=0.89
            cr = song_index
            writeClusteredGenresxmls(simi_genre,cr,combinedgenresdictrev)
        t3 = datetime.now()
        print 'writing time '+ str(t3-t2)
    except Exception as e:
        logger_matrix.exception(e)
def generateCombinedMatrix(songsgenresmatrix,changeMatrix):
    """Build the combined-genre-group x genre matrix and run the pipeline.

    Loads group definitions from combinedgenresmatrix.json (keys are
    '@'-joined genre names), assigns each group an integer id (kept in the
    global combinedgenresdictrev), builds a sparse 0/1 membership matrix,
    and then either regenerates rows via changeandwritexmls (changeMatrix
    == 1) or computes all-pairs similarity via similarsongsoriginal.
    Exceptions are logged and swallowed.
    """
    try:
        global cosinesimilaritygenre
        global songscombinedgenresmatrix
        global combinedgenresdictrev
        column_list_combinedgenres = []
        rows_list_combinedgenres = []
        data_list_combinedgenres = []
        t1=datetime.now()
        with codecs.open('combinedgenresmatrix.json','r') as f:
            combinedgenresdict = json.load(f)
        print len(combinedgenresdict)
        genres_dict = get_genres_matrix();
        count = 0
        #adding the ids for the groups so that we can write it later
        for cgd in combinedgenresdict:
            curr_songs = combinedgenresdict[cgd]
            combinedgenresdict[cgd] = [curr_songs,count]
            combinedgenresdictrev[count] = cgd
            count = count + 1
        curr_list = [v[1] for v in combinedgenresdict.values()]
        row_max =0
        col_max = 0
        # One (group_id, genre_col, 1) triple per genre in each group key.
        for cgd in combinedgenresdict:
            curr_genres = set(cgd.split('@'))
            curr_row = int(combinedgenresdict[cgd][1])
            if(row_max < curr_row):
                row_max = curr_row
            for curr_gen in curr_genres:
                curr_col = int(genres_dict[curr_gen])
                if(col_max < curr_col):
                    col_max = curr_col
                if(curr_row==0):
                    print curr_col
                column_list_combinedgenres.append(curr_col)
                rows_list_combinedgenres.append(curr_row)
                data_list_combinedgenres.append(1)
        row_comgenres = np.array(rows_list_combinedgenres)
        col_comgenres = np.array(column_list_combinedgenres)
        data_comgenres = np.array(data_list_combinedgenres)
        #row_max_comgenres = len(rows_list_combinedgenres)
        #col_max_comgenres = len(column_list_combinedgenres)
        #print col_max_comgenres
        #print row_max_comgenres
        songscombinedgenresmatrix = sp.coo_matrix((data_comgenres, (row_comgenres, col_comgenres)), shape=(row_max+1, col_max+1))
        print songscombinedgenresmatrix.shape
        t2=datetime.now()
        print 'matrix created'
        print str(t2-t1)
        #cosinesimilaritygenre = cosine_similarity_genres(songscombinedgenresmatrix)
        # Genre-to-genre similarity used by the fill step in changematrix.
        cosinesimilaritygenre = cosine_similarity_genres(songsgenresmatrix)
        t3=datetime.now()
        print 'genres matrix created'
        print str(t3-t2)
        if(changeMatrix == 1):
            # Only the first 100 groups are regenerated here.
            cr = range(0,100)
            #changeandwritexmls(5)
            gh =Pool(processes=int(100))
            ret = gh.map(changeandwritexmls,cr)
            gh.close()
            gh.join()
        else:
            similarsongsoriginal(songscombinedgenresmatrix)
        '''curr_comnined_row = songscombinedgenresmatrix.tocsr().getrow(cr).toarray()
        #zip(repeat(combinedgenresdictrev),range(0,len(1000)))
        simi_genre = changematrix(songscombinedgenresmatrix,curr_comnined_row,cr)
        writeClusteredGenresxmls(simi_genre,cr,combinedgenresdictrev) '''
        '''for (ind,data) in zip(simi_genre.indices,simi_genre.data):
            print ind
            print data
            #print data1
            print combinedgenresdictrev[ind]
        print len(simi_genre.data)
        for (ind,data) in zip(simi_genre_old.indices,simi_genre_old.data):
            print ind
            print data
            print combinedgenresdictrev[ind]
        print len(simi_genre_old.data)'''
        #print (curr_list)
    except Exception as e:
        logger_matrix.exception(e)
def changeandwritexmls(cr):
    """Pool worker: recompute similarity row `cr` and write its XML.

    Reads the module-global group matrix, runs changematrix for the row,
    and emits the result via writeClusteredGenresxmls.  Exceptions are
    logged and swallowed so one bad row does not kill the pool.
    """
    global songscombinedgenresmatrix
    global combinedgenresdictrev
    try:
        curr_comnined_row = songscombinedgenresmatrix.tocsr().getrow(cr).toarray()
        #zip(repeat(combinedgenresdictrev),range(0,len(1000)))
        simi_genre = changematrix(songscombinedgenresmatrix,curr_comnined_row,cr)
        writeClusteredGenresxmls(simi_genre,cr,combinedgenresdictrev)
    except Exception as e:
        logger_matrix.exception(e)
'''
Writes the xmls for the current combined genre
'''
def writeClusteredGenresxmls(curr_row,cr,combinedgenresdictrev):
    """Write simcombinedgenredirnew/<cr>.xml with the top-100 similar groups.

    `curr_row` is a sparse similarity row (already thresholded); entries
    are sorted by similarity descending and serialised through the
    generated `sa.artist` / `sa.similarArtists` classes.  Exceptions are
    logged and swallowed.
    """
    try:
        curr_genreName = combinedgenresdictrev[cr]
        curr_genre_id = cr
        #curr_artist_popularity = int(artists_map[i][1])
        #curr_artist_year = int(artists_map[i][2])
        fname = 'simcombinedgenredirnew/' + str(curr_genre_id)+'.xml'
        fx = codecs.open(fname,"w","utf-8")
        fx.write('<?xml version="1.0" ?>\n')
        curr_genre = sa.artist()
        curr_genre.set_artistName(curr_genreName)
        curr_genre.set_artistId(curr_genre_id)
        #change this to correct sparse matrix manipulations
        # Highest-similarity first; keep at most 100 neighbours.
        tuples = zip(curr_row.indices,curr_row.data)
        sorted_g = sorted(tuples, key=lambda score: score[1], reverse=True)
        sorted_g = sorted_g[0:100]
        for pair in sorted_g:
            j = pair[0]
            curr_similar_genre = combinedgenresdictrev[j]
            curr_similar_artist_id = j
            similar_genre = sa.similarArtists()
            similar_genre.set_artistName(curr_similar_genre)
            similar_genre.set_artistId(curr_similar_artist_id)
            similar_genre.set_cosineDistance(pair[1])
            curr_genre.add_similarArtists(similar_genre)
        curr_genre.export(fx,0)
        fx.close()
    except Exception as e:
        logger_matrix.exception(e)
def createDirectory(directoryName):
    """Create `directoryName` if it does not already exist.

    Uses EAFP (try the mkdir, tolerate "already exists") instead of the
    original exists()-then-mkdir check, which races when several worker
    processes start up at once — this script runs multiprocessing Pools.
    Any other failure (e.g. permissions) is re-raised as before.
    """
    try:
        os.mkdir(directoryName)
    except OSError:
        # Only swallow the error when the directory actually exists now.
        if not os.path.isdir(directoryName):
            raise
if __name__ == '__main__':
    # Script entry point (Python 2: print statements, raw_input).
    logger_matrix = loggingmodule.initialize_logger('clusteredgenres.log')
    t1=datetime.now()
    column_list_genres = []
    rows_list_genres = []
    data_list_genres = []
    combinedgenresdictrev = {}
    # Genre column count is fixed by the genre vocabulary used upstream.
    col_max_genres = 432
    row_max_genres =0
    createDirectory('simcombinedgenredirnew')
    cosinesimilaritygenre = []
    songscombinedgenresmatrix = []
    #remapped_artist_file_newtest
    #songsgenresmatrix = getmatrixdata_genres('remapped_artist_sample.txt',row_max_genres,col_max_genres)
    #print songsgenresmatrix.shape
    songsgenresmatrix = getmatrixdata_genres('remapped_artist_file_newtest.txt',row_max_genres,col_max_genres)
    # 1 -> regenerate rows via changeandwritexmls; anything else -> full run.
    changeMatrix = int(raw_input('Do you want to change the matrix?'))
    generateCombinedMatrix(songsgenresmatrix,changeMatrix)
    t2=datetime.now()
    print 'completed'
    print str(t2-t1)
|
24,832 | 9a97419c07024effd4b91bae57bc3b27232d8035 | from collections import deque
from blessings import Terminal
from functools import partial
import time, random, socket
import keyboard
import threading
# Best-effort debug channel: try to reach a local listener on port 3000;
# if nothing is listening, the game still runs (debug() would then fail,
# but it is only called from commented-out code).
s = socket.socket()
host = socket.gethostname()
port = 3000
try:
    s.connect((host, port))
except:
    pass
def debug(message):
    """Send `message` (UTF-8 encoded) over the module-level debug socket.

    Raises if the earlier connect() failed — callers use this only while
    debugging with a listener running on port 3000.
    """
    global s
    s.send(message.encode('utf-8'))
# Shadow the builtin print so every call flushes immediately with no
# trailing newline — required for cursor-positioned terminal drawing.
print = partial(print, end="", flush=True)
t = Terminal()
# Top-left offset of the playing field on screen.
x_shift = 10
y_shift = 10
score = 0
bait_pos = None
# The snake body, tail -> head; start with a 4-cell horizontal snake.
snake_cords = deque()
snake_cords.extend([
    (x_shift+7, t.height - y_shift + 4),
    (x_shift+8, t.height - y_shift + 4),
    (x_shift+9, t.height - y_shift + 4),
    (x_shift+10, t.height - y_shift + 4)
])
# snake_set holds the body minus the head (pop head, snapshot, push back)
# so self-collision tests ignore the head cell itself.
l = snake_cords.pop()
snake_set = set(snake_cords)
snake_cords.append(l)
heading = 'E' #E W N S
def draw_boundary():
    """Draw the box-drawing frame (18 wide, 10 tall) around the field."""
    horizontal = '═' * 16
    with t.location(x_shift, t.height - y_shift):
        print('╔' + horizontal + '╗')
    with t.location(x_shift, t.height):
        print('╚' + horizontal + '╝')
    # Left and right walls, one cell per interior row.
    for row in range(2, 10):
        with t.location(x_shift, t.height - row):
            print('║')
        with t.location(x_shift + 17, t.height - row):
            print('║')
def print_score():
    """Render the score banner centred above the playing field."""
    # Local `s` shadows the module-level debug socket — harmless here.
    s = f'▒ score: {score} ▒'
    with t.location(x_shift + (9 - len(s)//2), t.height-y_shift-2):
        print(t.black_on_yellow(s))
def get_next_cords(x, y):
    """Return the cell one step ahead of (x, y) along the global `heading`.

    Screen coordinates: N decreases y, S increases it.  Any heading other
    than E/W/N falls through to S, matching the original else branch.
    """
    step = {'E': (1, 0), 'W': (-1, 0), 'N': (0, -1)}
    dx, dy = step.get(heading, (0, 1))
    return (x + dx, y + dy)
def move_and_draw_snake():
    """Grow if the head is on the bait, draw the body, advance one step."""
    global snake_set, bait_pos, score
    if(snake_cords[-1] == bait_pos):
        # Eating: duplicate the head cell so the next step nets +1 length.
        snake_cords.append(bait_pos)
        bait_pos = None
        score += 100
    for x,y in snake_cords:
        with t.location(x,y):
            print('●')
    # Advance: push the new head, drop the tail.
    next_cor = get_next_cords(*snake_cords[-1])
    snake_cords.append(next_cor)
    snake_cords.popleft()
    # Rebuild the body set excluding the head (for self-collision tests).
    l = snake_cords.pop()
    snake_set = set(snake_cords)
    snake_cords.append(l)
def clear_screen():
    """Clear from the cursor to the end of the screen."""
    # end/flush are redundant (print is already a flushing partial) but kept.
    print(t.clear_eos, end="", flush=True)
def check_for_collision(cord, snake_himself = True):
    """Return True if `cord` hits the snake body or lies outside the field.

    `snake_himself` is kept for interface compatibility only: in the
    original both branches of the flag returned True when the cell was in
    `snake_set`, so the self-collision test (game over) and the bait
    placement test behave identically.
    """
    x, y = cord
    if cord in snake_set:
        return True
    # Field interior: x in (x_shift, x_shift+17), y in
    # (t.height - y_shift, t.height - 1), both exclusive.
    inside = (x_shift < x < x_shift + 17) and (t.height - y_shift < y < t.height - 1)
    return not inside
def randim_bait_pos():
    """Pick a random free cell for the bait and return it.

    Valid interior cells are x in [x_shift+1, x_shift+16] and y in
    [t.height-y_shift+1, t.height-2]; redraw until the cell is free.
    Fixes two sampling bugs in the original: the first draw used
    x_shift+15 (silently excluding the rightmost column on the initial
    attempt) and the y upper bound was t.height-1, a wall row that always
    failed the collision check and forced a redraw.
    """
    while True:
        x_rand = random.randint(x_shift + 1, x_shift + 16)
        y_rand = random.randint(t.height - y_shift + 1, t.height - 2)
        if not check_for_collision((x_rand, y_rand), False):
            return (x_rand, y_rand)
def main_loop():
    """Game loop: redraw, advance the snake, place bait; exit on collision."""
    global bait_pos
    while(1):
        if(check_for_collision(snake_cords[-1])):
            # Game over: draw the body blinking, then stop the loop.
            print(t.blink)
            for x,y in snake_cords:
                with t.location(x,y):
                    print('●')
            print(t.normal)
            break
        clear_screen()
        draw_boundary()
        print_score()
        move_and_draw_snake()
        if(not bait_pos):
            bait_pos = randim_bait_pos()
        with t.location(*bait_pos):
            print('◌')
        # Restore the cursor to its saved position.
        with t.location():
            print()
        # Frame delay — controls the snake's speed.
        time.sleep(0.3)
def handle_user_control():
    """Poll the arrow keys and update `heading`, forbidding 180° reversals.

    A vertical turn (N/S) is only accepted while moving horizontally, and
    vice versa, so the snake can never reverse into itself.  Polls every
    5 ms on a dedicated thread.
    """
    global heading
    while(1):
        if(keyboard.is_pressed('up') and (heading != 'N' and heading != 'S')):
            heading = 'N'
        elif(keyboard.is_pressed('down') and (heading != 'N' and heading != 'S')):
            heading = 'S'
        elif(keyboard.is_pressed('left') and (heading != 'E' and heading != 'W')):
            heading = 'W'
        elif(keyboard.is_pressed('right') and (heading != 'E' and heading != 'W')):
            heading = 'E'
        time.sleep(0.005)
# Run the render loop and the input poller concurrently.  NOTE(review):
# neither thread is a daemon, so the process only exits when both return
# (handle_user_control never does — confirm this is intended).
threading.Thread(target=main_loop).start()
threading.Thread(target=handle_user_control).start()
24,833 | 5e177bcb2faa858e3a2a97040a4b46f6ca0da401 | #3) Faça um programa que coloque dois nomes em ordem alfabética.
nome1 = input ("Digite um nome : ")
nome2 = input ("Digite outro nome : ")
if nome1 > nome2:
print("O nome que vem primeiro é o nome do(a) : ", nome2)
print ("O nome que vem depois é o nome do(a) : ", nome1)
else:
print("O nome que vem primeiro é o nome do(a) : ", nome1)
print("O nome que vem depois é o nome do(a) : ", nome2)
|
24,834 | 10683765f79bf44abbd6cb832306a8f0373caa2a | #!/usr/bin/env python
import sys, json
import pickle
# Per-image sigma values precomputed offline; keys are '<name>.png'.
with open('sigma.pickle', 'rb') as handle:
    sigma_dict = pickle.load(handle)
sigma_low = sigma_dict[min(sigma_dict, key=sigma_dict.get)]
sigma_highest = sigma_dict[max(sigma_dict, key=sigma_dict.get)]
# NOTE(review): gamma is never read below; sigma_highest is divided by the
# literal 9 instead — confirm whether they were meant to be linked.
gamma = 9
# determined by a separate experiment
thresh = 80.0 # initially, 53.5
sigma_highest /= 9
# empirically chosen
scale_factor = 0.6
# Linear map of a sigma value into [scale_factor, 1] over [sigma_low, thresh].
sigma_fun = lambda sigma_val: ((((1 - scale_factor) / (thresh - sigma_low)) * (sigma_val - sigma_low) + scale_factor))
import time
current_milli_time = lambda: str(round(time.time() * 1000))
# Load the data that PHP sent us (JSON on argv[1]); key "0" carries
# [versions, visitor_no], the remaining keys are image names.
imgs_selected = json.loads(sys.argv[1])
versions = imgs_selected["0"][0]
visitor_no = imgs_selected["0"][1]
landmarks = 1
import numpy as np
import os
import cv2
from PIL import Image
def noisy(image_mask, img_name):
    """Return Gaussian noise shaped like `image_mask`, masked to its region.

    The noise std is this image's sigma mapped through sigma_fun (scaled
    by the global sigma_highest) when below `thresh`, else capped at
    sigma_highest.  The mask (normalised to [0, 1]) gates where noise is
    applied.
    """
    mean = 0
    sigma_val = sigma_dict[img_name + '.png']
    if (sigma_val < thresh):
        sigma = sigma_fun(sigma_val) * sigma_highest
    else:
        sigma = sigma_highest
    gauss = np.random.normal(mean, sigma, image_mask.shape)
    noise = gauss * image_mask/np.max(image_mask)
    return noise
def landmark_decoder(num):
    """Translate a landmark code into its mask-name suffix.

    Only code 1 ("all") is active; the earlier per-feature codes (chin,
    eyebrows, eyes, jaw, mouth, nose) were retired.  Any other code
    yields None, as before.
    """
    return "all" if num == 1 else None
def rand_dir_name(img_name):
    """Build a unique output directory name: <img>_<ms-timestamp>_<visitor>."""
    parts = [img_name, current_milli_time(), str(visitor_no)]
    return '_'.join(parts)
if __name__ == "__main__":
random_img_locations = {}
for k, val in imgs_selected.iteritems():
k = int(k)
# Need to skip the first iteration
if(k==0):
continue
random_img_locations[k] = {}
with open("test.txt", "w") as fp:
fp.write(val)
basepath1 = 'images_ds/' + val + '.png'
new_rand_dir_name = rand_dir_name(val)
os.mkdir('images_rand/' + new_rand_dir_name)
basepath2 = 'images_rand/' + new_rand_dir_name + '/' + val + '-'
basepath3 = 'images_mask_ds/' + val + '_'
img = np.asarray(cv2.imread(basepath1, cv2.IMREAD_GRAYSCALE), dtype=np.float)
img = np.asarray(Image.open(basepath1).convert('L'), dtype=np.float)
for i in range(1,versions+1):
random_img_locations[k][i] = {}
for j in range(1,landmarks+1):
img_mask = np.asarray(Image.open(basepath3 + landmark_decoder(j) + '.png').convert('L'), dtype=np.float)
# img_mask = np.asarray(cv2.imread(basepath3 + landmark_decoder(j) + '.png', cv2.IMREAD_GRAYSCALE), dtype=np.float)
noise = noisy(img_mask, val)
nois_img_a = img + noise
path_a = basepath2 + str(j) + 'a' + str(i)
cv2.imwrite(path_a + '.png', nois_img_a)
np.save(path_a + '_noise', noise)
nois_img_b = img - noise
path_b = basepath2 + str(j) + 'b' + str(i)
cv2.imwrite(path_b + '.png', nois_img_b)
random_img_locations[k][i][j] = [path_a + '.png', path_b + '.png']
# with open("test.txt", "w") as fp:
# fp.write("hello gau!")
print json.dumps(random_img_locations)
|
24,835 | 8adceddcd2bbab75356f4cd6201510484d790e06 | import sys
from PyQt5.QtWidgets import QApplication,QDialog,QMessageBox, QTableWidgetItem
from PyQt5 import uic
from form_clientes_nuevo import Ui_Form_clientes_nuevo
from N_cliente import N_datos_personales_cliente, N_party_address, N_party_otros, N_datos_laborales, N_party_garante,N_party_cliente, N_party_contacto
from N_creditos import N_creditos
from PyQt5.QtCore import pyqtRemoveInputHook
class Cliente_nuevo(QDialog):
    """Dialog for registering a new client with contacts, address, job
    data, misc data and a list of guarantors.

    Fixes over the original:
    * buscar_garante: `self.obj_form` typo (nonexistent attribute) made
      search-by-client-number always fall into the error branch.
    * agregar: `except exception` referenced an undefined name, so a bad
      document number crashed with NameError instead of showing the dialog.
    * limpiar_nuevo: removing items while iterating the list skipped every
      other guarantor.
    * Bare excepts around int()/float() conversions narrowed to ValueError.
    """
    # NOTE(review): these are class attributes, shared across instances
    # (list_garante in particular would persist between dialogs).
    obj_form_cliente = Ui_Form_clientes_nuevo()
    id_usu = 1
    id_party = ""
    list_garante = list()
    nro_cliente = ""

    def __init__(self):
        """Set up the Designer UI and wire the button handlers."""
        QDialog.__init__(self)
        self.obj_form_cliente = Ui_Form_clientes_nuevo()
        self.obj_form_cliente.setupUi(self)
        self.obj_form_cliente.boton_guardar_nuevo.clicked.connect(self.guardar)
        self.obj_form_cliente.boton_agregar_nuevo.clicked.connect(self.agregar)
        self.obj_form_cliente.boton_buscar_garante_nuevo.clicked.connect(self.buscar_garante)
        self.obj_form_cliente.boton_limpiar_nuevo.clicked.connect(self.limpiar_nuevo)

    def limpiar_nuevo(self):
        """Reset checkboxes, the guarantor grid and the pending guarantor list."""
        self.obj_form_cliente.ckbx_facturas.setChecked(False)
        self.obj_form_cliente.ckbx_veraz.setChecked(False)
        self.obj_form_cliente.ckbx_jub_pens.setChecked(False)
        self.obj_form_cliente.ckbx_recibo_sueldo_nuevo.setChecked(False)
        while (self.obj_form_cliente.tw_garantes_lista.rowCount() > 0):
            self.obj_form_cliente.tw_garantes_lista.removeRow(0)
        # Fix: the original removed items while iterating, leaving every
        # other guarantor behind.  Clear in place (the list is shared).
        del self.list_garante[:]

    def buscar_garante(self):
        """Find an enabled guarantor by document number (or, if empty, by
        client number) and populate the guarantor fields."""
        number = self.obj_form_cliente.lne_garante_nro_doc_nuevo.text()
        obj_N_datos_garante = N_datos_personales_cliente()
        if number != "":
            try:
                numero_documento_garante = int(number)
            except ValueError:
                msgBox = QMessageBox()
                msgBox.setWindowTitle("Atencion")
                msgBox.setText('Ingresar nuevamente el numero de documento sin espacios y sin puntos')
                msgBox.exec_()
                return False
            obj_datos_garante = obj_N_datos_garante.get_garante_habilitado_buscar(str(numero_documento_garante))
            if obj_datos_garante != None:
                self.obj_form_cliente.lne_garante_apellido.setText(obj_datos_garante.apellido)
                self.obj_form_cliente.lne_garante_nombre.setText(obj_datos_garante.nombre)
                self.obj_form_cliente.lne_garante_estado.setText(obj_datos_garante.estado)
                obj_N_party_cliente = N_party_cliente(obj_datos_garante.id_party)
                nro_cliente_garante = obj_N_party_cliente.get_nro_cliente(obj_datos_garante.id_party)
                self.obj_form_cliente.lne_garante_nro_cliente_nuevo.setText(str(nro_cliente_garante))
            else:
                msgBox = QMessageBox()
                msgBox.setText('Numero de documento inexistente')
                msgBox.exec_()
        else:
            if self.obj_form_cliente.lne_garante_nro_cliente_nuevo.text() != "":
                try:
                    # Fix: the original read self.obj_form (nonexistent), so
                    # this path always showed the error dialog.
                    garante_nro_cliente_nuevo = int(self.obj_form_cliente.lne_garante_nro_cliente_nuevo.text())
                    obj_datos_garante = obj_N_datos_garante.get_garante_habilitado_buscar_nrocliente(garante_nro_cliente_nuevo)
                    self.obj_form_cliente.lne_garante_apellido.setText(obj_datos_garante.apellido)
                    self.obj_form_cliente.lne_garante_nombre.setText(obj_datos_garante.nombre)
                    self.obj_form_cliente.lne_garante_estado.setText(obj_datos_garante.estado)
                    self.obj_form_cliente.lne_garante_nro_doc_nuevo.setText(obj_datos_garante.num_doc)
                except Exception:
                    # Broad on purpose: covers bad number and failed lookup.
                    msgBox = QMessageBox()
                    msgBox.setText('Verificar Numero de cliente garante sin espacios y sin puntos ')
                    msgBox.exec_()
                    return False

    def guardar(self):
        """Persist the new client: personal data, then (on success)
        observations, contacts, address, job data, misc data and the
        pending guarantor list.  Shows a message box with the outcome."""
        apellido = self.obj_form_cliente.lne_apellido.text()
        nombre = self.obj_form_cliente.lne_nombre_nuevo.text()
        nro_dni = self.obj_form_cliente.lne_nro_doc.text()
        if (apellido != "") and (nombre != "") and (nro_dni != ""):
            numero_documento_cliente = self.obj_form_cliente.lne_nro_doc.text()
            try:
                numero_documento = int(numero_documento_cliente)
            except ValueError:
                msgBox = QMessageBox()
                msgBox.setWindowTitle("Atencion")
                msgBox.setText('Ingresar nuevamente el numero de documento sin espacios y sin puntos')
                msgBox.exec_()
                return False
            # Personal data.
            obj_N_datos_personales_cliente = N_datos_personales_cliente()
            obj_N_datos_personales_cliente.nombre = nombre.upper()
            obj_N_datos_personales_cliente.apellido = self.obj_form_cliente.lne_apellido.text().upper()
            obj_N_datos_personales_cliente.fec_nac = self.obj_form_cliente.dte_nacimiento.text()
            obj_N_datos_personales_cliente.tipo_doc = self.obj_form_cliente.cbx_tipo_doc.currentText()
            obj_N_datos_personales_cliente.nro_doc = numero_documento
            obj_N_datos_personales_cliente.estado_civil = self.obj_form_cliente.cbx_estado_civil.currentText()
            if self.obj_form_cliente.lne_limite_credito.text() != "":
                try:
                    limite_credito = float(self.obj_form_cliente.lne_limite_credito.text())
                    obj_N_datos_personales_cliente.limite_credito = limite_credito
                except ValueError:
                    # Unparseable credit limit silently becomes 0 (as before).
                    obj_N_datos_personales_cliente.limite_credito = 0
            obj_N_datos_personales_cliente.estado = self.obj_form_cliente.cbx_estado.currentText()
            obj_N_datos_personales_cliente.id = self.id_usu
            self.id_party = N_datos_personales_cliente().guardar(obj_N_datos_personales_cliente)
            # NOTE(review): the N layer signals a duplicate document with the
            # string "False" — confirm against N_datos_personales_cliente.
            if self.id_party != "False":
                obj_party_cliente = N_party_cliente(self.id_party)
                obj_party_cliente.guardar_N_party_cliente(self.obj_form_cliente.txte_observaciones.toPlainText(), self.id_party)
                self.nro_cliente = obj_party_cliente.get_nro_cliente(self.id_party)
                self.obj_form_cliente.lne_nro_cliente.setText(str(self.nro_cliente))
                # Contacts: phone and email.
                obj_party_contacto3 = N_party_contacto(1)
                obj_party_contacto3.type_contacto = "Telefono"
                obj_party_contacto3.value = self.obj_form_cliente.lne_telefono.text()
                obj_party_contacto_email = N_party_contacto(1)
                obj_party_contacto_email.type_contacto = "Email"
                obj_party_contacto_email.value = self.obj_form_cliente.lne_email.text()
                obj_N_party_contacto = N_party_contacto(1)
                obj_N_party_contacto.guardar(obj_party_contacto3, self.id_party)
                obj_N_party_contacto.guardar(obj_party_contacto_email, self.id_party)
                # Address.
                ciudad = self.obj_form_cliente.lne_barrio.text()
                obj_N_party_address = N_party_address(ciudad)
                obj_N_party_address.domicilio = self.obj_form_cliente.lne_domicilio.text()
                obj_N_party_address.barrio = self.obj_form_cliente.lne_barrio.text()
                obj_N_party_address.ciudad = self.obj_form_cliente.cbx_ciudad.currentText()
                obj_party_address = N_party_address(ciudad)
                obj_party_address.guardar(obj_N_party_address, self.id_party)
                # Job data.
                obj_N_datos_laborales = N_datos_laborales()
                if self.obj_form_cliente.lne_sueldo.text() != "":
                    try:
                        sueldo = float(self.obj_form_cliente.lne_sueldo.text())
                        obj_N_datos_laborales.sueldo = sueldo
                    except ValueError:
                        obj_N_datos_laborales.sueldo = 0
                obj_N_datos_laborales.anti_laboral = self.obj_form_cliente.lne_antiguedad.text()
                obj_N_datos_laborales.tel_laboral = self.obj_form_cliente.lne_telefono_laboral.text()
                obj_N_datos_laborales.dom_laboral = self.obj_form_cliente.lne_domicilio_laboral.text()
                obj_N_datos_laborales.organismo = self.obj_form_cliente.lne_organismo.text()
                obj_N_datos_laborales.ocupacion = self.obj_form_cliente.lne_ocupacion.text()
                obj_N_datos_laborales.categoria = self.obj_form_cliente.lne_categoria.text()
                obj_N_datos_laborales.posee_recibo_sueldo = self.obj_form_cliente.ckbx_recibo_sueldo_nuevo.isChecked()
                obj_datos_laborales = N_datos_laborales()
                obj_datos_laborales.guardar(obj_N_datos_laborales, self.id_party)
                # Misc data (IVA, CUIT, CBU, benefit number, flags).
                cuit = self.obj_form_cliente.lne_cuit.text()
                obj_N_party_otros = N_party_otros(cuit)
                obj_N_party_otros.tipo_iva = self.obj_form_cliente.cbx_tipo_iva.currentText()
                obj_N_party_otros.cuit = self.obj_form_cliente.lne_cuit.text()
                obj_N_party_otros.cbu = self.obj_form_cliente.lne_cbu.text()
                obj_N_party_otros.num_beneficio = self.obj_form_cliente.lne_nro_beneficio.text()
                if self.obj_form_cliente.ckbx_facturas.isChecked():
                    obj_N_party_otros.presento_factura = True
                if self.obj_form_cliente.ckbx_veraz.isChecked():
                    obj_N_party_otros.figura_veraz = True
                if self.obj_form_cliente.ckbx_jub_pens.isChecked():
                    obj_N_party_otros.es_jubilado_pensionado = True
                obj_party_otros = N_party_otros(cuit)
                obj_party_otros.guardar(obj_N_party_otros, self.id_party)
                # Guarantors accumulated by agregar().
                obj_party_garante2 = N_party_garante("A")
                obj_party_garante2.guardar(self.list_garante, self.nro_cliente)
                self.obj_form_cliente.lne_nro_cliente.setText(str(self.nro_cliente))
                msgBox = QMessageBox()
                msgBox.setWindowTitle("Aviso")
                msgBox.setText("Cliente se guardo correctamente. Nro. " + str(self.nro_cliente))
                msgBox.exec_()
            else:
                msgBox = QMessageBox()
                msgBox.setWindowTitle("Error")
                msgBox.setText("No se pudo grabar: Numero de documento duplicado, actualice los datos")
                msgBox.exec_()
        else:
            msgBox = QMessageBox()
            msgBox.setWindowTitle("Advertencia")
            msgBox.setText("Revisar campos obligatorios: Nombre, Apellido y Dni.")
            msgBox.exec_()

    def agregar(self):
        """Validate the looked-up guarantor and append it to the grid/list.

        If the person already guarantees another client or has an active
        loan, ask the operator for confirmation first.
        """
        number = self.obj_form_cliente.lne_garante_nro_doc_nuevo.text()
        try:
            nro_doc_garante = int(number)
        except ValueError:
            # Fix: the original had `except exception:` (undefined name),
            # which raised NameError instead of showing this dialog.
            msgBox = QMessageBox()
            msgBox.setText('Ingresar nuevamente el numero de documento sin espacios y sin puntos')
            msgBox.exec_()
            return False
        obj_Cliente_garante = N_party_garante("a")
        existe_garante = obj_Cliente_garante.es_garante_de_otro_cliente(nro_doc_garante)
        obj_party_party_garante = N_datos_personales_cliente()
        id_party_party_garante = obj_party_party_garante.get_id_party_party_garante(nro_doc_garante)
        obj_N_creditos = N_creditos(1)
        tiene_prestamo = obj_N_creditos.get_tiene_prestamos_activo(self.obj_form_cliente.lne_garante_nro_doc_nuevo.text())
        if (existe_garante == True) or (tiene_prestamo == True):
            respuesta = QMessageBox.question(self, 'Validar Clientes y Garantes ', 'Desea agregarlo igualmente?', QMessageBox.Yes | QMessageBox.No)
            if respuesta != QMessageBox.Yes:
                return
        self._agregar_garante(id_party_party_garante)

    def _agregar_garante(self, id_party_party_garante):
        """Append the current guarantor to list_garante and the grid.

        Extracted helper: both branches of the original duplicated these
        ~24 lines verbatim.
        """
        garante_nro_doc = self.obj_form_cliente.lne_garante_nro_doc_nuevo.text()
        cliente_nro_del_garante = int(self.obj_form_cliente.lne_garante_nro_cliente_nuevo.text())
        garante_apellido = self.obj_form_cliente.lne_garante_apellido.text()
        garante_nombre = self.obj_form_cliente.lne_garante_nombre.text()
        garante_estado = self.obj_form_cliente.lne_garante_estado.text()
        tipo_garante = self.obj_form_cliente.cbx_tipo_garante.currentText()
        obj_party_garante = N_party_garante("A")
        obj_party_garante.comment = self.obj_form_cliente.txte_garante_observaciones.toPlainText()
        obj_party_garante.tipo_garante = self.obj_form_cliente.cbx_tipo_garante.currentText()
        obj_party_garante.id_party_garante = id_party_party_garante
        self.list_garante.append(obj_party_garante)
        # Append a row to the guarantor grid.
        rowPosition = self.obj_form_cliente.tw_garantes_lista.rowCount()
        self.obj_form_cliente.tw_garantes_lista.insertRow(rowPosition)
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 0, QTableWidgetItem(garante_estado))
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 1, QTableWidgetItem(tipo_garante))
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 2, QTableWidgetItem(str(cliente_nro_del_garante)))
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 3, QTableWidgetItem(garante_apellido))
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 4, QTableWidgetItem(garante_nombre))
        self.obj_form_cliente.tw_garantes_lista.setItem(rowPosition, 5, QTableWidgetItem(garante_nro_doc))
#app = QApplication(sys.argv)
#dialogo= Cliente_nuevo()
#dialogo.show()
#app.exec_() |
24,836 | 63cb20846430ad520af9d7663a269672e8276640 | import collections
class FixMixin(object):
    """Task statistics over Fix rows for a Show or a show-scoped entity.

    Host classes must provide either `alias` (Show) or `show` plus `id`
    (any other entity; the id column is derived from the class name,
    e.g. Shot -> shotId).
    """
    @property
    def completion(self):
        """Fraction of this entity's 'task'-type fixes with status 'done'.

        Returns 0 when there are no tasks at all.
        """
        from helix.database.sql import Manager
        from helix import Fix, Show
        if isinstance(self, Show):
            idQualifier = None
            show = self.alias
        else:
            idQualifier = self.__class__.__name__.lower() + 'Id'
            show = self.show
        with Manager(willCommit=False) as mgr:
            # NOTE(review): values are interpolated straight into the SQL
            # string; safe only while show/id come from trusted code — prefer
            # bound parameters if user input can ever reach here.
            query = """SELECT status, COUNT(*) FROM {} WHERE show='{}' and type='task'""".format(Fix.TABLE, show)
            if idQualifier is not None:
                query += " AND {}='{}'".format(idQualifier, self.id)
            # Fix: the original appended "GROUP BY status" with no leading
            # space, fusing it onto the previous token.
            query += " GROUP BY status"
            rows = mgr.connection().execute(query).fetchall()
            if not rows:
                # No tasks at all counts as 0% completion.
                return 0
            done = 0
            total = 0
            for status, num in rows:
                total += num
                if status == 'done':
                    done += num
            return float(done) / total

    def numTasksBy(self, qualifier, type='task', status=None):
        """Return {qualifier_value: count} of fixes grouped by `qualifier`.

        NULL/empty qualifier values are pooled under the key '_'.  The
        result is a defaultdict(int), so missing keys read as 0.
        """
        from helix.database.sql import Manager
        from helix import Fix, Show
        if isinstance(self, Show):
            idQualifier = None
            show = self.alias
        else:
            idQualifier = self.__class__.__name__.lower() + 'Id'
            show = self.show
        with Manager(willCommit=False) as mgr:
            query = """SELECT {}, COUNT(*) FROM {} WHERE show='{}' and type='{}'""".format(qualifier, Fix.TABLE, show, type)
            if idQualifier is not None:
                query += " AND {}='{}'".format(idQualifier, self.id)
            if status is not None:
                query += " AND status='{}'".format(status)
            query += " GROUP BY {}".format(qualifier)
            rows = mgr.connection().execute(query).fetchall()
            results = collections.defaultdict(int)
            # (The original had an if/else where both branches returned
            # `results`; collapsed.)
            for value, num in rows:
                results['_' if not value else value] += num
            return results

    def numTasks(self, type='task', status=None, department=None, user=None):
        """Count fixes of `type`, optionally filtered by status, target
        department and assigned fixer."""
        from helix.database.sql import Manager
        from helix import Fix, Show
        if isinstance(self, Show):
            idQualifier = None
            show = self.alias
        else:
            idQualifier = self.__class__.__name__.lower() + 'Id'
            show = self.show
        with Manager(willCommit=False) as mgr:
            query = """SELECT COUNT(*) FROM {} WHERE show='{}' and type='{}'""".format(Fix.TABLE, show, type)
            if idQualifier is not None:
                query += " AND {}='{}'".format(idQualifier, self.id)
            if status is not None:
                query += " AND status='{}'".format(status)
            if department is not None:
                query += " AND for_dept='{}'".format(department)
            if user is not None:
                query += " AND fixer='{}'".format(user)
            row = mgr.connection().execute(query).fetchone()
            return row[0] if row and row[0] else 0
24,837 | 9822ba59350afd429fd4b19837fa5a1eba4c9543 | import pandas as pd
import numpy as np
from pathlib import Path
import math
from skeletonization import getScore
import os
def bin(percentages):
    """Return the mean of the values falling in the most populated bin.

    Values (0-100 percentages) are split into 9 intervals by the edges
    linspace(0, 100, 10); the mean of the values in the fullest interval
    is returned.  Raises on empty input (callers guard for that).

    NOTE: the name shadows the builtin `bin`; kept for interface
    compatibility with existing callers.  The original's isinstance
    branch was dead — np.argmax returns a scalar and both arms computed
    the same expression — so it has been removed.
    """
    edges = np.linspace(0, 100, 10)
    percentages = np.asarray(percentages)
    digitized = np.digitize(percentages, edges)
    counts = np.bincount(digitized)
    max_bin = np.argmax(counts)
    return percentages[digitized == max_bin].mean()
def qthPercentilePerSegment(percDict, q):
    """Replace each segment's percentage list with its binned average, in place.

    Empty lists map to None.

    NOTE(review): despite the name, the q-th percentile is not used — the
    original computed np.percentile(..., q) and np.average(...) into
    locals and discarded both; that dead code is removed here and `q` is
    kept only for interface compatibility.
    """
    for segment, percentages in percDict.items():
        percDict[segment] = bin(percentages) if len(percentages) > 0 else None
def averagePerSegment(percDict):
    """Replace each segment's list of values in-place with its mean.

    Segments whose list is empty are set to None (matching the original
    behavior, which left ``average`` at None for empty lists).
    """
    for segment, percentages in percDict.items():
        # sum()/len() replaces the manual accumulation loop; the empty
        # check avoids ZeroDivisionError.
        if percentages:
            percDict[segment] = sum(percentages) / len(percentages)
        else:
            percDict[segment] = None
def calculateErrors(predDict, gtDict, errorsDict):
    """Append each segment's absolute percentage error to errorsDict.

    For every segment present in predDict with both a prediction and a
    ground truth (neither None), appends |pred - gt| / gt * 100 to the
    segment's list in errorsDict. Segments missing either value are
    skipped. Assumes gt is non-zero when present -- TODO confirm the
    CSV never contains 0% ground truth.
    """
    for segment, percentage in predDict.items():
        gt = gtDict[segment]
        # 'is not None' instead of '!= None' (PEP 8); equivalent for the
        # float/None values stored here.
        if percentage is not None and gt is not None:
            error = abs((percentage - gt) / gt) * 100
            errorsDict[segment].append(error)
if __name__ == "__main__":
    # Entry point: compare automatic stenosis scores (from getScore) against
    # ground-truth percentages stored in a CSV, accumulating per-segment
    # absolute percentage errors across all videos.
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Generate segmentation masks of arteries')
    parser.add_argument('--csv_path', required=False,
                        default='B:/percentage_stenosis.csv',
                        metavar="/path/to/csv/",
                        help="Directory folder for csv file containing scores (default: B:/percentage_stenosis.csv)")
    parser.add_argument('--segmented_path', required=False,
                        default='A:/segmented',
                        metavar="/path/to/segmented masks",
                        help="Path to folder containing images and their segmentation maps (default: A:/segmented)")
    args = parser.parse_args()
    csv_path = args.csv_path
    pathString = args.segmented_path
    data = pd.read_csv(csv_path)
    # NOTE(review): read_csv already returns a DataFrame; this wrap is a
    # no-op kept as-is.
    data = pd.DataFrame(data)
    path = Path(pathString)
    # One list of percentage errors per coronary segment, accumulated over
    # every video processed below.
    errors = {
        'lad_p': [],
        'lad_m': [],
        'lad_d': [],
        'lcx2_p': [],
        'lcx2_m': [],
        'lcx2_d': [],
        'diagonal' : [],
        'lcx1' : []
    }
    # Each sub-directory of the segmented folder is one video, named by the
    # keyframe_id it corresponds to in the CSV.
    for video in path.iterdir():
        row = data.loc[data['keyframe_id'] == float(video.name)].head()
        # Raw per-keyframe scores collected for this video, per segment.
        stenosisPercentages = {
            'lad_p': [],
            'lad_m': [],
            'lad_d': [],
            'lcx2_p': [],
            'lcx2_m': [],
            'lcx2_d': [],
            'diagonal' : [],
            'lcx1' : []
        }
        # Ground-truth percentage per segment (None = not annotated).
        gtPercentages = {
            'lad_p': None,
            'lad_m': None,
            'lad_d': None,
            'lcx2_p': None,
            'lcx2_m': None,
            'lcx2_d': None,
            'diagonal' : None,
            'lcx1' : None
        }
        if len(row) > 0:
            row = row.iloc[0]
        else:
            # No ground-truth row for this video; skip it.
            continue
        # print(row)
        # Collect which arteries/segments carry ground-truth annotations
        # (non-NaN CSV columns) for this video.
        valid_arteries = []
        valid_segments = []
        for index, value in row.items():
            # print(value)
            if not math.isnan(value) and index != 'keyframe_id':
                artery = index.split('_')[0]
                if artery not in valid_arteries:
                    valid_arteries.append(artery)
                valid_segments.append(index)
                gtPercentages[index] = value
        for keyframe in video.iterdir():
            for artery in valid_arteries:
                filename = keyframe.name + '_' + artery
                # folderDirectory = pathString
                # Only score frames whose binary mask has already been
                # generated on disk.
                if os.path.exists(f"{pathString}/{filename.split('_')[0]}/{filename.split('.')[0].split('_')[0]}_{filename.split('.')[0].split('_')[1]}/(unknown)_bin_mask.png"):
                    _, scores, boxes = getScore(filename, folderDirectory=pathString, show=False, save=True)
                    # print(f'(unknown): ' ,scores)
                    if scores != None and len(scores) > 0:
                        print('processed: ', filename)
                        for key, score in scores.items():
                            # getScore keys are sub-segments (p/m/d);
                            # lcx1/diagonal carry no sub-segment suffix.
                            segmentName = artery + '_' + key
                            if artery == 'lcx1' or artery == 'diagonal':
                                segmentName = artery
                            if segmentName in valid_segments:
                                stenosisPercentages[segmentName].append(score)
                # NOTE(review): these two breaks stop after the first artery
                # and first keyframe -- looks like debugging leftovers given
                # the multi-keyframe logs below; confirm whether the full
                # loops should run.
                break
            break
        # Reduce each segment's raw scores to a single value, then fold the
        # error vs ground truth into the global accumulator.
        qthPercentilePerSegment(stenosisPercentages, 80)
        print(f'{video.name}: raw percentages', stenosisPercentages)
        calculateErrors(stenosisPercentages, gtPercentages, errors)
    print('raw errors: ', errors)
    averagePerSegment(errors)
    print('mean errors: ', errors)
"""
mean errors first round:
raw percentages {'lad_p': None, 'lad_m': None, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': 41.313015853693784, 'lad_m': None, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': 10.966248934731032, 'lad_m': None, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': None, 'lad_m': 26.066242398934016, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': 8.003781948507216, 'lad_m': 19.121593196430613, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': 38.77428614929597, 'lad_m': 43.85646968075416, 'lad_d': 12.041105703580937, 'lcx2_p': 75.9823148125468, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw percentages {'lad_p': 11.854085439915279, 'lad_m': 89.22776182014374, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
raw errors (%): {'lad_p': [46.686892761629004, 20.965261803413, 64.41693059993003, 79.77501597606043, 76.21308494070821, 48.358730182882766, 84.33393009324138, 89.32829073532372, 44.608162643862904, 86.82879395564969], 'lad_m': [6.831127032247775, 52.65183269808609, 44.22579874192373, 85.52712933460606, 83.0061080784261, 76.47130541090596, 30.331211994670078, 52.19601700892347, 45.179412899057304, 18.970349093524987], 'lad_d': [67.06007610999922, 36.769424627733585, 42.50495876832693, 75.91778859283812],
'lcx2_p': [46.05357599877789, 8.546164017924006], 'lcx2_m': [], 'lcx2_d': [], 'diagonal': [], 'lcx1': []}
mean errors (%): {'lad_p': 64.15150936927012, 'lad_m': 49.53902922923716, 'lad_d': 55.56306202472446, 'lcx2_p': 27.299870008350947, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
"""
"""
with precise ptsalongline, measure against average width
raw errors: {'lad_p': [11.831664367049102, 25.0, 13.4988099081334, 55.5623451908134, 41.057758717041345, 8.527013812769866, 42.126777584699965, 80.60141887036046, 8.806566229996012, 60.59140358765839], 'lad_m': [38.11178982424632, 53.39326144288472, 28.705381061326356, 70.83510610586582, 72.52425214311705, 77.84023758940849, 60.18189944857429, 45.681119916846804, 17.6448357821174, 31.535166026655986], 'lad_d': [39.53701510289559, 20.077322552482382, 7.179826179804963, 53.898808440836774], 'lcx2_p': [57.93049437536689, 31.189628167746204], 'lcx2_m': [], 'lcx2_d': [], 'diagonal': [4.246880957974864, 8.202558037653795, 30.174421326510263, 10.515341842787016, 3.415795275664877, 24.96485354936036, 0.08940455871107034], 'lcx1': [90.60528981178743, 14.23421038153944, 11.077553587200997, 22.345585861224627, 15.131008314394956]}
mean errors: {'lad_p': 34.76037582685219, 'lad_m': 49.64530493410433, 'lad_d': 30.173243069004926, 'lcx2_p': 44.56006127155655, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 11.65846507838032, 'lcx1': 30.67872959122949}
"""
"""
with precise ptsalongline, adaptive width measure, correct coordinate ordering and gap detection for bends
raw errors: {'lad_p': [40.93581846447678, 23.948410331735328, 20.014029619074318, 71.66852967639899, 70.96310417693672, 32.72093172742699, 62.616616534638204, 86.26664572674173, 31.351536819649773, 71.3784343001312], 'lad_m': [86.70116609464068, 54.24712243188075, 94.80730763415107, 76.17726937064, 87.00706062576576, 44.626697516416, 44.712837231688034, 35.546480122699656, 3.9679862921211493, 9.077566081825132], 'lad_d': [3.2957447187719304, 31.27050155541632, 72.89584720927479, 40.085650943899246], 'lcx2_p': [13.41681901676013, 17.570475476944836], 'lcx2_m': [], 'lcx2_d': [], 'diagonal': [46.579014644513514, 46.286680649762786, 44.35581147775295, 3.6084318000598143, 4.848241731471745, 17.74991681299719, 29.99700171603436], 'lcx1': [26.126698410187533, 44.63807955348782, 53.37218806062549, 8.847361295632835, 12.986670864350428]}
mean errors: {'lad_p': 51.186405737720996, 'lad_m': 53.68714934018281, 'lad_d': 36.88693610684057, 'lcx2_p': 15.493647246852483, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 27.63215697608462, 'lcx1': 29.19419963685682}
"""
"""
frangi filters, width * 0.15
mean errors: {'lad_p': 63.1463313011622, 'lad_m': 59.2582270226721, 'lad_d': 35.31978554676648, 'lcx2_p': 40.64316755777989, 'lcx2_m': None, 'lcx2_d': 38.09483596997129, 'diagonal': 39.72031926590497, 'lcx1': 30.756827621101557}
"""
"""
with precise ptsalongline
1388: raw percentages {'lad_p': None, 'lad_m': 45.55553041861184, 'lad_d': 20.537392472901853, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
1472: raw percentages {'lad_p': 67.84798434973884, 'lad_m': 29.153827940409982, 'lad_d': None, 'lcx2_p': 78.96524718768373, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 50.04116259608066, 'lcx1': None}
1494: raw percentages {'lad_p': 100.0, 'lad_m': 49.87339736767986, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': 95.30264490589377}
1523: raw percentages {'lad_p': 58.83194438693817, 'lad_m': 10.9725814722862, 'lad_d': 66.3299211927877, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
1578: raw percentages {'lad_p': 23.11709576353861, 'lad_m': 15.178756342182288, 'lad_d': 27.581274832662253, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 53.739320609437506, 'lcx1': 54.848544949769355}
1594: raw percentages {'lad_p': 30.041403919549083, 'lad_m': 6.192593885726845, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 57.49371876833386, 'lcx1': 74.07826543450643}
1618: raw percentages {'lad_p': 69.80942304203653, 'lad_m': None, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
1657: raw percentages {'lad_p': 25.31212254002636, 'lad_m': None, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 99.07222019052793, 'lcx1': 83.89904183828519}
1673: raw percentages {'lad_p': None, 'lad_m': 24.825649544600754, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 10.891639190495862, 'lcx1': None}
1691: raw percentages {'lad_p': 7.493321795769775, 'lad_m': 14.885598650748502, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': None, 'lcx1': None}
1738: raw percentages {'lad_p': 63.450020715430234, 'lad_m': 58.327136240586576, 'lad_d': 15.049346863486775, 'lcx2_p': 90.14947210268393, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 99.97188283948829, 'lcx1': 79.44611056656655}
1778: raw percentages {'lad_p': 18.00958658509292, 'lad_m': 98.25359351113522, 'lad_d': None, 'lcx2_p': None, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 75.33946256245709, 'lcx1': None}
raw errors (%): {'lad_p': [71.10363029557674, 57.08370868635845, 12.738221197454342, 63.83982494281949, 90.00890427230696, 9.357113263671094, 79.98934823878565], 'lad_m': [79.76165821042362, 91.15343730610451, 24.12824772300377, 62.78600337312874, 27.09107969926678, 31.004791348180294], 'lad_d': [44.837450334675495, 69.90130627302645], 'lcx2_p': [28.78496014669133], 'lcx2_m': [], 'lcx2_d': [], 'diagonal': [23.229541986517848, 36.11809025740682, 10.080244656142144, 45.54180404752069, 24.96485354936036, 5.825671796928642], 'lcx1': [21.644935786043778, 17.69081618388174, 19.855774054693125, 13.494443666523637]}
mean errors (%): {'lad_p': 54.874392985281816, 'lad_m': 52.65420294335129, 'lad_d': 57.36937830385097, 'lcx2_p': 28.78496014669133, 'lcx2_m': None, 'lcx2_d': None, 'diagonal': 24.293367715646085, 'lcx1': 18.17149242278557}
details:
1388:
1388_42_lad: {'p': 10.31461492214607, 'm': 39.05347955041552, 'd': 7.070041516003778}
1388_43_lad: {'p': 11.114385412162731, 'm': 22.848833435839786, 'd': 47.259774124454566}
1388_44_lad: {'p': 10.864882225303385, 'm': 22.66940852471223, 'd': 27.18461463699098}
1388_45_lad: {'p': 12.144283688016932, 'm': 41.99734767939408, 'd': 32.62050499784325}
1388_46_lad: {'p': 8.94778266147136, 'm': 28.31267366717646, 'd': 7.483372807659916}
1388_47_lad: {'p': 12.990039184386305, 'm': 73.88986270540438, 'd': 19.47120705409099}
1388_48_lad: {'p': 18.367331260851095, 'm': 27.311326986088027, 'd': 12.765693606144612}
1388_49_lad: {'p': 10.616028849471348, 'm': 33.04894729482325, 'd': 13.775387527886906}
1388_50_lad: {'p': 8.352104809081728, 'm': 28.26756584999649, 'd': 14.159105360986246}
1388_51_lad: {'p': 8.012074097653842, 'm': 79.00132282957772, 'd': 12.232427737202634}
1388_52_lad: {'p': 9.327743902438968, 'm': 88.73814425276211, 'd': 7.930183133912882}
1388_53_lad: {'p': 7.12027714773793, 'm': 98.53260340048546, 'd': 32.575955192957714}
1388_54_lad: {'m': 18.37663910120062, 'd': 11.259582501788357}
1388_55_lad: {'p': 9.28887529011413, 'm': 18.672992457042824, 'd': 36.94532894381803}
1388_56_lad: {'p': 5.6182522958453385, 'm': 27.85026908352287, 'd': 15.269912142441711}
1388_57_lad: {'p': 17.604300526775283, 'm': 16.831273651632582, 'd': 24.503441424736504}
1388_58_lad: {'p': 5.157712288360672, 'm': 30.892917556345644, 'd': 37.96761380246478}
1388_59_lad: {'p': 4.267821031912311, 'm': 12.144261023611602, 'd': 6.211406403223785}
1388_60_lad: {'p': 3.667638748569957, 'm': 72.56781124617349, 'd': 32.31447411170011}
1388_61_lad: {'p': 5.5535856100856655, 'm': 95.39400800941881, 'd': 17.042407134150118}
1388_62_lad: {'p': 22.59772279068687, 'm': 80.26445048522449, 'd': 15.24280777048107}
1472:
1472_049_lad: {'p': 44.39094877570493, 'm': 22.116977940414884, 'd': 8.839699074074158}
1472_049_diagonal: {'diagonal': 78.96524718768373}
1472_049_lcx2: {'p': 78.96524718768373, 'd': 21.226132500229745}
1472_050_lad: {'p': 22.14411777107155, 'm': 21.46622291051451}
1472_050_diagonal: {'diagonal': 86.79020801909026}
1472_051_lad: {'p': 37.85860090815749, 'm': 38.810976111010845, 'd': 10.161699542171675}
1472_051_diagonal: {'diagonal': 28.228942276096337}
1472_059_lad: {'p': 19.18781916374015, 'm': 100.0}
1472_059_diagonal: {'diagonal': 36.593765843254666}
1472_060_lad: {'p': 100.0, 'm': 7.12897050461031, 'd': 44.22664557605882}
1472_060_diagonal: {'diagonal': 11.124240306630206}
1472_061_lad: {'p': 100.0, 'm': 45.14772597449043, 'd': 4.65368143860061}
1472_061_diagonal: {'diagonal': 16.32523767550219}
1472_062_lad: {'p': 87.05037252897527, 'm': 8.225742047118278, 'd': 99.54796274919234}
1472_062_diagonal: {'diagonal': 60.41062292478442}
1472_063_lad: {'p': 100.0, 'm': 7.019697119667356, 'd': 100.0}
1472_063_diagonal: {'diagonal': 31.932199131684126}
1472_065_lad: {'p': 100.0, 'm': 12.46813885586321, 'd': 100.0}
1472_065_diagonal: {'diagonal': 100.0}
1494:
1494_054_lad: {'p': 100.0, 'm': 23.31524350330062, 'd': 55.04247693128943}
1494_054_lcx1: {'lcx1': 85.46260346907081}
1494_055_lad: {'p': 100.0, 'm': 6.798318585873686, 'd': 12.46036510140267}
1494_055_lcx1: {'lcx1': 94.49056158796502}
1494_056_lad: {'p': 100.0, 'd': 9.751025080206189}
1494_056_lcx1: {'lcx1': 100.0}
1494_057_lad: {'p': 100.0, 'm': 81.06070118984464, 'd': 42.32277809006318}
1494_057_lcx1: {'lcx1': 100.0}
1494_058_lad: {'p': 100.0, 'm': 100.0}
1494_058_lcx1: {'lcx1': 100.0}
1494_059_lad: {'p': 100.0, 'm': 76.51605304653393, 'd': 11.638927398037813}
1494_059_lcx1: {'lcx1': 88.8482339446594}
1494_060_lad: {'p': 100.0, 'm': 100.0, 'd': 5.7693182981918945}
1494_060_lcx1: {'lcx1': 74.8303398690299}
1494_061_lad: {'p': 100.0, 'm': 58.4445312722796, 'd': 5.093210506697132}
1494_061_lcx1: {'lcx1': 100.0}
1494_062_lad: {'p': 100.0, 'm': 69.78450955223062, 'd': 18.134067603494085}
1494_062_lcx1: {'lcx1': 100.0}
1494_063_lad: {'p': 100.0, 'm': 10.41807102757939, 'd': 11.82229199034478}
1494_063_lcx1: {'lcx1': 100.0}
1494_064_lad: {'p': 100.0, 'm': 5.838720404902298, 'd': 36.68745656276199}
1494_064_lcx1: {'lcx1': 100.0}
1494_065_lad: {'p': 100.0, 'm': 16.431222461933615, 'd': 23.264151777165765}
1494_065_lcx1: {'lcx1': 100.0}
1523:
1523_36_lad: {'p': 63.72286203027373, 'm': 3.7958911881607826, 'd': 65.97340642387115}
1523_37_lad: {'p': 30.21880909878204, 'm': 5.164243506763766, 'd': 76.4568854577621}
1523_38_lad: {'p': 71.31025846061665, 'm': 10.805841055725828, 'd': 81.61658719446596}
1523_39_lad: {'p': 27.176104282734737, 'm': 5.436709156674602, 'd': 72.80871501537123}
1523_40_lad: {'p': 45.23986821915314, 'm': 6.634040524184092, 'd': 13.71969519410512}
1523_41_lad: {'p': 96.12550494351385, 'm': 5.8827741109146015, 'd': 82.89850748216298}
1523_42_lad: {'p': 91.76944083988009, 'm': 16.477131040712866, 'd': 66.87893248888778}
1523_43_lad: {'p': 30.910903789178967, 'm': 4.945915765262754, 'd': 100.0}
1523_44_lad: {'p': 47.89664415964406, 'd': 82.80810409878893}
1523_45_lad: {'p': 27.132863162838206, 'm': 6.626531890416132, 'd': 64.98018008117232}
1523_46_lad: {'p': 72.99883228482966, 'm': 9.634903161027363, 'd': 70.42057086291634}
1523_47_lad: {'p': 85.94541992379214, 'm': 10.078830819979123, 'd': 60.0866429000378}
1523_48_lad: {'p': 100.0, 'm': 16.52386348679459, 'd': 52.48078823439973}
1523_49_lad: {'p': 31.128317402680405, 'm': 11.598106849337164, 'd': 59.146594457668144}
1523_50_lad: {'p': 78.18668498431649, 'm': 30.89003998453944, 'd': 72.27127220841588}
1523_51_lad: {'p': 41.54859660877667, 'm': 20.093899543799854, 'd': 38.73185698457774}
1578:
1578_030_lad: {'p': 17.34204563418865, 'm': 6.590322697676642, 'd': 19.50078738919563}
1578_030_diagonal: {'diagonal': 34.34763281747688}
1578_030_lcx1: {'lcx1': 79.04982908261744}
1578_031_lad: {'p': 14.832671518723895, 'm': 18.181818181818233, 'd': 73.45300878851776}
1578_031_diagonal: {'diagonal': 94.94114769435025}
1578_031_lcx1: {'lcx1': 38.973773911095066}
1578_032_lad: {'p': 17.526533580345227, 'm': 42.86291468072355, 'd': 23.450869790542683}
1578_032_diagonal: {'diagonal': 11.314181020889237}
1578_032_lcx1: {'lcx1': 57.20758095154865}
1578_033_lad: {'p': 22.801523635661567, 'm': 16.46360523629269, 'd': 27.16083256192039}
1578_033_diagonal: {'diagonal': 37.95456603270089}
1578_033_lcx1: {'lcx1': 64.40094382517272}
1578_034_lad: {'p': 25.295441330826517, 'm': 7.386336112471714, 'd': 26.693954860975587}
1578_034_diagonal: {'diagonal': 100.0}
1578_034_lcx1: {'lcx1': 52.8957720816595}
1578_035_lad: {'p': 19.2834466776155, 'm': 26.460468693024108, 'd': 15.117733025920854}
1578_035_diagonal: {'diagonal': 17.498426610102403}
1578_035_lcx1: {'lcx1': 77.22056367063642}
1578_036_lad: {'p': 21.110127676182078, 'm': 2.194298546714657, 'd': 9.402016236813227}
1578_036_diagonal: {'diagonal': 68.46222704168589}
1578_036_lcx1: {'lcx1': 78.02437735124911}
1578_037_lad: {'p': 16.280505347069095, 'm': 12.60382669207899, 'd': 13.555844047070643}
1578_037_diagonal: {'diagonal': 11.381411080048998}
1578_037_lcx1: {'lcx1': 66.28392609900688}
1578_038_lad: {'p': 33.777786038980295, 'm': 6.736395835749843, 'd': 13.894747828077648}
1578_038_diagonal: {'diagonal': 61.05799258174609}
1578_038_lcx1: {'lcx1': 30.68093446828284}
1578_039_lad: {'p': 25.307918680310028, 'm': 3.5344872807736016, 'd': 17.0834591387635}
1578_039_diagonal: {'diagonal': 59.68181322872468}
1578_039_lcx1: {'lcx1': 49.60313476802509}
1578_040_lad: {'p': 36.46278146794722, 'm': 8.800390010959491, 'd': 8.145001761374415}
1578_040_diagonal: {'diagonal': 17.629616604992048}
1578_040_lcx1: {'lcx1': 37.86495301371787}
1578_041_lad: {'p': 30.616021388567518, 'm': 16.21118472316737, 'd': 13.102308112099227}
1578_041_diagonal: {'diagonal': 26.140712658376053}
1578_041_lcx1: {'lcx1': 35.73646374644892}
1578_042_lad: {'p': 24.149662421244134, 'm': 16.694368515994917, 'd': 22.386674930297325}
1578_042_diagonal: {'diagonal': 80.36164382462997}
1578_042_lcx1: {'lcx1': 71.86812374600993}
1578_043_lad: {'p': 17.664482776003776, 'm': 16.90637414007292, 'd': 14.38150608052341}
1578_043_diagonal: {'diagonal': 87.63830732907178}
1578_043_lcx1: {'lcx1': 38.45547181803192}
1578_044_lad: {'p': 16.384504442718594, 'm': 11.265977712830821, 'd': 39.77884226277283}
1578_044_diagonal: {'diagonal': 82.16281973644095}
1578_044_lcx1: {'lcx1': 34.04560103509875}
1578_045_lad: {'p': 21.02499204940742, 'm': 6.315643040968178, 'd': 51.916001515688315}
1578_045_diagonal: {'diagonal': 100.0}
1578_045_lcx1: {'lcx1': 51.280018017847226}
1578_046_lad: {'p': 16.16638796723272, 'm': 26.504965304768014, 'd': 22.463700944660026}
1578_046_diagonal: {'diagonal': 58.78723661651091}
1578_046_lcx1: {'lcx1': 67.58752070886712}
1578_047_lad: {'p': 25.339641022904868, 'm': 63.24956800277259, 'd': 78.86381417484006}
1578_047_diagonal: {'diagonal': 17.594458160308456}
1578_047_lcx1: {'lcx1': 63.48289676174297}
1578_048_lad: {'p': 24.607814062900545, 'm': 28.250406826922493, 'd': 48.70758726398648}
1578_048_diagonal: {'diagonal': 22.069885780305544}
1578_048_lcx1: {'lcx1': 72.21403347764766}
1578_049_lad: {'p': 16.949300954437906, 'm': 5.30530530530533, 'd': 27.77867622866236}
1578_049_diagonal: {'diagonal': 54.51947541456401}
1578_049_lcx1: {'lcx1': 32.880004376723925}
1578_050_lad: {'p': 19.275426640705472, 'm': 9.436264198569532, 'd': 17.42283192353372}
1578_050_diagonal: {'diagonal': 67.56388185440163}
1578_050_lcx1: {'lcx1': 21.86536169478047}
1578_051_lad: {'p': 18.242256060412178, 'm': 15.970647966887409, 'd': 15.129519968612303}
1578_051_diagonal: {'diagonal': 71.94926073505621}
1578_051_lcx1: {'lcx1': 44.83610134840252}
1578_052_lad: {'p': 30.37722868297379, 'm': 16.828461446858224, 'd': 16.14327371165395}
1578_052_diagonal: {'diagonal': 20.33703153932056}
1578_052_lcx1: {'lcx1': 61.48846014403815}
1578_053_lad: {'p': 48.47581008350005, 'm': 7.515260583527816, 'd': 16.531190764869642}
1578_053_diagonal: {'diagonal': 7.374446525085654}
1578_053_lcx1: {'lcx1': 34.24482880101982}
1578_054_lad: {'p': 35.39378892531637, 'm': 14.187619021334186, 'd': 15.782621782063245}
1578_054_diagonal: {'diagonal': 19.322630260443752}
1578_054_lcx1: {'lcx1': 21.039377289377846}
1578_055_lad: {'p': 28.70762606376721, 'm': 24.468053908897758, 'd': 15.944399682585653}
1578_055_diagonal: {'diagonal': 41.017760423227166}
1578_055_lcx1: {'lcx1': 100.0}
1578_056_lad: {'p': 26.361763560861572, 'm': 17.81613795942635, 'd': 15.25310126949142}
1578_056_diagonal: {'diagonal': 100.0}
1578_056_lcx1: {'lcx1': 86.2664262063327}
1578_057_lad: {'p': 20.96705103669354, 'm': 4.253462341111702, 'd': 25.2111670514496}
1578_057_diagonal: {'diagonal': 19.176273260840905}
1578_057_lcx1: {'lcx1': 66.43463949281137}
1578_058_lad: {'p': 21.437948028295228, 'm': 4.056038791002758, 'd': 30.18536575900017}
1578_058_diagonal: {'diagonal': 100.0}
1578_058_lcx1: {'lcx1': 63.26855660293926}
1578_059_lad: {'p': 8.790706780656565, 'm': 7.723114890420868, 'd': 53.12788143076874}
1578_059_diagonal: {'diagonal': 79.10515426711179}
1578_059_lcx1: {'lcx1': 30.569382466147943}
1578_060_lad: {'p': 15.676774133247362, 'm': 5.767727958528257, 'd': 67.45079952579898}
1578_060_diagonal: {'diagonal': 96.52894579415005}
1578_060_lcx1: {'lcx1': 70.53583648556986}
1594:
1594_045_lad: {'m': 5.662932205763205, 'd': 8.42150020370146}
1594_045_diagonal: {'diagonal': 100.0}
1594_045_lcx2: {'p': 100.0, 'd': 28.871888917632482}
1594_045_lcx1: {'lcx1': 30.64161345672445}
1594_046_lad: {'p': 25.759644753605503}
1594_046_diagonal: {'diagonal': 76.56047894171981}
1594_046_lcx2: {'p': 85.49193509088329, 'd': 53.69061255819019}
1594_046_lcx1: {'lcx1': 30.607001323120087}
1594_047_lad: {'p': 19.485198252420044, 'm': 11.606626878477755}
1594_047_diagonal: {'diagonal': 100.0}
1594_047_lcx2: {'p': 13.013443186395602, 'd': 59.64258988703966}
1594_047_lcx1: {'lcx1': 68.64862635883881}
1594_048_lad: {'p': 96.88534460046563, 'm': 2.134925885529404, 'd': 4.294117755837545}
1594_048_diagonal: {'diagonal': 11.316552238750033}
1594_048_lcx2: {'d': 96.19020391865625}
1594_048_lcx1: {'lcx1': 25.2269160733821}
1594_049_lad: {'p': 9.68772288895109, 'm': 5.860839717206567, 'd': 2.8390853063682275}
1594_049_diagonal: {'diagonal': 9.086889306315072}
1594_049_lcx2: {'d': 100.0}
1594_049_lcx1: {'lcx1': 100.0}
1594_050_lad: {'p': 83.30507634304878, 'm': 4.15509735703189}
1594_050_diagonal: {'diagonal': 17.58992379673856}
1594_050_lcx2: {'p': 9.482182949383489, 'd': 100.0}
1594_050_lcx1: {'lcx1': 100.0}
1594_051_lad: {'p': 8.678049639147956}
1594_051_diagonal: {'diagonal': 8.794231201739223}
1594_051_lcx2: {'p': 28.576151034874965, 'd': 7.6286599692449775}
1594_051_lcx1: {'lcx1': 100.0}
1594_052_lad: {'p': 11.23587600863809, 'd': 4.5605986294987915}
1594_052_diagonal: {'diagonal': 100.0}
1594_052_lcx2: {'p': 35.439987923266436, 'd': 77.64440138637806}
1594_052_lcx1: {'lcx1': 66.70446605109039}
1594_053_lad: {'p': 5.292471373686103, 'm': 6.87811929480876, 'd': 12.517235778538838}
1594_053_diagonal: {'diagonal': 100.0}
1594_053_lcx2: {'p': 22.612319268469573, 'd': 60.32093560889415}
1594_053_lcx1: {'lcx1': 100.0}
1594_054_lad: {'p': 35.60869048719012, 'm': 5.14818153387655, 'd': 15.55832919651089}
1594_054_diagonal: {'diagonal': 14.386092173774045}
1594_054_lcx2: {'p': 22.09124966113708, 'd': 95.56257223017933}
1594_054_lcx1: {'lcx1': 93.0322965164151}
1594_055_lad: {'p': 4.475964848337544, 'm': 8.09402821312062, 'd': 6.801653291304066}
1594_055_diagonal: {'diagonal': 94.69673879263573}
1594_055_lcx2: {'p': 5.92026632940883, 'd': 100.0}
1594_055_lcx1: {'lcx1': 100.0}
1618:
1618_28_lad: {'p': 66.48830252634295, 'm': 18.378604817068357, 'd': 20.503509081491465}
1618_29_lad: {'p': 98.23523019206705, 'm': 93.98742540273138, 'd': 7.769206153585618}
1618_30_lad: {'p': 8.934138679697213, 'm': 100.0, 'd': 58.18447274140486}
1618_31_lad: {'p': 5.790595776198037, 'm': 100.0, 'd': 14.505863718541711}
1618_32_lad: {'p': 8.284262405698684, 'm': 27.06083200886732, 'd': 18.245447629839195}
1618_33_lad: {'p': 100.0, 'm': 5.569593938286044, 'd': 14.61839219301304}
1618_34_lad: {'p': 92.76603626802168, 'm': 4.1443362608379, 'd': 16.472324406542793}
1618_35_lad: {'p': 100.0, 'm': 5.149601050479968, 'd': 14.60476954941321}
1618_36_lad: {'p': 77.68805343500864, 'm': 7.709182742764331, 'd': 8.55745870925686}
1618_37_lad: {'p': 100.0, 'm': 12.107978302332857, 'd': 17.54480009734497}
1618_38_lad: {'p': 100.0, 'm': 13.196898352353115, 'd': 13.691037639684279}
1618_39_lad: {'p': 79.52645722140386, 'm': 25.70359796543412, 'd': 12.538083569937585}
1657:
1657_036_lad: {'p': 70.05711440126446, 'm': 9.173283767715802, 'd': 8.806612175357786}
1657_036_diagonal: {'diagonal': 100.0}
1657_036_lcx1: {'lcx1': 62.754688959685645}
1657_037_lad: {'p': 4.953363241777186, 'm': 12.957614715960798, 'd': 13.695261464286157}
1657_037_diagonal: {'diagonal': 98.03167518330334}
1657_037_lcx1: {'lcx1': 87.24881400129784}
1657_038_lad: {'m': 9.366264598889519, 'd': 19.427876784516506}
1657_038_diagonal: {'diagonal': 92.69052672197591}
1657_038_lcx1: {'lcx1': 100.0}
1657_039_lad: {'p': 8.141536990596776, 'm': 11.016006576720327, 'd': 13.615881450759272}
1657_039_diagonal: {'diagonal': 100.0}
1657_039_lcx1: {'lcx1': 100.0}
1657_040_lad: {'p': 12.489911567328916, 'm': 13.421285949508244, 'd': 11.474863610386466}
1657_040_diagonal: {'diagonal': 100.0}
1657_040_lcx1: {'lcx1': 87.57942348914887}
1657_041_lad: {'p': 8.641702793463013, 'm': 11.887536839594304, 'd': 9.640895273536376}
1657_041_diagonal: {'diagonal': 100.0}
1657_041_lcx1: {'lcx1': 71.31326206732265}
1657_042_lad: {'p': 9.342126789295847, 'm': 22.00278472871744, 'd': 23.714376347264366}
1657_042_diagonal: {'diagonal': 100.0}
1657_042_lcx1: {'lcx1': 30.094229865396827}
1657_043_lad: {'p': 12.241700095006792, 'm': 9.261144814900247, 'd': 6.636272764951068}
1657_043_diagonal: {'diagonal': 100.0}
1657_043_lcx1: {'lcx1': 100.0}
1657_044_lad: {'p': 90.2440313614968, 'm': 4.649706063409509, 'd': 5.06938024127469}
1657_044_diagonal: {'diagonal': 100.0}
1657_044_lcx1: {'lcx1': 100.0}
1657_045_lad: {'p': 11.69761562000744, 'm': 14.525103096157643, 'd': 8.53920060405432}
1657_045_diagonal: {'diagonal': 100.0}
1657_045_lcx1: {'lcx1': 100.0}
1673:
1673_48_lad: {'p': 4.747034859218557, 'm': 39.97068956722293, 'd': 4.790115418538877}
1673_48_diagonal: {'diagonal': 4.674240613493086}
1673_49_lad: {'m': 28.17521607854512, 'd': 8.88520332729068}
1673_49_diagonal: {'diagonal': 4.066692553351636}
1673_50_lad: {'p': 12.922683724950012, 'm': 24.596621091740012, 'd': 20.77604740655089}
1673_50_diagonal: {'diagonal': 6.25648385235179}
1673_51_lad: {'p': 6.1805579687939645, 'm': 28.36681851295957, 'd': 20.25461301692092}
1673_51_diagonal: {'diagonal': 18.510473954149465}
1673_52_lad: {'p': 6.4007493998070935, 'm': 26.76460207416297, 'd': 28.60270306912375}
1673_52_diagonal: {'diagonal': 7.4919921253193245}
1673_53_lad: {'p': 5.0836095831917945, 'm': 19.020660608245343, 'd': 20.51277957821793}
1673_53_diagonal: {'diagonal': 5.302614000963734}
1673_54_lad: {'p': 5.66024517407252, 'm': 37.66185288010646, 'd': 42.0364822360472}
1673_54_diagonal: {'diagonal': 10.21426444156106}
1673_55_lad: {'p': 6.205239905706339, 'm': 17.779161639356957, 'd': 71.78481139485761}
1673_55_diagonal: {'diagonal': 13.92491424255795}
1673_56_lad: {'p': 5.512715340442975, 'm': 10.795860166625049, 'd': 80.48975550308512}
1673_56_diagonal: {'diagonal': 15.023675888547972}
1673_57_lad: {'p': 4.087798176937197, 'm': 19.5984965912676, 'd': 35.847043244609836}
1673_57_diagonal: {'diagonal': 7.945453828873217}
1673_58_lad: {'p': 4.908527558985865, 'm': 20.352165780376286, 'd': 19.505888955789963}
1673_58_diagonal: {'diagonal': 26.397225594285246}
1691:
1691_29_lad: {'p': 5.807910898601287, 'm': 29.700245939562198, 'd': 74.93995193300755}
1691_30_lad: {'p': 11.475129533678817, 'm': 9.902320719644464, 'd': 21.086841009291412}
1691_31_lad: {'p': 10.235380783667193, 'm': 11.82256942535378, 'd': 5.267922565414517}
1691_32_lad: {'p': 7.6983893549184, 'm': 15.448011729412968, 'd': 11.18674600141929}
1691_33_lad: {'p': 9.481503272780767, 'm': 9.955200410464904, 'd': 14.236777139361545}
1691_34_lad: {'p': 3.414075450846432, 'm': 22.229704870486877}
1691_35_lad: {'m': 6.192906764832085, 'd': 18.49133212424232}
1691_36_lad: {'p': 3.1259467112994876, 'm': 7.251852377908707, 'd': 8.096093953654272}
1691_37_lad: {'p': 8.576674582889654, 'm': 4.754146353881561, 'd': 20.946942264540702}
1691_38_lad: {'p': 7.624885573245955, 'm': 31.599027915937494, 'd': 8.087766096553139}
1738:
1738_38_lad: {'p': 100.0, 'd': 30.72985701421872}
1738_38_diagonal: {'diagonal': 100.0}
1738_38_lcx2: {'p': 100.0, 'd': 63.909723492624025}
1738_38_lcx1: {'lcx1': 79.39698240015976}
1738_39_lad: {'p': 17.232074875215197, 'm': 88.46535342246887, 'd': 19.53981183304927}
1738_39_diagonal: {'diagonal': 100.0}
1738_39_lcx2: {'p': 100.0, 'd': 100.0}
1738_39_lcx1: {'lcx1': 69.02013775268094}
1738_40_lad: {'p': 64.05359450780652, 'm': 19.600391960039175, 'd': 20.44613066408356}
1738_40_diagonal: {'diagonal': 100.0}
1738_40_lcx2: {'p': 100.0, 'd': 77.17815591113076}
1738_40_lcx1: {'lcx1': 79.70890090163627}
1738_41_lad: {'p': 17.55424657018667, 'm': 30.17977072529543, 'd': 36.58055640476895}
1738_41_diagonal: {'diagonal': 100.0}
1738_41_lcx2: {'p': 100.0, 'd': 100.0}
1738_41_lcx1: {'lcx1': 45.30800853529674}
1738_42_lad: {'p': 76.39782297233577, 'm': 17.926235258968994, 'd': 10.328031947024119}
1738_42_diagonal: {'diagonal': 100.0}
1738_42_lcx2: {'p': 100.0, 'd': 80.00229908528466}
1738_42_lcx1: {'lcx1': 76.46378503707784}
1738_43_lad: {'p': 53.31549416795334, 'd': 24.99998996405922}
1738_43_diagonal: {'diagonal': 100.0}
1738_43_lcx2: {'p': 85.43186702759542, 'd': 96.63090383625534}
1738_43_lcx1: {'lcx1': 54.9918026713099}
1738_44_lad: {'p': 30.115036064360822, 'm': 63.11212245852113, 'd': 5.994001979878938}
1738_44_diagonal: {'diagonal': 100.0}
1738_44_lcx2: {'p': 100.0, 'd': 30.436553599250814}
1738_44_lcx1: {'lcx1': 100.0}
1738_45_lad: {'p': 23.008883413801673, 'd': 4.271167071403815}
1738_45_diagonal: {'diagonal': 100.0}
1738_45_lcx2: {'p': 100.0, 'd': 80.77310492465918}
1738_45_lcx1: {'lcx1': 100.0}
1738_46_lad: {'p': 96.1804450579799, 'm': 62.84153936734596}
1738_46_diagonal: {'diagonal': 99.57824259232437}
1738_46_lcx2: {'p': 100.0, 'd': 100.0}
1738_46_lcx1: {'lcx1': 57.374201028227944}
1738_47_lad: {'p': 99.59627500233445, 'm': 7.889862412100602, 'd': 21.33490653915404}
1738_47_diagonal: {'diagonal': 100.0}
1738_47_lcx2: {'p': 23.944034878179533, 'd': 86.20627957626009}
1738_47_lcx1: {'lcx1': 51.16072883892491}
1738_48_lad: {'p': 82.92530229913353, 'm': 9.91035928229872, 'd': 5.376125553956057}
1738_48_diagonal: {'diagonal': 100.0}
1738_48_lcx2: {'p': 67.93928166925926, 'd': 85.25121454578658}
1738_48_lcx1: {'lcx1': 83.718551972006}
1738_49_lad: {'p': 92.22811899619148, 'm': 100.0, 'd': 6.1709336400141535}
1738_49_diagonal: {'diagonal': 100.0}
1738_49_lcx2: {'p': 97.45572179118673, 'd': 84.515925232509}
1738_49_lcx1: {'lcx1': 100.0}
1738_50_lad: {'p': 91.33484833514503, 'm': 100.0, 'd': 6.86254111957717}
1738_50_diagonal: {'diagonal': 100.0}
1738_50_lcx2: {'p': 87.71819635612495, 'd': 56.72382986445522}
1738_50_lcx1: {'lcx1': 94.54855936117765}
1738_51_lad: {'p': 76.2615760598475, 'm': 100.0, 'd': 10.151763575126605}
1738_51_diagonal: {'diagonal': 100.0}
1738_51_lcx2: {'p': 100.0, 'd': 90.46494600616728}
1738_51_lcx1: {'lcx1': 100.0}
1738_52_lad: {'p': 31.546592409161743, 'm': 100.0, 'd': 7.905038782500251}
1738_52_diagonal: {'diagonal': 100.0}
1738_52_lcx2: {'p': 89.75297981791317, 'd': 84.78288603225252}
1738_52_lcx1: {'lcx1': 100.0}
1778:
1778_037_lad: {'p': 6.805100382804053, 'm': 100.0, 'd': 9.05547897595389}
1778_037_diagonal: {'diagonal': 31.708425809557017}
1778_038_lad: {'p': 12.588820464234974, 'm': 82.66288579106107, 'd': 15.08351884092346}
1778_038_diagonal: {'diagonal': 85.66402320149281}
1778_039_lad: {'p': 8.807335632023117, 'm': 100.0, 'd': 17.531205306341114}
1778_039_diagonal: {'diagonal': 97.78903383290964}
1778_040_lad: {'p': 9.057211007481369, 'm': 100.0, 'd': 9.409414267399441}
1778_040_diagonal: {'diagonal': 87.53066182214995}
1778_041_lad: {'p': 8.768183973298239, 'm': 100.0, 'd': 8.392099802412433}
1778_041_diagonal: {'diagonal': 88.15292934575803}
1778_042_lad: {'p': 9.733921882419116, 'm': 100.0, 'd': 18.339281200597913}
1778_042_diagonal: {'diagonal': 100.0}
1778_043_lad: {'p': 10.654008253191838, 'm': 100.0, 'd': 15.981188702001914}
1778_043_diagonal: {'diagonal': 93.62481299106057}
1778_044_lad: {'p': 14.351383368577153, 'm': 100.0, 'd': 25.01573261238712}
1778_044_diagonal: {'diagonal': 88.4983569862288}
1778_045_lad: {'p': 10.421898314954293, 'm': 100.0, 'd': 16.132552832622515}
1778_046_lad: {'p': 8.03363126523935, 'm': 100.0, 'd': 15.058039154280777}
1778_046_diagonal: {'diagonal': 82.95232264850105}
1778_047_lad: {'p': 23.084365137873995, 'm': 100.0, 'd': 13.747358267561715}
1778_047_diagonal: {'diagonal': 64.44670692678336}
1778_048_lad: {'p': 15.406515511099883, 'm': 100.0, 'd': 15.133342336506539}
1778_048_diagonal: {'diagonal': 20.563206032667004}
1778_049_lad: {'p': 84.63752538812966, 'm': 100.0, 'd': 48.0784380161787}
1778_049_diagonal: {'diagonal': 58.80297319764034}
1778_050_lad: {'p': 26.038126009693052, 'm': 100.0, 'd': 45.34887010115074}
1778_050_diagonal: {'diagonal': 89.30281175443142}
1778_051_lad: {'p': 21.75577218537368, 'm': 91.14101687596737, 'd': 16.56254840208584}
1778_051_diagonal: {'diagonal': 81.31563066258771}
"""
|
24,838 | be5cb85be762b9c749702892c3c35a787ed49163 | import logging
from pymongo import DESCENDING
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def obtain_time_duration(collection, new_document):
    """Compute the time spent between the two most recent events of a product.

    Looks up the two latest events (by ``eventTime``) for the EPC carried by
    the newly inserted document and returns the span between the previous
    event and the new one.

    :param collection: MongoDB collection to query
    :type collection: pymongo.collection.Collection
    :param new_document: newly inserted document detected by change streams
    :type new_document: dict
    :returns: dict with previous/present bizLocation ids and ISO-8601
        ``from_time``/``to_time`` timestamps, or ``None`` when no previous
        event exists
    :rtype: dict or None
    """
    # Fetch the two most recent documents for this EPC, newest first.  The
    # first result is the document just inserted; the second (if present) is
    # the previous event we measure the duration against.
    cursor = collection.find(
        {'epcList.epc': new_document['epcList'][0]['epc']}
    ).limit(2).sort([("eventTime", DESCENDING)])
    # NOTE: pymongo's find() always returns a cursor (never None), so the
    # emptiness check has to happen on the materialised list.
    prev_doc_list = list(cursor)
    if len(prev_doc_list) < 2:
        # Fix: the original only handled len == 1; an empty result fell
        # through to the else branch and raised IndexError on [1].
        logger.info('Only Single entry exists for Product.. It implies it is the a new product with no previous events.')
        return None
    previous = prev_doc_list[1]
    logger.debug('Previous BizLocation of Product: {}, Present BizLocation of Product: {}'.format(
        previous['bizLocation']['id'], new_document['bizLocation']['id']))
    logger.debug('Time Duration: From {} to {}'.format(previous['eventTime'], new_document['eventTime']))
    # Dictionary shape consumed by the sensor DB.
    return {
        'bizLocation': {
            'prev': previous['bizLocation']['id'],
            'present': new_document['bizLocation']['id']
        },
        'from_time': previous['eventTime'].isoformat(timespec='milliseconds') + 'Z',
        'to_time': new_document['eventTime'].isoformat(timespec='milliseconds') + 'Z'
    }
|
# FizzBuzz variant: every number 0-99 is printed, followed by "Fizz" when it
# is divisible by 3 and/or "Buzz" when divisible by 5 (both for multiples
# of 15, and both for 0).
def is_fizz(x):
    """Return True when x is a multiple of 3."""
    return x % 3 == 0

def is_buzz(x):
    """Return True when x is a multiple of 5."""
    return x % 5 == 0

for n in range(100):
    tokens = [str(n)]
    if is_fizz(n):
        tokens.append("Fizz")
    if is_buzz(n):
        tokens.append("Buzz")
    # Each token is followed by a space in the original output, then newline.
    print(" ".join(tokens), end=" \n")
|
#jyothi
# Reads three integers q, b, c from one stdin line (Python 2: raw_input)
# and prints YES when q is the special value 224 or divisible by b + c.
# NOTE(review): b + c == 0 would raise ZeroDivisionError -- presumably the
# judge's input never produces that; confirm against the problem statement.
q,b,c=map(int,raw_input().split())
if q==224:
    print("YES")
elif q%(b+c)==0:
    print("YES")
else:
    print("NO")
|
24,841 | d2d674817ea94bdd73b8f3b2587f22659d35c81d | #!/usr/bin/python
import copy
def compute(f):
    """Index the 4-digit values of the sequence f(1), f(2), ...

    Returns a dict mapping the leading two digits (as a string) of every
    4-digit value to the list of trailing two-digit suffixes, in order of
    increasing n.  Stops once f(n) reaches 10000.
    """
    prefix_map = {}
    index = 1
    value = f(index)
    while value < 10000:
        if 999 < value:
            digits = str(value)
            head, tail = digits[:2], digits[2:]
            prefix_map.setdefault(head, []).append(tail)
        index += 1
        value = f(index)
    return prefix_map
def dfs(polygonals, keys, chain=None, source=None, depth=None):
    """Depth-first search for cyclic chains of 4-digit polygonal numbers.

    Each chain link is a (prefix, suffix) pair of two-digit strings; a link's
    suffix must equal the next link's prefix, every polygonal family is used
    exactly once, and the final suffix must wrap around to the first prefix.
    Returns the list of completed chains as lists of 4-digit ints.
    (Python 2 code: relies on dict.iteritems.)
    """
    result = []
    # Root call: seed the recursion with every edge of the first family.
    if chain == None or source == None or depth == None:
        depth = len(polygonals) - 1
        # source[i] becomes None once family i has been consumed.
        source = [None] + polygonals[1:]
        for k,values in polygonals[0].iteritems():
            for v in values:
                result = result + dfs(polygonals, keys, [(k,v)], source, depth)
        return result
    last = chain[-1][1]  # suffix the next link's prefix must match
    if depth == 0:
        # All families used: accept only cyclic chains.
        if chain[0][0] == last:
            return [[int(s[0] + s[1]) for s in chain]]
        return []
    if not last in keys:
        return []
    # Extend with every still-unused family containing a number whose
    # leading digits equal `last`.
    for p in (i for i in keys[last] if source[i] != None):
        newsource = copy.copy(source)
        newsource[p] = None
        for v in polygonals[p][last]:
            newchain = copy.copy(chain)
            newchain.append((last,v))
            result = result + dfs(polygonals, keys, newchain, newsource, depth - 1)
    return result
# Project Euler 61: the six polygonal-number formulas (triangle..octagonal).
polygonals = [
    (lambda n: n*(n+1)/2),
    (lambda n: n*n),
    (lambda n: n*(3*n-1)/2),
    (lambda n: n*(2*n-1)),
    (lambda n: n*(5*n-3)/2),
    (lambda n: n*(3*n-2)),
]
# Replace each formula with its prefix -> suffix map over 4-digit values.
polygonals = [compute(f) for f in polygonals]
# keys[prefix] = set of family indices containing a number with that prefix.
keys = {}
for i,p in enumerate(polygonals):
    for k in p.keys():
        if not k in keys:
            keys[k] = set()
        keys[k].add(i)
# Python 2 print statement: the sum of each cyclic chain found.
print [sum(l) for l in dfs(polygonals, keys)]
|
# Translate a list of 0/1 flags into the strings "woko"/"wiki".
theBools = [0,1,0,0,1,1,1,0,0,1,0,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,1]
#Your code go here:
def wuku(x):
    """Return "wiki" for 1, "woko" for 0, and any other value unchanged."""
    if x == 0:
        return "woko"
    if x == 1:
        return "wiki"
    return x

newthebools = [wuku(flag) for flag in theBools]
print(newthebools)
24,843 | 5a9de1173e2314faf134d66c29f2dfb2e4de44ed | from django.conf.urls import url
from django.urls import path
from rest_framework import routers
from .views import TaskView, SessionView
# DRF router generating the RESTful routes for tasks and sessions.
router = routers.DefaultRouter(trailing_slash=False)
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9 and removed
# in 3.11 -- confirm the pinned djangorestframework version before upgrading.
router.register(r'tasks', TaskView, base_name='tasks')
router.register(r'sessions', SessionView, base_name='sessions')
urlpatterns = router.urls
24,844 | 6687f7d974330d2c7fc72388253aedd0bc83b7fa | from datetime import datetime
import sqlite3
from dateutil.relativedelta import relativedelta
import json
import os
from pprint import pprint
import json_downloader
import json_parser
import update_db
# Default GET parameters for the weather-archive endpoint; bday/fday, amonth,
# ayear and id are overwritten per request in get_data_to_fill().
get_params = {
    "id": "26898",
    "bday": "1",
    "fday": "10",
    "amonth": "10",
    "ayear": "2018",
    "bot": "2"
}
# Local SQLite database holding one table per station/city.
db_path = './db2020.sqlite'
# Gap-filling window: from the start of recording up to today (midnight).
start_date = datetime(year=2011, month=1, day=1)
end_date = datetime.now()
end_date = datetime(year=end_date.year, month=end_date.month, day=end_date.day)
# Mapping of city name -> remote station id.
city_ids_path = "./city_ids.json"
# Staging directory for the intermediate per-station JSON files.
update_data_dir = "./update_data/"
def main():
    """Back-fill every station table whose row count falls short of the
    number of days between start_date and end_date."""
    connection = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
    cursor = connection.cursor()
    expected_days = (end_date - start_date).days
    print(expected_days)
    # Enumerate all station tables from sqlite's schema catalog.
    schema_rows = cursor.execute("""
    SELECT name
    FROM sqlite_master
    WHERE type='table';
    """).fetchall()
    table_names = [row[0] for row in schema_rows]
    stale_tables = select_st_to_upd(cursor, expected_days, table_names)
    pprint(stale_tables)
    print(len(stale_tables))
    for table in stale_tables:
        fill_gaps(cursor, table, connection)
    connection.commit()
    connection.close()
def fill_gaps(cursor, st, con):
    """Insert downloaded data for every date missing from table `st`.

    Scans [start_date, end_date) one day at a time, collects the dates that
    have no row, then downloads/parses each missing day and commits after
    every insert so progress survives interruptions.
    """
    step = relativedelta(days=1)
    missing = []
    day = start_date
    while day != end_date:
        # Table names come from sqlite_master, so interpolating them here is
        # not exposed to untrusted input.
        found = cursor.execute(f"""
        SELECT * FROM {st}
        WHERE dt=(?)
        """, (str(day),)).fetchall()
        if not found:
            missing.append(day)
        day += step
    print(st, len(missing))
    for gap_day in missing:
        st_data, upd_file_path = get_data_to_fill(gap_day, st)
        # The staged JSON file is a throwaway artifact; remove it once parsed.
        if os.path.isfile(upd_file_path):
            os.remove(upd_file_path)
        else:
            print(f"Error: {upd_file_path} file not found")
        update_db.insert_update(st_data, st, cursor)
        con.commit()
def get_data_to_fill(gap_day, st_name):
    """Download one day's data for station `st_name` and stage it as JSON.

    Builds the request parameters for `gap_day`, fetches and parses the
    remote table, writes the intermediate JSON file the parser expects, and
    returns the parsed station data together with that file's path.

    :param gap_day: datetime of the missing day
    :param st_name: station/table name (key into city_ids.json)
    :returns: tuple (st_data, path) -- parsed rows and the staged file path
    """
    # Fix: the original leaked the file handle via json.load(open(...)).
    with open(city_ids_path, "r") as ids_file:
        city_ids = json.load(ids_file)
    data_list = [{'city': st_name, 'city_id': city_ids[st_name]}]
    # Point the module-level GET params at the single missing day.
    get_params['bday'] = gap_day.day
    get_params['fday'] = gap_day.day
    get_params['amonth'] = gap_day.month
    get_params['ayear'] = gap_day.year
    get_params['id'] = city_ids[st_name]
    dates_table, data_table = json_downloader.get_table(get_params)
    data = json_downloader.parse_table(dates_table, data_table)
    data['date'].append({'year': get_params["ayear"]})
    data['year'] = str(gap_day.year)
    data_list.append(data)
    path = update_data_dir + st_name + "_upd_data.json"
    with open(path, 'w+') as fout:
        json.dump(data_list, fout)
    st_data, st_name_from_json = json_parser.get_city_data(get_params, path=path)
    return st_data, path
def select_st_to_upd(cursor, days_num, table_names):
    """Return the table names whose row count differs from days_num.

    :param cursor: db cursor for executing requests
    :param days_num: num of days from starts of recording
    :param table_names: list of table names
    :return: list of table names which need to be updated
    """
    stale = []
    for table in table_names:
        rows = cursor.execute("""
        SELECT count(*) FROM {};
        """.format(table)).fetchall()
        if rows[0][0] != days_num:
            stale.append(table)
    return stale
if __name__ == '__main__':
main() |
class Solution:
    # @param root, a tree node
    # @param sum, an integer
    # @return a list of lists of integers
    def pathSum(self, root, sum):
        """Return every root-to-leaf path whose node values add up to `sum`,
        in left-to-right DFS order."""
        self.result = []
        if not root:
            return self.result
        # Fix: the original redundantly re-initialised self.result here.
        self.path = []  # current root-to-node path, shared by the recursion
        self.function(root, sum)
        return self.result

    def function(self, node, num):
        """DFS helper: walk `node`'s subtree, recording full paths whose
        remaining target `num` is consumed exactly at a leaf."""
        if not node.left and not node.right:
            # Leaf: keep a snapshot of the path only when the remaining
            # target equals this leaf's value.
            if node.val == num:
                self.path.append(node.val)
                self.result.append(self.path[:])
                self.path.pop()
        else:
            self.path.append(node.val)
            if node.left:
                self.function(node.left, num - node.val)
            if node.right:
                self.function(node.right, num - node.val)
            self.path.pop()
24,846 | 2169504b57020cba549b0982aff52ff6532b1e02 | # -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals,
)
from django_mysql.models.fields.bit import ( # noqa
Bit1BooleanField, NullBit1BooleanField,
)
from django_mysql.models.fields.dynamic import DynamicField # noqa
from django_mysql.models.fields.enum import EnumField # noqa
from django_mysql.models.fields.json import JSONField # noqa
from django_mysql.models.fields.lists import ( # noqa
ListCharField, ListTextField,
)
from django_mysql.models.fields.sets import SetCharField, SetTextField # noqa
from django_mysql.models.fields.sizes import ( # noqa
SizedBinaryField, SizedTextField,
)
|
24,847 | 79f6a2e4aae092a72ea194882ba6202e6536a231 | #!/usr/bin/python
import json
import sys
import math
import numpy as np
from datetime import datetime
import csv
import os.path
#USED FOR TESTING. Read data from file given as argument
input_file = sys.argv[1]
f = open(input_file, encoding="utf8")
#save_path = "C:/DFoundry/Df Laser/test_files/"
# Directory where generated cut lists are written as timestamped CSV files.
save_path = "C:/Users/achen/Documents/DiamondFoundry/tool-pathing/test_data/"
# Refractive-index correction factor applied to cut depths (1 = none).
refraction = 1
def offset(x1,y1,x2,y2,magnitude):
    """
    Return the (dx, dy) offset of length `magnitude` perpendicular to the
    segment (x1,y1)->(x2,y2).  Adding it to both endpoints yields a parallel
    line shifted sideways by `magnitude`.
    """
    # (y2-y1, x1-x2) is orthogonal to the segment; rescale it to `magnitude`.
    length = math.hypot(y2 - y1, x1 - x2)
    scale = magnitude / length
    return (y2 - y1) * scale, (x1 - x2) * scale
# def line(x1,y1,x2,y2,z_thickness,laser):
# """
# This algorithm creates a cut list for a cut of depth z_thickness
# between (x1,y1)->(x2,y2).
# """
# #Global variables that are used by all algorithms
# layers = int(z_thickness/laser["z_spacing"])
# #Works out offset when beginning on a new layer
# taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"]
# taper_x,taper_y = offset(x1,y1,x2,y2,taper)
# #Works out offset between each parallel scan on the same layer
# delta_x,delta_y = offset(x1,y1,x2,y2,laser["xy_spacing"])
# #Works out maximum offset from starting line, we don't want to exceed this at any point.
# max_taper = math.tan(math.radians(laser["kerf_angle"]/2)) * (z_thickness) * 2
# max_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)
# #max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y
# #Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows
# cutlist = []
# for a in range(layers):
# new_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y
# i = 0
# while abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):
# #This use of i is to reduce the jump distance between individual scans
# if i % 2 == 0:
# cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"])
# cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y2:.6f}"])
# else:
# cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y2:.6f}"])
# cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"])
# new_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y
# i = i + 1
# #Having completed one layer, the laser moves down to begin the next layer
# cutlist.append(["z_rel", str(-laser["z_spacing"])])
# max_delta_x = max_delta_x - taper_x
# return json.dumps(cutlist)
def line(x1,y1,x2,y2,z_thickness,laser):
    """
    This algorithm creates a cut list for a cut of depth z_thickness
    between (x1,y1)->(x2,y2).

    Each layer is a series of raster scans parallel to the requested line,
    shifted sideways by the kerf taper from layer to layer; the result is
    returned as a JSON-encoded list of machine commands.
    """
    #Global variables that are used by all algorithms
    layers = int(z_thickness/laser["z_spacing"])
    #Works out offset when beginning on a new layer
    taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"]
    taper_x,taper_y = offset(x1,y1,x2,y2,taper)
    #Works out offset between each parallel scan on the same layer
    delta_x,delta_y = offset(x1,y1,x2,y2,laser["xy_spacing"])
    #Works out maximum offset from starting line, we don't want to exceed this at any point.
    max_taper = math.tan(math.radians(laser["kerf_angle"]/2)) * (z_thickness) * 2
    max_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)
    #max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y
    #Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows
    cutlist = []
    for a in range(layers):
        new_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y
        i = 0
        cutlist.append(["z_step", str(-laser["z_spacing"])])
        while abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):
            #This use of i is to reduce the jump distance between individual scans
            if i % 2 == 0:
                cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y2:.6f}"])
            else:
                cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y2:.6f}"])
                cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"])
            new_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y
            i = i + 1
        #Having completed one layer, the laser moves down to begin the next layer
        max_delta_x = max_delta_x - taper_x
    # Bracket the whole program with the trigger commands.
    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
def z_focus(block,cut,laser):
    """
    Build a z-calibration cut list: a series of parallel vertical lines,
    each marked after stepping the z axis down one spacing, so the sharpest
    line identifies the correct focal height.  Returns a JSON string.
    """
    line_count = int(cut["final_dimension_z"] / laser["z_spacing"])
    # Span a good portion of the block rather than using fixed sizes.
    half_span = cut["final_dimension_y"] / 2
    step = laser["xy_spacing"]
    drop = str(-laser["z_spacing"])
    commands = [["set_trigger4", "1", "0", "7", "8", "45"], ["z_abs", "0"]]
    x_pos = 0
    for _ in range(line_count):
        commands.append(["jump", f"{x_pos:.6f}", f"{half_span:.6f}"])
        commands.append(["mark", f"{x_pos:.6f}", f"{-half_span:.6f}"])
        commands.append(["z_rel", drop])
        x_pos = x_pos + step
    commands.append(["stop_trigger"])
    return json.dumps(commands)
def simple_core(block,cut,laser):
"""
This algorithm returns a cutlist which performs a simple core operation.
The laser runs race track style around the specified core, going around
all 4 sides before the laser moves down to the next layer. The poly is
expected to fall off the core at the end of the entire cutting operation.
"""
layers = int(block["thickness"]/laser["z_spacing"])
#Since all cuts are square, the offsets are more obvious than in the general linear case.
taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"]
max_delta = math.tan(math.radians(laser["kerf_angle"]/2)) * (block["thickness"] + laser["z_final_overshoot"]) * 2
cutlist = []
cutlist.append(["a_abs", "0"])
cutlist.append(["c_abs", str(block["physical_rotation"])])
cutlist.append(["z_abs", str(block["thickness"])])
for a in range(layers):
x1, y1 = cut["final_dimension_x"]/2 + a*taper, cut["final_dimension_y"]/2 + a*taper
while abs(x1-cut["final_dimension_x"]/2) < abs(max_delta):
cutlist.append(["jump", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
cutlist.append(["mark", str(x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
cutlist.append(["mark", str(-x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
cutlist.append(["mark", str(-x1 + block["origin_x"]), str(y1 + block["origin_y"])])
cutlist.append(["mark", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
x1, y1 = x1 + laser["xy_spacing"], y1 + laser["xy_spacing"]
cutlist.append(["z_step", str(-laser["z_spacing"])])
max_delta = max_delta - taper
return json.dumps(cutlist)
def vertical_core(block,cut,laser):
    """
    This algorithm returns a cutlist which performs a vertical core operation.
    The laser cuts off one side of poly at a time, rotating the block such that
    the edge of the laser "cone" is parallel to the SCD core. After one side of
    the block has been removed, the block is rotated 90 degrees and the algorithm
    repeats until all 4 sides have been removed.
    """
    # NOTE(review): `layers` and `taper` are computed but never used below.
    layers = int(block["thickness"]/laser["z_spacing"])
    angle = math.radians(laser["kerf_angle"]/2)
    taper = math.tan(angle) * laser["z_spacing"]
    # Horizontal reach of the kerf cone over the full (overshot) depth.
    u = math.tan(2 * angle) * (block["thickness"] + laser["z_final_overshoot"])
    # Starting z height for each of the four sides (0/90/180/270 degrees),
    # accounting for the tilted block and the block-origin offset.
    z_0 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 - block["origin_y"] + u)
    z_1 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 + block["origin_x"] + u)
    z_2 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 + block["origin_y"] + u)
    z_3 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 - block["origin_x"] + u)
    cutlist = []
    # Tilt the a-axis so the cone edge runs vertical, face the first side.
    cutlist.append(["a_abs", f"{math.degrees(angle):.6f}"])
    cutlist.append(["c_abs", str(block["physical_rotation"])])
    cutlist.append(["z_abs", f"{z_0:.6f}"])
    # In-plane start coordinate of the cut line for the wide/long sides.
    y_start_wide = ((u + cut["final_dimension_x"]/2)* math.cos(angle)
        - block["thickness"]*math.sin(angle)
        - u/math.cos(angle))
    y_start_length = ((u + cut["final_dimension_y"]/2)* math.cos(angle)
        - block["thickness"]*math.sin(angle)
        - u/math.cos(angle))
    # Cut depth measured along the tilted beam direction.
    depth_cut = (block["thickness"] + laser["z_final_overshoot"]) * math.cos(angle)/math.cos(2*angle)
    # One straight raster cut per side, offset by the block origin.
    cut1 = json.loads(line(block["width"]/2 - block["origin_x"],y_start_length - block["origin_y"],-block["width"]/2 - block["origin_x"],y_start_length - block["origin_y"],depth_cut,laser))
    cut2 = json.loads(line(block["length"]/2 + block["origin_y"],y_start_wide - block["origin_x"],-block["length"]/2 + block["origin_y"],y_start_wide - block["origin_x"],depth_cut,laser))
    cut3 = json.loads(line(block["width"]/2 + block["origin_x"],y_start_length + block["origin_y"],-block["width"]/2 + block["origin_x"],y_start_length + block["origin_y"],depth_cut,laser))
    cut4 = json.loads(line(block["length"]/2 - block["origin_y"],y_start_wide + block["origin_x"],-block["length"]/2 - block["origin_y"],y_start_wide + block["origin_x"],depth_cut,laser))
    # Stitch the four sides together, rotating 90 degrees and resetting z
    # between them.
    cutlist = (cutlist + cut1
        + [["c_rel", "90"],["z_abs", f"{z_1:.6f}"],]
        + cut2
        + [["c_rel", "90"],["z_abs", f"{z_2:.6f}"]]
        + cut3
        + [["z_abs", f"{z_3:.6f}"],["c_rel", "90"]]
        + cut4)
    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):
    """
    This algorithm returns a cutlist which performs a cut which is a quarter
    of the total slicing required to create a pyramid top, while ensuring a flat
    bottom above it, both of which is required for an OG seed.

    Returns a plain list (not JSON) so callers can stitch several quarters
    together.  NOTE(review): parameters `z` and the computed `new_y2` are
    unused here -- confirm whether they are vestigial.
    """
    cutlist = []
    y_max = abs(y1-y2)  # raster span for the current layer
    for a in range(layers):
        i = 0
        # Shift the scan window by the per-layer tapers.
        new_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y
        # x1 > 0 guards against degenerate (zero-width) slices.
        while abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:
            # Alternate scan direction to shorten jumps between scans.
            if i % 2 == 0:
                cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y1:.6f}"])
            else:
                cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"])
            new_y1 = new_y1-delta
            i = i + 1
        if a < layers - 1:
            cutlist.append(["z_step", str(-deltaz)])
        # The raster span narrows as the pyramid face closes in.
        y_max = y_max - taper_straight - taper_y
    return cutlist
# def oss_stacked(block, cut, laser):
# """
# This algorithm returns a cutlist which performs OG slicing. It begins
# with an optional core, then cuts out slices until as many OG seeds as
# specified are removed from the block.
# """
# pyramid_angle_1 = math.atan(cut["pyramid_height"]/(cut["final_dimension_x"]/2))
# pyramid_angle_2 = math.atan(cut["pyramid_height"]/(cut["final_dimension_y"]/2))
# angle = math.radians(laser["kerf_angle"]/2)
# gap = math.tan(pyramid_angle_1) * (cut["final_dimension_x"]/2) + cut["gap_size"]
# unit_length = gap + cut["base_height"]
# max_slices = math.floor(block["thickness"]/unit_length)
# if cut["core"] == "yes":
# cutlist = json.loads(vertical_core(block,cut,laser))
# return json.dumps(cutlist)
# else:
# cutlist = []
# a0 = -(90 + math.degrees(angle))
# if cut["excess"] == "top":
# #Cut out of bottom_up
# side_x = unit_length * max_slices-cut["pyramid_height"]
# elif cut["excess"] == "bottom":
# #Cut out of top
# side_x = block["thickness"]-cut["pyramid_height"]
# diagonal_1 = math.sqrt(side_x**2 + (cut["final_dimension_x"]/2)**2)
# theta_1 = math.atan(side_x*2/cut["final_dimension_x"])
# z0_1 = math.cos(theta_1 + angle) * diagonal_1
# diagonal_2 = math.sqrt(side_x**2 + (cut["final_dimension_y"]/2)**2)
# theta_2 = math.atan(side_x*2/cut["final_dimension_y"])
# z0_2 = math.cos(theta_2 + angle) * diagonal_2
# x1_1 = math.sin(theta_1 + angle) * diagonal_1
# x1_2 = math.sin(theta_2 + angle) * diagonal_2
# x_offset = gap/math.cos(angle)
# x0_1 = x1_1 + x_offset
# x0_2 = x1_2 + x_offset
# z_shift = (cut["base_height"] + gap) * math.sin(angle)
# x_shift = (cut["base_height"] + gap) * math.cos(angle)
# cutlist.append(["a_abs",f"{a0:.6f}"])
# cutlist.append(["c_abs",str(block["physical_rotation"])])
# cutlist.append(["z_abs",f"{z0_1:.6f}"])
# if pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:
# max_depth_1 = ((cut["pyramid_height"]/math.sin(pyramid_angle_1))*math.cos(angle))*refraction
# max_layers_1 = math.ceil(max_depth_1/laser["z_spacing"])
# max_depth_2 = ((cut["pyramid_height"]/math.sin(pyramid_angle_2))*math.cos(angle))*refraction
# max_layers_2 = math.ceil(max_depth_2/laser["z_spacing"])
# if cut["layers"] == "max":
# layers_1 = max_layers_1 + 1
# layers_2 = max_layers_2 + 1
# else:
# layers_1 = cut["layers"]
# layers_2 = cut["layers"]
# new_angle_1 = math.atan((math.tan(pyramid_angle_1))/refraction)
# taper_y_1 = math.tan(new_angle_1 - angle)*(laser["z_spacing"])
# taper_x_1 = cut["final_dimension_x"]/(2*max_layers_1)
# new_angle_2 = math.atan((math.tan(pyramid_angle_2))/refraction)
# taper_y_2 = math.tan(new_angle_2 - angle)*(laser["z_spacing"])
# taper_x_2 = cut["final_dimension_x"]/(2*max_layers_2)
# taper_straight = math.tan(angle)*(laser["z_spacing"])
# if cut["num_of_seeds"] == "max":
# num_slices = max_slices
# else:
# num_slices = cut["num_of_seeds"] + 1
# for i in range(num_slices):
# cutlist = (cutlist
# + pyramid_slice(cut["final_dimension_y"]/2,x0_1,-cut["final_dimension_y"]/2,x1_1,z0_1,laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1)
# + [["z_abs",f"{z0_2:.6f}"]] + [["c_abs","90"]]
# + pyramid_slice(cut["final_dimension_x"]/2,x0_2,-cut["final_dimension_x"]/2,x1_2,z0_2,laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2)
# + [["z_abs",f"{z0_1:.6f}"]] + [["c_abs","180"]]
# + pyramid_slice(cut["final_dimension_y"]/2,x0_1,-cut["final_dimension_y"]/2,x1_1,z0_1,laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1)
# + [["z_abs",f"{z0_2:.6f}"]] + [["c_abs","270"]]
# + pyramid_slice(cut["final_dimension_x"]/2,x0_2,-cut["final_dimension_x"]/2,x1_2,z0_2,laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2)
# )
# z0_1 = z0_1 + z_shift
# z0_2 = z0_2 + z_shift
# x0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift
# cutlist.append(["c_abs",str(block["physical_rotation"])])
# cutlist.append(["z_abs",f"{z0_1:.6f}"])
# else:
# raise Exception("Pyramid angle too small")
# return json.dumps(cutlist)
def oss_helper(block, cut, laser, x):
    """Compute slice geometry for one half-dimension `x` of an OG seed.

    Returns (x0, x1, z0, taper_x, taper_y, layers, pyramid_angle): the
    start/end x of the quarter-slice raster, the starting z height, the
    per-layer tapers, the layer count, and the pyramid face angle.
    """
    pyramid_angle_1 = math.atan(cut["pyramid_height"]/x)
    angle = math.radians(laser["kerf_angle"]/2)
    # Spacing consumed by one pyramid plus the requested clearance.
    gap = math.tan(pyramid_angle_1) * (x) + cut["gap_size"]
    unit_length = gap + cut["base_height"]
    max_slices = math.floor(block["thickness"]/unit_length)
    # NOTE(review): cut["excess"] must be "top" or "bottom"; any other value
    # leaves side_x undefined and raises NameError below.
    if cut["excess"] == "top":
        #Cut out of bottom_up
        side_x = unit_length * max_slices-cut["pyramid_height"]
    elif cut["excess"] == "bottom":
        #Cut out of top
        side_x = block["thickness"]-cut["pyramid_height"]
    diagonal_1 = math.sqrt(side_x**2 + x**2)
    theta_1 = math.atan(side_x/x)
    z0_1 = math.cos(theta_1 + angle) * diagonal_1
    x1_1 = math.sin(theta_1 + angle) * diagonal_1
    x_offset = gap/math.cos(angle)
    x0_1 = x1_1 + x_offset
    # Cut depth corrected by the module-level refraction factor.
    max_depth_1 = ((cut["pyramid_height"]/math.sin(pyramid_angle_1))*math.cos(angle))*refraction
    max_layers_1 = math.ceil(max_depth_1/laser["z_spacing"])
    if cut["layers"] == "max":
        layers_1 = max_layers_1 + 1
    else:
        layers_1 = cut["layers"]
    new_angle_1 = math.atan((math.tan(pyramid_angle_1))/refraction)
    taper_y_1 = math.tan(new_angle_1 - angle)*(laser["z_spacing"])
    # NOTE(review): uses final_dimension_x regardless of which axis `x` was
    # derived from -- confirm whether this should follow the passed dimension.
    taper_x_1 = cut["final_dimension_x"]/(2*max_layers_1)
    return x0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1
def oss_stacked(block, cut, laser):
    """
    This algorithm returns a cutlist which performs OG slicing. It begins
    with an optional core, then cuts out slices until as many OG seeds as
    specified are removed from the block.
    """
    # Per-axis slice geometry (start/end x, start z, tapers, layer count).
    x0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut["final_dimension_x"]/2)
    x0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut["final_dimension_y"]/2)
    angle = math.radians(laser["kerf_angle"]/2)
    gap = math.tan(pyramid_angle_1) * (cut["final_dimension_x"]/2) + cut["gap_size"]
    unit_length = gap + cut["base_height"]
    max_slices = math.floor(block["thickness"]/unit_length)
    taper_straight = math.tan(angle)*(laser["z_spacing"])
    if cut["core"] == "yes":
        # Start from a vertical core, stripped of its trigger bracketing so
        # it can be embedded in this program.
        cutlist = json.loads(vertical_core(block,cut,laser))
        cutlist.pop()
        cutlist.pop(0)
    else:
        cutlist = []
    a0 = -(90 + math.degrees(angle))
    # Advance between consecutive slices along the tilted cutting plane.
    z_shift = (cut["base_height"] + gap) * math.sin(angle)
    x_shift = (cut["base_height"] + gap) * math.cos(angle)
    # Block-origin offsets projected into the tilted frame.
    x_delta = math.sin(angle) * block["origin_x"]
    y_delta = math.sin(angle) * block["origin_y"]
    z1_delta = math.cos(angle) * block["origin_x"]
    z2_delta = math.cos(angle) * block["origin_y"]
    cutlist.append(["a_abs",f"{a0:.6f}"])
    cutlist.append(["c_abs",str(block["physical_rotation"])])
    cutlist.append(["z_abs",str(z0_1 + z2_delta)])
    if pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:
        if cut["num_of_seeds"] == "max":
            num_slices = max_slices
        else:
            num_slices = cut["num_of_seeds"] + 1
        # Each slice is four pyramid quarter-cuts at 0/90/180/270 degrees.
        for i in range(num_slices):
            cutlist = (cutlist
                + pyramid_slice(cut["final_dimension_y"]/2 - block["origin_x"],x0_1 + y_delta,-cut["final_dimension_y"]/2 - block["origin_x"],x1_1 + y_delta,z0_1 + block["origin_y"],laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1)
                + [["z_abs",str(z0_2 + z1_delta)]] + [["c_abs","90"]]
                + pyramid_slice(cut["final_dimension_x"]/2 + block["origin_y"],x0_2 + x_delta,-cut["final_dimension_x"]/2 + block["origin_y"],x1_2 + x_delta,z0_2 + block["origin_x"],laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2)
                + [["z_abs",str(z0_1 - z2_delta)]] + [["c_abs","180"]]
                + pyramid_slice(cut["final_dimension_y"]/2 + block["origin_x"],x0_1 - y_delta,-cut["final_dimension_y"]/2 + block["origin_x"],x1_1 - y_delta,z0_1 - block["origin_y"],laser["xy_spacing"], laser["z_spacing"], taper_x_1,taper_y_1,taper_straight,layers_1)
                + [["z_abs",str(z0_2 - z1_delta)]] + [["c_abs","270"]]
                + pyramid_slice(cut["final_dimension_x"]/2 - block["origin_y"],x0_2 - x_delta,-cut["final_dimension_x"]/2 - block["origin_y"],x1_2 - x_delta,z0_2 - block["origin_x"],laser["xy_spacing"], laser["z_spacing"], taper_x_2,taper_y_2,taper_straight,layers_2)
                )
            # Step to the next slice along the tilted plane.
            z0_1 = z0_1 + z_shift
            z0_2 = z0_2 + z_shift
            x0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift
            cutlist.append(["c_abs",str(block["physical_rotation"])])
            cutlist.append(["z_abs",str(z0_1 + z2_delta)])
    else:
        raise Exception("Pyramid angle too small")
    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
def cross(block, cut, laser):
    """
    Mark a cross-hair alignment pattern: four pairs of perpendicular lines of
    increasing half-length (0.25 .. 1.0), rotating the c-axis 90 degrees
    between sizes except after the last.  Returns the cut list as JSON.
    (block/cut/laser are accepted for dispatch uniformity but unused.)
    """
    commands = []
    for step in range(1, 5):
        reach = step / 4
        commands.append(["jump", "0", str(reach)])
        commands.append(["mark", "0", str(-reach)])
        commands.append(["jump", str(reach), "0"])
        commands.append(["mark", str(-reach), "0"])
        if step != 4:
            commands.append(["c_rel", "90"])
    return json.dumps(commands)
def time_taken(json_cutlist, laser):
    """
    Estimate the wall-clock time to execute a cut list from the jump/mark
    distances and the laser's jump/mark speeds, returned as "H:MM:SS".

    Fix: the module only does `from datetime import datetime`, so the
    original `datetime.timedelta(...)` raised AttributeError; timedelta is
    now imported directly.  The unused zSet/cSet/aSet locals were removed.

    NOTE(review): a/c/z axis moves are charged no time at all -- the
    original merely parsed them into unused variables; confirm whether axis
    transformation timings should be added from the experimental data.
    """
    from datetime import timedelta
    cutlist = json.loads(json_cutlist)
    total_seconds = 0
    position = [0, 0]  # laser starts at the origin
    for command in cutlist:
        if command[0] == "jump" or command[0] == "mark":
            dx = float(command[1]) - position[0]
            dy = float(command[2]) - position[1]
            distance = math.sqrt(dx**2 + dy**2)
            if command[0] == "jump":
                total_seconds += distance / laser["jump_speed"]
            else:
                total_seconds += distance / laser["mark_speed"]
            position = [float(command[1]), float(command[2])]
        # Axis moves (z_abs/z_rel/c_abs/c_rel/a_abs/a_rel) are ignored for
        # the estimate; see the review note above.
    return str(timedelta(seconds=int(total_seconds)))
def generateCutList(cut_configuration):
    """
    This function takes a cut_configuration json object and calls the function
    corresponding to the desired cut, thereby returning the cutlist.

    Also writes the generated cut list to a timestamped CSV under save_path.
    """
    #Check that this line reads json.loads(cut_configuration)
    # NOTE(review): json.load expects a file-like object; callers passing a
    # JSON string would need json.loads instead -- confirm intended input.
    input_json = json.load(cut_configuration)
    #Currently only desired_cut and laser_cut_config are required
    try:
        block = input_json["block"]
    except:
        # NOTE(review): bare except silently tolerates a missing "block";
        # cuts that need it then fail later with NameError.
        pass
    try:
        cut = input_json["desired_cut"]
        laser = input_json["laser_cut_config"]
    except:
        raise Exception("Either desired_cut or laser_cut_config not provided")
    # Dispatch on the requested cut process.
    if cut["cut_process"] == "line":
        final_list = line(cut["x1"],cut["y1"],cut["x2"],cut["y2"],cut["final_dimension_z"]+laser["z_final_overshoot"],laser)
    elif cut["cut_process"] == "simple_core":
        final_list = simple_core(block,cut,laser)
    elif cut["cut_process"] == "vertical_core":
        final_list = vertical_core(block,cut,laser)
    elif cut["cut_process"] == "oss_stacked":
        final_list = oss_stacked(block,cut,laser)
    elif cut["cut_process"] == "z_focus":
        final_list = z_focus(block,cut,laser)
    elif cut["cut_process"] == "cross":
        final_list = cross(block,cut,laser)
    else:
        raise Exception("No such cut exists: Check cut_process")
    #print(time_taken(final_list, laser))
    # Persist the generated cut list as a timestamped CSV for inspection.
    now = datetime.now()
    timestamp = str(now.strftime("%m-%d_%H_%M"))
    complete_name = os.path.join(save_path, timestamp+".csv")
    with open(complete_name, mode='w',newline ='') as test_data:
        data_writer = csv.writer(test_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        list_data = json.loads(final_list)
        for line1 in list_data:
            data_writer.writerow(line1)
    return final_list
#Also used for testing
data = generateCutList(f)
# Fix: use a context manager so test.txt is flushed and closed
# deterministically (the original leaked the file handle).
with open("test.txt", "w") as test:
    test.write(data)
|
def f(a,b):
    """Close [a, b, a+b, b-a] under positive pairwise differences.

    Repeatedly scans all ordered pairs, appends the first difference not yet
    present, restarts the scan after every insertion, and stops when no new
    difference can be produced.  Values are returned in discovery order.

    Perf fix: membership was tested against the list (O(n) per check);
    a shadow set now gives O(1) lookups with identical append order.
    """
    s = [a, b, a + b, b - a]
    seen = set(s)
    while True:
        added = False
        for i in range(0, len(s) - 1):
            if added:
                break
            for j in range(i, len(s)):
                if added:
                    break
                # At most one of these fires per pair (they need s[i] > s[j]
                # and s[j] > s[i] respectively), matching the original order.
                if s[i] > s[j] and (s[i] - s[j]) not in seen:
                    s.append(s[i] - s[j])
                    seen.add(s[i] - s[j])
                    added = True
                if s[j] > s[i] and (s[j] - s[i]) not in seen:
                    s.append(s[j] - s[i])
                    seen.add(s[j] - s[i])
                    added = True
        if not added:
            return s
# Read a, b, c from stdin (one integer per line) and report whether c occurs
# in the difference-closure of {a, b} computed by f().
a=int(input().strip())
b=int(input().strip())
c=int(input().strip())
print(c in f(a,b))
|
24,849 | 675dbfd12d0304724538f2708cea01eb68c08c60 | import pygame
import Menus
import Game
import Resources
# Palette selectors passed to MakeText to pick a pre-rendered font colour.
WHITE = 1
RED = 2
YELLOW = 3
class GameOver(Game.GameSceneBase):
    def __init__(self, currentScene):
        """Build the game-over overlay: keep the scene being faded out,
        pre-render the epilogue text lines, and stop the music."""
        Game.GameSceneBase.__init__(self)
        self.previousScene = currentScene  # scene rendered during the fade
        self.bg = pygame.Surface((640, 480))
        # Fonts pre-rendered in each palette colour (see WHITE/RED/YELLOW).
        self._fontw = Resources.GetFont(255,255,255)
        self._fontr = Resources.GetFont(255,0,0)
        self._fonty = Resources.GetFont(255,255,0)
        self._counter = 0  # frame counter driving the staged reveal in Render
        pygame.mixer.music.stop()
        # Epilogue lines; coloured words are concatenated piecewise.
        self.line1 = self.Concat([
            self.MakeText("Despite your efforts people remained "),
            self.MakeText("terrified", YELLOW),
            self.MakeText(" of the past,")
        ])
        self.line2 = self.MakeText("insistent that learning from it meant forsaking all it contained ")
        self.line3 = self.Concat([
            self.MakeText("out of "),
            self.MakeText("fear", RED),
            self.MakeText(" of repeating its mistakes.")
        ])
        self.line4 = self.MakeText("GAME OVER", RED)
def Render(self, screen):
fade1Done = 80
text_fade = 60
delay = 0
startText = fade1Done + delay
l1_startText = startText
l2_startText = l1_startText + text_fade
l3_startText = l2_startText + text_fade
l4_startText = l3_startText + (int(1.3 * text_fade))
cnt = self._counter
screen.fill((0, 0, 0))
if cnt <= fade1Done:
op = (1 - (float(cnt) / fade1Done)) * 255
if op > 255:
op = 255
if op < 0:
op = 0
self.bg.set_alpha(op)
if op > 0:
self.previousScene.Render(self.bg)
screen.blit(self.bg, (0,0))
return
else:
self.bg.set_alpha(0)
screen.blit(self.bg, (0, 0))
bg = self.bg
bg.set_alpha(0)
l1 = self.line1
l2 = self.line2
l3 = self.line3
l4 = self.line4
if False:
if cnt >= l1_startText:
txtcnt = (cnt - l1_startText)
op = makealpha(txtcnt, text_fade)
print("op: %d" % op)
l1.set_alpha(op)
screen.blit(l1, (_gcx(bg, l1), 100))
if cnt >= l2_startText:
txtcnt = (cnt - l2_startText)
op = makealpha(txtcnt, text_fade)
#print("txtcnt: %d, text_fade: %d, op: %d" % (txtcnt, text_fade, op))
l2.set_alpha(op)
screen.blit(l2, (_gcx(bg, l2), 120))
if False and cnt >= l3_startText:
txtcnt = (cnt - l3_startText)
op = makealpha(txtcnt, text_fade)
l3.set_alpha(op)
screen.blit(l3, (_gcx(bg, l3), 140))
if cnt >= l4_startText:
screen.blit(l4, (_gcx(bg, l4), 180))
else:
if cnt >= l1_startText:
screen.blit(l1, (_gcx(bg, l1), 100))
if cnt >= l2_startText:
screen.blit(l2, (_gcx(bg, l2), 120))
if cnt >= l3_startText:
screen.blit(l3, (_gcx(bg, l3), 140))
if cnt >= l4_startText:
screen.blit(l4, (_gcx(bg, l4), 180))
def ProcessInput(self, events):
for e in events:
if e.type == pygame.MOUSEBUTTONUP and e.button == 1:
self.ReturnToMainMenu()
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.ReturnToMainMenu()
def Concat(self, lines):
wd = 0
h = 0
for l in lines:
wd += l.get_width()
h = l.get_height()
surf = pygame.Surface((wd, h))
offset = 0
for l in lines:
surf.blit(l, (offset, 0))
offset += l.get_width()
return surf
def MakeText(self, txt, color = WHITE):
if color == WHITE:
f = self._fontw.Render
elif color == RED:
f = self._fontr.Render
elif color == YELLOW:
f = self._fonty.Render
return f(txt)
def Update(self):
self._counter += 1
def ReturnToMainMenu(self):
self.next = Menus.Title()
def _gcx(s1, s2):
return (s1.get_width() - s2.get_width()) / 2
def makealpha(counter, frame):
    """Map an elapsed frame count onto an alpha value clamped to [0, 255].

    ``counter / frame`` is the fade progress; 0 frames -> fully
    transparent, ``frame`` frames or more -> fully opaque.
    """
    return max(0, min(255, (float(counter) / frame) * 255))
|
24,850 | b7522c50995f95bc8fe4b3aa13e20ae2fdb6335d |
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn import preprocessing
from load_data import load_data
def main():
    """Train an MLPRegressor on data_train.txt and print its predictions
    against the (scaled) training labels."""
    # Load data
    data_file = "data_train.txt"
    data_array, label_array = load_data(data_file)
    # NOTE(review): np.mat is deprecated in favour of np.asarray — confirm
    # load_data's output shape before switching.
    data_mat = np.mat(data_array)
    label_mat = np.mat(label_array).T  # column vector of targets
    # Scale both features and labels to the [0, 1] range.
    data_mat = preprocessing.MinMaxScaler().fit_transform(data_mat)
    label_mat = preprocessing.MinMaxScaler().fit_transform(label_mat)
    # NOTE(review): hidden_layer_sizes=(10) is just the int 10, not a
    # 1-tuple; sklearn accepts a scalar, but (10,) would state the intent.
    regreesor = MLPRegressor(hidden_layer_sizes=(10), activation='relu', solver='adam', alpha=0.001, tol=1e-8, max_iter=100000)
    regreesor.fit(data_mat, label_mat)
    # Predictions are made on the training set itself — no held-out data.
    predit_result = regreesor.predict(data_mat)
    print(label_mat)
    print(predit_result)
if __name__=='__main__':
    main()
24,851 | 2ee383e28d819f8b8b62207adeebbda081af05eb | from random import choice
import pygame as pg
from env import Tiles
env = Tiles()
print(len(env.tiles))
n_sims = 100
env.reset()
while not env.done:
scores = {action: [] for action in env.ACTIONS}
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
quit()
with env.simulate():
for _ in range(n_sims):
env.reset()
start_move = choice(env.ACTIONS)
env.make_action(start_move)
while not env.sim_done:
env.random_action()
scores[start_move].append(env.sim_score)
for action in scores:
scores[action] = sum(scores[action]) / len(scores[action])
action = max(scores.items(), key=lambda x: x[1])[0]
env.make_action(action)
env.render()
# x = input(f"{env.score} next game: ")
print(env.score)
|
24,852 | b05025cfa11682cb850452f0cec6e8af0f0902b3 | from PIL import Image
import numpy as np
# import tensorflow_datasets as tfds
import tensorflow as tf
def downsample(img, new_dims):
"""
down sample img
:param img: np.array img
:param new_dims: (n, n) tuple of downsample target
:return: downsampled img
"""
# convert to pil Image
img = Image.fromarray(img)
# convert img
img = img.resize(new_dims)
# new axis to feed into model
return np.array(img)[np.newaxis, ...]
"""
def preprocess(img_dict, lr_dim, upscale_factor=2):
preprocess an image to be lr hr pair for a given dim
:param img: full size img
:param lr_dim: dims for low res
:param upscale_factor: upscale factor for hr
:return: lr hr pair
img = img_dict['image']
print(img)
img_dict['image'] = tf.image.resize(img, lr_dim)
print(img_dict['image'])
#low_res = downsample(img, lr_dim)
#high_res = downsample(img, (l*upscale_factor for l in lr_dim))
return img_dict
""" |
24,853 | 8b28c2eb2ab496b4e020ce3f0a59c5eabd29c013 | def solve():
n = int(input())
print('The next number for the number {} is {}.'.format(n,n+1))
print('The previous number for the number {} is {}.'.format(n,n-1))
solve() |
24,854 | 035dd5762e71502ff7a730f5b827a989e7aa6c95 | from .entrance_events import EntranceEvents as VoltronModule
|
24,855 | ed621df59e47f8f8ffac5f693bc5d2dc0886d921 | radioStations = { "Radio Gong" : ["http://webstream.gong971.de/gong971",":/radiologos/radiologos/gong971.png"],
"StarFM" : ["https://stream.starfm.de/nbg/mp3-192/iradio3",":/radiologos/radiologos/starfm.jpg"],
"Antenne Bayern" : ["https://www.antenne.de/webradio/antenne.m3u",":/radiologos/radiologos/antenne.png"],
"Rock Antenne" : ["http://www.rockantenne.de/webradio/rockantenne.aac.pls",":/radiologos/radiologos/rockantenne.png"],
"Bayern 1" : ["https://br-br1-franken.cast.addradio.de/br/br1/franken/mp3/mid",":/radiologos/radiologos/bayern1.png"],
"Bayern 2" : ["https://addrad.io/444y2ph",":/radiologos/radiologos/bayern2.png"],
"Bayern 3" : ["https://br-edge-200b-fra-lg-cdn.cast.addradio.de/br/br3/live/mp3/mid",":/radiologos/radiologos/bayern3.png"],
"BR Heimat" : ["https://br-brheimat-live.cast.addradio.de/br/brheimat/live/mp3/mid",":/radiologos/radiologos/br_heimat.png"],
"Audiophile Jazz" : ["http://8.38.78.173:8210/stream/1/",":/radiologos/radiologos/audiophile_jazz.png"],
"Radio BUH" : ["http://streaming.radio.co/saed08c46d/listen",":/radiologos/radiologos/radio-buh.png"],
"Afk max" : ["http://stream.afkmax.de/afkmax-hq.mp3",":/radiologos/radiologos/afkmax.png"],
"Jazztime Nürnberg" : ["http://webradio.radiof.de:8000/radiof",":/radiologos/radiologos/jazztime_nbg.png"],
"Allzic Blues" : ["http://allzic09.ice.infomaniak.ch/allzic09.mp3",":/radiologos/radiologos/allzicblues.png"],
"Allzic Jazz & Soul": ["http://jazzradio.ice.infomaniak.ch/jazzradio-high.mp3",":/radiologos/radiologos/allzicjazzsoul.png"]
}
if(__name__ == "__main__"):
for key, value in radioStations.items():
print(key, value[0], value[1])
|
24,856 | 846c48d97063bd1ecb3551ae918768a2c58250ac | #!/usr/bin/env python
#coding:utf-8
"""
Purpose:
爬取 www.xinshubao.net 上的小说
Authors:
Chao -- < chaosimpler@gmail.com >
License:
LGPL clause
Created:
07/24/18
"""
from __future__ import division
import logging
import numpy as np
import pandas as pd
import codecs
import datetime
import urllib2
from bs4 import BeautifulSoup
#----------------------------------------------------------------------
def get_html(url):
    """Fetch the body of *url* and return it as a byte string.

    A desktop-browser User-Agent header is sent — presumably so the site
    does not reject the default urllib2 agent (TODO confirm).

    Arguments :
        url (string) : remote request address
    Returns :
        str_html (string): raw response body (may be an HTML page or a
            JSON string);
    """
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.7 (KHTML, like Gecko) Version/9.1.2 Safari/601.7.7"}
    req = urllib2.Request(url,headers=headers)
    str_html = urllib2.urlopen(req).read()
    return str_html
#----------------------------------------------------------------------
def get_chapter_content(url):
    r"""Parse one chapter page and return its title plus body text.

    Args:
        url (string) : remote chapter page address;
    Returns:
        str_content (string): the chapter title wrapped in 【】 followed by
            the chapter body text;
    """
    from bs4.element import Tag
    str_content = ''
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    # Chapter title lives in the page's single <h1>.
    str_title = soup.find('h1').string
    str_content += '\n\n' + u'【' + str_title + u'】' + '\n\n'
    # Progress logging: timestamp + title of the chapter being fetched.
    nowTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print nowTime
    print str_title
    # Chapter body: keep only bare text nodes of <div id="content">,
    # skipping nested tags (e.g. <br>) and empty newline nodes.
    div = soup.find('div', id = 'content')
    contents = div.contents
    for c in contents:
        if not isinstance(c, Tag):
            if c != '\n':
                str_content += c
    return str_content
#----------------------------------------------------------------------
def append_to_file(str_content, fname):
    r"""Append *str_content* plus a trailing newline to a UTF-8 file.

    Args:
        str_content (string): text to append;
        fname (string): path of the destination file;
    """
    with codecs.open(fname, 'a', 'utf-8') as handle:
        handle.write(str_content + '\n')
#------------------------------------------------------------------------------------------------
def crawl(base_url, start_url, file_name):
""" 爬取指定页面的内容
思路是:
指定一个起始页面,抓取该页面的内容,然后找到“下一章”,继续;
直到“下一章” 对应的超级链接是该小说的章节链接为止;
Args:
base_url (string): 章节 url 的地址;
如:http://www.xinshubao.net/12/12962/
start_url (string): 从哪个具体的章节页面开始爬取; 最后一章的“下一章”链接指向base_url;
如:http://www.xinshubao.net/12/12962/1051402.html
file_name (string): 文件存储的路径;
"""
# 当前页面的内容
str_chapter_content = get_chapter_content(start_url)
append_to_file(str_chapter_content)
# '下一章'的地址
soup = BeautifulSoup(get_html(start_url), 'lxml')
next_url = soup.find('a', text = u'下一章')['href']
while next_url != base_url:
# 当前页面内容
next_url = base_url + next_url
str_chapter_content = get_chapter_content(next_url)
append_to_file(str_chapter_content)
# '下一章'的地址
soup = BeautifulSoup(get_html(next_url), 'lxml')
next_url = soup.find('a', text = u'下一章')['href']
print 'over.'
if __name__ == '__main__':
# 《局中迷》
base_url = 'http://www.xinshubao.net/12/12962/'
# 第一章
start_url = 'http://www.xinshubao.net/12/12962/1051402.html'
# 存储路径
file_name = '/Users/chao/Downloads/JZM.txt'
crawl(base_url, start_url, file_name)
pass |
24,857 | 8f1e5d89b81f972d2abbfbdc34fd9a2230e32591 | # coding=UTF-8
import re
import jieba
import json
from db_operator.item_db import ItemDb
def DelNoneWord(text: str):
    """Remove every configured stop-word from *text* and return the result.

    Stop-words are loaded from cfg/none_use_word.json on each call.
    """
    with open('cfg/none_use_word.json', encoding='utf-8') as cfg:
        stop_words = json.load(cfg)
    for word in stop_words:
        text = text.replace(word, '')
    return text
def get_key(text: str):
    """Tokenise *text* (with stop-words stripped) into search keywords.

    Returns the jieba tokens of the cleaned text, minus the literal
    token '垃圾' ("garbage"), which is query noise rather than a key.
    """
    # Fix: the original loop variable shadowed the builtin `str`;
    # a comprehension also replaces the manual append loop.
    tokens = jieba.lcut(DelNoneWord(text))
    return [token for token in tokens if token != '垃圾']
def ASR(text):
    """Search the item database for every keyword extracted from *text*.

    Returns:
        dict: keyword -> list of matching item rows.
    """
    key_group = get_key(text)
    result = {}
    item_message = ItemDb()
    for key in key_group:
        search_results = item_message.item_search_exact(key)
        if len(search_results) == 0:
            # No exact hit — fall back to the vague/fuzzy search.
            search_results = item_message.items_search_vague(key)
        result[key] = search_results
    item_message.close()
    return result
|
24,858 | 786833351ab4f34ed5bc03d057aba61f1abdd718 | import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
# Inputs
fontSize = 25
figSize = [8.5, 6]
lineWidth = 5
ROC_plot = 1
hst_plot = 1
size = 64
ROC_path = 'data/ROC_dataset/myROCs/'
deploy_path = 'data/report_images/'
if int(os.path.isdir(deploy_path)) == 0:
print('Creating directory for training data')
os.mkdir(deploy_path)
# 1) ROC CURVES PLOT:
# Importing variables
x64 = np.load(ROC_path + 'x64.npy')
y64 = np.load(ROC_path + 'y64.npy')
x128 = np.load(ROC_path + 'x128.npy')
y128 = np.load(ROC_path + 'y128.npy')
x256 = np.load(ROC_path + 'x256.npy')
y256 = np.load(ROC_path + 'y256.npy')
# Printing variables
# print(x64)
# Plotting results
if ROC_plot == 1:
plt.figure(num = 1, figsize = figSize)
plt.plot(x64, y64, '-ob', linewidth = lineWidth)
plt.plot(x128, y128, '-or', linewidth = lineWidth)
plt.plot(x256, y256, '-og', linewidth = lineWidth)
plt.xlabel("False Positive Rate", fontsize = fontSize)
plt.ylabel("True Positive Rate", fontsize = fontSize)
plt.legend(['64x64','128x128','256x256'],loc = 'best', fontsize = fontSize)
plt.tick_params(axis = 'both', labelsize = fontSize)
plt.xlim(0, 0.126)
plt.ylim(0, 1)
plt.grid()
plt.tight_layout()
plt.savefig(deploy_path + 'ROC_curves.png')
plt.show()
# 2) HISTORY (LOSS/ACCURACY) PLOT:
history_path = 'data/droneRace/outputWeights/'
if size == 64:
history_name = 'drone_bs2_ep50_im272_size64_history.npy'
elif size == 128:
history_name = 'drone_bs2_ep25_im272_size128_history.npy'
elif size == 256:
history_name = 'drone_bs2_ep15_im272_size256_history.npy'
# Importing variables
history = np.load(history_path + history_name,allow_pickle='TRUE').item()
# Plot training & validation accuracy/loss values
if hst_plot == 1:
plt.figure(num = 2, figsize = figSize)
plt.plot(history['accuracy'], linewidth = lineWidth)
plt.plot(history['val_accuracy'], linewidth = lineWidth)
#plt.title('Model accuracy', fontsize = fontSize+2)
plt.ylabel('Accuracy', fontsize = fontSize)
plt.xlabel('Epoch', fontsize = fontSize)
plt.tick_params(axis = 'both', labelsize = fontSize)
plt.legend(['Train', 'Test'], loc='best', fontsize = fontSize)
plt.grid()
plt.tight_layout()
plt.savefig(deploy_path + 'accuracy_' + str(size) + '.png')
plt.show()
plt.figure(num = 3, figsize = figSize)
plt.plot(history['loss'], linewidth = lineWidth)
plt.plot(history['val_loss'], linewidth = lineWidth)
#plt.title('Model loss', fontsize = fontSize+2)
plt.ylabel('Loss', fontsize = fontSize)
plt.xlabel('Epoch', fontsize = fontSize)
plt.tick_params(axis = 'both', labelsize = fontSize)
plt.legend(['Train', 'Test'], loc='best', fontsize = fontSize)
plt.grid()
plt.tight_layout()
plt.savefig(deploy_path + 'loss_' + str(size) + '.png')
plt.show() |
24,859 | c005f6d8855e7083e921533909d9a3fe314d3713 | import json
from django.http import HttpResponse
from django_swagger_utils.drf_server.utils.decorator.interface_decorator \
import validate_decorator
from .validator_class import ValidatorClass
from content_management_portal.interactors.testcase_deletion_interactor \
import CaseDeletionInteractor
from content_management_portal.storages.testcase_storage_implementation \
import CaseStorageImplementation
@validate_decorator(validator_class=ValidatorClass)
def api_wrapper(*args, **kwargs):
    # View entry point: delete one test case belonging to a question.
    # URL kwargs supply question_id and testcase_id; request validation is
    # performed by the decorator before this body runs.
    question_id = kwargs['question_id']
    testcase_id = kwargs['testcase_id']
    storage = CaseStorageImplementation()
    interactor = CaseDeletionInteractor(storage=storage)
    response = interactor.testcase_deletion(question_id=question_id, \
        testcase_id=testcase_id)
    json_data = json.dumps(response)
    # NOTE(review): 201 Created is unusual for a deletion; 200/204 is
    # conventional — confirm the API contract before changing.
    return HttpResponse(json_data, status=201)
|
24,860 | 621bcd5cab093f0c4e5cde4e8d796da501a7a400 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import base64
from cryptography.fernet import Fernet
from pathlib_mate import Path
from . import fingerprint
from . import py23
from .cipher import BaseCipher
from .exc import PasswordError
if py23.is_py2:
input = raw_input
HOME_DIR = Path.home()
WINDTALKER_CONFIG_FILE = Path(HOME_DIR, ".windtalker")
def read_windtalker_password(): # pragma: no cover
    # Read the saved password from ~/.windtalker, stripping whitespace.
    return WINDTALKER_CONFIG_FILE.read_text(encoding="utf-8").strip()
class SymmetricCipher(BaseCipher):
    """
    A symmetric encryption algorithm utility class helps you easily
    encrypt/decrypt text, files and even a directory.

    :param password: The secret password you use to encrypt all your message.
        If you feel uncomfortable to put that in your code, you can leave it
        empty. The system will ask you manually enter that later.
    """
    _encrypt_chunk_size = 1024 * 1024  # 1 MB
    _decrypt_chunk_size = 1398200  # 1.398 MB
    """Symmetric algorithm needs to break big files in small chunks, encrypt
    them one by one, and concatenate them at the end. Each chunk has a fixed
    size. That's what these two attributes are for.
    """

    def __init__(self, password=None):
        if password:
            fernet_key = self.any_text_to_fernet_key(password)
            self.fernet = Fernet(fernet_key)  # type: Fernet
        else:  # pragma: no cover
            # No password supplied: read it from ~/.windtalker if present,
            # otherwise prompt on the keyboard.
            if WINDTALKER_CONFIG_FILE.exists():
                self.set_password(read_windtalker_password())
            else:
                self.input_password()

    def any_text_to_fernet_key(self, text):
        """
        Convert any text to a fernet key for encryption.

        :type text: str
        :rtype: bytes
        """
        # Fernet wants base64-encoded key material; the 32-char hex md5
        # fingerprint of the password provides it deterministically.
        md5 = fingerprint.fingerprint.of_text(text)
        fernet_key = base64.b64encode(md5.encode("utf-8"))
        return fernet_key

    def input_password(self):  # pragma: no cover
        """
        Manually enter a password for encryption on keyboard.
        """
        password = input("Please enter your secret key (case sensitive): ")
        self.set_password(password)

    def set_password(self, password):
        """
        Set a new password for encryption.
        """
        # Re-running __init__ rebuilds self.fernet from the new password.
        self.__init__(password=password)

    def set_encrypt_chunk_size(self, size):
        # The decrypt chunk size is derived empirically: encrypt a dummy
        # block of `size` bytes and measure the ciphertext length.
        if 1024 * 1024 < size < 100 * 1024 * 1024:
            self._encrypt_chunk_size = size
            self._decrypt_chunk_size = len(self.encrypt(b"x" * size))
        else:
            print("encrypt chunk size has to be between 1MB and 100MB")

    @property
    def metadata(self):
        return {
            "_encrypt_chunk_size": self._encrypt_chunk_size,
            "_decrypt_chunk_size": self._decrypt_chunk_size,
        }

    def encrypt(self, binary, *args, **kwargs):
        """
        Encrypt binary data.

        :type binary: bytes
        :rtype: bytes
        """
        return self.fernet.encrypt(binary)

    def decrypt(self, binary, *args, **kwargs):
        """
        Decrypt binary data.

        :type binary: bytes
        :rtype: bytes

        :raises PasswordError: when the data cannot be decrypted with the
            current password.
        """
        try:
            return self.fernet.decrypt(binary)
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine failures should become
        # PasswordError.
        except Exception:
            raise PasswordError("Ops, wrong magic word!")
|
24,861 | 2dd740313d2cdde5afe261daf4f4be386bacf131 | #!/usr/bin/env python3
import math, re
from temp_args import temp_parse_arguments
import pandas as pd
from matplotlib import pyplot
from pandas.plotting import lag_plot, autocorrelation_plot
args = temp_parse_arguments()
series = pd.read_csv('temperature.tsv', header=0,
sep='\t', squeeze=True, index_col=0)
series.index = pd.to_datetime(series.index, dayfirst=True)
x_min = min(series)
x_max = max(series)
print(series.head())
key = None
if args.std:
key = 'std'
series.plot()
elif args.scatter:
key = 'scatter'
series.plot(style='k.')
elif args.hist:
key = 'hist'
pyplot.xlim(math.ceil(x_min),math.ceil(x_max))
series.hist()
elif args.lag:
key = 'lag'
lag_plot(series)
elif args.acor:
key= 'acor'
autocorrelation_plot(series)
elif args.hsmth:
key = 'hsmth'
pyplot.xlim(math.ceil(x_min),math.ceil(x_max))
series.plot.kde()
elif args.boxp or args.ysub or args.heat:
years = pd.pivot(columns=series.index.year,
index=series.index.dayofyear,
values=series)
years = years.drop(366)
if args.boxp:
key = 'boxp'
years.boxplot()
elif args.ysub:
key = 'ysub'
years.plot(subplots=True, legend=False)
else:
key = 'heat'
years = years.T
pyplot.matshow(years, interpolation=None, aspect='auto')
elif args.year:
key = 'year_{}'.format(args.year)
one_year = series[str(args.year)]
months = pd.pivot(columns=one_year.index.month,
index=one_year.index.day,
values=one_year)
months.boxplot()
else:
assert False
assert key
print('key={}'.format(key))
pyplot.savefig('temp_pd.pdf')
|
24,862 | e846487e10f15aee420e76e72586f519f9c17ba3 | # "+"演算子でリストを結合する
print([1, 2, 3] + [4, 5, 6])
a = [1, 2, 3]
b = [4, 5, 6]
print(a + b)
c = a + b
print(c)
a.append(b)
print(a)
# a = a + b
a += b
print(a) |
24,863 | 509f4f8884f7fa61e901958b927375a920615638 | import MySQLdb
import json
import urlparse
def application(environ, start_response):
#rows_count = cursor.execute("SELECT club_id, clubname FROM Club WHERE clubname LIKE %%%s%% LIMIT %s, %s;", (query, int(startIndex), int(groupSize), ))
# try catch block attempts to connect to a db
try:
cnx = MySQLdb.connect(user="abarson_admin", passwd="dmnFKw6KSiW9",host="webdb.uvm.edu",db="ABARSON_TEST")
except MySQLdb.Error, err:
if not err:
err = "no data available"
# use of the start_response function to send text/html data about an error
start_response("500 database error", [('Content_Type','application/json')])
# the text/html payload
return "Could not connect to database"
if environ["REQUEST_METHOD"]== 'POST':
try:
request_body_size = int(environ.get('CONTENT_LENGTH'))
request_body = environ['wsgi.input'].read(request_body_size)
j = json.loads(request_body)
username = j['username']
clubname = j['clubname']
message = j['message']
except:
start_response("400 argument error", [('Content_Type','application/json')])
return json.dumps({"success": False, "message": "parameters missing j = "})
cursor = cnx.cursor()
sql = "SELECT Admins.club_id FROM Admins INNER JOIN Club ON Admins.club_id=Club.club_id WHERE Admins.username = %s AND Club.clubname = %s;"
rows_count = cursor.execute(sql, (username, clubname))
if rows_count == 0:
start_response("400 argument error", [('Content_Type','application/json')])
return json.dumps({"success": False, "message": username + ' ' + clubname})
else:
club_id = cursor.fetchall()[0][0]
messageBack = "Database failed to add notification"
success = "blurb"
sql = "INSERT INTO Notification (club_id, post_body) VALUES (%s, %s);"
cursor.execute(sql, (club_id, message))
cnx.commit()
cnx.close()
start_response("400 argument error", [('Content_Type','application/json')])
return json.dumps({"success": True, "message": "Message inserted"})
start_response("400 error",[('Content-Type','text/html')])
return ""
#IMPORTANT!!!! set the request_handler to your response function to get the script to work on silk
request_handler = application
|
24,864 | 20686e7f2710c99528db800d716ae17a32a2ba64 | var = float(input("quantos litros de frutas? "))
#calculo da doação a ser feita
x = var/3
print(round(x, 3)) |
24,865 | e14447ffeb650480e5b69297b9c2f71f927bca21 | #!/usr/bin/env python3
import re
import collections
from operator import itemgetter
import arrow
import requests
import itertools
from .lib import zonekey, web, IN
from .lib.validation import validate
from logging import getLogger
SLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'
station_map = {
"coal": ["Ukai(1-5)+Ukai6",
"Wanakbori",
"Gandhinagar",
"Sikka(3-4)",
"KLTPS(1-3)+KLTPS4",
"SLPP(I+II)",
"Akrimota",
"TPAECo",
"EPGL(I+II)",
"Adani(I+II+III)",
"BECL(I+II)",
"CGPL"],
"hydro": ["Ukai(Hydro)",
"Kadana(Hydro)",
"SSP(RBPH)"],
"gas": ["Utran(Gas)(II)",
"Dhuvaran(Gas)(I)+(II)+(III)",
"GIPCL(I)+(II)",
"GSEG(I+II)",
"GPPC",
"CLPI",
"KAWAS",
"Sugen+Unosgn",
"JHANOR"],
"nuclear": ["KAPP"]
}
def split_and_sum(expression):
    """
    Sum the positive terms of a '+'-separated numeric expression.

    Avoids literal_eval for simple addition strings. Terms that are zero
    or negative are ignored.
    """
    return sum(term for term in map(float, expression.split('+')) if term > 0.0)
def fetch_data(zone_key, session=None, logger=None):
    """Scrape sldcguj.com's real-time demand page for Gujarat (IN-GJ).

    Returns a collections.Counter keyed by fuel type ('coal', 'gas',
    'hydro', 'nuclear', 'wind', 'solar', 'unknown') in MW, plus 'date'
    (an Arrow timestamp) and 'total consumption'.
    """
    session = session or requests.session()
    values = collections.Counter()
    zonekey.assert_zone_key(zone_key, 'IN-GJ')
    # NOTE(review): these hard-coded cookies are defined but never attached
    # to the request — confirm whether they are still needed at all.
    cookies_params = {
        'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',
        'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'
    }
    soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)
    rows = soup.find_all('tr')
    cells = [c.text.strip() for c in soup.find_all('td')]
    # get wind and solar values; cells[1] carries the page timestamp.
    values['date'] = arrow.get(cells[1], 'D-MM-YYYY H:mm:ss').replace(
        tzinfo='Asia/Kolkata')
    # Single-element unpack asserts the label occurs exactly once.
    [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']
    value = cells[wind_solar_index + 1]
    values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]
    # get other production values; row shape (td count) determines meaning.
    for row in rows:
        elements = row.find_all('td')
        if len(elements) > 3: # will find production rows
            # v1 = station name, v2 = generation figure (whitespace removed).
            v1, v2 = (re.sub(r'\s+', r'', x.text)
                      for x in itemgetter(*[0, 3])(elements))
            energy_type = [k for k, v in station_map.items() if v1 in v]
            if len(energy_type) > 0:
                v2 = split_and_sum(v2)
                values[energy_type[0]] += v2
            else:
                if 'StationName' in (v1, v2): # meta data row
                    continue
                elif 'DSMRate' in v2: # demand side management
                    continue
                else:
                    try:
                        # Unmapped station: log it and count it as unknown.
                        logger.warning(
                            'Unknown fuel for station name: {}'.format(v1),
                            extra={'key': zone_key})
                        v2 = split_and_sum(v2)
                        values['unknown'] += v2
                    except ValueError as e:
                        # handle float failures
                        logger.warning(
                            "couldn't convert {} to float".format(v2),
                            extra={'key': zone_key})
                        continue
        elif len(elements) == 3: # will find consumption row
            v1, v2 = (re.sub(r'\s+', r'', x.text)
                      for x in itemgetter(*[0, 2])(elements))
            if v1 == 'GujaratCatered':
                # Strip the trailing 'MW...' suffix before summing.
                values['total consumption'] = split_and_sum(v2.split('MW')[0])
        elif len(elements) == 1:
            # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.
            plant_name = re.sub(r'\s+', r'', elements[0].text)
            known_plants = itertools.chain.from_iterable(station_map.values())
            if plant_name in known_plants:
                energy_type = [k for k, v in station_map.items() if plant_name in v][0]
                # Generation figure lives in the 4th following <td>.
                generation_tag = row.find_all_next("td")[3]
                val = float(re.sub(r'\s+', r'', generation_tag.text))
                if val > 0:
                    values[energy_type] += val
            else:
                if plant_name and plant_name != 'GMR':
                    # GMR is outside Gujarat, sometimes plant_name is ''
                    logger.warning(
                        'Unknown fuel for station name: {}'.format(plant_name),
                        extra={'key': zone_key})
    return values
def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,
                     logger=getLogger('IN-GJ')):
    """
    Requests the last known production mix (in MW) of a given country

    Arguments:
    zone_key: specifies which zone to get
    session: request session passed in order to re-use an existing session
    target_datetime: the datetime for which we want production data. If not provided, we should
      default it to now. The provided target_datetime is timezone-aware in UTC.
    logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically

    Return:
    A list of dictionaries in the form:
    {
      'zoneKey': 'FR',
      'datetime': '2017-01-01T00:00:00Z',
      'production': {
          'biomass': 0.0,
          'coal': 0.0,
          'gas': 0.0,
          'hydro': 0.0,
          'nuclear': null,
          'oil': 0.0,
          'solar': 0.0,
          'wind': 0.0,
          'geothermal': 0.0,
          'unknown': 0.0
      },
      'storage': {
          'hydro': -10.0,
      },
      'source': 'mysource.com'
    }
    """
    session = session or requests.session()
    # Only live data is available from this source.
    if target_datetime:
        raise NotImplementedError(
            'This parser is not yet able to parse past dates')
    value_map = fetch_data(zone_key, session, logger=logger)
    # Fuels the source does not report are set to None, not 0.
    data = {
        'zoneKey': zone_key,
        'datetime': value_map['date'].datetime,
        'production': {
            'biomass': None,
            'coal': value_map.get('coal', 0),
            'gas': value_map.get('gas', 0),
            'hydro': value_map.get('hydro', 0),
            'nuclear': value_map.get('nuclear', 0),
            'oil': None,
            'solar': value_map.get('solar', 0),
            'wind': value_map.get('wind', 0),
            'geothermal': None,
            'unknown': value_map.get('unknown', 0)
        },
        'storage': {
            'hydro': None
        },
        'source': 'sldcguj.com',
    }
    # validate() drops negative values; floor=7000 presumably rejects
    # implausibly low totals — see the validation module for semantics.
    valid_data = validate(data, logger, remove_negative=True, floor=7000)
    return valid_data
def fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,
                      logger=getLogger('IN-GJ')):
    """
    Return the latest total electricity consumption (MW) reported for
    Gujarat as a dict with zoneKey/datetime/consumption/source keys.
    Only live data is supported.
    """
    session = session or requests.session()
    if target_datetime:
        raise NotImplementedError(
            'This parser is not yet able to parse past dates')
    value_map = fetch_data(zone_key, session, logger=logger)
    return {
        'zoneKey': zone_key,
        'datetime': value_map['date'].datetime,
        'consumption': value_map['total consumption'],
        'source': 'sldcguj.com'
    }
if __name__ == '__main__':
session = requests.Session()
print(fetch_production('IN-GJ', session))
print(fetch_consumption('IN-GJ', session))
|
24,866 | b5bc97d7e5bb92002cb22a3edf9919b15bd4e153 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.tests.helper import remote_data
from numpy.testing import assert_allclose
from ..fetch import fetch_tiles
from ..survey import HipsSurveyProperties
from ..tile import HipsTileMeta
TILE_FETCH_TEST_CASES = [
dict(
tile_indices=[69623, 69627, 69628, 69629, 69630, 69631],
tile_format='fits',
order=7,
url='http://alasky.unistra.fr/DSS/DSS2Merged/properties',
progress_bar=True,
data=[2101, 1945, 1828, 1871, 2079, 2336],
fetch_package='urllib',
),
dict(
tile_indices=[69623, 69627, 69628, 69629, 69630, 69631],
tile_format='fits',
order=7,
url='http://alasky.unistra.fr/DSS/DSS2Merged/properties',
progress_bar=True,
data=[2101, 1945, 1828, 1871, 2079, 2336],
fetch_package='aiohttp',
),
]
def make_tile_metas(hips_survey, pars):
    """Yield a HipsTileMeta for every HEALPix pixel index in *pars*."""
    order = pars['order']
    file_format = pars['tile_format']
    frame = hips_survey.astropy_frame
    for ipix in pars['tile_indices']:
        yield HipsTileMeta(
            order=order,
            ipix=ipix,
            frame=frame,
            file_format=file_format,
        )
@pytest.mark.parametrize('pars', TILE_FETCH_TEST_CASES)
@remote_data
def test_fetch_tiles(pars):
hips_survey = HipsSurveyProperties.fetch(pars['url'])
tile_metas = list(make_tile_metas(hips_survey, pars))
tiles = fetch_tiles(
tile_metas, hips_survey,
progress_bar=pars['progress_bar'],
fetch_package=pars['fetch_package'],
)
for idx, val in enumerate(pars['data']):
assert_allclose(tiles[idx].data[0][5], val)
|
24,867 | ff1d10933466834ff71e5f24f91f0f8678539a5f | from psychopy import gui, core, data
import numpy, random, os, serial, pygame
from math import *
from tap_arduino import *
fingerName = ['thumb','index','middle','ring','pinky']
# -- get input from experimenter --
exptInfo = {'01. Participant Code':'000',
'02. Test number (0 for practice)':1,
'03. Dominant hand':'right',
'04. Hand pose':'hands square',
'05. No. trials per finger':5,
'06. Right fingers to use (1 thumb - 5 pinky)':'1,2,3,4,5',
'07. Left fingers to use (1 thumb - 5 pinky)':'',
'08. Provide feedback':True,
'09. Folder for saving data':'TAP-data',
'10. Sampling time (ms)':2500,
'11. Tap debounce (ms)':50,
'12. Motor activation duration (ms)':100,
'13. Motor intensity (0 - 255)':255,
'14. Accelerometer range (2,4,8,16G)':16,
'15. Tap threshold (0 - 255)':100,
'16. Max tap duration (0 - 159 ms)':150,
'17. Right arduino serial port':'COM3',
'18. Left arduino serial port':'COM5',
'19. Serial baud rate':9600,
'20. Serial timeout (sec)':0.05,
'21. Print arduino messages':False}
exptInfo['22. Date and time']= data.getDateStr(format='%Y-%m-%d_%H-%M-%S') ##add the current time
dlg = gui.DlgFromDict(exptInfo, title='Experiment details',
fixed=['22. Date and time'])
if dlg.OK:
pass
else:
core.quit() ## the user hit cancel so exit
try:
rightToUse = [int(i)-1 for i in exptInfo['06. Right fingers to use (1 thumb - 5 pinky)'].split(',')]
except:
rightToUse = []
try:
leftToUse = [int(i)-1 for i in exptInfo['07. Left fingers to use (1 thumb - 5 pinky)'].split(',')]
except:
leftToUse = []
handsToUse = []
arduinoPort = {}
if len(rightToUse) > 0:
handsToUse.append('right')
arduinoPort['right'] = exptInfo['17. Right arduino serial port']
if len(leftToUse) > 0:
handsToUse.append('left')
arduinoPort['left'] = exptInfo['18. Left arduino serial port']
if len(handsToUse) == 0:
core.quit('You must use at least one motor')
# ----
# -- make folder/files to save data --
if exptInfo['02. Test number (0 for practice)'] > 0:
dataFolder = './'+exptInfo['09. Folder for saving data']+'/'
if not os.path.exists(dataFolder):
os.makedirs(dataFolder)
fileName = dataFolder + exptInfo['22. Date and time']+'_'+ exptInfo['01. Participant Code']
infoFile = open(fileName+'_info.csv', 'w')
for k,v in exptInfo.iteritems(): infoFile.write(k + ',' + str(v) + '\n')
infoFile.close()
accelDataFile = open(fileName+'_accleerometer-data.csv', 'w')
trialDataFile = open(fileName+'_trial-data.csv', 'w')
accelDataFile.write('trialNumber,time,x,y,z\n')
trialDataFile.write('trialNumber,cued-hand,cued-finger,correct,tap-1-ms,tap-1-finger,tap-2-ms,tap-2-finger,tap-3-ms,tap-3-finger\n')
# ----
# -- setup experiment randomisation --
stimList = []
for finger in rightToUse:
stimList.append({'hand':'right','finger':finger})
for finger in leftToUse:
stimList.append({'hand':'left','finger':finger})
trials = data.TrialHandler(stimList,exptInfo['05. No. trials per finger'])
trials.data.addDataType('correct')
# ----
# -- setup feedback --
if exptInfo['08. Provide feedback']:
#pygame.mixer.pre_init()
#pygame.mixer.init()
#sounds = [pygame.mixer.Sound('incorrect.wav'),pygame.mixer.Sound('correct.wav')]
feedbackText = ['INCORRECT','CORRECT']
# ----
# -- make serial connection to arduino --
arduino = {}
for h in handsToUse:
arduino[h] = serial.Serial(arduinoPort[h],
exptInfo['19. Serial baud rate'],
timeout=exptInfo['20. Serial timeout (sec)'])
print(h+' ')
ping(arduino[h],exptInfo['21. Print arduino messages'])
# --
# -- data recording settings --
for h in handsToUse:
sampling_time(arduino[h], exptInfo['10. Sampling time (ms)'],
exptInfo['21. Print arduino messages'])
tap_debounce(arduino[h], exptInfo['11. Tap debounce (ms)'],
exptInfo['21. Print arduino messages'])
#--
# -- motor settings --
for h in handsToUse:
motor_duration(arduino[h], exptInfo['12. Motor activation duration (ms)'],
exptInfo['21. Print arduino messages'])
motor_intensity(arduino[h], exptInfo['13. Motor intensity (0 - 255)'],
exptInfo['21. Print arduino messages'])
#--
# -- accelerometer settings --
for h in handsToUse:
accel_range(arduino[h], exptInfo['14. Accelerometer range (2,4,8,16G)'],
exptInfo['21. Print arduino messages'])
accel_threshold(arduino[h], exptInfo['15. Tap threshold (0 - 255)'],
exptInfo['21. Print arduino messages'])
accel_duration(arduino[h], exptInfo['16. Max tap duration (0 - 159 ms)'],
exptInfo['21. Print arduino messages'])
#--
# -- setup accelerometers --
for finger in leftToUse:
setup_accel(arduino['left'], finger, exptInfo['21. Print arduino messages'])
for finger in rightToUse:
setup_accel(arduino['right'], finger, exptInfo['21. Print arduino messages'])
# --
# -- run the experiment --
correctCount = 0
trialNum = 0
for thisTrial in trials:
print('\nTap cued on {} {}' .format(
thisTrial['hand'],
fingerName[thisTrial['finger']]))
## cue for a tap
tapResults = tap(arduino[thisTrial['hand']],
thisTrial['finger'],
exptInfo['21. Print arduino messages'])
correct = int(fingerName[thisTrial['finger']] == tapResults['firstThreeTapFingers'][0])
trials.data.add('correct',correct)
## provide feedback
if exptInfo['08. Provide feedback']:
#feedbackSound = sounds[correct]
#ch = feedbackSound.play()
print(feedbackText[correct])
print('Participant tapped {} finger.' .format(tapResults['firstThreeTapFingers'][0]))
#while ch.get_busy():
# pass
print('Reaction time {} ms' .format(tapResults['firstThreeTapTimes'][0]))
## record the data if not practice
if exptInfo['02. Test number (0 for practice)'] > 0:
thisAccel = tapResults['accelData'].transpose()
for row in range(len(thisAccel)):
accelDataFile.write('{},{},{},{},{}\n' .format(
trialNum+1,
thisAccel[row][0],
thisAccel[row][1],
thisAccel[row][2],
thisAccel[row][3]))
trialDataFile.write('{},{},{},{},{},{},{},{},{},{}\n' .format(
trialNum+1,
thisTrial['hand'],
fingerName[thisTrial['finger']],
correct,
tapResults['firstThreeTapTimes'][0],
tapResults['firstThreeTapFingers'][0],
tapResults['firstThreeTapTimes'][1],
tapResults['firstThreeTapFingers'][1],
tapResults['firstThreeTapTimes'][2],
tapResults['firstThreeTapFingers'][2]))
trialNum += 1
print('{} of {} trials complete\n' .format(trialNum, trials.nTotal))
print('\n=== EXPERIMENT FINISHED ===\n')
## save data to file
if exptInfo['02. Test number (0 for practice)'] > 0:
accelDataFile.close()
trialDataFile.close()
print('Data saved {}\n\n' .format(fileName))
else:
print('Practice only, no data saved.')
|
24,868 | ee8cca50cdeaf5baeeba168903d52fe1f0e49d68 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
from aiocouchdb.tests import utils
from aiocouchdb.tests.utils import (
modify_server,
populate_database,
run_for,
skip_for,
using_database,
uuid,
with_fixed_admin_party,
TestCase
)
from .. import attachment
from .. import database
from .. import designdoc
from .. import document
from .. import server
class ServerTestCase(utils.ServerTestCase):
    # Run the shared server test suite against this package's Server class.
    server_class = server.Server
class DatabaseTestCase(ServerTestCase, utils.DatabaseTestCase):
    # Shared database suite, bound to this package's Database class.
    database_class = database.Database
class DocumentTestCase(DatabaseTestCase, utils.DocumentTestCase):
    # Shared document suite, bound to this package's Document class.
    document_class = document.Document
class DesignDocumentTestCase(DatabaseTestCase, utils.DesignDocumentTestCase):
    # Shared design-document suite, bound to this package's DesignDocument.
    designdoc_class = designdoc.DesignDocument
class AttachmentTestCase(DocumentTestCase, utils.AttachmentTestCase):
    # Shared attachment suite, bound to this package's Attachment class.
    attachment_class = attachment.Attachment
|
24,869 | e5d1feaa7289fe91f912809a81f7902c4c230288 | #Python Program to find the area of triangle
# Read the three side lengths of the triangle.
a = float(input("Enter the first number"))
b = float(input("Enter the second number"))
c = float(input("Enter the third number"))
# Calculate the semi-perimeter (Heron's formula).
s = (a+b+c)/2
# Calculate the area: sqrt(s * (s-a) * (s-b) * (s-c)).
Area = (s*(s-a)*(s-b)*(s-c))**0.5
# BUG FIX: the original format string was "0.2f" (missing the '%'), so the
# area was never interpolated and the % operator raised
# "not all arguments converted during string formatting".
print("The area of triangle is %0.2f" % Area)
|
24,870 | c20b669710cdbe350ce9dd5c12d83006c6277649 | #
# PySNMP MIB module AcPerfMediaGateway (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AcPerfMediaGateway
# Produced by pysmi-0.3.4 at Mon Apr 29 17:17:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): this module is auto-generated by pysmi from the ASN.1 source
# (see header); avoid hand-editing the statements below.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, NotificationType, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, IpAddress, Counter32, Counter64, Bits, Gauge32, Integer32, Unsigned32, enterprises, iso = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "NotificationType", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "IpAddress", "Counter32", "Counter64", "Bits", "Gauge32", "Integer32", "Unsigned32", "enterprises", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- AudioCodes enterprise OID registry (1.3.6.1.4.1.5003) ---
audioCodes = MibIdentifier((1, 3, 6, 1, 4, 1, 5003))
acRegistrations = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 7))
acGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 8))
acProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 9))
acPerformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10))
acPerfMediaGateway = ModuleIdentity((1, 3, 6, 1, 4, 1, 5003, 10, 1))
acPerfMediaGateway.setRevisions(('2003-11-20 00:00',))
if mibBuilder.loadTexts: acPerfMediaGateway.setLastUpdated('200407121502Z')
if mibBuilder.loadTexts: acPerfMediaGateway.setOrganization('AudioCodes Ltd')
# --- Control-protocol (CP) performance counters; all marked deprecated ---
acPerfCp = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1))
acPerfCpNumDupsForCompletedTransactions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpNumDupsForCompletedTransactions.setStatus('deprecated')
acPerfCpNumDupsForOutstandingTransactions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpNumDupsForOutstandingTransactions.setStatus('deprecated')
acPerfCpMessageSendSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageSendSuccesses.setStatus('deprecated')
acPerfCpMessageSendErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageSendErrors.setStatus('deprecated')
acPerfCpMessageReceiveSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageReceiveSuccesses.setStatus('deprecated')
acPerfCpMessageReceiveErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageReceiveErrors.setStatus('deprecated')
acPerfCpProtocolSyntaxErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpProtocolSyntaxErrors.setStatus('deprecated')
acPerfCpMessageRetransmissions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageRetransmissions.setStatus('deprecated')
acPerfCpMessageMaxRetransmissionsExceeded = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageMaxRetransmissionsExceeded.setStatus('deprecated')
acPerfCpMessagesFromUntrustedSources = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessagesFromUntrustedSources.setStatus('deprecated')
# --- RTP performance counters and session gauges; all marked deprecated ---
acPerfRtp = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2))
acPerfRtpSenderPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSenderPackets.setStatus('deprecated')
acPerfRtpSenderOctets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSenderOctets.setStatus('deprecated')
acPerfRtpReceiverPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpReceiverPackets.setStatus('deprecated')
acPerfRtpReceiverOctets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpReceiverOctets.setStatus('deprecated')
acPerfRtpRcvrLostPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpRcvrLostPackets.setStatus('deprecated')
acPerfRtpFailedDueToLackOfResources = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpFailedDueToLackOfResources.setStatus('deprecated')
acPerfRtpSimplexInSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsTotal.setStatus('deprecated')
acPerfRtpSimplexInSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsCurrent.setStatus('deprecated')
acPerfRtpSimplexOutSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsTotal.setStatus('deprecated')
acPerfRtpSimplexOutSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsCurrent.setStatus('deprecated')
acPerfRtpDuplexSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsTotal.setStatus('deprecated')
acPerfRtpDuplexSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsCurrent.setStatus('deprecated')
# --- System-wide packet-endpoint gauges; all marked deprecated ---
acPerfSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3))
acPerfSystemPacketEndpoints = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfSystemPacketEndpoints.setStatus('deprecated')
acPerfSystemPacketEndpointsInUse = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfSystemPacketEndpointsInUse.setStatus('deprecated')
# Export every symbol so that dependent MIB modules can import them by name.
mibBuilder.exportSymbols("AcPerfMediaGateway", acPerfRtpDuplexSessionsCurrent=acPerfRtpDuplexSessionsCurrent, PYSNMP_MODULE_ID=acPerfMediaGateway, acProducts=acProducts, acPerfCpNumDupsForOutstandingTransactions=acPerfCpNumDupsForOutstandingTransactions, acPerfCpNumDupsForCompletedTransactions=acPerfCpNumDupsForCompletedTransactions, acPerfCp=acPerfCp, acPerfRtpFailedDueToLackOfResources=acPerfRtpFailedDueToLackOfResources, acRegistrations=acRegistrations, acPerfSystemPacketEndpoints=acPerfSystemPacketEndpoints, acPerfRtpSimplexInSessionsTotal=acPerfRtpSimplexInSessionsTotal, acPerfSystem=acPerfSystem, acPerfRtpSimplexInSessionsCurrent=acPerfRtpSimplexInSessionsCurrent, acPerfRtpSimplexOutSessionsCurrent=acPerfRtpSimplexOutSessionsCurrent, acPerfSystemPacketEndpointsInUse=acPerfSystemPacketEndpointsInUse, acPerfRtpReceiverOctets=acPerfRtpReceiverOctets, acPerfCpMessageReceiveErrors=acPerfCpMessageReceiveErrors, acPerfRtpRcvrLostPackets=acPerfRtpRcvrLostPackets, acPerfCpMessageSendErrors=acPerfCpMessageSendErrors, acPerfCpMessagesFromUntrustedSources=acPerfCpMessagesFromUntrustedSources, acPerformance=acPerformance, acPerfCpMessageRetransmissions=acPerfCpMessageRetransmissions, acPerfRtpSenderOctets=acPerfRtpSenderOctets, acPerfCpProtocolSyntaxErrors=acPerfCpProtocolSyntaxErrors, acPerfRtpReceiverPackets=acPerfRtpReceiverPackets, acPerfRtpDuplexSessionsTotal=acPerfRtpDuplexSessionsTotal, acPerfRtpSenderPackets=acPerfRtpSenderPackets, acPerfCpMessageReceiveSuccesses=acPerfCpMessageReceiveSuccesses, acGeneric=acGeneric, acPerfRtpSimplexOutSessionsTotal=acPerfRtpSimplexOutSessionsTotal, audioCodes=audioCodes, acPerfMediaGateway=acPerfMediaGateway, acPerfRtp=acPerfRtp, acPerfCpMessageSendSuccesses=acPerfCpMessageSendSuccesses, acPerfCpMessageMaxRetransmissionsExceeded=acPerfCpMessageMaxRetransmissionsExceeded)
|
24,871 | 91b862174f97bfb205084ebcda4a0ad5bfa9372d | # Generated by Django 3.1.6 on 2021-03-25 04:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.6): add MyBet.toWin, a float defaulting to 0.
    dependencies = [
        ('BettingApp', '0014_auto_20210325_1021'),
    ]
    operations = [
        migrations.AddField(
            model_name='mybet',
            name='toWin',
            field=models.FloatField(default=0),
        ),
    ]
|
24,872 | 37a0b899582b9a25ceebd4552fac9e772e85947c | # Generated by Django 3.0.2 on 2020-01-09 18:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.2): create the Wait model, a named waiting
    # room that tracks how many players have joined.
    dependencies = [
        ('client', '0002_auto_20200107_0540'),
    ]
    operations = [
        migrations.CreateModel(
            name='Wait',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=20)),
                ('player_count', models.PositiveIntegerField(default=0)),
            ],
        ),
    ]
|
24,873 | 4ccfc82fb9b806143b65407917bf8c57a628d9fa | from django.db import models
# Create your models here.
class Cell(models.Model):
    # A named grid cell addressed by (sector, row); `is_blocked` marks it
    # as unavailable.
    cell_name = models.CharField(max_length = 30)
    is_blocked = models.BooleanField(default = False)
    sector = models.IntegerField(default = 0)
    row = models.IntegerField(default = 0)
    # NOTE(review): __unicode__ is a Python 2 / Django < 2.0 idiom; on
    # Python 3 it is ignored and __str__ would be needed — confirm target.
    def __unicode__(self):
        return self.cell_name
class Object(models.Model):
    # An item stored in a Cell; `owner` defaults to "admin".
    object_name = models.CharField(max_length = 30)
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
    cell = models.ForeignKey(Cell)
    owner = models.CharField(max_length = 30, default = "admin")
    def __unicode__(self):
        return self.object_name
|
24,874 | 8b50d103e07fb02adc3309613ff2dc87e76ed8d1 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import pyexcel
from collections import OrderedDict
# Income statement page for VNM (Vinamilk) on cafef.vn.
url = "http://s.cafef.vn/bao-cao-tai-chinh/VNM/IncSta/2017/3/0/0/ket-qua-hoat-dong-kinh-doanh-cong-ty-co-phan-sua-viet-nam.chn"
conn = urlopen(url)
raw_data = conn.read()
webpage_text = raw_data.decode("utf-8")
# f = open("VNM.html", "wb")
# f.write(raw_data)
# f.close()
soup = BeautifulSoup(webpage_text,"html.parser")
# Extract the column headers of the income-statement table.
table1 = soup.find('table',id="tblGridData")
# First element is an empty placeholder for the row-label column.
title_lists = [""]
td1_lists = table1.find_all("td","h_t")
for td in td1_lists:
    title = td.string.replace("\r\n ","")
    title_lists.append(title)
# Grab the remaining figures:
table2 = soup.find('table',id="tableContent")
x = table2.find('tr')
|
24,875 | ea85a791c48e3459ae3fe4152f9750cd0a1211c4 | import json
from rest_framework import status
from callblocker.blocker import services
from callblocker.blocker.services import bootstrap
from callblocker.core.service import Service, ServiceStatus, ServiceState
from callblocker.core.servicegroup import ServiceGroupSpec
class FlippinService(Service):
    """In-memory Service double whose state the tests can flip at will."""

    def __init__(self, name):
        # Fresh instances are INITIAL with no recorded failure details.
        self._name = name
        self.state = ServiceState.INITIAL
        self.exception = None
        self.traceback = None

    def start(self) -> 'Service':
        self.state = ServiceState.READY
        return self

    def sync_start(self, timeout=None):
        # Synchronous variant: nothing to wait on, just delegate.
        self.start()

    def stop(self) -> 'Service':
        self.state = ServiceState.TERMINATED
        return self

    def sync_stop(self, timeout=None):
        self.stop()

    def status(self) -> ServiceStatus:
        # Package the current state plus any injected failure information.
        return ServiceStatus(self.state, self.exception, self.traceback)

    @property
    def name(self) -> str:
        return self._name
def test_provides_correct_service_status(api_client):
    # Bootstrap three stub services; all should report READY after start.
    bootstrap_spec(
        spec=ServiceGroupSpec(
            fp1=lambda _: FlippinService('FlippingService 1'),
            fp2=lambda _: FlippinService('FlippingService 2'),
            fp3=lambda _: FlippinService('FlippingService 3')
        )
    )
    summary = api_client.get('/api/services/').json()
    # The list endpoint must report every service with id, name and state.
    for i, element in enumerate(summary, start=1):
        assert element['id'] == f'fp{i}'
        assert element['name'] == f'FlippingService {i}'
        assert element['status']['state'] == 'READY'
    # Inject a failure directly into fp1 and verify the detail endpoint
    # surfaces the state, the formatted exception, and the traceback.
    fp1 = services.services().fp1
    fp1.state = ServiceState.ERRORED
    fp1.exception = EOFError('Ooops!')
    fp1.traceback = ['Ooops']
    fp1_summary = api_client.get('/api/services/fp1/').json()
    assert fp1_summary['id'] == 'fp1'
    assert fp1_summary['status']['state'] == 'ERRORED'
    assert fp1_summary['status']['exception'] == 'EOFError: Ooops!'
    assert fp1_summary['status']['traceback'] == ['Ooops']
def test_starts_stops_service(api_client):
    bootstrap_spec(
        ServiceGroupSpec(
            fp1=lambda _: FlippinService('FlippingService 1')
        )
    )
    assert api_client.get('/api/services/fp1/').json()['status']['state'] == 'READY'
    # Drive the service through stop (TERMINATED) and then start (READY)
    # via PATCH, checking both the HTTP code and the in-process state.
    for target in ['TERMINATED', 'READY']:
        result = api_client.patch(
            '/api/services/fp1/',
            data=json.dumps({
                'status': {'state': target}
            }),
            content_type='application/json'
        )
        assert result.status_code == status.HTTP_202_ACCEPTED
        assert services.services().fp1.status().state == ServiceState[target]
def test_raises_400_on_malformed_request(api_client):
    bootstrap_spec(
        ServiceGroupSpec(
            fp1=lambda _: FlippinService('FlippingService 1')
        )
    )
    # Payload lacks the required {'status': {'state': ...}} wrapper, so the
    # endpoint must reject it with HTTP 400 instead of changing state.
    assert api_client.patch(
        '/api/services/fp1/',
        data=json.dumps({
            'state': 'TERMINATED'
        }),
        content_type='application/json'
    ).status_code == 400
def bootstrap_spec(spec):
    """Register *spec* as the 'custom' service group and start it."""
    # This is hacky, and will improve as I figure out an
    # API for it.
    # Triggers the bootstrap call in urls.py (imported for side effect only).
    from callblocker import urls
    # Overrides it with our stuff.
    setattr(services, 'custom', spec)
    bootstrap('custom').start()
|
24,876 | 782a76c8a1fc8d7d6ebb79c8f99f3580e99dd6ac | import time
import mongoengine as me
from openid.store import nonce
from openid.association import Association
from openid.store.interface import OpenIDStore
class MistAssociation(me.Document):
    # Persistent copy of an openid Association; `handle` and `secret` are
    # stored as hex strings (see OpenIdMistStore.storeAssociation).
    server_url = me.StringField()
    handle = me.StringField()
    secret = me.StringField()
    issued = me.IntField()  # epoch seconds at issue time
    # NOTE(review): default 6 * 6 * 60 = 2160 s (~36 min); possibly intended
    # 6 * 60 * 60 (6 hours) — confirm with the author.
    lifetime = me.IntField(min_value=0, default=6 * 6 * 60)
    assoc_type = me.StringField()
    meta = {'allow_inheritance': False}
    def is_expired(self):
        # Expired once `issued + lifetime` lies in the past.
        return self.lifetime + self.issued < time.time()
class MistNonce(me.Document):
    # A nonce observed from an OpenID server; used for replay protection.
    server_url = me.StringField()
    timestamp = me.IntField()  # epoch seconds
    salt = me.StringField()
    def is_old(self, lifespan=None):
        # Delegates to module-level is_nonce_old (defaults to nonce.SKEW).
        return is_nonce_old(self.timestamp, lifespan)
def is_nonce_old(timestamp, lifespan=None):
    """Return True when *timestamp* differs from now by more than *lifespan*.

    Falls back to the library default skew (nonce.SKEW) when *lifespan*
    is falsy.
    """
    window = lifespan or nonce.SKEW
    age = abs(time.time() - timestamp)
    return age > window
class OpenIdMistStore(OpenIDStore):
    """OpenIDStore implementation persisting associations and nonces in
    MongoDB through the mongoengine documents above.

    Binary association handles/secrets are stored hex-encoded (str) and
    decoded back to bytes on retrieval.
    """

    def storeAssociation(self, server_url, association):
        """
        This method will store a MistAssociation object into mongodb after
        creating a MistAssociation with the same values as the Association
        provided.
        Secret and handle are hex-encoded because raw bytes constantly
        produced errors with encoding.
        """
        mist_association = MistAssociation()
        mist_association.assoc_type = association.assoc_type
        mist_association.handle = association.handle.hex()
        mist_association.secret = association.secret.hex()
        mist_association.lifetime = association.lifetime
        mist_association.issued = association.issued
        mist_association.server_url = server_url
        mist_association.save()

    def getAssociation(self, server_url, handle=None):
        """
        Gets a server url and the handle and finds a matching association
        that has not expired. Expired associations are deleted as a side
        effect. The association returned is the one with the most recent
        issuing timestamp, or None if nothing matches.
        """
        query = {'server_url': server_url}
        if handle:
            query.update({'handle': handle.hex()})
        try:
            mist_associations = MistAssociation.objects(**query)
        except me.DoesNotExist:
            mist_associations = []
        # Drop expired documents; keep the live ones.
        filtered_mist_assocs = []
        for assoc in mist_associations:
            if assoc.is_expired():
                assoc.delete()
            else:
                filtered_mist_assocs.append(assoc)
        filtered_mist_assocs = sorted(filtered_mist_assocs,
                                      key=lambda assoc: assoc.issued,
                                      reverse=True)
        if len(filtered_mist_assocs) > 0:
            mist_assoc = filtered_mist_assocs[0]
            # BUG FIX: the stored values were produced with bytes.hex()
            # (see storeAssociation), but str.decode('hex') does not exist
            # on Python 3 and raised AttributeError here. bytes.fromhex()
            # is the correct inverse of bytes.hex().
            association = Association(handle=bytes.fromhex(mist_assoc.handle),
                                      secret=bytes.fromhex(mist_assoc.secret),
                                      issued=mist_assoc.issued,
                                      lifetime=mist_assoc.lifetime,
                                      assoc_type=mist_assoc.assoc_type)
            return association
        return None

    def removeAssociation(self, server_url, handle):
        """
        This method removes the matching association if it's found,
        and returns whether the association was removed or not.
        """
        try:
            mist_associations = MistAssociation.objects(
                server_url=server_url, handle=handle.hex())
        except me.DoesNotExist:
            return False
        for assoc in mist_associations:
            assoc.delete()
        return len(mist_associations) > 0

    def useNonce(self, server_url, timestamp, salt):
        """Called when using a nonce.
        This method should return C{True} if the nonce has not been
        used before, and store it for a while to make sure nobody
        tries to use the same value again. If the nonce has already
        been used or the timestamp is not current, return C{False}.
        You may use L{openid.store.nonce.SKEW} for your timestamp window.
        """
        if is_nonce_old(timestamp):
            return False
        try:
            mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,
                                            timestamp=timestamp)
        except me.DoesNotExist:
            mist_nonces = []
        if len(mist_nonces) == 0:
            # NOTE(review): debug print kept to preserve observable behavior.
            print("Timestamp = %s" % timestamp)
            MistNonce(
                server_url=server_url, salt=salt, timestamp=timestamp
            ).save()
            return True
        return False

    def cleanupNonces(self):
        """Remove expired nonces from the store.
        Discards any nonce from storage that is old enough that its
        timestamp would not pass L{useNonce}.
        This method is not called in the normal operation of the
        library. It provides a way for store admins to keep
        their storage from filling up with expired data.
        @return: the number of nonces expired.
        @returntype: int
        """
        try:
            mist_nonces = MistNonce.objects()
        except me.DoesNotExist:
            mist_nonces = []
        counter = 0
        for n in mist_nonces:
            if n.is_old():
                n.delete()
                counter += 1
        return counter

    def cleanupAssociations(self):
        """Remove expired associations from the store.
        This method is not called in the normal operation of the
        library. It provides a way for store admins to keep
        their storage from filling up with expired data.
        @return: the number of associations expired.
        @returntype: int
        """
        try:
            mist_associations = MistAssociation.objects()
        except me.DoesNotExist:
            mist_associations = []
        counter = 0
        for assoc in mist_associations:
            if assoc.is_expired():
                assoc.delete()
                counter += 1
        return counter

    def cleanup(self):
        """Shortcut for C{L{cleanupNonces}()}, C{L{cleanupAssociations}()}.
        This method is not called in the normal operation of the
        library. It provides a way for store admins to keep
        their storage from filling up with expired data.
        """
        return self.cleanupNonces(), self.cleanupAssociations()
|
24,877 | aab0fca44609f022a29b68211750e2d2b974bf81 | import json
import urllib.request
from urllib.error import URLError
# Remote endpoints for the inventory (spares) and interchangeability
# (alternatives) data sets.
spares_url: str = 'https://job.firstvds.ru/spares.json'
alternatives_url: str = 'https://job.firstvds.ru/alternatives.json'
def get_json(url):
    """Fetch *url* and return the raw response body.

    Returns the bytes read from the server, or the string '{}' when the
    request fails, so callers can always pass the result to json.loads()
    (which accepts both str and bytes).
    """
    try:
        with urllib.request.urlopen(url) as response:
            return response.read()
    except URLError:
        # Network failure: degrade gracefully to an empty JSON object.
        return '{}'
def get_spares():
    """Download and decode the spares inventory."""
    return json.loads(get_json(spares_url))
def get_alternatives():
    """Download and decode the interchangeable-parts (alternatives) data."""
    return json.loads(get_json(alternatives_url))
# NOTE: we assume each spare part appears in at most one list of alternatives.
def _stock_entry(count, mustbe, arrive):
    # One balance record; 'alert' fires when stock on hand plus incoming
    # deliveries cannot cover the required amount.
    return {'count': count, 'mustbe': mustbe, 'arrive': arrive,
            'alert': mustbe > count + arrive}


def get_balances():
    """Return {name: balance} for every alternatives group and every
    remaining standalone spare.

    For a group of interchangeable spares, `count` and `arrive` are summed
    across members and `mustbe` is the maximum requirement among them.
    """
    stocks = {}
    alternatives = get_alternatives()['alternatives']
    spares = get_spares()
    # First fold every group of interchangeable spares into one record,
    # removing its members from `spares` as we go.
    for group_name, members in alternatives.items():
        count = 0
        arrive = 0
        mustbe = 0
        for spare_name in members:
            spare = spares.get(spare_name)
            if spare is None:
                continue
            count += spare['count']
            arrive += spare['arrive']
            mustbe = max(spare['mustbe'], mustbe)
            spares.pop(spare_name)
        stocks[group_name] = _stock_entry(count, mustbe, arrive)
    # Then append the non-interchangeable leftovers.
    for name, spare in spares.items():
        stocks[name] = _stock_entry(spare['count'], spare['mustbe'],
                                    spare['arrive'])
    return stocks
def get_requests():
    """Return {spare: shortfall} for spares whose requirement exceeds
    stock on hand plus arrivals."""
    shortfalls = {}
    for name, info in get_spares().items():
        available = info['count'] + info['arrive']
        if info['mustbe'] > available:
            shortfalls[name] = info['mustbe'] - available
    return shortfalls
|
24,878 | 3c4fab48539a5f89d5eb0eab51bc15420d721ae3 | import copy
from collections import OrderedDict
from django.db.models import Subquery
from django.db.models.constants import LOOKUP_SEP
from django_filters import filterset, rest_framework
from django_filters.utils import get_model_field
from . import filters, utils
def related(filterset, filter_name):
    """
    Return a related filter_name, using the filterset relationship if present.
    """
    if filterset.relationship:
        return LOOKUP_SEP.join([filterset.relationship, filter_name])
    return filter_name
class FilterSetMetaclass(filterset.FilterSetMetaclass):
    """Metaclass extending django-filter's: records AutoFilter/RelatedFilter
    declarations, binds RelatedFilters to the class, and expands AutoFilters
    into per-lookup filters when a model is declared."""
    def __new__(cls, name, bases, attrs):
        new_class = super(FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
        # Record which declared filters are auto filters / related filters.
        new_class.auto_filters = [
            name for name, f in new_class.declared_filters.items()
            if isinstance(f, filters.AutoFilter)]
        new_class.related_filters = [
            name for name, f in new_class.declared_filters.items()
            if isinstance(f, filters.RelatedFilter)]
        # see: :meth:`rest_framework_filters.filters.RelatedFilter.bind`
        for name in new_class.related_filters:
            new_class.declared_filters[name].bind(new_class)
        # If model is defined, process auto filters
        if new_class._meta.model is not None:
            cls.expand_auto_filters(new_class)
        return new_class
    @classmethod
    def expand_auto_filters(cls, new_class):
        """
        Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are
        a declarative alternative to the `Meta.fields` dictionary syntax, and
        use the same machinery internally.
        """
        # get reference to opts/declared filters
        orig_meta, orig_declared = new_class._meta, new_class.declared_filters
        # override opts/declared filters w/ copies so expansion below does
        # not mutate the originals
        new_class._meta = copy.deepcopy(new_class._meta)
        new_class.declared_filters = new_class.declared_filters.copy()
        for name in new_class.auto_filters:
            f = new_class.declared_filters[name]
            # Remove auto filters from declared_filters so that they *are* overwritten
            # RelatedFilter is an exception, and should *not* be overwritten
            if not isinstance(f, filters.RelatedFilter):
                del new_class.declared_filters[name]
            # Use meta.fields to generate auto filters
            new_class._meta.fields = {f.field_name: f.lookups or []}
            for gen_name, gen_f in new_class.get_filters().items():
                # get_filters() generates param names from the model field name
                # Replace the field name with the parameter name from the filterset
                gen_name = gen_name.replace(f.field_name, name, 1)
                new_class.base_filters[gen_name] = gen_f
        # restore reference to opts/declared filters
        new_class._meta, new_class.declared_filters = orig_meta, orig_declared
class SubsetDisabledMixin:
    """
    Used to disable filter subsetting (see: :meth:`FilterSet.disable_subset`).
    """
    @classmethod
    def get_filter_subset(cls, params, rel=None):
        # Ignore the request params and expose every declared filter.
        return cls.base_filters
class FilterSet(rest_framework.FilterSet, metaclass=FilterSetMetaclass):
    def __init__(self, data=None, queryset=None, *, relationship=None, **kwargs):
        # Subset base_filters to just the filters named in the request data
        # *before* the superclass deep-copies them (performance).
        self.base_filters = self.get_filter_subset(data or {}, relationship)
        super().__init__(data, queryset, **kwargs)
        # `relationship` is the param prefix when nested under a RelatedFilter.
        self.relationship = relationship
        self.related_filtersets = self.get_related_filtersets()
        self.filters = self.get_request_filters()
    @classmethod
    def get_fields(cls):
        fields = super(FilterSet, cls).get_fields()
        # Expand the ALL_LOOKUPS sentinel into the concrete lookups that are
        # valid for each model field.
        for name, lookups in fields.items():
            if lookups == filters.ALL_LOOKUPS:
                field = get_model_field(cls._meta.model, name)
                fields[name] = utils.lookups_for_field(field)
        return fields
    @classmethod
    def get_filter_subset(cls, params, rel=None):
        """
        Returns the subset of filters that should be initialized by the
        FilterSet, dependent on the requested `params`. This helps minimize
        the cost of initialization by reducing the number of deepcopy ops.
        The `rel` argument is used for related filtersets to strip the param
        of its relationship prefix. See `.get_param_filter_name()` for info.
        """
        # Determine names of filters from query params and remove empty values.
        # param names that traverse relations are translated to just the local
        # filter names. eg, `author__username` => `author`. Empty values are
        # removed, as they indicate an unknown field eg, author__foobar__isnull
        filter_names = {cls.get_param_filter_name(param, rel) for param in params}
        filter_names = {f for f in filter_names if f is not None}
        # Preserve base_filters declaration order in the result.
        return OrderedDict(
            (k, v) for k, v in cls.base_filters.items() if k in filter_names
        )
    @classmethod
    def disable_subset(cls, *, depth=0):
        """
        Disable filter subsetting, allowing the form to render the filterset.
        Note that this decreases performance and should only be used when
        rendering a form, such as with DRF's browsable API.
        """
        if not issubclass(cls, SubsetDisabledMixin):
            # Dynamically mix in SubsetDisabledMixin so get_filter_subset
            # returns all base_filters unchanged.
            cls = type('SubsetDisabled%s' % cls.__name__,
                       (SubsetDisabledMixin, cls), {})
        # recursively disable subset for related filtersets
        if depth > 0:
            # shallow copy to prevent modifying original `base_filters`
            cls.base_filters = cls.base_filters.copy()
            # deepcopy RelatedFilter to prevent modifying original `.filterset`
            for name in cls.related_filters:
                f = copy.deepcopy(cls.base_filters[name])
                f.filterset = f.filterset.disable_subset(depth=depth - 1)
                cls.base_filters[name] = f
        return cls
    @classmethod
    def get_param_filter_name(cls, param, rel=None):
        """
        Get the filter name for the request data parameter. Returns None
        when the parameter does not correspond to any declared filter.
        ex::
            # regular attribute filters
            >>> FilterSet.get_param_filter_name('email')
            'email'
            # exclusion filters
            >>> FilterSet.get_param_filter_name('email!')
            'email'
            # related filters
            >>> FilterSet.get_param_filter_name('author__email')
            'author'
            # attribute filters based on relationship
            >>> FilterSet.get_param_filter_name('author__email', rel='author')
            'email'
        """
        # check for empty param
        if not param:
            return param
        # strip the rel prefix from the param name.
        prefix = '%s%s' % (rel or '', LOOKUP_SEP)
        if rel and param.startswith(prefix):
            param = param[len(prefix):]
        # Attempt to match against filters with lookups first. (username__endswith)
        if param in cls.base_filters:
            return param
        # Attempt to match against exclusion filters
        if param[-1] == '!' and param[:-1] in cls.base_filters:
            return param[:-1]
        # Match against relationships. (author__username__endswith).
        # Preference more specific filters. eg, `note__author` over `note`.
        for name in reversed(sorted(cls.related_filters)):
            # we need to match against '__' to prevent eager matching against
            # like names. eg, note vs note2. Exact matches are handled above.
            if param.startswith("%s%s" % (name, LOOKUP_SEP)):
                return name
        # Unknown param: fall through and return None implicitly.
def get_request_filters(self):
    """Build the filters applied for this request, adding an exclusion
    ("param!") counterpart for every filter negated in the request data.
    """
    requested = OrderedDict()
    for name, f in self.filters.items():
        requested[name] = f

        # Exclusion param for this filter, e.g. 'email!'.
        exclude_name = '%s!' % name
        if related(self, exclude_name) in self.data:
            # Deep-copy from the *base* filter so the model and parent
            # references are not themselves deep-copied.
            negated = copy.deepcopy(self.base_filters[name])
            negated.parent = f.parent
            negated.model = f.model
            negated.exclude = not f.exclude
            requested[exclude_name] = negated
    return requested
def get_related_filtersets(self):
    """Instantiate and return a child filterset for every related filter
    that is currently enabled, keyed by filter name."""
    def build(name):
        f = self.filters[name]
        return f.filterset(
            data=self.data,
            queryset=f.get_queryset(self.request),
            relationship=related(self, name),
            request=self.request,
            prefix=self.form_prefix,
        )

    return OrderedDict(
        (name, build(name))
        for name in self.related_filters
        if name in self.filters
    )
def filter_queryset(self, queryset):
    """Apply this filterset's own filters, then the related filtersets."""
    filtered = super(FilterSet, self).filter_queryset(queryset)
    return self.filter_related_filtersets(filtered)
def filter_related_filtersets(self, queryset):
    """Constrain ``queryset`` by each related filterset through a pk
    subquery.  Override this method to change filtering behavior across
    relationships."""
    for name, filterset in self.related_filtersets.items():
        # Only apply a related filterset if the request actually carried
        # data under its prefix.
        prefix = related(self, name) + LOOKUP_SEP
        if not any(key.startswith(prefix) for key in self.data):
            continue

        lookup = LOOKUP_SEP.join([self.filters[name].field_name, 'in'])
        pks = Subquery(filterset.qs.values('pk'))
        queryset = queryset.filter(**{lookup: pks})
    return queryset
def get_form_class(self):
    """Return the filterset's form class, extended so nested (related)
    filtersets render with prefixed field names and surface their errors.

    NOTE: inside ``Form`` the receiver is deliberately named ``form`` so
    that ``self`` still refers to the enclosing filterset via closure.
    """
    class Form(super(FilterSet, self).get_form_class()):
        def add_prefix(form, field_name):
            # Qualify the field name with this filterset's relationship path.
            field_name = related(self, field_name)
            return super(Form, form).add_prefix(field_name)
        def clean(form):
            cleaned_data = super(Form, form).clean()
            # when prefixing the errors, use the related filter name,
            # which is relative to the parent filterset, not the root.
            for related_filterset in self.related_filtersets.values():
                for key, error in related_filterset.form.errors.items():
                    self.form.errors[related(related_filterset, key)] = error
            return cleaned_data
    return Form
|
24,879 | 5d20dbcd94e01ae14351fc4cb63a32a957a9835f | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-11 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header).  Once this migration has
    # been applied anywhere, create a new migration instead of editing it.

    initial = True

    dependencies = [
        ('courses', '0003_auto_20171106_2038'),
    ]

    operations = [
        # Creates the Order model: billing contact details, the amount,
        # payment status, and FKs into the courses app.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('billing_first_name', models.CharField(max_length=50, verbose_name='First name')),
                ('billing_last_name', models.CharField(max_length=60, verbose_name='Last name')),
                ('billing_phone', models.CharField(max_length=20, verbose_name='Phone')),
                ('billing_email', models.EmailField(max_length=254, verbose_name='Email')),
                ('order_sum', models.IntegerField(verbose_name='Sum')),
                ('pay_status', models.BooleanField(verbose_name='Pay status')),
                ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date added')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='courses.Course', verbose_name='Course')),
                ('discount', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='courses.Discount', verbose_name='Discount')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='courses.Student', verbose_name='Student')),
            ],
            options={
                'verbose_name': 'Order',
                'verbose_name_plural': 'Orders',
                'ordering': ['-date_added'],
            },
        ),
    ]
|
24,880 | 277c75a98672edf380da6d45109774cc30c30c12 | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from paddle import nn
from paddle.nn import functional as F
class PRENHead(nn.Layer):
    """Classification head for PREN: a single linear projection, with a
    softmax over the class axis applied at inference time only."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(PRENHead, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels)

    def forward(self, x, targets=None):
        logits = self.linear(x)
        if self.training:
            # Training losses expect raw logits.
            return logits
        return F.softmax(logits, axis=2)
|
24,881 | 880468967c8c1b4fbb70ebd74f7cbed8d1603972 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Central Flask application configuration.

    Every value may be overridden by an environment variable of the same
    name; the previous hard-coded values remain as defaults, so existing
    deployments keep working unchanged.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY', 'you-will-can-guess')
    # Inlined basedir computation keeps this class self-contained.
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        'DATABASE_URL',
        'sqlite:///' + os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'app.db'))
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # SECURITY: this API key is committed to source control; rotate it and
    # keep the real value only in the environment.
    FUNDAMENTAL_ANALYSIS_API_KEY = os.environ.get(
        'FUNDAMENTAL_ANALYSIS_API_KEY', 'ff71a350e4ce0bd415aeab4a60bbad40')
    CELERY_BROKER_URL = os.environ.get(
        'CELERY_BROKER_URL', 'redis://localhost:6379/0')
    CELERY_RESULT_BACKEND = os.environ.get(
        'CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')
|
24,882 | d263a8d0c16356b81019190dcc0286cbc3869e64 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from api.base import DataAPI, DataDRFAPISet, DRFActionAPI
from api.utils import add_dataapi_inner_header, add_esb_common_params
from conf.settings import DATAMANAGE_API_URL
class DatamanageApi(object):
    """Declarative client for the Datamanage service.

    Each attribute wraps one REST endpoint (``DataAPI``) or one DRF-style
    resource collection (``DataDRFAPISet``) rooted at DATAMANAGE_API_URL.
    Instantiating the class performs no network I/O; requests happen when
    the individual API objects are invoked.
    """
    MODULE = "DATAMANAGE"
    def __init__(self):
        # Data-quality rules and their audit tasks.
        self.rules = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dataquality/rules/",
            primary_key="rule_id",
            module="datamanage",
            description="数据管理规则API",
        )
        self.tasks = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dataquality/audit_tasks/",
            primary_key="task_id",
            module="datamanage",
            description="数据管理规则任务API",
        )
        # Data dictionary (data map) retrieval endpoints.
        self.get_data_dict_count = DataAPI(
            url=DATAMANAGE_API_URL + "datamap/retrieve/get_data_dict_count/",
            method="POST",
            module="datamanage",
            description="获取数据字典列表统计数据",
        )
        self.get_data_dict_list = DataAPI(
            url=DATAMANAGE_API_URL + "datamap/retrieve/get_data_dict_list/",
            method="POST",
            module="datamanage",
            description="获取数据字典列表数据列表",
        )
        # Monitoring metrics: query from / report to the metrics store.
        self.influx_query = DataAPI(
            url=DATAMANAGE_API_URL + "dmonitor/metrics/query/",
            method="POST",
            module="datamanage",
            description="查询数据质量TSDB中的监控指标",
        )
        self.influx_report = DataAPI(
            url=DATAMANAGE_API_URL + "dmonitor/metrics/report/",
            method="POST",
            module="datamanage",
            description="上报数据监控自定义指标",
        )
        self.range_metric_by_influxdb = DataAPI(
            url=DATAMANAGE_API_URL + "lifecycle/range/range_metric_by_influxdb/",
            method="GET",
            module="datamanage",
            description="range_metric_by_influxdb",
        )
        # Alerting: alerts, event types, notification config, shields.
        self.alerts = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/alerts/",
            primary_key="alert_id",
            module="datamanage",
            description="告警相关接口",
            custom_config={
                "send": DRFActionAPI(method="post", detail=False),
            },
        )
        self.event_types = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dataquality/event_types/",
            primary_key="id",
            module="datamanage",
            description="事件类型",
        )
        self.notify_configs = DataAPI(
            url=DATAMANAGE_API_URL + "dataquality/events/notify_configs/",
            method="GET",
            module="datamanage",
            description="获取事件通知配置",
        )
        # Monitored entities; long default timeouts because these endpoints
        # enumerate large collections.
        self.dmonitor_data_sets = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/data_sets/",
            primary_key="data_set_id",
            module="datamanage",
            description="获取监控所有dataset的信息",
            default_timeout=180,
        )
        self.dmonitor_data_operations = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/data_operations/",
            primary_key="data_operation_id",
            module="datamanage",
            description="获取监控所有data_processing和data_transferring",
            default_timeout=180,
        )
        self.sampling_result_tables = DataAPI(
            url=DATAMANAGE_API_URL + "dataquality/sampling/result_tables/",
            method="GET",
            module="datamanage",
            description="获取待采样的结果表列表",
            default_timeout=180,
        )
        self.dmonitor_flows = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/flows/",
            primary_key="flow_id",
            module="datamanage",
            description="获取数据监控所需的数据流信息",
            default_timeout=180,
            custom_config={"dataflow": DRFActionAPI(method="get", detail=False)},
        )
        self.alert_configs = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/alert_configs/",
            primary_key="alert_config_id",
            module="datamanage",
            description="获取监控配置",
            custom_config={
                "dataflow": DRFActionAPI(
                    method="get",
                    url_path="dataflow/{flow_id}",
                    detail=False,
                    url_keys=["flow_id"],
                ),
                "rawdata": DRFActionAPI(
                    method="get",
                    url_path="rawdata/{flow_id}",
                    detail=False,
                    url_keys=["flow_id"],
                ),
            },
        )
        self.alert_shields = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/alert_shields/",
            primary_key="alert_shield_id",
            module="datamanage",
            description="告警屏蔽规则",
            custom_config={
                "in_effect": DRFActionAPI(method="get", detail=False),
            },
        )
        # Data-model APIs require the inner-API headers plus common ESB
        # parameters attached to every request.
        self.datamodels = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "datamodel/models/",
            primary_key="model_id",
            module="datamanage",
            description="数据模型接口集合",
            custom_config={
                "import_model": DRFActionAPI(
                    method="post", detail=False, url_path="import"
                ),
                "release": DRFActionAPI(method="post", detail=True),
            },
            custom_headers=add_dataapi_inner_header(),
            before_request=add_esb_common_params,
        )
        self.generate_datamodel_instance = DataAPI(
            url=DATAMANAGE_API_URL + "datamodel/instances/dataflow/",
            method="POST",
            module="datamanage",
            description="模型应用实例生成完整dataflow任务",
            custom_headers=add_dataapi_inner_header(),
            before_request=add_esb_common_params,
        )
        # Batch (offline) execution records and schedules.
        self.dmonitor_batch_executions = DataDRFAPISet(
            url=DATAMANAGE_API_URL + "dmonitor/batch_executions/",
            primary_key="exec_id",
            module="datamanage",
            description="按时间获取离线任务执行记录",
            default_timeout=180,
            custom_headers={
                "blueking-language": "en",
            },
            custom_config={
                "by_time": DRFActionAPI(method="get", detail=False),
                "latest": DRFActionAPI(method="get", detail=False),
            },
        )
        self.dmonitor_batch_schedules = DataAPI(
            method="GET",
            url=DATAMANAGE_API_URL + "dmonitor/batch_schedules/",
            module="datamanage",
            description=u"获取离线处理的调度信息",
            default_timeout=180,
        )
|
24,883 | 336c42792be564b21b6fa47787e77da6998ceba1 | from django.contrib import admin
# Register your models here.
from main_app.models import PicUpload
# Expose PicUpload in the Django admin with the default ModelAdmin.
admin.site.register(PicUpload)
24,884 | 96463285812ff6c57ddd82a3a161f66e5db73084 | ###############################################################################
# Imports
###############################################################################
import torch
import torch.nn as nn
###############################################################################
# Network
###############################################################################
class Net(nn.Module):
    """Binary classifier head over a CNN backbone, optionally fused with
    tabular metadata.

    The backbone's final FC layer is replaced so it emits 500 features.
    When ``n_meta_features`` is given, metadata is embedded to 250
    features and concatenated with the image features before the final
    linear layer.
    """

    def __init__(self, arch, n_meta_features=None):
        super(Net, self).__init__()
        self.arch = arch
        self.n_meta_features = n_meta_features
        # Swap the backbone classifier for a 500-feature projection.
        # NOTE(review): the 512/1280 in_features assume specific backbone
        # variants (e.g. ResNet18/34, EfficientNet-B0) -- confirm.
        if 'ResNet' in str(arch.__class__):
            self.arch.fc = nn.Linear(in_features=512, out_features=500, bias=True)
        if 'EfficientNet' in str(arch.__class__):
            self.arch._fc = nn.Linear(in_features=1280, out_features=500, bias=True)
        if n_meta_features:
            self.meta = nn.Sequential(nn.Linear(n_meta_features, 500),
                                      nn.BatchNorm1d(500),
                                      nn.ReLU(),
                                      nn.Dropout(p=0.2),
                                      nn.Linear(500, 250),  # FC layer output will have 250 features
                                      nn.BatchNorm1d(250),
                                      nn.ReLU(),
                                      nn.Dropout(p=0.2))
            # Attribute name "ouput" (sic) is kept as-is: renaming it would
            # break previously saved state_dicts.
            self.ouput = nn.Linear(500 + 250, 1)
        else:
            self.ouput = nn.Linear(500, 1)

    def forward(self, inputs):
        """Return raw logits of shape (batch, 1).

        No sigmoid in forward because we are going to use
        BCEWithLogitsLoss, which applies sigmoid for us when calculating
        the loss.
        """
        x, meta = inputs
        features = self.arch(x)
        if self.n_meta_features:
            meta_features = self.meta(meta)
            # BUG FIX: was `cnn_features`, an undefined name (NameError).
            features = torch.cat((features, meta_features), dim=1)
        return self.ouput(features)
24,885 | 299663963709d24d10c14a4862559e61575ea210 | import configparser
from view import View
from listen import Listener
from report import Reporter
from manager import Manager
class ConfigReader():
    """Build application components (view, listener, reporter, manager)
    from an INI configuration file."""

    def __init__(self, path):
        self._parser = configparser.ConfigParser()
        self._parser.read(path)

    def init_view(self):
        """Return a View if [graphical] enabled=true, else None."""
        conf = self._parser['graphical']
        # fallback=False: a missing 'enabled' key means disabled instead of
        # raising NoOptionError.
        if not conf.getboolean('enabled', fallback=False):
            return None
        return View()

    def init_listener(self):
        """Return a configured Listener if [listener] enabled=true, else None."""
        conf = self._parser['listener']
        if not conf.getboolean('enabled', fallback=False):
            return None
        addr = conf.get('listener_addr')
        port = conf.getint('listener_port')
        return Listener(addr, port)

    def init_reporter(self):
        """Return a configured Reporter if [reporter] enabled=true, else None."""
        conf = self._parser['reporter']
        if not conf.getboolean('enabled', fallback=False):
            return None
        reporter = Reporter()
        reporter.add_collector(conf.get('collector_addr'),
                               conf.getint('collector_port'))
        # BUG FIX: getfloat() raised on missing keys, which made the original
        # `is not None` checks dead code; fallback=None makes the two period
        # settings genuinely optional.
        collect_period = conf.getfloat('collect_info_period', fallback=None)
        if collect_period is not None:
            reporter.set_collect_period(collect_period)
        report_period = conf.getfloat('send_report_period', fallback=None)
        if report_period is not None:
            reporter.set_report_period(report_period)
        return reporter

    def init_manager(self):
        """Assemble a Manager from whichever components are enabled."""
        view = self.init_view()
        listener = self.init_listener()
        reporter = self.init_reporter()
        manager = Manager()
        if view is not None:
            manager.add_view(view)
        if listener is not None:
            manager.add_listener(listener)
        if reporter is not None:
            manager.add_reporter(reporter)
        return manager
|
24,886 | adebdaf27cc7d7c9c53d83547d5a4bf2a938ebc3 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 17:30:18 2020
@author: MADCAT
"""
def fizzbuzz(i):
    """Return the FizzBuzz word for ``i``: "Fizz" for multiples of 3,
    "Buzz" for multiples of 5, "FizzBuzz" for multiples of both, and the
    decimal string of ``i`` otherwise.

    Testing divisibility by 15 first removes the redundant
    ``i % 15 != 0`` guards the original branches needed.
    """
    if i % 15 == 0:
        return "FizzBuzz"
    if i % 3 == 0:
        return "Fizz"
    if i % 5 == 0:
        return "Buzz"
    return str(i)


for i in range(1, 101):
    print(fizzbuzz(i))
24,887 | d1a5fc81b21dba69a70e3c31e9012a87e2dfb616 | from abaqus import *
from abaqusConstants import *
from caeModules import *
from driverUtils import *
# Builds and runs a uniaxial compression analysis of a unit cube in
# Abaqus/CAE: geometry -> sets -> material/section -> static step ->
# BC + pressure load -> mesh -> job -> save.  Statement order matters:
# each stage references objects created by the previous one.
executeOnCaeStartup()
# Model
model = mdb.models['Model-1']
# Part: a 1x1x1 cube extruded from a unit-square sketch
sketch = model.ConstrainedSketch(name='sketch', sheetSize=1.0)
sketch.rectangle((0, 0), (1, 1))
part = model.Part(name='part', dimensionality=THREE_D, type=DEFORMABLE_BODY)
part.BaseSolidExtrude(sketch=sketch, depth=1)
# Create sets (regions located by interior/face-center coordinates)
part.Set(name='set-all', cells=part.cells.findAt(coordinates=((0.5, 0.5, 0.5), )))
part.Set(name='set-bottom', faces=part.faces.findAt(coordinates=((0.5, 0.5, 0.0), )))
part.Set(name='set-top', faces=part.faces.findAt(coordinates=((0.5, 0.5, 1.0), )))
part.Surface(name='surface-top',
side1Faces=part.faces.findAt(coordinates=((0.5, 0.5, 1.0), )))
# Assembly
model.rootAssembly.DatumCsysByDefault(CARTESIAN)
model.rootAssembly.Instance(name='instance', part=part, dependent=ON)
# Material (elastic E=1000, nu=0.2; density 2500 -- units presumably
# consistent SI-derived; confirm against the model's unit system)
material = model.Material(name='material')
material.Elastic(table=((1000, 0.2), ))
material.Density(table=((2500, ), ))
# Section
model.HomogeneousSolidSection(name='section', material='material', thickness=None)
part.SectionAssignment(region=part.sets['set-all'], sectionName='section')
# Step: static, automatic incrementation
step = model.StaticStep(name='Step-1', previous='Initial', description='',
timePeriod=1.0, timeIncrementationMethod=AUTOMATIC,
maxNumInc=100, initialInc=0.01, minInc=0.001, maxInc=0.1)
# Output request: stress, strain, displacement
field = model.FieldOutputRequest('F-Output-1', createStepName='Step-1',
variables=('S', 'E', 'U'))
# Boundary condition: fix vertical displacement (u3) of the bottom face
bottom_instance = model.rootAssembly.instances['instance'].sets['set-bottom']
bc = model.DisplacementBC(name='BC-1', createStepName='Initial',
region=bottom_instance, u3=SET)
# Load: pressure of 100 on the top surface
top_instance = model.rootAssembly.instances['instance'].surfaces['surface-top']
pressure = model.Pressure('pressure', createStepName='Step-1', region=top_instance,
magnitude=100)
# Mesh: hex-dominated (C3D8R) with wedge/tet fallbacks, 0.1 seed size
elem1 = mesh.ElemType(elemCode=C3D8R)
elem2 = mesh.ElemType(elemCode=C3D6)
elem3 = mesh.ElemType(elemCode=C3D4)
part.setElementType(regions=(part.cells, ), elemTypes=(elem1, elem2, elem3))
part.seedPart(size=0.1)
part.generateMesh()
# Job
job = mdb.Job(name='Job-1', model='Model-1')
job.writeInput()
# Submit the job and block until it finishes
job.submit()
job.waitForCompletion()
# Save abaqus model
mdb.saveAs('compression.cae')
|
24,888 | 7313a3119e38414985120a657de80e2c8a803b53 | """Database Utilities to be used by higher application layers.
All methods throw a DatabaseException if the database operation failed.
No classes have to be instantiated except the initial database declared in
__init__.py in app. All methods are static.
"""
from sys import maxint
# Query constants
# Parameterized SQL used throughout this module ('?' placeholders, sqlite3).
# -- SELECTs ----------------------------------------------------------------
GET_SPECIAL_UNAMES_BY_QID = 'select u.uname from users as u join permissions as p on p.pid=u.id and p.qid=? and p.permission_level=?'
GET_ALL_QUEUES = 'select * from queues'
GET_ALL_QUEUE_SETTINGS = 'select * from qsettings'
GET_MEMBER_DATA_BY_QID = 'select qi.uid, u.uname, qi.relative_position, qi.optional_data from qindex as qi join users as u on qi.qid=? and qi.uid=u.id order by qi.relative_position'
GET_PERMISSIONED_QIDS_BY_UID = 'select qid from permissions where pid=? and permission_level=?'
GET_POSITION = 'select relative_position from qindex where uid=? and qid=?'
GET_PROFILED_USER_BY_USERNAME = 'select * from users where temp=0 and uname=?'
GET_Q_HISTORY_BY_QID = 'select * from qhistory where qid=? and join_time is not null and leave_time is not null'
GET_QUEUES_BY_UID = 'select * from qindex where uid=?'
GET_QUEUE_SETTINGS_BY_ID = 'select * from qsettings where qid=?'
GET_TEMP_USER_BY_ID = 'select * from users where temp=1 and id=?'
# -- INSERTs ----------------------------------------------------------------
INSERT_INTO_QUEUE_HISTORY = 'insert into QHistory values (?, ?, ?, ?)'
INSERT_MEMBER_INTO_QUEUE = 'insert into QIndex values(?, ?, (select ending_index from Queues where id=?), ?)'
INSERT_PROFILED_USER = 'insert into users values(NULL, ?, ?, ?, ?, ?, ?, ?)'
INSERT_QUEUE = 'insert into queues values(null, 0, 0)'
INSERT_QUEUE_SETTINGS = 'insert into qsettings values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
INSERT_TEMP_USER = 'insert into users values(NULL, 1, ?, NULL, NULL, NULL, NULL, NULL)'
# -- DELETE / UPDATEs -------------------------------------------------------
REMOVE_MEMBER_FROM_QUEUE = 'delete from qindex where uid=? and qid=?'
UPDATE_POSITION = 'update qindex set relative_position=? where uid=? and qid=?'
UPDATE_QUEUE_FOR_ADD = 'update Queues set ending_index=ending_index+1 where id=?'
UPDATE_QUEUE_FOR_REMOVE = 'update queues set starting_index=starting_index+1 where id=?'
UPDATE_QUEUE_HISTORY = 'update qhistory set leave_time=? where uid=? and qid=? and leave_time is null'
UPDATE_QUEUE_SETTINGS = 'update qsettings set qname=?, max_size=?, keywords=?, location=?, active=?, min_wait_rejoin=?, website=?, organization=?, disclaimer=?, prompt=? where qid=?'
def query_db(query, args=()):
    """Execute ``query`` with ``args`` on the app database and return all
    rows; the cursor is always closed."""
    cursor = get_db().execute(query, args)
    try:
        return cursor.fetchall()
    finally:
        cursor.close()
def check_usernames(usernames):
    """Verify that every name in ``usernames`` is an existing profiled user.

    Returns {'SUCCESS': True} when all exist, otherwise
    {'SUCCESS': False, 'username': <first unknown name>}.
    """
    for name in usernames:
        if not query_db(GET_PROFILED_USER_BY_USERNAME, (name,)):
            return {'SUCCESS': False, 'username': name}
    return {'SUCCESS': True}
def user_dict_to_db_tuple(user_dict):
    """Map a user dict to the column order of the ``users`` table.

    ``uname``, ``pw`` and ``salt`` are required; everything else is
    optional.  ``dict.get`` replaces the deprecated ``has_key`` (removed
    in Python 3) with identical semantics.
    """
    return (user_dict.get('temp', 0),
            user_dict['uname'],
            user_dict.get('fname'),
            user_dict.get('lname'),
            user_dict.get('email'),
            user_dict['pw'],
            user_dict['salt']
            )
def qsettings_dict_to_db_tuple(qsettings):
    """Map a queue-settings dict to the ``qsettings`` insert column order.

    ``qid`` and ``qname`` are required.  ``max_size`` keeps its lazy
    ``maxint`` default via a conditional (so ``maxint`` is only touched
    when needed); ``has_key`` calls were replaced with ``in``/``get``
    (``has_key`` was removed in Python 3).
    """
    return (qsettings['qid'],
            qsettings['qname'],
            qsettings['max_size'] if 'max_size' in qsettings else maxint,
            qsettings.get('keywords'),
            qsettings.get('location'),
            qsettings.get('active', 1),
            qsettings.get('min_wait_rejoin', 0),
            qsettings.get('website'),
            qsettings.get('organization'),
            qsettings.get('disclaimer'),
            qsettings.get('prompt')
            )
def qsettings_dict_to_db_tuple_modify(qsettings):
    """Map a queue-settings dict to the ``qsettings`` UPDATE parameter
    order (``qid`` last, matching UPDATE_QUEUE_SETTINGS).

    NOTE(review): ``min_wait_rejoin`` defaults to ``maxint`` here but to
    0 in the insert path (qsettings_dict_to_db_tuple) -- likely an
    oversight; preserved as-is pending confirmation.  ``has_key`` calls
    were replaced with ``in``/``get`` (removed in Python 3).
    """
    return (qsettings['qname'],
            qsettings['max_size'] if 'max_size' in qsettings else maxint,
            qsettings.get('keywords'),
            qsettings.get('location'),
            qsettings.get('active', 1),
            qsettings['min_wait_rejoin'] if 'min_wait_rejoin' in qsettings else maxint,
            qsettings.get('website'),
            qsettings.get('organization'),
            qsettings.get('disclaimer'),
            qsettings.get('prompt'),
            qsettings['qid']
            )
class DatabaseException(Exception):
    """Raised when a database operation fails."""
    pass
class PermissionException(Exception):
    """Raised when the session user lacks the required permission."""
    pass
class ValidationException(Exception):
    """Raised when user-supplied data fails validation."""
    pass
# Database Utilities
import permissions
import validators
import sqlite3
import time
from app import get_db
#############################################
# User related utilities.
#############################################
def create_temp_user(user_dict):
    """Insert a temporary user row for ``user_dict['uname']`` and return
    the generated id (also written back into ``user_dict['id']``)."""
    db = get_db()
    cursor = db.cursor()
    cursor.execute(INSERT_TEMP_USER, (user_dict['uname'],))
    new_id = cursor.lastrowid
    cursor.close()
    db.commit()
    user_dict['id'] = new_id
    return new_id
def create_user_profile(user_dict):
    """Create a permanent (non-temporary) user account.

    Encrypts the password, stores the user row and returns the new id
    (also written back into ``user_dict['id']``).

    Raises:
        ValidationException: the username is already taken.
        sqlite3.Error: the database operation failed.
    """
    try:
        # Debug tracing; print() works identically on Python 2 and 3.
        print('enter db_util.create_user_profile')
        db = get_db()
        rows = db.execute(GET_PROFILED_USER_BY_USERNAME, (user_dict['uname'],)).fetchall()
        if rows and (len(rows) > 0):
            raise ValidationException('The given username is already in use.')
        result = validators.encrypt_password(user_dict['pw'])
        user_dict['pw'] = result[0]
        user_dict['salt'] = result[1]
        user_dict['temp'] = 0
        cursor = db.cursor()
        cursor.execute(INSERT_PROFILED_USER, user_dict_to_db_tuple(user_dict))
        user_dict['id'] = cursor.lastrowid
        cursor.close()
        db.commit()
        print('exit db_util.create_user_profile: success.')
        return user_dict['id']
    except sqlite3.Error as e:
        print('exit db_util.create_user_profile: failure. ')
        # FIX: was `print e.message`; BaseException.message is deprecated
        # and removed in Python 3 -- print the exception itself.
        print(e)
        raise e
def create_user(user_dict):
    """Adds a user defined by user_dict to the database.

    Temporary users (``temp`` truthy) get a bare row via
    create_temp_user; everyone else gets a full profile with an
    encrypted password via create_user_profile.

    Args:
        user_dict: the data about the user to be added. Any existing
            ``id`` key is overwritten with the new id.

    Returns:
        The new uid if the user was successfully added to the database.

    Raises:
        sqlite3.Error: the database operation failed.
        ValidationException: the username is already in use (only applies
            if the account isn't temporary).
    """
    # `dict.get` replaces the deprecated `has_key` (removed in Python 3);
    # a missing or falsy 'temp' means a full profile.
    if not user_dict.get('temp'):
        user_dict['id'] = create_user_profile(user_dict)
    else:
        user_dict['id'] = create_temp_user(user_dict)
    return user_dict['id']
def modify_user(user_data):
    """Modify the user to match the user_data.  (Not yet implemented.)

    The uid, username, and password are obtained from the current session,
    not the user_data.

    Args:
        user_data: the data about the user to be modified in the database.
            The id will be used to find the user in the database.

    Returns:
        None if the user's data was successfully updated.

    Raises:
        DatabaseException: the uid does not exist in the database.
        ValidationException: the current session user is not logged in.
        ValueError: the given user_data is invalid.
    """
    raise NotImplementedError()
def delete_user():
    """Delete the current session user from the database.  (Not yet
    implemented.)

    Returns:
        None if the deletion was a success.

    Raises:
        sqlite3.Error: database operation failed.
        ValidationException: the current session user is not logged in.
        PermissionException: the current session does not have the
            required permissions.
    """
    raise NotImplementedError()
def get_user_by_uname(username):
    """Return the profiled (non-temporary) user row for ``username``.

    Raises:
        ValidationException: no such username exists.
    """
    rows = query_db(GET_PROFILED_USER_BY_USERNAME, (username,))
    if rows:
        return rows[0]
    raise ValidationException('The username', username, 'was not found.')
def get_uids(usernames):
    """Map ``usernames`` to the ids of existing profiled users.

    Unknown usernames are silently skipped; ``None`` yields [].
    """
    if usernames is None:
        return list()
    ids = list()
    for uname in usernames:
        rows = query_db(GET_PROFILED_USER_BY_USERNAME, (uname,))
        if rows:
            ids.append(rows[0]['id'])
    return ids
def get_user(username, given_password):
    """Authenticate ``username`` against ``given_password``.

    Returns:
        The matching user row.  Temporary users are never considered.

    Raises:
        sqlite3.Error: database operation failed.
        ValidationException: the username/password combination is invalid.
    """
    err = 'The username password combination is invalid.'
    rows = query_db(GET_PROFILED_USER_BY_USERNAME, (username,))
    if (not rows) or (len(rows) == 0):
        raise ValidationException(err)
    encrypted_password = rows[0]['pw']
    salt = rows[0]['salt']
    if validators.are_matching(encrypted_password, salt, given_password):
        return rows[0]
    # FIX: `print` as a function works on Python 2 and 3 alike; the old
    # print-statement form is a syntax error under Python 3.
    print('passwords did not match.')
    raise ValidationException(err)
def get_user_by_uid(uid):
    """Return the rows for the user with the given id (empty list if none).

    BUG FIX: the query previously filtered on a ``uid`` column, but every
    other query in this module addresses the users table by ``id`` (see
    GET_TEMP_USER_BY_ID and the ``u.id`` joins), so ``uid`` would raise
    "no such column".
    """
    rows = query_db('select * from users where id=?', (uid,))
    return rows
def get_temp_user(temp_uid):
    """Return the temporary user row for ``temp_uid``, or None when the
    id does not belong to a temporary user."""
    rows = query_db(GET_TEMP_USER_BY_ID, (temp_uid,))
    if rows:
        return rows[0]
    return None
def get_special_users(qid, permission_level):
    """Return the usernames holding ``permission_level`` on queue ``qid``
    (empty list when there are none)."""
    rows = query_db(GET_SPECIAL_UNAMES_BY_QID, (qid, permission_level))
    if rows is None:
        return list()
    return [row[0] for row in rows]
#################################################
# Queue related utilities.
#################################################
def get_history(qid):
    """Return completed join/leave history rows for queue ``qid`` (rows
    with both timestamps set)."""
    # This will be expanded upon
    rows = query_db(GET_Q_HISTORY_BY_QID, (qid,))
    return rows
def create_queue(q_settings):
    """Creates a new queue with the defined settings.  All settings
    except qid must exist; ``admins`` is required, ``managers`` and
    ``blocked_users`` are optional.

    Args:
        q_settings: the settings for the new queue.  Any provided qid is
            ignored (a fresh one is generated and written back).

    Returns:
        The new qid if the queue was successfully created.

    Raises:
        ValidationException: the username <uname> was not found.
    """
    db = get_db()
    cursor = db.cursor()
    cursor.execute(INSERT_QUEUE)
    q_settings['qid'] = cursor.lastrowid
    cursor.execute(INSERT_QUEUE_SETTINGS, qsettings_dict_to_db_tuple(q_settings))
    cursor.close()
    db.commit()
    # `in` replaces the deprecated `has_key` (removed in Python 3).
    permissions.add_permission_list(get_uids(q_settings['admins']), q_settings['qid'], permissions.ADMIN)
    if 'managers' in q_settings:
        permissions.add_permission_list(get_uids(q_settings['managers']), q_settings['qid'], permissions.MANAGER)
    if 'blocked_users' in q_settings:
        permissions.add_permission_list(get_uids(q_settings['blocked_users']), q_settings['qid'], permissions.BLOCKED_USER)
    return q_settings['qid']
def modify_queue_settings(q_settings):
    """Modifies the queue identified by ``q_settings['qid']`` to match
    the given settings, then synchronizes admin/manager/blocked-user
    permissions.

    Args:
        q_settings: the settings for the queue to be modified.  The qid
            is used to find the queue in the database.

    Returns:
        None if the queue's settings were successfully updated.

    Raises:
        DatabaseException: the queue doesn't exist or the settings are
            invalid.
        PermissionException: the current session user may not modify this
            queue's settings.
    """
    db = get_db()
    db.execute(UPDATE_QUEUE_SETTINGS, qsettings_dict_to_db_tuple_modify(q_settings))
    db.commit()
    # `in` replaces the deprecated `has_key` (removed in Python 3).
    permissions.update_permissions(
        q_settings['qid'],
        get_uids(q_settings['admins']),
        get_uids(q_settings['managers']) if 'managers' in q_settings else None,
        get_uids(q_settings['blocked_users']) if 'blocked_users' in q_settings else None)
def delete_queue(qid):
    """Delete the queue with the given qid.  (Not yet implemented.)

    Args:
        qid: the id of the queue to be deleted.

    Returns:
        None if the queue was successfully deleted.

    Raises:
        DatabaseException: the queue doesn't exist.
        PermissionException: the current session user does not have
            permission to delete this queue.
    """
    raise NotImplementedError()
def get_queue_settings(qid):
    """Retrieve the queue settings associated with ``qid``.

    Returns:
        The qsettings row associated with the qid.

    Raises:
        sqlite3.Error: the queue doesn't exist.  (The original docstring
        claimed DatabaseException, but the code raises sqlite3.Error.)
    """
    # FIX: removed the unused `db = get_db()` local; query_db manages the
    # connection itself.
    rows = query_db(GET_QUEUE_SETTINGS_BY_ID, (qid,))
    if (not rows) or (len(rows) == 0):
        raise sqlite3.Error('The queue does not exist.')
    return rows[0]
def get_all_queues():
    """Return a pair ``(qsettings rows, queues rows)`` covering every
    queue in the database."""
    # FIX: removed the unused `db = get_db()` local; query_db manages the
    # connection itself.
    settings_rows = query_db(GET_ALL_QUEUE_SETTINGS)
    queue_rows = query_db(GET_ALL_QUEUES)
    return (settings_rows, queue_rows)
def get_permissioned_qids(uid, permission_level):
    """Return the qids on which user ``uid`` holds ``permission_level``."""
    rows = query_db(GET_PERMISSIONED_QIDS_BY_UID, (uid, permission_level))
    return rows
def add_to_queue(uid, qid, optional_data):
    """Append user ``uid`` to the end of queue ``qid``, bump the queue's
    ending index and record the join time in the history table."""
    db = get_db()
    joined_at = int(time.time())
    db.execute(INSERT_MEMBER_INTO_QUEUE, (uid, qid, qid, optional_data))
    db.execute(UPDATE_QUEUE_FOR_ADD, (qid,))
    db.execute(INSERT_INTO_QUEUE_HISTORY, (uid, qid, joined_at, None))
    db.commit()
def swap(uid1, uid2, qid):
    """Exchange the relative positions of two members of queue ``qid``."""
    pos1 = query_db(GET_POSITION, (uid1, qid))[0]['relative_position']
    pos2 = query_db(GET_POSITION, (uid2, qid))[0]['relative_position']
    db = get_db()
    db.execute(UPDATE_POSITION, (pos2, uid1, qid))
    db.execute(UPDATE_POSITION, (pos1, uid2, qid))
    db.commit()
def remove_by_uid_qid(uid, qid):
    """Remove user ``uid`` from queue ``qid``: delete the index row, bump
    the queue's starting index and stamp the open history row's leave
    time.

    Returns:
        nothing is returned. The q_member data should have been obtained
        from the software model.
    """
    db = get_db()
    db.execute(REMOVE_MEMBER_FROM_QUEUE, (uid, qid))
    db.execute(UPDATE_QUEUE_FOR_REMOVE, (qid,))
    db.execute(UPDATE_QUEUE_HISTORY, (int(time.time()), uid, qid))
    db.commit()
def get_queue_members(qid):
    """Return (uid, uname, relative_position, optional_data) rows for
    queue ``qid``, ordered by position."""
    rows = query_db(GET_MEMBER_DATA_BY_QID, (qid,))
    return rows
|
24,889 | 46e8c32a434655f5d82fc44d40b05e87528ec9ae | 3 #!/usr/bin/env python
#
# File: ProgramsTab.py
# by @BitK_
#
import re
import json
from functools import partial
from java.awt import (
Font,
Color,
GridBagLayout,
GridBagConstraints,
Dimension,
Desktop,
GridLayout,
BorderLayout,
FlowLayout,
)
from java.net import URI
from javax.swing import (
Box,
BoxLayout,
SpringLayout,
JList,
JTable,
JPanel,
JButton,
JScrollPane,
JLabel,
JTextField,
ListCellRenderer,
ListSelectionModel,
DefaultListModel,
)
from BetterJava import (
ColumnPanel,
make_constraints,
RowPanel,
FixedColumnPanel,
FixedRowPanel,
SplitPanel,
make_title_border,
HTMLRenderer,
CallbackActionListener,
)
from javax.swing.BorderFactory import createEmptyBorder
from helpers import async_call, same_size
import context
def guess_scope(s):
    """Parse *s* as a host/URL and build a Burp scope-entry dict.

    Accepts an optional http/https prefix, an optional leading ``*.``
    wildcard on the host, an optional ``:port`` and an optional path.
    Returns a dict with ``enabled``, ``protocol`` ("any" when absent),
    an anchored ``host`` regex, and ``port``/``file`` when present —
    or None when *s* does not look like a host at all.
    """
    pattern = re.compile(
        (
            r"^"
            r"(?:(?P<protocol>https?)://)?"
            r"(?P<host>"
            r"(?:\*\.)?"  # allow wildcard at the start
            r"[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*"
            r"(?:\.[a-zA-Z0-9]+(?:\-[a-zA-Z0-9-]+)*)+"
            r")"
            r"(?P<port>:[0-9]+)?"  # potential port
            r"(?:/(?P<file>.*))?"  # potential path
            r"$"
        )
    )
    m = pattern.match(s)
    if m is None:
        return None
    # escape the literal host, then turn the escaped wildcard back into ".*"
    host_regex = re.escape(m.group("host")).replace("\\*", ".*")
    entry = {
        "enabled": True,
        "protocol": m.group("protocol") or "any",
        "host": "^{}$".format(host_regex),
    }
    if m.group("port"):
        entry["port"] = m.group("port")
    if m.group("file"):
        entry["file"] = m.group("file")
    return entry
class ScopesBox(ColumnPanel):
    """Scope list panel with "Select all" and "Add to scope" actions.

    *scopes* is a sequence of program scope entries; only each entry's
    ``.scope`` string is displayed in the list.
    """
    def __init__(self, scopes):
        ColumnPanel.__init__(self)
        scope_list = JList(tuple(entry.scope for entry in scopes))
        scope_list.setVisibleRowCount(10)
        btn_list = RowPanel()
        select_all = JButton("Select all")
        # pin the button to its preferred size so the layout does not stretch it
        select_all.setMaximumSize(select_all.getPreferredSize())
        select_all.addActionListener(
            CallbackActionListener(partial(self.do_selection, scope_list, scopes))
        )
        btn_list.add(select_all)
        add_scope = JButton("Add to scope")
        add_scope.setMaximumSize(add_scope.getPreferredSize())
        add_scope.addActionListener(
            CallbackActionListener(partial(self.add_to_scope, scope_list))
        )
        btn_list.add(add_scope)
        self.add(JScrollPane(scope_list))
        self.add(btn_list)
        self.setBorder(make_title_border("Scopes"))
        self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height))
    def add_to_scope(self, scope_list, event):
        """Append each selected, parseable entry to Burp's target scope."""
        config = json.loads(context.callbacks.saveConfigAsJson("target.scope"))
        # advanced mode is required for regex-based include entries
        config["target"]["scope"]["advanced_mode"] = True
        for maybe_url in scope_list.getSelectedValues():
            url = guess_scope(maybe_url)
            if url:
                config["target"]["scope"]["include"].append(url)
        context.callbacks.loadConfigFromJson(json.dumps(config))
    def do_selection(self, scope_list, scopes, event):
        """Select every row of the scope list."""
        scope_list.setSelectionInterval(0, len(scopes) - 1)
class OutOfScopeBox(ColumnPanel):
    """Read-only panel listing the program's out-of-scope entries."""
    def __init__(self, out_of_scope):
        ColumnPanel.__init__(self)
        self.add(JScrollPane(JList(tuple(out_of_scope))))
        self.setBorder(make_title_border("Out of scope"))
        self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height))
class RewardBox(JPanel):
    """Table mapping severity level to bounty amount for *program*."""
    def __init__(self, program):
        self.setLayout(GridLayout())
        self.setBorder(make_title_border("Rewards"))
        rewards = [
            ["minimum", program.bounty_reward_min],
            ["low", program.bounty_reward_low],
            ["medium", program.bounty_reward_medium],
            ["high", program.bounty_reward_high],
            ["critical", program.bounty_reward_critical],
        ]
        table = JTable(rewards, ["level", "reward"])
        table.setMaximumSize(table.getPreferredSize())
        self.add(table)
class StatsBox(JPanel):
    """Two-column table of the program's report/response statistics."""
    def __init__(self, program):
        self.setLayout(GridLayout())
        self.setBorder(make_title_border("Stats"))
        stats = [
            ["Average response time", program.stats.average_first_time_response],
            ["Reports - total", program.stats.total_reports],
            ["Reports - last month", program.stats.total_reports_current_month],
            ["Reports - last week", program.stats.total_reports_last7_days],
            ["Reports - last 24h", program.stats.total_reports_last24_hours],
            ["Hunter thanked", program.stats.total_hunter_thanked],
        ]
        # empty headers: the table is used purely as a key/value grid
        table = JTable(stats, ["", ""])
        self.add(table)
class RulesBox(JScrollPane):
    """Scrollable HTML view of the program's rules."""
    def __init__(self, html_rules):
        # wrap the fragment in a full document so the renderer gets valid HTML
        html = u"<html><body>{}</body></html>".format(html_rules)
        html_renderer = HTMLRenderer(html)
        html_renderer.add_css_file("style.css")
        JScrollPane.__init__(self, html_renderer)
        self.setBorder(make_title_border("Rules"))
class TitleBtnBox(FixedColumnPanel):
    """"Open in browser" button linking to the program's public page."""
    def __init__(self, program):
        url = "https://yeswehack.com/programs/{}".format(program.slug)
        btn = JButton("Open in browser")
        btn.addActionListener(
            CallbackActionListener(lambda _: Desktop.getDesktop().browse(URI(url)))
        )
        self.add(btn)
class UABox(JPanel):
    """Shows the program's required User-Agent plus an "Add to settings" button.

    The button installs a Burp proxy match/replace rule that appends the
    program's User-Agent token to every outgoing request.
    """
    def __init__(self, program):
        self.setLayout(GridBagLayout())
        self.setBorder(make_title_border("User-Agent", padding=5))
        btn = JButton("Add to settings")
        ua_text = JTextField(program.user_agent)
        self.add(
            ua_text, make_constraints(weightx=4, fill=GridBagConstraints.HORIZONTAL)
        )
        self.add(btn, make_constraints(weightx=1))
        self.setMaximumSize(Dimension(9999999, self.getPreferredSize().height + 10))
        def add_to_options(event):
            """Replace any previous YWH-addon rule with one for this program."""
            prefix = "Generated by YWH-addon"
            config = json.loads(
                context.callbacks.saveConfigAsJson("proxy.match_replace_rules")
            )
            # remove other YWH addon rules
            # NOTE(review): relies on Jython/Python 2 `filter` returning a
            # list; under Python 3 the iterator has no .append — confirm
            # runtime before porting.
            match_replace_rules = filter(
                lambda rule: not rule["comment"].startswith(prefix),
                config["proxy"]["match_replace_rules"],
            )
            new_rule = {
                "is_simple_match": False,
                "enabled": True,
                "rule_type": "request_header",
                "string_match": "^User-Agent: (.*)$",
                "string_replace": "User-Agent: $1 {}".format(program.user_agent),
                "comment": "{} for {}".format(prefix, program.slug),
            }
            match_replace_rules.append(new_rule)
            config["proxy"]["match_replace_rules"] = match_replace_rules
            context.callbacks.loadConfigFromJson(json.dumps(config))
        btn.addActionListener(CallbackActionListener(add_to_options))
class TitleBox(JPanel):
    """Header bar: optional "Private" badge, centered title, browser button."""
    def __init__(self, program):
        self.setLayout(BorderLayout())
        title = JLabel(program.title)
        title.setFont(Font("Arial", Font.BOLD, 28))
        title.setHorizontalAlignment(JLabel.CENTER)
        title.setVerticalAlignment(JLabel.CENTER)
        title.setBorder(createEmptyBorder(15, 5, 15, 5))
        if not program.public:
            lbl = JLabel("Private")
            lbl.setFont(Font("Arial", Font.BOLD, 20))
            lbl.setForeground(Color(0xFF2424))
            lbl.setBorder(createEmptyBorder(15, 15, 15, 15))
            leftbox = lbl
        else:
            # invisible filler so the layout is identical for public programs
            leftbox = Box.createHorizontalGlue()
        btnbox = TitleBtnBox(program)
        btnbox.setBorder(createEmptyBorder(5, 5, 5, 5))
        self.add(leftbox, BorderLayout.LINE_START)
        self.add(title, BorderLayout.CENTER)
        self.add(btnbox, BorderLayout.LINE_END)
        # keep left/right widgets the same width so the title stays centered
        same_size(leftbox, btnbox)
        self.setMaximumSize(Dimension(99999, self.getPreferredSize().height))
class ProgramPane(JPanel):
    """Details view for one program.

    Layout: title bar on top; rules HTML on the left; scopes,
    out-of-scope, User-Agent and rewards/stats stacked on the right.
    """
    def __init__(self, program):
        self.setLayout(BorderLayout())
        left_col = RulesBox(program.rules_html)
        right_col = ColumnPanel()
        scopes = ScopesBox(program.scopes)
        right_col.add(scopes)
        if program.out_of_scope:
            out_of_scopes = OutOfScopeBox(program.out_of_scope)
            right_col.add(out_of_scopes)
        if program.user_agent:
            right_col.add(UABox(program))
        reward_stat = FixedRowPanel()
        reward_stat.add(RewardBox(program))
        reward_stat.add(StatsBox(program))
        reward_stat.setMaximumSize(
            Dimension(99999, reward_stat.getPreferredSize().height)
        )
        right_col.add(reward_stat)
        # push everything above to the top of the column
        right_col.add(Box.createVerticalGlue())
        cols = FixedRowPanel()
        cols.add(left_col)
        cols.add(right_col)
        self.add(TitleBox(program), BorderLayout.PAGE_START)
        self.add(cols, BorderLayout.CENTER)
class ProgramRenderer(ListCellRenderer, JLabel):
    """Cell renderer for the program JList.

    Selected entries render white-on-red; unselected private programs get
    a light pink background to distinguish them from public ones (white).
    """
    def getListCellRendererComponent(
        self, jlist, program, index, isSelected, cellHashFocus
    ):
        if isSelected:
            self.setBackground(Color(0xFF2424))
            self.setForeground(Color.white)
        else:
            if program.public:
                self.setBackground(Color.white)
            else:
                # BUGFIX: the original used Color(0xFFDDDDD) — seven hex
                # digits — which java.awt.Color truncates to 0xFDDDDD.
                # The intended light pink is 0xFFDDDD.
                self.setBackground(Color(0xFFDDDD))
            self.setForeground(Color.black)
        self.setText(program.title)
        self.setOpaque(1)
        self.setBorder(createEmptyBorder(5, 10, 5, 10))
        return self
class ProgramsTab(JPanel):
    """Main tab: program list on the left, selected program's details right."""
    def __init__(self):
        self.programs = []
        self.setLayout(BoxLayout(self, BoxLayout.PAGE_AXIS))
        self.JprogramList = JList()
        self.JprogramList.setSelectionMode(ListSelectionModel.SINGLE_SELECTION)
        self.JprogramList.addListSelectionListener(self.handle_select)
        scrollPane = JScrollPane(self.JprogramList)
        scrollPane.setMinimumSize(Dimension(300, 0))
        self.splitPane = SplitPanel(scrollPane, JPanel())
        self.add(self.splitPane)
        self.load_program_list()
    def load_program_list(self):
        """Fetch the program list asynchronously, then display it (or an error)."""
        async_call(
            context.api.get_programs, self.display_program_list, self.display_error
        )
    def display_program_list(self, programs):
        """Populate the JList with *programs* and load the first one's details."""
        self.programs = programs
        model = DefaultListModel()
        for program in programs:
            model.addElement(program)
        self.JprogramList.setModel(model)
        self.JprogramList.setCellRenderer(ProgramRenderer())
        if self.programs:
            async_call(
                lambda: context.api.get_program_details(self.programs[0].slug),
                self.load_program_details,
            )
        else:
            self.splitPane.setRightComponent(JPanel())
    def display_error(self, error):
        """Clear the list and surface *error* in the details pane."""
        self.JprogramList.setListData(tuple())
        self.splitPane.setRightComponent(JLabel("Error : {}".format(error)))
    def load_program_details(self, pgm_details):
        """Swap in a fresh details pane while preserving the divider position."""
        pane = ProgramPane(pgm_details)
        loc = self.splitPane.getDividerLocation()
        self.splitPane.setRightComponent(pane)
        self.splitPane.setDividerLocation(loc)
    def handle_select(self, event):
        """List-selection callback: fetch details for the chosen program."""
        jlist = event.source
        if event.valueIsAdjusting:
            return None
        selected_idx = jlist.getSelectedIndex()
        # BUGFIX: the guard must also reject selected_idx == len(self.programs);
        # the original used '>', which would IndexError on that edge value.
        if selected_idx < 0 or selected_idx >= len(self.programs):
            return None
        slug = self.programs[selected_idx].slug
        async_call(
            lambda: context.api.get_program_details(slug), self.load_program_details
        )
|
# SWEA "matrix finding": locate zero-separated rectangular blocks of
# non-zero cells in an N x N grid and report (rows, cols) of each block,
# sorted by area, then rows, then cols.
import sys
sys.stdin = open('workshop행렬찾기_input.txt')

tc = int(input())
for T in range(tc):
    N = int(input())
    table = [list(map(int, input().split())) for _ in range(N)]
    ware = []  # one [area, rows, cols] entry per matrix found
    for i in range(N):
        for j in range(N):
            if table[i][j] == 0:
                continue
            # (i, j) is the top-left corner of an uncounted matrix:
            # measure its height and width by scanning down and right.
            # BUGFIX: bound the scans by N — the original `while True`
            # loops indexed table[i2][j] / table[i][j2] one past the grid
            # and raised IndexError whenever a matrix touched the edge.
            row = 0
            i2 = i
            while i2 < N and table[i2][j] != 0:
                row += 1
                i2 += 1
            col = 0
            j2 = j
            while j2 < N and table[i][j2] != 0:
                col += 1
                j2 += 1
            ware.append([row * col, row, col])
            # zero the matrix so it is not counted again
            for r in range(i, i + row):
                for c in range(j, j + col):
                    table[r][c] = 0
    ware.sort()
    print(f'#{T + 1} {len(ware)}', end=" ")
    for area, rows, cols in ware:
        print(rows, cols, end=" ")
    print()
def isVowel2(char):
    '''
    char: a single letter of any case

    returns: True if char is one of the vowels a/e/i/o/u (either case)
    and False otherwise.
    '''
    # Fixed: the original bound its lookup sequence to the name `list`,
    # shadowing the builtin, and carried a stray trailing semicolon.
    return char.lower() in ('a', 'e', 'i', 'o', 'u')
print isVowel2('a') |
24,892 | 623c0a528bd84f78f49e33917cc4531591e7ecb8 | import pdb
import numpy as np
import cPickle as pickle
from gensim.models import Word2Vec
from cut1 import readTXT
from genans import parseAnsOneHot
# Build per-question feature vectors for the MC500 dev set (Python 2 script):
# concatenates a precomputed question embedding with the averaged GloVe
# vector of each answer-candidate entry, then pickles [features, one-hot
# answer] pairs.
wordvec_file = '../GloVe/glove.6B.300d.txt'
folder = "embs.mc500.dev.txt/"
ansFilename='../Data/mc500.dev.ans'
txtFilename = '../Data/mc500.dev.txt'
stopWordFile = '../Data/stopwords.txt'
dataPickle_name = "../Pickle/"+"mc500.dev.auto.txt"+".pickle"
print "pickling to ... ",dataPickle_name
print "Loading wor2vec..."
word_vec = Word2Vec.load_word2vec_format(wordvec_file, binary=False)
ans = parseAnsOneHot(ansFilename)
print "Loading",txtFilename.split('/')[-1],"..."
txtList = readTXT(txtFilename)
data = []
for q_id in range(len(ans)):
    # precomputed question embedding: one whitespace-separated number file
    # per question (assumed numeric — np.asarray parses them as float32)
    fout = open(folder+str(q_id)+'_txt','r')
    one_Q = []
    for line in fout:
        for word in line.split():
            one_Q.append(word)
    Q = np.asarray(one_Q,dtype='float32')
    oneQ = []
    # txtList[q_id][0] is presumably the question itself; entries 1..n are
    # the answer candidates — TODO confirm against readTXT's output.
    for entry in txtList[q_id][1:]:
        count = 0.
        temp_vector = np.zeros(300,dtype='float32')
        for word in entry:
            word = word.lower()
            if word not in word_vec:
                # out-of-vocabulary handling: expand common contractions
                # (adding the expansion's vector) or split on punctuation.
                if '\'s' in word:
                    word = word.split('\'')[0]
                elif 'n\'t' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec[word.split('n')[0]]))
                    count += 1.
                    word = 'not'
                elif '\'d' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec[word.split('\'')[0]]))
                    count += 1.
                    word = 'would'
                elif 'i\'m' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec['i']))
                    count += 1.
                    word = 'am'
                elif '\'ll' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec[word.split('\'')[0]]))
                    count += 1.
                    word = 'will'
                elif '\'ve' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec[word.split('\'')[0]]))
                    count += 1.
                    word = 'have'
                elif '\'re' in word:
                    temp_vector = np.add(temp_vector,np.asarray(word_vec[word.split('\'')[0]]))
                    count += 1.
                    word = 'are'
                elif '(' in word:
                    word = word.split('(')[1]
                elif ')' in word:
                    word = word.split(')')[0]
                elif '.' in word:
                    # split on the punctuation mark and add each known piece
                    for oneword in word.split('.'):
                        if oneword and oneword in word_vec:
                            temp_vector = np.add(temp_vector,np.asarray(word_vec[oneword]))
                            count+=1.
                    continue
                elif ';' in word:
                    for oneword in word.split(';'):
                        if oneword and oneword in word_vec:
                            temp_vector = np.add(temp_vector,np.asarray(word_vec[oneword]))
                            count+=1.
                    continue
                elif ':' in word:
                    for oneword in word.split(':'):
                        if oneword and oneword in word_vec:
                            temp_vector = np.add(temp_vector,np.asarray(word_vec[oneword]))
                            count+=1.
                    continue
                elif '\'' in word:
                    for oneword in word.split('\''):
                        if oneword and oneword in word_vec:
                            temp_vector = np.add(temp_vector,np.asarray(word_vec[oneword]))
                            count+=1.
                    continue
                elif '-' in word:
                    for oneword in word.split('-'):
                        if oneword and oneword in word_vec:
                            temp_vector = np.add(temp_vector,np.asarray(word_vec[oneword]))
                            count+=1.
                    continue
            # look up the (possibly rewritten) word; still-unknown words
            # are printed and skipped
            try:
                temp_vector = np.add(temp_vector,np.asarray(word_vec[word]))
                count += 1.
            except:
                print word
        if count == 0:
            oneQ.append(temp_vector)
        else:
            # average of all word vectors found for this entry
            oneQ.append(np.divide(temp_vector,count))
    data.append([np.hstack([Q]+oneQ),ans[q_id]])
# NOTE(review): debugger breakpoint left in — remove before unattended runs.
pdb.set_trace()
print "Pickling..."
fh =open(dataPickle_name,'wb')
pickle.dump(data,fh,pickle.HIGHEST_PROTOCOL)
fh.close()
24,893 | dec2e8085a917c7c61c23af8fdb3723f32edabef | from commons import *
from api import *
# Ensure the chat collections exist (create_collection is assumed to be a
# no-op when the collection is already present — TODO confirm in aqlc).
aqlc.create_collection('chat_messages')
aqlc.create_collection('chat_channels')
aqlc.create_collection('chat_memberships')
'''
chat messages
- uid
- cid
- content
- t_c
chat_channels
- uid (creator)
- t_c
- title
chat_memberships
- uid
- cid
- t_c
'''
class Chat:
    """Chat service: channels, memberships and rate-limited messages.

    Data lives in three ArangoDB collections (chat_channels,
    chat_memberships, chat_messages).  Methods below the ```####``` marker
    are the request-facing API and read the current user from flask's
    ``g.selfuid``.
    """
    def get_new_channel_id(self):
        """Allocate the next unused channel id."""
        return obtain_new_id('chat_channel')
    def get_channel(self, cid):
        """Return the channel document for *cid*, or None when absent.

        BUGFIX: the original indexed ``[0]`` unconditionally, raising
        IndexError for unknown cids and making callers' ``if not channel``
        checks unreachable.
        """
        found = aql('for i in chat_channels filter i.cid==@cid return i',
            cid=cid, silent=True)
        return found[0] if found else None
    def new_membership(self, cid, uid):
        """Insert a (cid, uid) membership row; reject duplicates."""
        exist = aql('''for i in chat_memberships
            filter i.cid==@cid and i.uid==@uid
            return i''', cid=cid, uid=uid, silent=True)
        if exist:
            raise Exception('you are already in that channel')
        # BUGFIX: silent=True is an option of aql(); the original passed it
        # inside dict(...), storing a stray `silent: true` field on every
        # membership document instead of silencing the query.
        return aql('insert @k into chat_memberships', silent=True, k=dict(
            uid=uid,
            cid=cid,
            t_c=time_iso_now(),
        ))
    def create_channel_uid(self, uid, title):
        """Create a channel owned by *uid*; (owner, title) must be unique."""
        existing = aql('for i in chat_channels filter i.uid==@uid and i.title==@title return i', uid=uid, title=title)
        if existing:
            raise Exception('channel with the same title and owner already exists')
        cid = self.get_new_channel_id()
        newc = aql('insert @k into chat_channels return NEW',
            k=dict(
                uid=uid,
                title=title,
                cid=cid,
                t_c=time_iso_now(),
            ))[0]
        return newc
    def post_message(self, cid, uid, content):
        """Validate and store a message from *uid* in channel *cid*.

        Checks, in order: sender not banned; content length; channel
        exists; membership (skipped for uid<=0 and for channel 1);
        duplicate of the previous message; 15 s cooldown; spam filter
        (spam is stored but flagged).
        """
        banned_check()
        content = content.strip()
        content_length_check(content)
        # check if channel exists
        channel = self.get_channel(cid)
        if not channel: raise Exception('channel id not found')
        # check if user is a member (cid 1 is exempt — presumably the
        # public channel; negative/zero uids are system senders)
        if uid>0 and channel['cid']!=1:
            if not aql('''
                for i in chat_memberships
                filter i.uid==@uid and i.cid==@cid
                return i''', uid=uid, cid=cid):
                raise Exception('you are not member of that channel')
        lastm = self.get_last_message(uid)
        cdt = 15  # cooldown between two messages, in seconds
        earlier = time_iso_now(-cdt)
        if lastm:
            if lastm['content']==content:
                raise Exception('repeatedly sending same message')
            if lastm['t_c']>earlier:
                raise Exception(f'两次发送间隔应大于{cdt}秒,请稍微等一下')
        new_msg = dict(
            cid=cid, uid=uid, content=content, t_c=time_iso_now()
        )
        spam_detected = spam_kill(content)
        if spam_detected:
            new_msg['spam']=True
        aql('insert @k into chat_messages', k=new_msg)
        return {'error':False}
    def get_last_message(self, uid):
        """Return *uid*'s most recent message document, or None."""
        lastm = aql('''
            for i in chat_messages filter i.uid==@uid sort i.t_c desc
            limit 1 return i
        ''', uid=uid)
        return None if not lastm else lastm[0]
    ############ request-facing API (reads current user from flask g) ####
    def create_channel(self, title):
        """Create a channel owned by the logged-in user."""
        must_be_logged_in()
        uid = g.selfuid
        title_length_check(title)
        newc = self.create_channel_uid(uid, title)
        cid = newc['cid']
        return {'error':False,'channel':newc, 'cid':cid}
    def list_channels(self):
        """Return every channel (no membership filtering)."""
        uid = g.selfuid
        res = aql('for i in chat_channels return i')
        return {'channels':res}
    def join_channel(self, cid):
        """Join channel *cid*.

        Non-owners may only join "公海" (the open channel) or channels
        whose owner follows them.
        """
        must_be_logged_in()
        channel = self.get_channel(cid)
        if not channel:
            raise Exception('channel cid not found')
        uid = g.selfuid
        cuid = channel['uid']
        if uid!=cuid and channel['title']!="公海":
            # you are not owner of said channel: require the owner to
            # follow the joining user
            followings = aql('''
                for i in followings
                filter i.follow==true
                and i.to_uid==@uid and i.uid==@cuid
                return i
            ''', uid=uid, cuid=cuid)
            if not followings:
                raise Exception('cant join channels of someone who didnt follow you')
        self.new_membership(cid, uid)
        return {'error':False}
    def post(self, cid, content):
        """Post *content* to channel *cid* as the logged-in user."""
        must_be_logged_in()
        uid = g.selfuid
        return self.post_message(cid, uid, content)
    def get_messages_after(self, cid, ts):
        """Oldest-first page of up to 25 messages in *cid* newer than *ts*."""
        ma = aqls('''
            for i in chat_messages
            filter i.cid==@cid and i.t_c > @ts
            sort i.t_c asc
            limit 25
            return i
        ''', ts=ts, cid=cid)
        for m in ma: m['content'] = self.render_message(m)
        return {'messages': ma}
    def get_messages_before(self, cid, ts):
        """Newest-first page of up to 25 messages in *cid* older than *ts*."""
        ma = aqls('''
            for i in chat_messages
            filter i.cid==@cid and i.t_c < @ts
            sort i.t_c desc
            limit 25
            return i
        ''', ts=ts, cid=cid)
        for m in ma: m['content'] = self.render_message(m)
        return {'messages': ma}
    def render_message(self, m):
        """Render one message document to a trimmed HTML snippet."""
        rendered = render_template_g('chat_message.html.jinja',
            message = m,
        )
        return rendered.strip()
    def test(self):
        """Liveness probe used by the RPC dispatcher."""
        return {'test':'success'}
# Singleton service instance used by the RPC dispatcher below.
chat = Chat()
# Composite persistent indices backing the hot query paths above
# (lookups by channel + time, and by user + time).
IndexCreator.create_indices('chat_messages', [['cid','t_c'],['uid','t_c']])
IndexCreator.create_indices('chat_channels', [['cid','t_c'],['uid','t_c']])
IndexCreator.create_indices('chat_memberships',
    [['cid','uid','t_c'],['uid','t_c']])
@register('chat')
def _():
    """RPC endpoint: dispatch ``{f, a, kw}`` from the request body onto `chat`.

    NOTE(review): ``f`` comes straight from the client and getattr exposes
    every attribute of the Chat instance — confirm this surface is intended.
    """
    j = g.j
    fname = j['f']
    args = j['a'] if 'a' in j else []
    kwargs = j['kw'] if 'kw' in j else {}
    f = getattr(chat, fname)
    res = f(*args, **kwargs)
    return res
@app.route('/deer')
@app.route('/liao')
@app.route('/chat')
def chatpage():
    """Render the chat page shell; messages are fetched later by the client."""
    # m = chat.get_messages_before(1, '2047')['messages']
    m = []
    return render_template_g('chat.html.jinja',
        page_title = '聊天室',
        hide_title = True,
        messages = m,
    )
|
24,894 | 14ead71de8a387edca9877604134f6e27f972ccc | # -*- coding: UTF-8 -*-
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
import numpy as np
def forms_points():
    """Score each research group from the Google Forms response sheet.

    Reads the responses spreadsheet via a service account, renames the
    long question headers to short column names, computes a weighted
    point total per group and returns a one-row DataFrame (index
    'Pontos') with one column per group.
    """
    # OAuth scope for the Google Sheets feed
    scope = ['https://spreadsheets.google.com/feeds']
    # service-account credentials from the local JSON key file
    credentials = ServiceAccountCredentials.from_json_keyfile_name('credenciais_sheets.json', scope)
    gc = gspread.authorize(credentials)
    # responses spreadsheet:
    # https://docs.google.com/spreadsheets/d/1lpRAk_SSG8iJOhMNKV1yVRTK61zkforDL_6Lm2iUZBg/edit?pli=1#gid=1096379494
    # open by its ID/key (the part between /d/ and /edit in the URL)
    wks = gc.open_by_key('1lpRAk_SSG8iJOhMNKV1yVRTK61zkforDL_6Lm2iUZBg')
    # first worksheet (page 1)
    worksheet = wks.get_worksheet(0)
    # whole sheet as a list of dicts, one per row
    list_of_dicts = worksheet.get_all_records()
    # DataFrame built from all sheet records
    dataframe = pd.DataFrame(list_of_dicts)
    # mapping from the long form-question headers to short column names.
    # NOTE(review): the last two entries repeat earlier keys verbatim —
    # harmless (identical values) but worth deduplicating.
    colunas = {
        'Em caso positivo, inclua nome, sigla e link do grupo no Diretório de Grupos do CNPq.':'grupo_no_Diretório_Grupos_CNPq',
        'Email Address':'Email',
        'Indique o número de alunos de graduação com bolsa utilizando o espaço de pesquisa: (40 pontos cada)':'alunos_grad_com_bolsa',
        'Indique o número de alunos de graduação sem bolsa utilizando o espaço de pesquisa: (20 pontos cada)':'alunos_grad_sem_bolsa',
        'Indique o número de alunos de pós-graduação com bolsa utilizando o espaço de pesquisa: (60 pontos cada)':'alunos_pos_com_bolsa',
        'Indique o número de alunos de pós-graduação sem bolsa utilizando o espaço de pesquisa: (40 pontos cada)':'alunos_pos_sem_bolsa',
        'Indique o número de docentes da EACH no grupo que estão credenciados em programas de pós-graduação da EACH: (100 pontos cada)':'n_docentes_cred_pos_EACH',
        'Indique o número de docentes da EACH no grupo que estão credenciados em programas de pós-graduação fora da EACH: (50 pontos cada)':'n_docentes_cred_pos_fora_EACH',
        'Indique o número de docentes da EACH no grupo que não tem credenciamento em programas de pós-graduação: (10 pontos cada)':'n_docentes_sem_cred_pos_EACH',
        'Indique o número de docentes da EACH que ocupam exclusivamente o espaço do presente grupo: (100 pontos cada)':'QtD_docentes_ocupantes',
        'Indique o número de docentes da EACH que ocupam simultaneamente o espaço do presente grupo e outros espaços de pesquisa: (20 pontos cada)':'QtD_docentes_ocupantes_simultaneamente',
        'Indique o número de egressos (ex-alunos) postulantes a vaga na pós-graduação utilizando o espaço de pesquisa: (10 pontos cada)':'ex_alunos_com_pos',
        'Indique o número de estagiários de pós-doutorado com bolsa utilizando o espaço de pesquisa: (40 pontos cada)':'estagiarios_pos-doc_com_bolsa',
        'Indique o número de estagiários de pós-doutorado sem bolsa utilizando o espaço de pesquisa: (20 pontos cada)':'estagiarios_pos-doc_sem_bolsa',
        'Indique o número de fomentos a pesquisa (em andamento e concluídas) obtidos pelos docentes da EACH no grupo: (100 pontos cada)':'Captacoes_financeiras',
        'Indique o número de orientações de doutorado (finalizadas ou vigentes) dos docentes da EACH no grupo: (100 pontos cada)':'n_orientações_doutorado',
        'Indique o número de orientações de iniciação científica, PUB ou IC voluntária (finalizadas ou vigentes) dos docentes da EACH no grupo: (40 pontos cada)':'n_orientacoes_IC_Pub',
        'Indique o número de orientações de mestrado (finalizadas ou vigentes) dos docentes da EACH no grupo: (80 pontos cada)':'n_orientacoes_mestrado',
        'Indique o número de orientações de trabalhos de conclusão de curso (finalizadas ou vigentes) dos docentes da EACH no grupo: (10 pontos cada)':'orientacoes_TCC',
        'Indique o número de outras modalidades de orientações de graduação (finalizadas ou vigentes) dos docentes da EACH no grupo: (10 pontos cada)':'n_outras_orientacoes',
        'Indique o número de professores colaboradores utilizando o espaço de pesquisa: (30 pontos cada)':'n_profs_colaboradores',
        'Indique o número de professores visitantes utilizando o espaço de pesquisa: (30 pontos cada)':'n_profs_visitantes',
        'Indique o número de servidores técnicos(as) ou secretários(as) utilizando o espaço de pesquisa: (10 pontos cada)':'servidores_técnicos_secretários',
        'Indique o número de supervisões de pós-doutorado (finalizadas ou vigentes) dos docentes da EACH no grupo: (70 pontos cada)':'n_supervisões_pós-doutorado',
        'Nome do Grupo de Pesquisa':'Grupo',
        'O espaço de pesquisa utilizado é contínuo, ou seja, há contiguidade nos ambientes ocupados pelo grupo para atividades de pesquisa?':'contiguidade',
        'O espaço utilizado é ocupado por mais de um docente da EACH?':'Mais_de_um_Docente',
        'O espaço utilizado é ocupado por mais de um grupo de pesquisa?':'Mais_de_um_grupo',
        'O grupo está cadastrado no diretório de Grupos do CNPq?':'Cadastro_no_CNPQ',
        'Por favor, inclua os currículos Lattes atualizados dos docentes da EACH pertencentes ao grupo para cômputo da produção docente (formato de arquivo xml, extraído diretamente da Plataforma Lattes):':'xmls',
        'Quantas patentes os membros possuem?':'n_patentes_grupo',
        'Quantas produções artísticas, culturais e/ou técnica do Extrato A os membros do grupo possuem?':'produções_artist_culturais_A',
        'Quantas produções artísticas, culturais e/ou técnica do Extrato B os membros do grupo possuem?':'produções_artist_culturais_B',
        'Quantas produções artísticas, culturais e/ou técnica do Extrato C os membros do grupo possuem?':'produções_artist_culturais_C',
        'Quantos prêmios e honrarias os membros do grupo possuem?':'Quantidade_Prêmios_Honrarias',
        'Quantos softwares os membros do grupo já desenvolveram?':'Qtd_Softwares',
        'Quantos são os bolsistas de produtividade acadêmica de extrato 1A?':'bolsa_prodt_1A',
        'Quantos são os bolsistas de produtividade acadêmica de extrato 1B?':'bolsa_prodt_1B',
        'Quantos são os bolsistas de produtividade acadêmica de extrato 1C?':'bolsa_prodt_1C',
        'Quantos são os bolsistas de produtividade acadêmica de extrato 1D?':'bolsa_prodt_1D',
        'Quantos são os bolsistas de produtividade acadêmica de extrato 2?':'bolsa_prodt_2',
        'Selecione a opção que melhor descreve a frequência de uso do espaço de pesquisa:':'frequência_uso',
        'Selecione a opção que melhor descreve a natureza do uso do espaço de pesquisa:':'natureza_do_uso',
        'Timestamp':'Timestamp',
        'Indique o número de alunos de pós-graduação com bolsa utilizando o espaço de pesquisa: (60 pontos cada)':'alunos_pos_com_bolsa',
        'Indique o número de alunos de pós-graduação sem bolsa utilizando o espaço de pesquisa: (40 pontos cada)':'alunos_pos_sem_bolsa',
    }
    # rename the DataFrame columns
    dataframe = dataframe.rename(colunas,axis=1)
    # NOTE(review): the original comment here said "change column order",
    # but no reordering code follows.
    # number of groups (rows). NOTE(review): .count() skips NaN — assumes
    # every row has a 'Grupo' value.
    qtd_linhas = dataframe['Grupo'].count()
    soma_pesos = 10 # total of section weights (divisor for the final score)
    point = {}  # group name -> weighted score
    for i in range(qtd_linhas):
        pontos_medios = 0
        pontos = []
        # yes/no and categorical answers, each weighted x2
        if dataframe.loc[i]['Mais_de_um_Docente'] == 'Sim':
            pontos.append(100*2)
        if dataframe.loc[i]['Mais_de_um_grupo'] == 'Sim':
            pontos.append(100*2)
        if dataframe.loc[i]['natureza_do_uso'] == 'Experimentos, análises, reuniões,armazenamento de material e equipamentos':
            pontos.append(100*2)
        if dataframe.loc[i]['natureza_do_uso'] == 'Reuniões,armazenamento de material/equipamentos':
            pontos.append(80*2)
        if dataframe.loc[i]['natureza_do_uso'] == 'Reuniões':
            pontos.append(40*2)
        if dataframe.loc[i]['frequência_uso'] == 'Espaço utilizado esporadicamente':
            pontos.append(20*2)
        if dataframe.loc[i]['frequência_uso'] == 'Espaço utilizado regularmente':
            pontos.append(100*2)
        if dataframe.loc[i]['Cadastro_no_CNPQ'] == 'Sim':
            pontos.append(100*2)
        if dataframe.loc[i]['Cadastro_no_CNPQ'] == 'Não':
            pontos.append(20*2)
        pontos.append(dataframe.loc[i]['QtD_docentes_ocupantes']*100*2)
        pontos.append(dataframe.loc[i]['QtD_docentes_ocupantes_simultaneamente']*20*2)
        if dataframe.loc[i]['contiguidade'] == 'Sim':
            pontos.append(100)
        if dataframe.loc[i]['contiguidade'] == 'Não, o espaço é fracionado em diferentes locais na EACH':
            pontos.append(50)
        # numeric fields: each answer count times its per-item point value
        pontos.append(dataframe.loc[i]['n_docentes_cred_pos_EACH']*100)
        pontos.append(dataframe.loc[i]['n_docentes_cred_pos_fora_EACH']*50)
        pontos.append(dataframe.loc[i]['n_outras_orientacoes']*10)
        pontos.append(dataframe.loc[i]['Captacoes_financeiras']*100)
        pontos.append(dataframe.loc[i]['alunos_pos_com_bolsa']*60)
        pontos.append(dataframe.loc[i]['alunos_pos_sem_bolsa']*40)
        pontos.append(dataframe.loc[i]['estagiarios_pos-doc_com_bolsa']*40)
        pontos.append(dataframe.loc[i]['estagiarios_pos-doc_sem_bolsa']*20)
        pontos.append(dataframe.loc[i]['n_profs_colaboradores']*30)
        pontos.append(dataframe.loc[i]['n_profs_visitantes']*30)
        pontos.append(dataframe.loc[i]['alunos_grad_com_bolsa']*40)
        pontos.append(dataframe.loc[i]['alunos_grad_sem_bolsa']*20)
        pontos.append(dataframe.loc[i]['ex_alunos_com_pos']*10)
        pontos.append(dataframe.loc[i]['servidores_técnicos_secretários']*10)
        pontos.append(dataframe.loc[i]['bolsa_prodt_1A']*100)
        pontos.append(dataframe.loc[i]['bolsa_prodt_1B']*80)
        pontos.append(dataframe.loc[i]['bolsa_prodt_1C']*60)
        pontos.append(dataframe.loc[i]['bolsa_prodt_1D']*40)
        pontos.append(dataframe.loc[i]['bolsa_prodt_2']*20)
        pontos.append(dataframe.loc[i]['Qtd_Softwares']*60)
        pontos.append(dataframe.loc[i]['n_patentes_grupo']*120)
        pontos.append(dataframe.loc[i]['Quantidade_Prêmios_Honrarias']*50)
        pontos.append(dataframe.loc[i]['produções_artist_culturais_A']*100)
        pontos.append(dataframe.loc[i]['produções_artist_culturais_B']*60)
        pontos.append(dataframe.loc[i]['produções_artist_culturais_C']*20)
        # weighted average of the section scores, rounded to 2 decimals
        pontos_medios = round(np.sum(pontos)/soma_pesos,2)
        point.update({dataframe.loc[i]["Grupo"]: pontos_medios})
    df = pd.DataFrame(data=point, index=['Pontos'])
    return(df)
24,895 | 735ab1e6ea9fdb9f54609cc4b2c24f08fc7011f5 | __author__ = "Foleevora"
import numpy as np
import cv2
import matplotlib.pyplot as plt
def showImage():
    """OpenCV / matplotlib image-loading demos.

    Every numbered example below is commented out; uncomment one section
    at a time to try it.  BUGFIX: the original body contained only
    comments, which is a SyntaxError in Python (a function suite needs at
    least one statement) — this docstring doubles as the required body,
    making the function a no-op by default.
    """
    # 1. load and display an image
    # imgfile = 'images/logo.jpg'
    # img = cv2.imread(imgfile, cv2.IMREAD_COLOR)
    # cv2.namedWindow('logo', cv2.WINDOW_NORMAL)
    # cv2.imshow('logo', img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # 2. copy the image to disk when 'c' is pressed, quit on ESC
    # imgfile = 'images/logo.jpg'
    # img = cv2.imread(imgfile, cv2.IMREAD_COLOR)
    # cv2.imshow('logo', img)
    #
    # k = cv2.waitKey(0) & 0xFF
    #
    # if k == 27:
    #     cv2.destroyAllWindows()
    # elif k == ord('c'):
    #     cv2.imwrite('images/logo_copy.jpg', img)
    #     cv2.destroyAllWindows()

    # 3. load and display a grayscale image with matplotlib
    # imgfile = 'images/logo.jpg'
    # img = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)
    #
    # plt.imshow(img, cmap='gray', interpolation='bicubic')
    # plt.xticks([])
    # plt.yticks([])
    # plt.title('logo')
    # plt.show()


if __name__ == '__main__':
    showImage()
|
24,896 | b451bd1e68f44545dd1a66a84bb7b0f788c9957a | import tensorflow as tf
import scanpy as sc
import numpy as np
import os
|
24,897 | a4dd71a2a4a482053089080114612e91712e79d7 | from binary_tree import *
"""
https://leetcode.com/problems/closest-binary-search-tree-value/
https://discuss.leetcode.com/topic/22590/4-7-lines-recursive-iterative-ruby-c-java-python
https://discuss.leetcode.com/topic/37526/clean-python-code
"""
def closest_value(root, target):
    """Return the value in the BST rooted at *root* closest to *target*.

    Walks the binary-search path for *target*, remembering every value
    visited, and returns the one with the smallest absolute difference.
    Ties are resolved in favour of the value seen deepest on the path
    (the path is scanned in reverse).
    """
    visited = []
    node = root
    while node:
        visited.append(node.val)
        node = node.left if target < node.val else node.right
    return min(reversed(visited), key=lambda v: abs(target - v))
def closest_value_2(root, target):
    """Iterative variant: track the best candidate while descending.

    Starts with the root's value and updates it whenever a strictly
    closer value is encountered on the search path for *target*.
    """
    best = root.val
    node = root
    while node:
        if abs(node.val - target) < abs(best - target):
            best = node.val
        node = node.left if target < node.val else node.right
    return best
if __name__ == '__main__':
    # Build the sample BST sketched below and query the node closest to 1.
    # (Python 2 script: note the statement-form print.)
    tree = BinaryTree(17)
    tree.insert_left(5)
    tree.insert_right(35)
    tree.left.insert_left(2)
    tree.right.insert_left(29)
    print closest_value(tree, 1)
    """
        17
       /  \
      5    35
     /    /
    2    29
    """
24,898 | 2cbdc4534d7e5bb5ee2b4e353f1f1c0d358f3303 | """
A python implementation of built in classes for looking at method signatures.
Modified on 2014-11-11
"""
class bool():
def __abs__(self, ???):
"""
abs(self)
"""
def __add__(self, ???):
"""
Return self+value.
"""
def __and__(self, ???):
"""
Return self&value.
"""
def __bool__(self, ???):
"""
self != 0
"""
def __ceil__(self, ???):
"""
Ceiling of an Integral returns itself.
"""
def __class__(self, ???):
"""
bool(x) -> bool
Returns True when the argument x is true, False otherwise.
The builtins True and False are the only two instances of the class bool.
The class bool is a subclass of the class int, and cannot be subclassed.
"""
def __delattr__(self, ???):
"""
Implement delattr(self, name).
"""
def __dir__(self, ???):
"""
__dir__() -> list
default dir() implementation
"""
def __divmod__(self, ???):
"""
Return divmod(self, value).
"""
def __doc__(self, ???):
"""
str(object='') -> str
str(bytes_or_buffer[, encoding[, errors]]) -> str
Create a new string object from the given object. If encoding or
errors is specified, then the object must expose a data buffer
that will be decoded using the given encoding and error handler.
Otherwise, returns the result of object.__str__() (if defined)
or repr(object).
encoding defaults to sys.getdefaultencoding().
errors defaults to 'strict'.
"""
def __eq__(self, ???):
"""
Return self==value.
"""
def __float__(self, ???):
"""
float(self)
"""
def __floor__(self, ???):
"""
Flooring an Integral returns itself.
"""
def __floordiv__(self, ???):
"""
Return self//value.
"""
def __format__(self, ???):
"""
None
"""
def __ge__(self, ???):
"""
Return self>=value.
"""
def __getattribute__(self, ???):
"""
Return getattr(self, name).
"""
def __getnewargs__(self, ???):
"""
None
"""
def __gt__(self, ???):
"""
Return self>value.
"""
def __hash__(self, ???):
"""
Return hash(self).
"""
def __index__(self, ???):
"""
Return self converted to an integer, if self is suitable for use as an index into a list.
"""
def __init__(self, ???):
"""
Initialize self. See help(type(self)) for accurate signature.
"""
def __int__(self, ???):
"""
int(self)
"""
def __invert__(self, ???):
"""
~self
"""
def __le__(self, ???):
"""
Return self<=value.
"""
def __lshift__(self, ???):
"""
Return self<<value.
"""
def __lt__(self, ???):
"""
Return self<value.
"""
def __mod__(self, ???):
"""
Return self%value.
"""
def __mul__(self, ???):
"""
Return self*value.
"""
def __ne__(self, ???):
"""
Return self!=value.
"""
def __neg__(self, ???):
"""
-self
"""
def __new__(self, ???):
"""
Create and return a new object. See help(type) for accurate signature.
"""
def __or__(self, ???):
"""
Return self|value.
"""
def __pos__(self, ???):
"""
+self
"""
def __pow__(self, ???):
"""
Return pow(self, value, mod).
"""
def __radd__(self, ???):
"""
Return value+self.
"""
def __rand__(self, ???):
"""
Return value&self.
"""
def __rdivmod__(self, ???):
"""
Return divmod(value, self).
"""
def __reduce__(self, ???):
"""
helper for pickle
"""
def __reduce_ex__(self, ???):
"""
helper for pickle
"""
def __repr__(self, ???):
"""
Return repr(self).
"""
def __rfloordiv__(self, ???):
"""
Return value//self.
"""
def __rlshift__(self, ???):
"""
Return value<<self.
"""
def __rmod__(self, ???):
"""
Return value%self.
"""
def __rmul__(self, ???):
"""
Return value*self.
"""
def __ror__(self, ???):
"""
Return value|self.
"""
def __round__(self, ???):
"""
Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
def __rpow__(self, ???):
"""
Return pow(value, self, mod).
"""
def __rrshift__(self, ???):
"""
Return value>>self.
"""
def __rshift__(self, ???):
"""
Return self>>value.
"""
def __rsub__(self, ???):
"""
Return value-self.
"""
def __rtruediv__(self, ???):
"""
Return value/self.
"""
def __rxor__(self, ???):
"""
Return value^self.
"""
def __setattr__(self, ???):
"""
Implement setattr(self, name, value).
"""
def __sizeof__(self, ???):
"""
Returns size in memory, in bytes
"""
def __str__(self, ???):
"""
Return str(self).
"""
def __sub__(self, ???):
"""
Return self-value.
"""
def __subclasshook__(self, ???):
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
def __truediv__(self, ???):
"""
Return self/value.
"""
def __trunc__(self, ???):
"""
Truncating an Integral returns itself.
"""
def __xor__(self, ???):
"""
Return self^value.
"""
def bit_length(self, ???):
"""
int.bit_length() -> int
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
def conjugate(self, ???):
"""
Returns self, the complex conjugate of any int.
"""
def denominator(self, ???):
"""
int(x=0) -> integer
int(x, base=10) -> integer
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is a number, return x.__int__(). For floating point
numbers, this truncates towards zero.
If x is not a number or if base is given, then x must be a string,
bytes, or bytearray instance representing an integer literal in the
given base. The literal can be preceded by '+' or '-' and be surrounded
by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.
Base 0 means to interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
"""
def from_bytes(self, ???):
"""
int.from_bytes(bytes, byteorder, *, signed=False) -> int
Return the integer represented by the given array of bytes.
The bytes argument must either support the buffer protocol or be an
iterable object producing bytes. Bytes and bytearray are examples of
built-in objects that support the buffer protocol.
The byteorder argument determines the byte order used to represent the
integer. If byteorder is 'big', the most significant byte is at the
beginning of the byte array. If byteorder is 'little', the most
significant byte is at the end of the byte array. To request the native
byte order of the host system, use `sys.byteorder' as the byte order value.
The signed keyword-only argument indicates whether two's complement is
used to represent the integer.
"""
def imag(self, ???):
"""
int(x=0) -> integer
int(x, base=10) -> integer
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is a number, return x.__int__(). For floating point
numbers, this truncates towards zero.
If x is not a number or if base is given, then x must be a string,
bytes, or bytearray instance representing an integer literal in the
given base. The literal can be preceded by '+' or '-' and be surrounded
by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.
Base 0 means to interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
"""
def numerator(self, ???):
"""
int(x=0) -> integer
int(x, base=10) -> integer
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is a number, return x.__int__(). For floating point
numbers, this truncates towards zero.
If x is not a number or if base is given, then x must be a string,
bytes, or bytearray instance representing an integer literal in the
given base. The literal can be preceded by '+' or '-' and be surrounded
by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.
Base 0 means to interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
"""
def real(self, ???):
"""
int(x=0) -> integer
int(x, base=10) -> integer
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is a number, return x.__int__(). For floating point
numbers, this truncates towards zero.
If x is not a number or if base is given, then x must be a string,
bytes, or bytearray instance representing an integer literal in the
given base. The literal can be preceded by '+' or '-' and be surrounded
by whitespace. The base defaults to 10. Valid bases are 0 and 2-36.
Base 0 means to interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
"""
def to_bytes(self, ???):
"""
int.to_bytes(length, byteorder, *, signed=False) -> bytes
Return an array of bytes representing an integer.
The integer is represented using length bytes. An OverflowError is
raised if the integer is not representable with the given number of
bytes.
The byteorder argument determines the byte order used to represent the
integer. If byteorder is 'big', the most significant byte is at the
beginning of the byte array. If byteorder is 'little', the most
significant byte is at the end of the byte array. To request the native
byte order of the host system, use `sys.byteorder' as the byte order value.
The signed keyword-only argument determines whether two's complement is
used to represent the integer. If signed is False and a negative integer
is given, an OverflowError is raised.
"""
|
24,899 | 332a37ca6d9c298ce7d97bfb5349d2eebc3e4b17 | #!/usr/bin/env python3
"""pack_manager.py

Demonstrates the tkinter ``pack`` geometry manager:

* frame1 shows the ``side`` option (LEFT / RIGHT / TOP / BOTTOM)
* frame2 shows the ``fill`` option (X vs. None)

Sample for python3 training at www.jasaplus.com.
"""
from tkinter import Tk, Button, Frame, LEFT, RIGHT, BOTTOM, TOP, RAISED, X, Y, BOTH

# Root window: 600x400 pixels, placed at screen offset (+300, +10).
master = Tk()
master.title("Pack Manager")
master.geometry("600x400+300+10")

# NOTE(review): the explicit height is mostly cosmetic -- pack's geometry
# propagation shrinks each frame to fit its packed children unless
# propagation is disabled.
frame1 = Frame(master, relief=RAISED, borderwidth=1, bg='#3b6a12', height='200')
frame1.pack(fill=X)

# --- side option: one button packed against each edge of frame1 ---
btn1 = Button(frame1, text="Button 1")
btn1.pack(side=LEFT)
btn2 = Button(frame1, text="Button 2")
btn2.pack(side=RIGHT)
btn3 = Button(frame1, text="Button 3")
btn3.pack(side=TOP)
btn4 = Button(frame1, text="Button 4")
btn4.pack(side=BOTTOM)

frame2 = Frame(master, relief=RAISED, borderwidth=1, bg='#8bde41', height='200')
frame2.pack(fill=X)

# --- fill option: stretch horizontally (fill=X) vs. natural size (fill=None) ---
# Bug fix: the original reused the variable name ``btn1x`` (shadowing the
# first reference) and the label "Button 1" for both buttons, so the two
# fill modes could not be told apart on screen.
btn1x = Button(frame2, text="Button 1")
btn1x.pack(fill=X)
btn2x = Button(frame2, text="Button 2")
btn2x.pack(fill=None)

master.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.