index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,100 | f8d3cbbc4c356a877bba9695cdd01530f34ddcc0 | import op
n = 65
e = op.encode_num(n)
print(e)
d = op.decode_num(e)
print(d)
|
983,101 | f7e43849b46737c045ab9174b295b1ee5b373d40 | from django.conf.urls import patterns, include, url
from tastypie.api import Api
from api import *
v1_api = Api(api_name='v1')
v1_api.register(MyTagResource())
v1_api.register(UserProfileResource())
v1_api.register(MakeyResource())
v1_api.register(UserResource())
v1_api.register(LikeMakeyResource())
v1_api.register(TestResource())
v1_api.register(CommentResource())
v1_api.register(ProductResource())
v1_api.register(NoteResource())
v1_api.register(Note2Resource())
v1_api.register(DocumentationResource())
v1_api.register(TextDocumentationResource())
v1_api.register(ImageResource())
v1_api.register(LocationResource())
v1_api.register(ShopResource())
v1_api.register(VideoResource())
v1_api.register(StepResource())
v1_api.register(FileResource())
# product page resources
v1_api.register(ProductResourceProductPage())
v1_api.register(MakeyResourceProductPage())
v1_api.register(TutorialResourceProductPage())
v1_api.register(ShopUrlResourceProductPage())
v1_api.register(ImageResourceProductPage())
v1_api.register(DescriptionResourceProductPage())
v1_api.register(LikeResourceProductPage())
v1_api.register(ProductReviewResourceProductPage())
v1_api.register(VoteProductReviewResource())
v1_api.register(VoteTutorialResource())
v1_api.register(VoteMakeyResource())
# Landing page resources
v1_api.register(TopProductsResourceLandingPage())
v1_api.register(ProductResourceLandingPage())
v1_api.register(ShopResourceLandingPage())
v1_api.register(TopUsersResourceLandingPage())
v1_api.register(TopUsersResourceRecoSubPage())
v1_api.register(TopShopsResourceLandingPage())
v1_api.register(TopMakeysResourceLandingPage())
v1_api.register(TopSpacesResource())
v1_api.register(TopTagsResource())
#Cfi Store Resources
v1_api.register(LikeCfiStoreItemResource())
v1_api.register(CfiStoreItemResource())
v1_api.register(CfiStoreItemFullResource())
#Likes
v1_api.register(LikeShopResource())
v1_api.register(LikeNoteResource())
v1_api.register(LikeImageResource())
v1_api.register(LikeVideoResource())
v1_api.register(LikeCommentResource())
v1_api.register(LikeArticleResource())
#Store Page Resoureces
v1_api.register(ProductShopUrlResource())
v1_api.register(ShopReviewResource())
v1_api.register(VoteShopReviewResource())
v1_api.register(ShopResourceProductPage())
#User Interactions
v1_api.register(UserInteractionResource())
v1_api.register(LikeCfiStoreItemWithProductResource())
v1_api.register(LikeProductResourceWithProduct())
v1_api.register(LikeShopWithShopResource())
v1_api.register(VoteMakeyWithMakeyResource())
v1_api.register(TutorialResource())
v1_api.register(VoteTutorialWithTutorialResource())
v1_api.register(ProductReviewResource())
v1_api.register(VoteProductReviewWithProductResource())
v1_api.register(ShopReviewWithShopResource())
v1_api.register(VoteShopReviewWithReviewResource())
v1_api.register(ShopUrlClicks())
#Makey Page
v1_api.register(NewUserResource())
v1_api.register(NewProductResource())
v1_api.register(ProductResourceMakeyPage())
v1_api.register(FavoriteMakeyResource())
#Maker Page
v1_api.register(MakeyResourceMakerPage())
v1_api.register(SpaceResourceMakerPage())
#Search Page
v1_api.register(MakeyResourceSearchPage())
# Space Page
v1_api.register(SpaceResource())
v1_api.register(SpaceReviewResource())
v1_api.register(VoteSpaceReviewResource())
v1_api.register(InventoryResource())
v1_api.register(NewInventoryResource())
#Article Page
v1_api.register(ArticleResource())
v1_api.register(TagResource())
v1_api.register(LikeChannelResource())
#Listing
v1_api.register(ListingResource())
v1_api.register(LikeListingResource())
# Send Mail
# v1_api.register(SendMailResource())
# Forum
v1_api.register(QuestionResource())
v1_api.register(AnswerResource())
# Route every URL into the tastypie v1 API.
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in 1.10;
# on modern Django this must become a plain list of url()/path() entries.
urlpatterns = patterns('',
    url(r'', include(v1_api.urls)),
)
|
983,102 | 5b3ccd1899a49c27a3f405150a9d6391a60e68d2 | # coding=utf-8
# Search for the smallest starting value X such that applying
# x -> (x/2) - 1 exactly (n-1) times ends at 1.
n=int(input())
x=1
while(1):
    X=x  # remember the current candidate starting value
    for i in range(n-1):
        # NOTE(review): `/` is float division on Python 3, so x only equals
        # exactly 1 when every step stays integral; `//` was probably
        # intended — confirm against the original task statement.
        x=(x/2)-1
    if(x==1):
        print(X)
        break
    else:
        # candidate failed — try the next starting value
        x=X+1
983,103 | 412b0bba6dfcf5e4c32a39ef22bdbb0aec0d905c | weight = int(input())
# Height is read as "feet inches" on one line (weight in pounds is read
# earlier in this script).
feet, inches = [int(i) for i in input().split()]
# Imperial BMI formula: 703 * weight(lb) / height(in)^2, one decimal place.
bmi = round(703 * (weight / ((feet * 12) + inches) ** 2), 1)
print(bmi)
# Standard BMI categories.
if bmi >= 30:
    print('Obese')
elif bmi >= 25:
    print('Overweight')
elif bmi >= 18.5:
    print('Normal')
elif bmi < 18.5:
    print('Underweight')
983,104 | 34aa5753e11cd5a715c42aa6945494877d059b57 | import unittest
import os
import logging
import datetime
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.clients.binance import enums
from cryptoxlib.clients.binance.BinanceWebsocket import CandlestickSubscription
from cryptoxlib.Pair import Pair
from CryptoXLibTest import CryptoXLibTest, WsMessageCounter
api_key = os.environ['BINANCEAPIKEY']
sec_key = os.environ['BINANCESECKEY']
class BinanceWs(CryptoXLibTest):
    """Integration test: Binance websocket candlestick subscription."""
    @classmethod
    def initialize(cls) -> None:
        # Verbose logging so websocket traffic is visible in test output.
        cls.print_logs = True
        cls.log_level = logging.DEBUG
    async def init_test(self):
        # Fresh client per test, keys come from the environment.
        self.client = CryptoXLib.create_binance_client(api_key, sec_key)
    #@unittest.expectedFailure
    async def test_candlesticks_subscription(self):
        message_counter = WsMessageCounter()
        # Subscribe to 1-minute BTC/USDT candles; the callback counts one
        # message per update.
        self.client.compose_subscriptions([
            CandlestickSubscription(Pair("BTC", "USDT"), enums.CandelstickInterval.I_1MIN,
                                    callbacks = [message_counter.generate_callback(1)])
        ])
        # NOTE(review): 15.0 looks like a timeout in seconds — confirm
        # against CryptoXLibTest.assertWsMessageCount.
        await self.assertWsMessageCount(message_counter, 15.0)
if __name__ == '__main__':
unittest.main() |
983,105 | 1400299bec1755aef448e34b097bbac52e9120ef | import socket
import threading
from _thread import *
import pygame
from game import Game
# ustawiamy tak server, żeby przyjmował wszystko i port dowolny
server = ''
port = 7777
# utworzenie i zbidnowanie socketu, a następnie nasłuchiwanie
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
str(e)
s.listen(2)
print("Waiting for a connection, Server Started")
# czytamy sobie wiadomosc rodzielajac string wspolrzednych na pojedyncze wspolrzedne
def read_pos(pos_str):
    """Parse an "x,y" coordinate message into a tuple of two ints.

    Args:
        pos_str: comma-separated coordinate pair, e.g. "12,34".

    Returns:
        (x, y) tuple of ints.
    """
    # Echo the raw message for server-side debugging (original behaviour).
    print(pos_str)
    # Renamed from `str`, which shadowed the builtin; the only caller in
    # this file passes the argument positionally.
    parts = pos_str.split(",")
    return int(parts[0]), int(parts[1])
# funkcja ktora odpowiada za ruch kropki
def move(data, player):
    """Move *player*'s dot one step in direction *data*, respecting walls.

    Args:
        data: direction tuple — (0, 1)/(0, -1) move along y,
              (1, 0)/(-1, 0) move along x.
        player: key into theGame.players.
    """
    # Map each direction onto (coordinate index, sign); positions are
    # stored as [x, y, ...] lists in theGame.players.
    directions = {
        (0, 1): (1, 1),
        (0, -1): (1, -1),
        (1, 0): (0, 1),
        (-1, 0): (0, -1),
    }
    if data not in directions:
        return
    axis, sign = directions[data]
    step = sign * 2 * theGame.speed
    pos = theGame.players[player]
    pos[axis] = pos[axis] + step
    # Undo the move when it would land inside a maze wall.
    if theGame.blocked(theGame.maze.maze, pos[0], pos[1]):
        pos[axis] = pos[axis] - step
# wątek odpowiedzialny za obsługe klienta tworzymy jeden dla każdego klienta
def threaded_client(conn):
    """Per-client handler thread: register the player, then relay moves.

    Reads the nickname first, then loops reading "x,y" direction messages
    until the client sends "0,0" or the connection drops.

    Args:
        conn: connected client socket.
    """
    nick = conn.recv(2048).decode()
    # De-duplicate nicknames by appending an increasing counter until the
    # nick is unused. (Bug fix: the original never incremented `i`, so it
    # appended the digit "1" repeatedly.)
    i = 1
    while nick in theGame.players.keys():
        nick += str(i)
        i += 1
    # Assign a colour and a starting position for the new player.
    color = theGame.colorGenerator()
    print(color)
    x, y = theGame.get_initpos()
    # Player state lives in a dict: [x, y, color, points].
    theGame.players[nick] = [x, y, color, theGame.getStartPoints()]
    while True:
        try:
            data = read_pos(conn.recv(2048).decode())
            # "0,0" is the client's disconnect signal.
            if data[0] == 0 and data[1] == 0:
                break
            move(data, nick)
            # Send an acknowledgement byte: without it the client sends
            # faster than the server reads and messages pile up (sleeping
            # instead caused lag).
            conn.send(str.encode("2"))
            if not data:
                print("Disconnected")
                break
        except Exception:
            # Any recv/parse failure means the client is gone.
            break
    del theGame.players[nick]
    print("Lost connection")
    conn.close()
# wątek odpowiedzialny za samą gre
def runGame():
    """Game thread: generate the maze once, then render forever."""
    theGame.maze.generate_maze()
    theGame.on_init()
    theGame.name = ''
    while True:
        # Drain pygame's event queue so the window stays responsive.
        pygame.event.pump()
        theGame.on_render()
# w mainie przyjmujemy wiadomość i tworzymy wątek dla każdego klienta
# Entry point: start the render thread, then accept clients forever,
# spawning one handler thread per connection.
if __name__ == "__main__":
    theGame = Game()
    thread = threading.Thread(target=runGame)
    thread.start()
    while True:
        conn, addr = s.accept()
        print("Connected to:", addr)
        start_new_thread(threaded_client, (conn,))
|
983,106 | d7d424841331b5e16d8ba546e237b7d4723f8394 | import pytest
from ..compare_version_strings import compare_versions, prepare_comparison_result
class TestClass:
    """Unit tests for compare_versions and prepare_comparison_result.

    Fixes over the original: the second definition of
    test_versions_comparison_result_string_case9 silently shadowed the
    first (same name twice in one class body), so one test never ran —
    it is renamed case9b. One assertion message said "+ve" while the
    assert checked `< 0`; it now says "-ve" like its siblings.
    """

    class TestCompareVersion:
        """Tests of the numeric comparison function."""

        def test_versions_same_length_first_bigger_case1(self):
            assert compare_versions('1.2.1', '1.2.0') > 0,\
                "Should return a +ve number if first version is bigger than the second with same length [case 1]"

        def test_versions_same_length_first_bigger_case2(self):
            assert compare_versions('1.4.1.0', '1.4.0.6') > 0,\
                "Should return a +ve number if first version is bigger than the second with same length [case 2]"

        def test_versions_same_length_first_bigger_case3(self):
            assert compare_versions('6', '2') > 0,\
                "Should return a +ve number if first version is bigger than the second with same length [case 3]"

        def test_versions_same_length_second_bigger_case1(self):
            assert compare_versions('1.2.0', ' 1.2.1') < 0,\
                "Should return a -ve number if first version is smaller than the second with same length [case 1]"

        def test_versions_same_length_second_bigger_case2(self):
            assert compare_versions('1.4.0.6', '1.4.1.0') < 0,\
                "Should return a -ve number if first version is smaller than the second with same length [case 2]"

        def test_versions_same_length_second_bigger_case3(self):
            assert compare_versions('2', '6') < 0,\
                "Should return a -ve number if first version is smaller than the second with same length [case 3]"

        def test_versions_same_length_equals(self):
            assert compare_versions('1.2.3.4', '1.2.3.4') == 0,\
                "Should return zero if are equal with same length"

        def test_versions_different_length_first_bigger_case1(self):
            assert compare_versions('1.2.1.0.0.0.1', '1.2.0.2.9.0.1.2.1.2.3.5') > 0,\
                "Should return a +ve number if first version is bigger than the second with different length [case 1]"

        def test_versions_different_length_first_bigger_case2(self):
            assert compare_versions('1.4.1.0.0.0.0.0.0', '1.4.0.6') > 0,\
                "Should return a +ve number if first version is bigger than the second with different length [case 2]"

        def test_versions_different_length_first_bigger_case3(self):
            assert compare_versions('6', '1.0.0.0.0') > 0,\
                "Should return a +ve number if first version is bigger than the second with different length [case 3]"

        def test_versions_different_length_second_bigger_case1(self):
            assert compare_versions('1.2.0.2.2.3.5.7.1', '1.2.1.0.0.1') < 0,\
                "Should return a -ve number if first version is smaller than the second with different length [case 1]"

        def test_versions_different_length_second_bigger_case2(self):
            assert compare_versions('1.4.0.6.0.0', '1.4.1.0.0.9.0.5') < 0,\
                "Should return a -ve number if first version is smaller than the second with different length [case 2]"

        def test_versions_different_length_second_bigger_case3(self):
            assert compare_versions('1.0', '5.9.8.5.2') < 0,\
                "Should return a -ve number if first version is smaller than the second with different length [case 3]"

        def test_versions_different_length_equals_case1(self):
            assert compare_versions('1.2.3.0.0.0.0.0.0', '1.2.3') == 0,\
                "Should return zero if are equal with different length [case 1]"

        def test_versions_different_length_equals_case2(self):
            assert compare_versions('1.2.3.0.0.1.0', '1.2.3.0.0.1.0.0.0') == 0,\
                "Should return zero if are equal with different length [case 2]"

        def test_should_raise_error_if_not_string_parameter(self):
            with pytest.raises(AttributeError):
                compare_versions('1', 3.6)

        def test_should_raise_error_if_unexpected_character(self):
            with pytest.raises(ValueError):
                compare_versions('2.3.d.1', '2.b.4.a')

    class TestPrepareComparisonResult:
        """Tests of the human-readable comparison message."""

        def test_versions_comparison_result_string_case1(self):
            assert prepare_comparison_result('1.2.3', '1.2.3') == "'1.2.3' is equal to '1.2.3'"

        def test_versions_comparison_result_string_case2(self):
            assert prepare_comparison_result('1', '5') == "'1' is smaller than '5'"

        def test_versions_comparison_result_string_case3(self):
            assert prepare_comparison_result(
                '1.2.3.0.0.1.0', '1.2.3.0.0.1.0.0.0'
            ) == "'1.2.3.0.0.1.0' is equal to '1.2.3.0.0.1.0.0.0'"

        def test_versions_comparison_result_string_case4(self):
            assert prepare_comparison_result('1.2.3.0.0.0.0.0.0', '1.2.3') == "'1.2.3.0.0.0.0.0.0' is equal to '1.2.3'"

        def test_versions_comparison_result_string_case5(self):
            assert prepare_comparison_result('1.0', '5.9.8.5.2') == "'1.0' is smaller than '5.9.8.5.2'"

        def test_versions_comparison_result_string_case6(self):
            assert prepare_comparison_result(
                '1.4.0.6.0.0', '1.4.1.0.0.9.0.5'
            ) == "'1.4.0.6.0.0' is smaller than '1.4.1.0.0.9.0.5'"

        def test_versions_comparison_result_string_case7(self):
            assert prepare_comparison_result(
                '1.2.0.2.2.3.5.7.1', '1.2.1.0.0.1'
            ) == "'1.2.0.2.2.3.5.7.1' is smaller than '1.2.1.0.0.1'"

        def test_versions_comparison_result_string_case8(self):
            assert prepare_comparison_result('5', '1.0.0.0.0') == "'5' is greater than '1.0.0.0.0'"

        def test_versions_comparison_result_string_case9(self):
            assert prepare_comparison_result(
                '1.4.1.0.0.0.0.0.0', '1.4.0.6'
            ) == "'1.4.1.0.0.0.0.0.0' is greater than '1.4.0.6'"

        # Renamed from the duplicated case9 so both tests actually run.
        def test_versions_comparison_result_string_case9b(self):
            assert prepare_comparison_result(
                '1.2.1.0.0.0.1', '1.2.0.2.9.0.1.2.1.2.3.5'
            ) == "'1.2.1.0.0.0.1' is greater than '1.2.0.2.9.0.1.2.1.2.3.5'"

        def test_versions_comparison_result_string_case10(self):
            assert prepare_comparison_result('1.4.0.6', '1.4.1.0') == "'1.4.0.6' is smaller than '1.4.1.0'"

        def test_versions_comparison_result_string_case11(self):
            assert prepare_comparison_result('1.2.0', '1.2.1') == "'1.2.0' is smaller than '1.2.1'"

        def test_versions_comparison_result_string_case12(self):
            assert prepare_comparison_result('5', '1') == "'5' is greater than '1'"

        def test_versions_comparison_result_string_case13(self):
            assert prepare_comparison_result('1.4.1.0', '1.4.0.6') == "'1.4.1.0' is greater than '1.4.0.6'"

        def test_versions_comparison_result_string_case14(self):
            assert prepare_comparison_result('1.2.1', '1.2.0') == "'1.2.1' is greater than '1.2.0'"

        def test_versions_comparison_result_string_case15(self):
            with pytest.raises(AttributeError):
                prepare_comparison_result('1', 2.5)

        def test_versions_comparison_result_string_case16(self):
            with pytest.raises(ValueError):
                prepare_comparison_result('1.2.c', '1.b.1.a')
|
983,107 | a4c5d2a7542adc32b0e6326fe7ee400ac23ad605 |
# coding: utf-8
# In[1]:
import utils
import h5py
import numpy as np
import math
import tensorflow as tf
# In[2]:
def rotate(sample, theta):
    """Rotate a point cloud by *theta* about the z, x and y axes in turn.

    Args:
        sample: (N, 3) array-like of points (rows are points).
        theta: rotation angle in radians, applied for each axis.

    Returns:
        The rotated points (numpy matrix).
    """
    c, s = math.cos(theta), math.sin(theta)
    # Rotation matrices about z, then x, then y — applied in that order
    # as right-multiplications of the row-vector points.
    rotations = (
        np.matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]]),
        np.matrix([[1, 0, 0], [0, c, -s], [0, s, c]]),
        np.matrix([[c, 0, s], [0, 1, 0], [-s, 0, c]]),
    )
    for affine in rotations:
        sample = np.matmul(sample, affine)
    return sample
def jitter(data):
    """Return *data* plus zero-mean Gaussian noise (sigma=0.1), shape (2048, 3)."""
    return data + np.random.normal(0, 0.1, (2048, 3))
def rotateall(data, label):
    """Augment a dataset with rotated and jittered copies of each sample.

    For every (sample, label) pair, three augmented samples are produced —
    a pi/4 rotation, a jittered copy and a 3*pi/4 rotation — each paired
    with the original label.

    Args:
        data: iterable of (2048, 3) point clouds.
        label: iterable of labels, parallel to *data*.

    Returns:
        (augmented_samples, labels) lists of equal length.
    """
    # Renamed from `list`, which shadowed the builtin.
    augmented = []
    labels = []
    for sample, y in zip(data, label):
        augmented.append(rotate(sample, math.pi / 4.0))
        augmented.append(jitter(sample))
        augmented.append(rotate(sample, math.pi * 3.0 / 4))
        labels.extend([y for i in range(3)])
    return augmented, labels
# list.append(rotate(x,math.pi))
# In[3]:
def load_h5(filepath, flag):
    """Load point-cloud data from every HDF5 file listed in *filepath*.

    Args:
        filepath: path of a text file holding one .h5 filename per line.
        flag: when True, also append rotated/jittered augmentations.

    Returns:
        A tuple (data, label) of parallel lists.
    """
    data = []
    label = []
    # Close both the listing file and each HDF5 file deterministically
    # (the original leaked every handle), and pass an explicit read mode.
    with open(filepath) as file_list:
        for h5_filename in file_list:
            h5_filename = h5_filename.strip()
            # print(h5_filename)
            with h5py.File(h5_filename, 'r') as f:
                points = f['data'][:]
                labels = f['label'][:]
                data.extend(points)
                label.extend(labels)
                if flag:
                    aug_points, aug_labels = rotateall(points, labels)
                    data.extend(aug_points)
                    label.extend(aug_labels)
    return (data, label)
# In[4]:
data=load_h5("data/modelnet40_ply_hdf5_2048/train_files.txt",True)
# In[5]:
def one_hot(label):
    """Convert labels of the form [[class_index], ...] into 40-way one-hot lists."""
    return [[1 if j == entry[0] else 0 for j in range(40)] for entry in label]
# In[6]:
def create_batch(data, batch_size):
    """Split (data, label) into fixed-size training batches.

    Labels are one-hot encoded; the final partial batch (fewer than
    *batch_size* samples) is dropped, matching the original behaviour.

    Args:
        data: (samples, labels) tuple of parallel sequences.
        batch_size: number of samples per batch.

    Returns:
        (batch_data, batch_labels) numpy arrays.
    """
    samples, raw_labels = data
    labels = one_hot(raw_labels)
    total = len(samples)
    batch_data = []
    batch_labels = []
    for start in range(0, (total // batch_size) * batch_size, batch_size):
        end = min(start + batch_size, total)
        batch_data.append(samples[start:end])
        batch_labels.append(labels[start:end])
    return np.array(batch_data), np.array(batch_labels)
# In[7]:
x,y=data
len(x)
# sess=data[0][0]
# In[8]:
def createConvLayer(input_layer, flag, layer, inputsize, size):
    """[1, inputsize] conv + batch norm block (a shared MLP across points).

    Args:
        input_layer: NHWC input tensor.
        flag: boolean is_training tensor for batch norm.
        layer: variable-scope name for this block.
        inputsize: kernel width — consumes the whole feature dimension.
        size: number of output filters.

    Returns:
        The batch-normalised output of the ReLU conv.
    """
    with tf.variable_scope(layer):
        conv1 = tf.layers.conv2d(inputs=input_layer, filters=size, kernel_size=[1, inputsize], padding="Valid", activation=tf.nn.relu)
        conv_bn = tf.contrib.layers.batch_norm(conv1, data_format='NHWC', center=True, scale=True, is_training=flag, scope='bn')
        return conv_bn
def createDenseCell(x, flag, size, layer):
    """Fully connected ReLU layer followed by batch norm.

    Args:
        x: input tensor.
        flag: boolean is_training tensor for batch norm.
        size: number of output units.
        layer: variable-scope name for this cell.

    Returns:
        The batch-normalised dense output.
    """
    with tf.variable_scope(layer):
        h1 = tf.contrib.layers.fully_connected(x, size, activation_fn=tf.nn.relu, scope='dense')
        h2 = tf.contrib.layers.batch_norm(h1, center=True, scale=True, is_training=flag, scope='bn')
        return h2
def classificationLayer(flag, input, layers):
    """Final 512 -> 256 -> 40 fully connected classification head.

    Args:
        flag: boolean is_training tensor for batch norm.
        input: flattened global feature tensor.
        layers: three variable-scope names, one per dense layer.

    Returns:
        The 40-way logits tensor.
    """
    output = createDenseCell(input, flag, 512, layers[0])
    output2 = createDenseCell(output, flag, 256, layers[1])
    # Bug fix: the original fed `output` (the 512-unit layer) into the
    # last cell, skipping the 256-unit layer entirely.
    output3 = createDenseCell(output2, flag, 40, layers[2])
    return output3
def getLayer(input_image, flag, mlp, n):
    """Apply a stack of shared conv "MLP" layers over a point feature tensor.

    Args:
        input_image: (batch, number_points, feat, 1) tensor.
        flag: boolean is_training tensor.
        mlp: output channel count for each successive conv layer.
        n: enumerate offset so the 'conv%d' scope names stay unique across
           multiple getLayer calls in the same graph.

    Returns:
        (batch, number_points, last_channels, 1) tensor.

    NOTE(review): relies on the module-level global `number_points`, which
    is assigned later in this script but before the function is called.
    """
    new_points = input_image
    # number_points=input_image.shape[1]
    for i, num_out_channel in enumerate(mlp, n):
        # The kernel consumes the whole current feature dimension.
        inputsize = new_points.shape[2]
        new_points = createConvLayer(new_points, flag, 'conv%d' % (i), inputsize, num_out_channel)
        # print(new_points.shape,number_points)
        # Reshape back to 4D so the next conv sees (points, channels, 1).
        new_points = tf.reshape(new_points, [-1, number_points, num_out_channel, 1])
    return new_points
def tnet(input_image, flag, size, n):
    """Transformer sub-network: predict a size x size alignment matrix.

    Args:
        input_image: (batch, number_points, feat, 1) input tensor.
        flag: boolean is_training tensor.
        size: dimension of the square transform matrix (3 for input
              alignment, 64 for feature alignment).
        n: scope-name offset forwarded to getLayer.

    Returns:
        (batch, size, size) transform matrix tensor.
    """
    mlp1 = getLayer(input_image, flag, [64, 64, 128, 1024], n)
    # print(mlp1.shape)
    # Max-pool across all points to obtain one global 1024-d feature.
    pooled = tf.nn.max_pool(mlp1,
                            ksize=[1, number_points, 1, 1],
                            strides=[1, 1, 1, 1],
                            padding='VALID',
                            name="pool")
    featurevec = tf.reshape(pooled, [-1, 1024])
    # print(featurevec.shape)
    output1 = createDenseCell(featurevec, flag, int(512 * int(math.sqrt(size))), "layers%d" % (size))
    # NOTE(review): output2 is computed but never used — `matrix` below is
    # regressed straight from output1. Confirm whether output2 was meant to
    # feed the final regression layer.
    output2 = tf.contrib.layers.fully_connected(output1, 256 * (int(math.sqrt(size))), activation_fn=tf.nn.relu)
    matrix = tf.contrib.layers.fully_connected(output1, size * size, activation_fn=tf.nn.relu)
    matrix = tf.reshape(matrix, [-1, size, size])
    return matrix
# In[ ]:
# In[9]:
batch_size=20
number_points=2048
label_number=40
with tf.device('/gpu:0'):
tf.reset_default_graph()
point_cloud = tf.placeholder(tf.float32,
shape=(batch_size, number_points, 3))
flag=tf.placeholder(tf.bool,name='flag')
tf_train_labels = tf.placeholder(tf.float32,
shape=(batch_size, label_number))
input_image = tf.expand_dims(point_cloud, -1)
matrix=tnet(input_image,flag,3,0)
transformed_input=tf.matmul(point_cloud,matrix)
input_image = tf.expand_dims(transformed_input, -1)
mlp1=getLayer(input_image,flag,[64,64],5)
f_matrix=tnet(mlp1,flag,64,10)
temp_feat=tf.reshape(mlp1,[-1,number_points,64])
transformed_feat=tf.matmul(temp_feat,f_matrix)
transformed_feat=tf.expand_dims(transformed_feat,-1)
mlp2=getLayer(transformed_feat,flag,[64,128,1024],18)
pooled = tf.nn.max_pool(mlp2,
ksize=[1, number_points, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
featurevec=tf.reshape(pooled,[-1,1024])
# print(featurevec.shape)
logits=classificationLayer(flag,featurevec,["layer5","layer6","layer7"])
# In[ ]:
with tf.device('/gpu:0'):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tf_train_labels, 1), tf.argmax(logits, 1)),'float32'))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)
# In[ ]:
epoch=200
# sess=tf.InteractiveSession()
# output=open("output.txt",'w')
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
batch_traindata,batch_trainlabel=create_batch(data,batch_size)
# print(len(train_batch))
# x_valid,y_valid=mnist.validation.next_batch(5000)
allLoss=[]
import time
a = time.time()
with sess.as_default():
tf.global_variables_initializer().run()
for i in range(epoch):
running_loss=0
running_acc=0
for x_train,y_train in zip(batch_traindata,batch_trainlabel):
# print(y_train.shape)
t,loss_t,acc=sess.run([optimizer,loss,accuracy], feed_dict={point_cloud:x_train,tf_train_labels:y_train,flag:True})
running_loss+=loss_t
running_acc+=acc
print (running_loss,running_acc/len(batch_traindata))
print (time.time() -a)
# In[ ]:
testdata=load_h5("data/modelnet40_ply_hdf5_2048/test_files.txt",False)
batch_testdata,batch_testlabel=create_batch(testdata,batch_size)
print(np.random.random((2048,3)).shape)
# batch_testdata[0][6]-=2*np.ones((2048,3))
def getAccuracy(batchpoints, batchlabel):
    """Sum per-batch accuracy over all evaluation batches.

    NOTE(review): returns the SUM of batch accuracies, not the mean —
    callers must divide by the number of batches to get a fraction.
    Relies on the module-level `sess`, `accuracy`, `point_cloud`,
    `tf_train_labels` and `flag` graph objects.
    """
    running_acc = 0
    for x, y in zip(batchpoints, batchlabel):
        # print(y_train.shape)
        # flag=False disables batch-norm training mode for evaluation.
        acc = sess.run(accuracy, feed_dict={point_cloud: x, tf_train_labels: y, flag: False})
        # print(acc)
        running_acc += acc
    return running_acc
print(getAccuracy(batch_testdata,batch_testlabel))
# In[ ]:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def Visualize_3D(x, y, z):
    """Show a 3D scatter plot of a point cloud (blocks until the window closes)."""
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(x, y, z)
    plt.show()
image=x[6]
print(image.shape)
# image=transform[5]
Visualize_3D(image[:,0],image[:,1],image[:,2])
# print("Test Accuracy: ",getAccuracy(batch_testdata,batch_testlabel))
# In[ ]:
image=transform[6]
# image=x[]
Visualize_3D(image[:,0],image[:,1],image[:,2])
# In[ ]:
|
983,108 | 9855826d44eeb5a4a26a2ad4c0750cc10a4e7dc9 | from collections import Counter
def anagram(word1, word2):
    """Return True when *word1* and *word2* are anagrams of each other.

    Args:
        word1: first word.
        word2: second word.

    Returns:
        bool — True iff both words use exactly the same characters.
    """
    # The original built two lists, sorted them in place and kept an
    # unused `var` flag; sorted() on the strings is equivalent.
    if len(word1) != len(word2):
        return False
    return sorted(word1) == sorted(word2)
def checkword(word1, word2):
    """Count the characters of *word1* left over after cancelling *word2*.

    This is the number of character replacements needed to turn the two
    words into anagrams (only word1's surplus counts, as in Counter
    subtraction, which drops non-positive counts).

    Args:
        word1: first word.
        word2: second word.

    Returns:
        int — total surplus character count.
    """
    # Counter accepts strings directly; no need to listify first. The
    # original also shadowed the builtin `sum` with a local variable.
    surplus = Counter(word1) - Counter(word2)
    return sum(surplus.values())
def check(word):
    """Score how far *word*'s two halves are from being anagrams.

    Returns:
        -1 when the word has odd length, 0 when the halves already are
        anagrams, otherwise the number of replacements needed (surplus
        characters of the first half).
    """
    leng = len(word)
    if leng % 2 != 0:
        return -1
    # Floor division keeps the slice indices integral on Python 3 as well
    # (the original Python 2 `leng/2` produces a float there and crashes).
    half = leng // 2
    word1 = word[:half]
    word2 = word[half:]
    if anagram(word1, word2):
        return 0
    return checkword(word1, word2)
# NOTE: Python 2 syntax (raw_input / print statement).
# Read the number of test words, then one word per line.
m = int(raw_input())
list1 = []
for i in range(0,m):
    a = raw_input()
    list1.append(a)
# For each word print the number of changes needed to make its two halves
# anagrams, or -1 for odd-length words.
for char in list1:
    res = check(char)
    print res
|
983,109 | c4b6dfcc57edeebb632be2afbf8917068124d8ad | import argparse
# Model command-line options. NOTE(review): several type/default/help
# combinations below contradict each other and look copy-pasted — flagged
# inline; confirm the intended values before relying on them. (argparse
# applies `type` only to command-line strings, so the odd defaults pass
# through unconverted.)
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--d_model', type=int, default=0, help='d_model')  # NOTE(review): default 0 is unusual for a model width
parser.add_argument('--d_head', type=int, default=2, help='head')
parser.add_argument('--d_inner', type=bool, default=True, help='inner layers')  # NOTE(review): type=bool cannot parse "False"; help suggests an int
parser.add_argument('--n_token', type=str, default='roberta-base', help='number of tokens')  # NOTE(review): string default for a "number"
parser.add_argument('--n_layer', type=str, default='gru', help='number of hidden layers')  # NOTE(review): string default for a layer count
parser.add_argument('--n_head', type=int, default=2, help='num attention heads')
parser.add_argument('--dropout', type=int, default=1024, help='dropout')  # NOTE(review): dropout should be a float probability
parser.add_argument('--dropatt', type=int, default=0.5, help='dropatt')  # NOTE(review): type=int with a float default
parser.add_argument('--attention_dropout_prob', type=int, default=1024, help='attention_dropout_prob')  # NOTE(review): 1024 is not a probability
parser.add_argument('--output_dropout_prob', type=int, default=0.5, help='output_dropout_prob')  # NOTE(review): type=int with a float default
args = parser.parse_args()
# Expose the parsed options as a plain dict.
args = vars(args)
|
983,110 | 21b359fd5f0a440fb20f7516650c775a607c5e55 | a, b = map(int, input().split())
# Integer quotient and remainder of a divided by b
# (a and b are read from one input line earlier in this script).
mok = a // b
sub = a % b
# Print "quotient remainder" without a trailing newline.
print(mok, sub, end=" ")
|
983,111 | 8a1252479c163a3cb0f846cb8b95ca06f4bf56c7 | from scipy.stats import skew
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
import fuzzywuzzy
def skew_kurtosis_value(df, feature):
    """Print skewness and kurtosis of *feature* with a short interpretation.

    Args:
        df (str): The dataframe (input dataset)
        feature (str): The target variable or any feature
    Returns:
        Skewness and kurtosis values
    """
    skewness = df[feature].skew()
    kurtosis = df[feature].kurt()
    print("Skewness: {}".format(round(skewness, 2)))
    skew_msg = ("Positive/right skewness: mean and median > mode."
                if skewness > 0
                else "Negative/left skewness: mean and median < mode")
    print(skew_msg)
    print("\nKurtosis: {}".format(round(kurtosis, 2)))
    kurt_msg = ("Leptokurtic: more outliers"
                if kurtosis > 3
                else "Platykurtic: less outliers")
    print(kurt_msg)
def duplicate_drop(df):
    """Report how many fully duplicated rows *df* contains.

    Prints the shape before and after drop_duplicates and the per-dimension
    difference; *df* itself is not modified.
    """
    deduped = df.drop_duplicates()
    print(df.shape)
    print(deduped.shape)
    print("Number of duplicates dropped: ")
    print("Rows: {}".format(df.shape[0] - deduped.shape[0]))
    # Dropping duplicate rows never removes columns, so this is always 0.
    print("Columns: {}".format(df.shape[1] - deduped.shape[1]))
def repetitive(df):
    """Print every column whose most frequent value covers >95% of rows."""
    total_rows = df.shape[0]
    for col in df.columns:
        counts = df[col].value_counts(dropna=False)
        top_share = (counts / total_rows).iloc[0]
        if top_share > 0.95:
            print('{0}: {1:.1f}%'.format(col, top_share * 100))
            print(counts)
            print()
def skewness_list(df):
    """Return the skewness of every non-object column, largest first."""
    numeric_cols = df.dtypes[df.dtypes != "object"].index
    skews = df[numeric_cols].apply(lambda col: skew(col))
    return skews.sort_values(ascending=False)
def fix_skewness(df):
    """Box-Cox transform every highly skewed numeric column in place.

    Columns whose absolute skewness exceeds 0.5 are replaced by their
    boxcox1p transform with the optimal lambda.

    Args:
        df (str): The dataframe (input dataset)
    Returns:
        df (str): The same dataframe, transformed in place. (The original
        docstring promised a return value but the function returned None.)
    """
    # Skewness of all numerical features
    num_feat = df.dtypes[df.dtypes != "object"].index
    skewed_num_feat = df[num_feat].apply(lambda x: skew(x)).sort_values(ascending=False)
    high_skew = skewed_num_feat[abs(skewed_num_feat) > 0.5].index  # high skewed if skewness above 0.5
    # Use boxcox transformation to fix skewness
    for feat in high_skew:
        df[feat] = boxcox1p(df[feat], boxcox_normmax(df[feat] + 1))
    return df
def inconsistent_feature_check(df, feature):
    """Print the value counts of *feature* and return its sorted unique classes."""
    print(df[feature].value_counts())
    # View all the different classes, sorted for easy visual comparison.
    classes = df[feature].unique()
    classes.sort()
    return classes
def capitalization_fix(df, feature):
    """Normalise *feature* in place: lower-case, then strip surrounding whitespace."""
    df[feature] = df[feature].str.lower().str.strip()
def replace_matches(df, feature, class_to_match, min_ratio):
    """Fuzzy-normalise near-duplicate category strings in *feature*.

    Every class whose token-sort ratio against *class_to_match* is at
    least *min_ratio* is replaced by *class_to_match* in place.

    Args:
        df: DataFrame to modify in place.
        feature: column to clean up.
        class_to_match: canonical class string.
        min_ratio: minimum fuzzywuzzy score (0-100) counted as a match.
    """
    # List of classes in feature
    list_class = df[feature].unique()
    # Top 10 closest matches
    # NOTE(review): this module only does `import fuzzywuzzy`, yet uses
    # fuzzywuzzy.process / fuzzywuzzy.fuzz — confirm those submodules are
    # reachable that way in the deployed fuzzywuzzy version.
    matches = fuzzywuzzy.process.extract(class_to_match, list_class,
                                         limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
    # Matches with a high ratio (set by user)
    close_matches = [m[0] for m in matches if m[1] >= min_ratio]
    # Rows of all the close matches in our dataframe
    rows_matches = df[feature].isin(close_matches)
    # Replace all rows with close matches with the input matches
    df.loc[rows_matches, feature] = class_to_match
    print("REPLACED!")
def overfit_features(df):
    """Return the columns whose most common value covers more than 99.94% of rows."""
    return [
        col for col in df.columns
        if df[col].value_counts().iloc[0] / len(df) * 100 > 99.94
    ]
|
983,112 | 0079a8bc594c0ede0c5beebb30d17b54a7fafa9c | number = 2520
# Smallest positive number evenly divisible by every integer 1..20
# (Project Euler problem 5). Bug fix: the original loop could never
# terminate — `test` counted at most 19 successes (range(1, 20)) while the
# exit check required test == 20, and the incremental search from 2520
# would in any case take hundreds of millions of iterations. Computed
# directly as lcm(1, ..., 20) instead.
from math import gcd

numberNotFound = True
number = 1
for i in range(1, 21):
    # lcm(a, b) = a * b // gcd(a, b), folded over 1..20.
    number = number * i // gcd(number, i)
numberNotFound = False
print(number)
|
983,113 | d9ceb3fec50fc31cc4638be88b9218523088d27a | from flask import Flask, render_template, request, redirect, flash, send_from_directory
from datetime import datetime
import os
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def main():
    """Electricity-bill calculator: show the form on GET, the bill on POST.

    Reads customer details and meter readings from the form, computes the
    slab-wise energy charges, customer charges, tax and fuel cess, then
    renders submitted.html with every line item pre-formatted.
    """
    if request.method == 'POST':
        name = request.form.get('name') if request.form.get('name') else ""
        address = request.form.get('address') if request.form.get('address') else ""
        load_kw = float(request.form.get('loadKW'))
        load_hp = float(request.form.get('loadHP'))
        previous_units = float(request.form.get('previousConsumption'))
        current_units = float(request.form.get('currentConsumption'))
        # Units consumed in this billing period.
        units = int(current_units - previous_units)
        billing_type = request.form.get('billingOptionsRadios')
        start_date = datetime.strptime(request.form.get('startdate'), '%Y-%m-%d').strftime('%d/%m/%Y')
        end_date = datetime.strptime(request.form.get('enddate'), '%Y-%m-%d').strftime('%d/%m/%Y')
        # Contracted load in kW: 1 hp = 0.746 kW.
        contracted_load = (0.746 * load_hp) + load_kw
        contracted_load = round(contracted_load * 2) / 2  # rounding to nearest 0.5
        # Fixed customer charges, split into two components cca/ccb.
        cca = 0
        ccb = 0
        # NOTE(review): `0 < contracted_load > 1` is a chained comparison
        # meaning "greater than 1 (and 0)" — confirm loads of exactly 1 kW
        # are meant to fall through to the else branch.
        if billing_type == "Commercial":
            if 0 < contracted_load > 1:
                cca = contracted_load * 80
        else:
            if 0 < contracted_load > 1:
                # Residential: first kW at 60, remainder at 70.
                cca = 1 * 60
                ccb = (contracted_load - 1) * 70
            else:
                cca = contracted_load * 60
        slab_rates = set_slab_rates(billing_type)
        slabs = calculate_slabs(units, slab_rates, billing_type)
        # 9% tax on the slab charges plus a per-unit fuel cess.
        tax = round(((slabs[0][0] + slabs[1][0] + slabs[2][0] + slabs[3][0] ) * 9)/100, 2)
        fuel_cess = round(units * 0.29, 2)
        # NOTE(review): `discount` actually holds tax + fuel cess — a
        # surcharge, not a discount; the template presumably relabels it.
        discount = round(tax + fuel_cess, 2)
        total = round(cca+ccb+slabs[0][0] + slabs[1][0] + slabs[2][0] + slabs[3][0] + discount, 2)
        net_bill = round(cca+ccb+slabs[0][0] + slabs[1][0] + slabs[2][0] + slabs[3][0] , 2)
        return render_template('submitted.html', name=name, address=address, previousConsumption=previous_units,
                               currentConsumption = current_units, load_kw=load_kw, load_hp=load_hp,
                               units=units, discount = format(discount, '.2f'), billing_type=billing_type,
                               cca=format(cca, '.2f'), ccb=format(ccb, '.2f'),
                               slab1=format(slabs[0][0], '.2f'), slab1_units=slabs[0][1],
                               slab2=format(slabs[1][0], '.2f'), slab2_units=slabs[1][1],
                               slab3=format(slabs[2][0], '.2f'), slab3_units=slabs[2][1],
                               slab4=format(slabs[3][0], '.2f'), slab4_units=slabs[3][1],
                               fuelcess=format(fuel_cess, '.2f'), tax=format(tax, '.2f'),
                               netbill=format(net_bill, '.2f'), contracted_load=round(contracted_load,1),
                               total = format(total, '.2f'),
                               slab1rate=format(slab_rates[0], '.2f'), slab2rate=format(slab_rates[1], '.2f'),
                               slab3rate=format(slab_rates[2], '.2f'), slab4rate=format(slab_rates[3], '.2f'),
                               startdate = start_date, enddate = end_date )
    else:
        return render_template('home.html')
def set_slab_rates(billing_type):
    """Return the four per-unit tariff rates for a billing category.

    Args:
        billing_type: "Urban", "Rural" or "Commercial".

    Returns:
        [slab1_rate, slab2_rate, slab3_rate, slab4_rate]; commercial
        billing uses only the first two slabs, so the last two are zero.

    Raises:
        ValueError: for an unknown billing type (the original crashed with
        a confusing UnboundLocalError instead).
    """
    rates = {
        "Urban": [3.75, 5.20, 6.75, 7.80],
        "Rural": [3.65, 4.90, 6.45, 7.30],
        "Commercial": [8.00, 9.00, 0, 0],
    }
    try:
        return rates[billing_type]
    except KeyError:
        raise ValueError("Unknown billing type: {!r}".format(billing_type))
def calculate_slabs(units, slab_rates, billing_type):
    """Split consumed *units* across tariff slabs and price each slab.

    Residential ("Urban"/"Rural") slab sizes: first 30 units, next 70,
    next 100, then the remainder. Commercial: first 50 units, then the
    remainder (its last two slabs are always zero).

    Bug fixes over the original:
    - the residential 4th slab charged a flat 100 units
      (100 * rate) regardless of the actual remainder;
    - the commercial branch crashed with an UnboundLocalError for
      consumptions of 50 units or fewer (slab1_units was never assigned
      and the units were recorded under slab2_units).

    Args:
        units: total units consumed in the billing period.
        slab_rates: per-slab rates as returned by set_slab_rates().
        billing_type: "Urban", "Rural" or "Commercial".

    Returns:
        [[slab1_amount, slab1_units], ..., [slab4_amount, slab4_units]],
        amounts rounded to 2 decimal places.
    """
    if billing_type == "Urban" or billing_type == "Rural":
        # None marks the unbounded final slab; 0 marks an unused slab.
        slab_sizes = [30, 70, 100, None]
    else:
        slab_sizes = [50, None, 0, 0]
    result = []
    remaining = units
    for size, rate in zip(slab_sizes, slab_rates):
        if size == 0:
            result.append([0, 0])
            continue
        slab_units = remaining if size is None else min(remaining, size)
        result.append([round(slab_units * rate, 2), slab_units])
        remaining -= slab_units
    return result
if __name__ == '__main__':
app.run(debug=True)
|
983,114 | b14b656a2a362c159056af8543e83b045641bc5b | from PIL import Image
import scipy
import numpy
import scipy.ndimage
class process(object):
    """SIFT-style scale-space helper: builds difference-of-Gaussian octaves.

    Fixes over the original: `sampling` used Python 2 integer `/`, which
    yields floats on Python 3 and breaks numpy.zeros; the removed
    `numpy.int` alias is replaced with the builtin `int`; the deprecated
    `scipy.ndimage.filters` path is updated; an unused local was dropped.
    """

    def __init__(self):
        # Base Gaussian sigma (SIFT's conventional 1.6).
        self.sigma = 1.6

    def createdog(self, imagearr):
        """Return four octaves, each a stack of four DoG image arrays.

        Each octave blurs its (downsampled) input at five scales and
        differences adjacent blurs; the next octave starts from a 2x
        downsample of the previous octave's third DoG image.
        """
        octaves = [None] * 4
        octaves[0] = self.diff(self.gs_blur(self.sigma, imagearr))
        for i in range(1, 4):
            base = self.sampling(octaves[i - 1][2])
            octaves[i] = self.diff(self.gs_blur(self.sigma, base))
        return octaves

    def diff(self, images):
        """Difference adjacent images of a five-image blur stack.

        Args:
            images: list of five arrays at increasing blur.

        Returns:
            numpy array holding the four pairwise differences.
        """
        return numpy.array([images[i] - images[i - 1] for i in range(1, 5)])

    def gs_blur(self, k, img):
        """Blur *img* with five Gaussians of sigma = self.sigma * k**j, j=0..4.

        Args:
            k: scale multiplier between successive blur levels.
            img: image array to blur.

        Returns:
            List of five blurred arrays.
        """
        sigmas = [self.sigma * k ** j for j in range(5)]
        return [scipy.ndimage.gaussian_filter(img, s) for s in sigmas]

    def normalize(self, arr):
        """Rescale pixel intensities so the maximum becomes 255.0."""
        return arr / (arr.max() / 255.0)

    def sampling(self, arr):
        """Downsample *arr* by keeping every second pixel per dimension.

        Output size is ceil(n / 2) so odd dimensions keep their final
        sampled row/column (matches the original Python 2 arithmetic).

        Args:
            arr: 2-D image array.

        Returns:
            Downsampled 2-D int array.
        """
        H = (arr.shape[0] + 1) // 2
        W = (arr.shape[1] + 1) // 2
        new_arr = numpy.zeros((H, W), dtype=int)
        for i in range(H):
            for j in range(W):
                new_arr[i][j] = arr[2 * i][2 * j]
        return new_arr
|
983,115 | 01f23d7c90bd6b8eb6443ed9b8c5c29703c9c304 | """
Resolve Annotator Classes in the Pipeline to Extractor Configs and Methods.
Each Spark NLP Annotator Class is mapped to at least one extractor config.
Every Annotator should have 2 configs. Some might offer multiple config/method pairs, based on model_anno_obj/NLP reference.
- default/minimalistic -> Just the results of the annotations, no confidences or extra metadata
- with meta -> A config that leverages white/black list and gets the most relevant metadata
- with positions -> With Begins/Ends
"""
from sparknlp.annotator import *
from sparknlp.base import *
from nlu.pipe.extractors.extractor_configs_OS import *
def _configs(default):
    """Standard config pair: the minimal extractor plus the full-metadata one."""
    return {
        'default': default,
        'default_full': default_full_config,
    }


# Maps every supported open-source Spark NLP annotator class to its extractor
# configs. An empty-string default marks annotators whose extractor is TODO.
OS_anno2config = {
    NerConverter: _configs(default_ner_converter_config),
    MultiClassifierDLModel: _configs(default_multi_classifier_dl_config),
    PerceptronModel: _configs(default_POS_config),
    ClassifierDLModel: _configs(default_classifier_dl_config),
    BertEmbeddings: _configs(default_word_embedding_config),
    BertForSequenceClassification: _configs(default_classifier_dl_config),
    DistilBertForSequenceClassification: _configs(default_classifier_dl_config),
    BertForTokenClassification: _configs(default_token_classifier_config),
    DistilBertForTokenClassification: _configs(default_token_classifier_config),
    RoBertaForTokenClassification: _configs(default_token_classifier_config),
    XlmRoBertaForTokenClassification: _configs(default_token_classifier_config),
    XlnetForTokenClassification: _configs(default_token_classifier_config),
    AlbertForTokenClassification: _configs(default_token_classifier_config),
    LongformerForTokenClassification: _configs(default_token_classifier_config),
    LongformerEmbeddings: _configs(default_word_embedding_config),
    AlbertEmbeddings: _configs(default_word_embedding_config),
    XlnetEmbeddings: _configs(default_word_embedding_config),
    RoBertaEmbeddings: _configs(default_word_embedding_config),
    XlmRoBertaEmbeddings: _configs(default_word_embedding_config),
    DistilBertEmbeddings: _configs(default_word_embedding_config),
    WordEmbeddingsModel: _configs(default_word_embedding_config),
    ElmoEmbeddings: _configs(default_word_embedding_config),
    BertSentenceEmbeddings: _configs(default_sentence_embedding_config),
    XlmRoBertaSentenceEmbeddings: _configs(default_sentence_embedding_config),
    Doc2VecModel: _configs(default_sentence_embedding_config),
    Doc2VecApproach: _configs(default_sentence_embedding_config),
    UniversalSentenceEncoder: _configs(default_sentence_embedding_config),
    SentenceEmbeddings: _configs(default_sentence_embedding_config),
    Tokenizer: _configs(default_tokenizer_config),
    TokenizerModel: _configs(default_tokenizer_config),
    RegexTokenizer: _configs(default_tokenizer_config),
    DocumentAssembler: _configs(default_document_config),
    SentenceDetectorDLModel: _configs(default_sentence_detector_DL_config),
    SentenceDetector: _configs(default_sentence_detector_config),
    ContextSpellCheckerModel: _configs(default_spell_context_config),
    SymmetricDeleteModel: _configs(default_spell_symmetric_config),
    NorvigSweetingModel: _configs(default_spell_norvig_config),
    LemmatizerModel: _configs(default_lemma_config),
    Normalizer: _configs(default_norm_config),
    NormalizerModel: _configs(default_norm_config),
    DocumentNormalizer: _configs(default_norm_document_config),
    Stemmer: _configs(default_stemm_config),
    # NerDLModel additionally offers a metadata-aware extractor.
    NerDLModel: {
        'default': default_NER_config,
        'meta': meta_NER_config,
        'default_full': default_full_config,
    },
    NerCrfModel: _configs(''),  # TODO
    LanguageDetectorDL: _configs(default_lang_classifier_config),
    DependencyParserModel: _configs(default_dep_untyped_config),
    TypedDependencyParserModel: _configs(default_dep_typed_config),
    SentimentDLModel: _configs(default_sentiment_dl_config),
    SentimentDetectorModel: _configs(default_sentiment_config),
    ViveknSentimentModel: _configs(default_sentiment_vivk_config),
    Chunker: _configs(default_chunk_config),
    NGramGenerator: _configs(default_ngram_config),
    ChunkEmbeddings: _configs(default_chunk_embedding_config),
    StopWordsCleaner: _configs(default_stopwords_config),
    TextMatcherModel: _configs(''),  # TODO
    TextMatcher: _configs(''),  # TODO
    RegexMatcherModel: _configs(''),  # TODO
    RegexMatcher: _configs(''),  # TODO
    DateMatcher: _configs(''),  # TODO
    MultiDateMatcher: _configs(''),  # TODO
    Doc2Chunk: _configs(default_doc2chunk_config),
    Chunk2Doc: _configs(''),  # TODO
    T5Transformer: _configs(default_T5_config),
    MarianTransformer: _configs(default_marian_config),
    YakeKeywordExtraction: _configs(default_yake_config),
    WordSegmenterModel: _configs(default_word_segmenter_config),
    # approaches
    ViveknSentimentApproach: _configs(''),
    SentimentDLApproach: _configs(default_sentiment_dl_config),
    ClassifierDLApproach: _configs(default_classifier_dl_config),
    MultiClassifierDLApproach: _configs(default_multi_classifier_dl_config),
    NerDLApproach: _configs(default_NER_config),
    PerceptronApproach: _configs(default_POS_config),
    # PretrainedPipeline : {
    #     'default' : '',
    # }
}
|
983,116 | 0ebb9e6b953d06ee9c47d145e1ce92763d352492 |
from environment_models.base import BaseEnv
from airobot_utils.pusher_simulator import PusherSimulator
import numpy as np
class PusherEnv(BaseEnv):
    """BaseEnv adapter around a non-rendering PusherSimulator."""

    def __init__(self):
        self.simulator = PusherSimulator(render=False)

        # Closures capture self so BaseEnv can drive the simulator without
        # holding its own reference to it.
        def step(state, action):
            self.simulator.apply_action(action)
            return self.simulator.get_obs()

        def reward(state, action):
            return self.simulator.compute_reward_push(state)

        BaseEnv.__init__(
            self,
            initial_state=self.simulator.get_obs(),
            transition_function=step,
            reward_function=reward,
            state_space_dimension=9,
            action_space_dimension=2
        )

    def reset(self):
        """Reset the underlying simulator to its start state."""
        self.simulator.reset()
|
983,117 | 6926c08d5a29584b447c7527eb15bc41a01c3f77 | import operator
class FreqDict:
    """Frequency counter over hashable keys, backed by a plain dict."""

    def __init__(self):
        self.d = {}

    def add(self, k):
        """Count one more occurrence of *k*."""
        self.d[k] = self.d.get(k, 0) + 1

    def sort(self, descending=True):
        """Return (key, count) pairs ordered by count (descending by default)."""
        return sorted(self.d.items(), key=lambda pair: pair[1], reverse=descending)
983,118 | 38560c518656b9fea73a4353f7c8ee399ba563cd | """
{
"author": "Yucheng Huang",
"difficulty": "medium",
"link": "https://leetcode.com/problems/queue-reconstruction-by-height/description/",
"category": ["greedy"],
"tags": [],
"questions": []
}
"""
"""
思路:
- 反过来,先h降序,再按k值升序,依次插入。这样的好处是每次插入不需要检查,插入的people.k就是index。
- `list.sort(key=lambda p: (-p[0], p[1]))`
- `list.insert(index, obj)`
"""
class Solution:
    def reconstructQueue(self, people):
        """
        :type people: List[List[int]]
        :rtype: List[List[int]]

        Sort tallest-first (ties by k ascending), then insert each person
        at index k: everyone already placed is at least as tall, so k is
        exactly the final position. Sorts *people* in place, as before.
        """
        people.sort(key=lambda entry: (-entry[0], entry[1]))
        queue = []
        for entry in people:
            queue.insert(entry[1], entry)
        return queue
983,119 | d88d526cf83c11fdf7ba1ffbd626439799f38dbb | #!/usr/bin/env python
# Python 2 script: read one line of the form "a+b+c+d+e" from stdin and
# print the sum of its '+'-separated integers.
s = raw_input()
j = 0
i = 0
total = 0
# Exactly five terms are assumed (the "i < 5" bound); each pass scans one
# number from index j up to the next '+' (or end of string) at index k.
while i < 5:
    k = j
    while k < len(s) and s[k] != "+":
        k = k + 1
    total = int(s[j:k]) + total
    j = k + 1
    i = i + 1
print total
|
983,120 | 16fedb6930cd4ded7037fd27666da7c748385a1a | # -*- coding:utf-8 -*-
import logging
# Shared log-line layout: timestamp, logger name, level, message.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Configure the root logger once at import time (INFO and above).
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
def get_logger(name):
    """Return the logger registered under *name*, formatted per basicConfig above."""
    named_logger = logging.getLogger(name)
    return named_logger
|
983,121 | e9d775da2f5795c781791e5e38463bd3d9b8174d | from typing import List
from src.database.daos.example_dao import ExampleDAO
from src.model.errors.business_error import BusinessError
from src.model.example_resource import ExampleResource
class ExampleCRUDService:
    """CRUD operations over ExampleResource with existence validation."""

    @classmethod
    async def add(cls, resource: ExampleResource):
        """Persist a new resource; raises 409 if its ID is already taken."""
        duplicate = await ExampleDAO.find(resource.id)
        if duplicate:
            raise BusinessError(f'Resource with ID {resource.id} already exists.', 409)
        await ExampleDAO.store(resource)

    @classmethod
    async def remove(cls, resource_id: str):
        """Delete a resource; raises 404 if it does not exist."""
        existing = await ExampleDAO.find(resource_id)
        if not existing:
            raise BusinessError(f'There is no resource with id {resource_id}.', 404)
        await ExampleDAO.delete(resource_id)

    @classmethod
    async def update(cls, resource: ExampleResource):
        """Overwrite an existing resource; raises 404 if it does not exist."""
        existing = await ExampleDAO.find(resource.id)
        if not existing:
            raise BusinessError(f'There is no resource with id {resource.id}.', 404)
        await ExampleDAO.store(resource)

    @classmethod
    async def retrieve(cls, resource_id: str) -> ExampleResource:
        """Return the resource object for *resource_id*; raises 404 if absent."""
        resource = await ExampleDAO.find(resource_id)
        if not resource:
            raise BusinessError(f'There is no resource with id {resource_id}.', 404)
        return resource

    @classmethod
    async def retrieve_all(cls) -> List[ExampleResource]:
        """Return every resource stored in the database."""
        return await ExampleDAO.all()
|
983,122 | 517ea607a18c9543197ca3c5eb4769e258b36520 | # -*- coding: utf-8 -*-
import argparse
import os
import subprocess
def parse_args():
    """Parse the command line: a positional directory plus a required -b/--bin."""
    parser = argparse.ArgumentParser()
    parser.add_argument("directory",
                        help="specify the directory.")
    parser.add_argument("-b", "--bin", required=True,
                        help="specify the executive file needed to run.")
    return parser.parse_args()
def print_args(args):
    """Echo the absolute paths derived from the parsed arguments (bin, then dir)."""
    cwd = os.getcwd()
    print(os.path.join(cwd, args.bin))
    print(os.path.join(cwd, args.directory))
def batch_process_ttitrace(dir):
    """Decode every TTI trace file in *dir* to a sibling .csv file.

    Invokes tti_trace_parser_wmp.exe once per directory entry; the output
    name is the input name with its extension replaced by ".csv".
    """
    for f in os.listdir(dir):
        source_file = os.path.join(dir, f)
        # BUG FIX: os.path.splitext handles extension-less names; the old
        # f[:f.find(".")] used find() == -1 as a slice end and silently
        # dropped the last character when the name had no dot.
        target_file = os.path.join(dir, os.path.splitext(f)[0] + ".csv")
        command = "tti_trace_parser_wmp.exe --decode %s --output %s" % (source_file, target_file)
        subprocess.call(command)
def batch_process_ttitrace5g(dir):
    """Run ./eventDecoder over every file in *dir*, writing results to 'out'."""
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        subprocess.call("./eventDecoder %s out" % (path))
def main():
    """Parse arguments and dispatch to the processor matching the binary name."""
    args = parse_args()
    print_args(args)
    dispatch = {
        "tti_trace_parser_wmp.exe": batch_process_ttitrace,
        "eventDecoder": batch_process_ttitrace5g,
    }
    handler = dispatch.get(args.bin)
    # Unknown binaries are silently ignored, as before.
    if handler is not None:
        handler(args.directory)
if __name__ == "__main__":
main()
|
983,123 | e79e09b629f15481ca5fe9c1b2fdbd5dfdd3574f | # (c) 2009, 2012, 2015, 2017 Tim Sawyer, All Rights Reserved
import re
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from adjudicators.models import ContestAdjudicator
from bbr.siteutils import browser_details
from bbr.render import render_auth
from contests.models import ContestResult, CurrentChampion
from move.models import PersonMergeRequest
from bbr.notification import notification
from people.models import Person, PersonAlias, PersonRelation
from people.views import _fetch_adjudication_details
from pieces.models import TestPiece
@login_required
def merge_request(request, pSourcePersonSlug):
    """
    Request move of all results from one person to another
    """
    try:
        source = Person.objects.filter(slug=pSourcePersonSlug)[0]
        destination = Person.objects.filter(id=request.POST['moveToPerson'])[0]
        merge = PersonMergeRequest()
        merge.source_person = source
        merge.destination_person = destination
        merge.lastChangedBy = request.user
        merge.owner = request.user
        merge.save()
        notification(None, merge, 'move', 'person_merge', 'request', request.user, browser_details(request))
    except IndexError:
        # one side (or both) has already been merged away; nothing to record
        pass
    return render_auth(request, 'move/merge_request_received.html')
@login_required
def list_merge_requests(request):
    """
    List all requests for Person merges, annotated with per-side counts
    of adjudications, compositions and arrangements for review.
    """
    if request.user.profile.superuser == False:
        raise Http404
    merge_requests = PersonMergeRequest.objects.filter()
    for merge in merge_requests:
        # Attribute names (including the historical "adjuducations" spelling)
        # are read by the template, so they must stay exactly as-is.
        for attr, queryset in (
            ('from_adjuducations_count', ContestAdjudicator.objects.filter(person=merge.source_person)),
            ('to_adjuducations_count', ContestAdjudicator.objects.filter(person=merge.destination_person)),
            ('from_compositions_count', TestPiece.objects.filter(composer=merge.source_person)),
            ('to_compositions_count', TestPiece.objects.filter(composer=merge.destination_person)),
            ('from_arranger_count', TestPiece.objects.filter(arranger=merge.source_person)),
            ('to_arranger_count', TestPiece.objects.filter(arranger=merge.destination_person)),
        ):
            setattr(merge, attr, queryset.count())
    return render_auth(request, 'move/list_person_merge_requests.html', {'MergeRequests' : merge_requests})
@login_required
@csrf_exempt
def reject_merge(request, pMergePersonRequestSerial):
    """
    Reject a Person merge, notify the submitter (or the site admin when no
    reason is given), and delete the pending request.
    """
    if request.user.profile.superuser == False:
        raise Http404
    try:
        merge = PersonMergeRequest.objects.filter(id=pMergePersonRequestSerial)[0]
    except IndexError:
        raise Http404
    reason = request.POST['reason']
    # A stated reason goes back to the original submitter; otherwise the
    # rejection is routed to the site admin address.
    if reason:
        destination = merge.owner.email
    else:
        destination = 'tsawyer@brassbandresults.co.uk'
    notification(None, merge, 'move', 'person', 'reject', request.user, browser_details(request), pDestination=destination, pAdditionalContext={'Reason' : reason, })
    merge.delete()
    return render_auth(request, 'blank.htm')
@login_required
def merge_action(request, pMergePersonRequestSerial):
    """
    Perform a merge of Persons: repoint every record that references the
    source person (results, pieces, adjudications, relationships, current
    champions, aliases) at the destination person, notify interested
    parties, then delete the source person and the merge request itself.
    Superuser only.
    """
    if request.user.profile.superuser == False:
        raise Http404
    try:
        lMergeRequest = PersonMergeRequest.objects.filter(id=pMergePersonRequestSerial)[0]
    except IndexError:
        raise Http404
    lFromPerson = lMergeRequest.source_person
    lToPerson = lMergeRequest.destination_person
    # move results
    lResultsToMove = ContestResult.objects.filter(person_conducting=lFromPerson)
    for result in lResultsToMove:
        result.person_conducting = lToPerson
        # keep the original display name on the result if none was recorded
        if not result.conductor_name:
            result.conductor_name = lFromPerson.name
        result.lastChangedBy = request.user
        result.save()
    # move compositions/arrangements
    lCompositionsToMove = TestPiece.objects.filter(composer=lFromPerson)
    for piece in lCompositionsToMove:
        piece.composer = lToPerson
        piece.lastChangedBy = request.user
        piece.save()
    lArrangementsToMove = TestPiece.objects.filter(arranger=lFromPerson)
    for piece in lArrangementsToMove:
        piece.arranger = lToPerson
        piece.lastChangedBy = request.user
        piece.save()
    # move adjudications
    lContestsToMove = ContestAdjudicator.objects.filter(person=lFromPerson)
    for result in lContestsToMove:
        # capture the name before repointing, so the historical name survives
        if not result.adjudicator_name:
            result.adjudicator_name = result.person.name
        result.person = lToPerson
        result.lastChangedBy = request.user
        result.save()
    # move both directions of person-to-person relationships
    lSourceRelationshipsToMove = PersonRelation.objects.filter(source_person=lFromPerson)
    for relationship in lSourceRelationshipsToMove:
        relationship.source_person = lToPerson
        relationship.lastChangedBy = request.user
        relationship.save()
    lDestinationRelationshipsToMove = PersonRelation.objects.filter(relation_person=lFromPerson)
    for relationship in lDestinationRelationshipsToMove:
        relationship.relation_person = lToPerson
        relationship.lastChangedBy = request.user
        relationship.save()
    lChampionsToMove = CurrentChampion.objects.filter(conductor=lFromPerson)
    for champ in lChampionsToMove:
        champ.conductor = lToPerson
        champ.lastChangedBy = request.user
        champ.save()
    # Process aliases
    # Matches abbreviated names like "J. Smith" (single initial + surname);
    # such names are too ambiguous to preserve as aliases.
    # NOTE(review): non-raw string relies on Python passing unknown escapes
    # like \w through unchanged.
    lInitialRegex = "^\w\.\s\w+$"
    if lFromPerson.name.strip() != lToPerson.name.strip():
        # if it's just initial surname, don't move
        lMatches = re.match(lInitialRegex, lFromPerson.name)
        if lMatches == None:
            # does it exist already on destination Person?
            try:
                lExistingAlias = PersonAlias.objects.filter(person=lToPerson, name=lFromPerson.name)[0]
            except IndexError:
                # no existing alias - record the old name on the destination
                lNewPreviousName = PersonAlias()
                lNewPreviousName.person = lToPerson
                lNewPreviousName.name = lFromPerson.name
                lNewPreviousName.lastChangedBy = request.user
                lNewPreviousName.owner = request.user
                lNewPreviousName.save()
    # copy each of the source person's existing aliases across, same rules
    for previous_name in PersonAlias.objects.filter(person=lFromPerson):
        # if it's just initial surname, don't move
        lMatches = re.match(lInitialRegex, previous_name.name)
        if lMatches == None:
            # does it exist already on destination Person?
            try:
                lExistingAlias = PersonAlias.objects.filter(person=lToPerson, name=previous_name.name)[0]
            except IndexError:
                lNewPreviousName = PersonAlias()
                lNewPreviousName.person = lToPerson
                lNewPreviousName.name = previous_name.name
                lNewPreviousName.lastChangedBy = request.user
                lNewPreviousName.owner = request.user
                lNewPreviousName.save()
    # notify site staff and the original submitter, then clean up
    notification(None, lMergeRequest, 'move', 'person_merge', 'done', request.user, browser_details(request))
    lSubmitterUser = lMergeRequest.owner
    notification(None, lMergeRequest, 'move', 'person', 'merge', request.user, browser_details(request), pDestination=lSubmitterUser.email)
    lFromPerson.delete()
    lMergeRequest.delete()
    return HttpResponseRedirect('/move/people/')
983,124 | 71f3b52192dfca40cda1b2f1bcb345ff2a742079 | import random
from board import Board
from gameplay import valid
class RandomPlayer:
    """Player that picks a uniformly random legal move."""

    def __init__(self, color):
        self.color = color
        self.name = 'random'

    def next_move(self, board_data, color):
        """Return one random valid position, or "pass" when none exist."""
        board = Board(board_data)
        moves = board.valid_positions(self)
        if len(moves) == 0:
            return "pass"
        # randint (not random.choice) keeps the RNG consumption identical
        chosen = moves[random.randint(0, len(moves) - 1)]
        return chosen

    def nextMoveR(self, board, color, time):
        """Timed-move entry point; the time budget is ignored."""
        return self.next_move(board, color)

    def getGameResult(self, board_data, game_ended=False, opponent_player=None):
        """This player does no learning, so game results are discarded."""
        pass
|
983,125 | f35b99b796338827f902c8a6faec4e80b32ec20d | # coding=utf-8
# Copyright 2019 Inria, Facebook AI Research, Musixmatch spa and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UmBERTo model.
Adapted from ./modeling_camembert.py"""
import logging
from .configuration_umberto import UmbertoConfig
from .file_utils import add_start_docstrings
from .modeling_roberta import (
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
logger = logging.getLogger(__name__)
# Download locations for the two public umBERTo checkpoints.
UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "umberto-wikipedia-uncased-v1": "https://mxmdownloads.s3.amazonaws.com/umberto/umberto-wikipedia-uncased-v1-pytorch_model.bin",
    "umberto-commoncrawl-cased-v1": "https://mxmdownloads.s3.amazonaws.com/umberto/umberto-commoncrawl-cased-v1-pytorch_model.bin",
}
# Docstring fragments injected by @add_start_docstrings; intentionally empty here.
UMBERTO_START_DOCSTRING = r"""
"""
UMBERTO_INPUTS_DOCSTRING = r"""
"""
@add_start_docstrings(
    "", UMBERTO_START_DOCSTRING, UMBERTO_INPUTS_DOCSTRING,
)
class UmbertoModel(RobertaModel):
    r"""
    Examples::
        tokenizer = UmbertoTokenizer.from_pretrained('umberto-commoncrawl-cased-v1')
        model = UmbertoModel.from_pretrained('umberto-commoncrawl-cased-v1')
        input_ids = torch.tensor(tokenizer.encode("Umberto Eco è stato un grande scrittore")).unsqueeze(0)
        outputs = model(input_ids)
        last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
    """
    # umBERTo reuses the RoBERTa architecture unchanged; only the config
    # class and the pretrained-weight archive map are overridden.
    config_class = UmbertoConfig
    pretrained_model_archive_map = UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """umBERTo Model with a `language modeling` head on top. """, UMBERTO_START_DOCSTRING, UMBERTO_INPUTS_DOCSTRING,
)
class UmbertoForMaskedLM(RobertaForMaskedLM):
    r"""
    Examples::
        tokenizer = UmbertoTokenizer.from_pretrained('umberto-commoncrawl-cased-v1')
        model = UmbertoForMaskedLM.from_pretrained('umberto-commoncrawl-cased-v1')
        input_ids = torch.tensor(tokenizer.encode("Umberto Eco è stato un grande scrittore")).unsqueeze(0)
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
    """
    # Architecture inherited from RoBERTa; only config and weight map differ.
    config_class = UmbertoConfig
    pretrained_model_archive_map = UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """umBERTo Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    UMBERTO_START_DOCSTRING,
    UMBERTO_INPUTS_DOCSTRING,
)
class UmbertoForSequenceClassification(RobertaForSequenceClassification):
    r"""
    Examples::
        tokenizer = UmbertoTokenizer.from_pretrained('umberto-commoncrawl-cased-v1')
        model = UmbertoForSequenceClassification.from_pretrained('umberto-commoncrawl-cased-v1')
        input_ids = torch.tensor(tokenizer.encode("Umberto Eco è stato un grande scrittore")).unsqueeze(0)
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """
    # Architecture inherited from RoBERTa; only config and weight map differ.
    config_class = UmbertoConfig
    pretrained_model_archive_map = UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """UmBERTo Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    UMBERTO_START_DOCSTRING,
    UMBERTO_INPUTS_DOCSTRING,
)
class UmbertoForMultipleChoice(RobertaForMultipleChoice):
    r"""
    Examples::
        tokenizer = UmbertoTokenizer.from_pretrained('umberto-commoncrawl-cased-v1')
        model = UmbertoForMultipleChoice.from_pretrained('umberto-commoncrawl-cased-v1')
        choices = ["Umberto Eco è stato un grande scrittore", "Umberto Eco è stato un grande autore"]
        input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0)
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """
    # Architecture inherited from RoBERTa; only config and weight map differ.
    config_class = UmbertoConfig
    pretrained_model_archive_map = UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """umBERTo Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    UMBERTO_START_DOCSTRING,
    UMBERTO_INPUTS_DOCSTRING,
)
class UmbertoForTokenClassification(RobertaForTokenClassification):
    r"""
    Examples::
        tokenizer = UmbertoTokenizer.from_pretrained('umberto-commoncrawl-cased-v1')
        model = UmbertoForTokenClassification.from_pretrained('umberto-commoncrawl-cased-v1')
        input_ids = torch.tensor(tokenizer.encode("Umberto Eco è stato un grande scrittore",
                                                  add_special_tokens=True)).unsqueeze(0)
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
    """
    # Architecture inherited from RoBERTa; only config and weight map differ.
    config_class = UmbertoConfig
    pretrained_model_archive_map = UMBERTO_PRETRAINED_MODEL_ARCHIVE_MAP
|
983,126 | 2f74343327348ea06ebf0f2f4d26adf3af98575b | import time
from . import core_pb2
from . import dataflow_ext_pb2
from . import dataflow_ext_pb2_grpc
from common import logger
class DataflowExt(dataflow_ext_pb2_grpc.DataflowExtServicer):
    def __init__(self):
        # Registry of known session ids. NOTE(review): no writer is visible
        # in this chunk — presumably populated by session-management RPCs
        # elsewhere in this servicer; confirm before relying on it.
        self.sessions = set()
def DescribeDataflow(self, request, context):
sessioncontext = request.context
logger.info("Got DescribeDataflow request, session=%s",
sessioncontext.session_id)
if request.pipeline_id == "pipeline_1":
return dataflow_ext_pb2.DataflowDescription(
pipeline_id = "pipeline_1",
modules=[
dataflow_ext_pb2.DataflowDescription.Module(
id="module_1",
type="reading_data",
label="Load Data",
inputs=[],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_2",
type="one_hot_encoder",
label="ISI: One Hot Encoder",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_3",
type="inputation",
label="ISI: Inputation",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_4",
type="classification",
label="sklearn: Linear SVM",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="labels", type="numpy_array")
],
)
],
connections=[
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_1",
to_module_id="module_2",
from_output_name="data_out",
to_input_name="data_in"
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_2",
to_module_id="module_3",
from_output_name="data_out",
to_input_name="data_in"
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_3",
to_module_id="module_4",
from_output_name="data_out",
to_input_name="data_in"
),
],
)
elif request.pipeline_id == "pipeline_2":
return dataflow_ext_pb2.DataflowDescription(
pipeline_id="pipeline_2",
modules=[
dataflow_ext_pb2.DataflowDescription.Module(
id="module_1",
type="reading_data",
label="Load Data",
inputs=[],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_2",
type="inputation",
label="ISI: Inputation",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_3",
type="classification",
label="Keras: CNN",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="labels", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_4",
type="metric",
label="Accuracy",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="labels", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="value", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_5",
type="metric",
label="ROC AUC",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="labels", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="value", type="numpy_array")
],
),
],
connections=[
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_1",
to_module_id="module_2",
from_output_name="data_out",
to_input_name="data_in",
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_2",
to_module_id="module_3",
from_output_name="data_out",
to_input_name="data_in",
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_3",
to_module_id="module_4",
from_output_name="labels",
to_input_name="labels",
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_3",
to_module_id="module_5",
from_output_name="labels",
to_input_name="labels",
),
],
)
elif request.pipeline_id == "pipeline_3":
return dataflow_ext_pb2.DataflowDescription(
pipeline_id="pipeline_3",
modules=[
dataflow_ext_pb2.DataflowDescription.Module(
id="module_1",
type="reading_data",
label="Load Data",
inputs=[],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_2",
type="inputation",
label="ISI: Inputation",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_3",
type="normalization",
label="sklearn: Normalization",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="data_out", type="numpy_array")
],
),
dataflow_ext_pb2.DataflowDescription.Module(
id="module_4",
type="classification",
label="sklearn: Gradient Boosting",
inputs=[
dataflow_ext_pb2.DataflowDescription.Input(name="data_in", type="numpy_array")
],
outputs=[
dataflow_ext_pb2.DataflowDescription.Output(name="labels", type="numpy_array")
],
)
],
connections=[
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_1",
to_module_id="module_2",
from_output_name="data_out",
to_input_name="data_in",
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_2",
to_module_id="module_3",
from_output_name="data_out",
to_input_name="data_in",
),
dataflow_ext_pb2.DataflowDescription.Connection(
from_module_id="module_3",
to_module_id="module_4",
from_output_name="data_out",
to_input_name="data_in",
),
],
)
    def GetDataflowResults(self, request, context):
        """Stream mocked per-module execution results for the requested pipeline.

        Yields a sequence of ModuleResult messages with artificial delays so
        clients can exercise their progress-reporting code. Pipelines 1 and 3
        share the same 4-module trace; pipeline 2 adds a fifth module. An
        unknown pipeline_id yields nothing (empty stream).
        """
        sessioncontext = request.context
        logger.info("Got GetDataflowResults request, session=%s",
                    sessioncontext.session_id)
        if request.pipeline_id == "pipeline_1" or request.pipeline_id == "pipeline_3":
            time.sleep(1)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_1",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0
            )
            time.sleep(1)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_2",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
            )
            time.sleep(1)
            # module_3 first reports RUNNING at 50% progress, then DONE after
            # a longer simulated computation.
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_3",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.RUNNING,
                progress=0.5,
                execution_time=5.58123,
            )
            time.sleep(10)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_3",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
                execution_time=15.243,
            )
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_4",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
                execution_time=6.135,
            )
        elif request.pipeline_id == "pipeline_2":
            time.sleep(1)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_1",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
            )
            time.sleep(1)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_2",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
            )
            time.sleep(1)
            # As above: RUNNING snapshot followed by the final DONE result.
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_3",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.RUNNING,
                progress=0.5,
                execution_time=12.235,
            )
            time.sleep(10)
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_3",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
                execution_time=6.234,
            )
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_4",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
                execution_time=5.8455,
            )
            # pipeline_2 has an extra fifth module (see GetDataflowDescription).
            yield dataflow_ext_pb2.ModuleResult(
                module_id="module_5",
                response_info=core_pb2.Response(
                    status=core_pb2.Status(code=core_pb2.OK),
                ),
                status=dataflow_ext_pb2.ModuleResult.DONE,
                progress=1.0,
                execution_time=46.23441,
            )
def dataflowExt(server):
    """Register the mock DataflowExt servicer on the given gRPC server."""
    dataflow_ext_pb2_grpc.add_DataflowExtServicer_to_server(
        DataflowExt(), server)
|
983,127 | 07b3b203364e4167d89d393f259fd0fcb8d6004e | import gdal
import numpy as np
import numpy.ma as ma
import sys
sys.path.insert(0, 'python')
import kernels
#from geo_trans import *
from readSent import *
def r_modis(fname, slic=None):
    """Read a raster (optionally a pixel subset) with GDAL.

    Parameters
    ----------
    fname : str
        Path or GDAL subdataset specifier to open.
    slic : sequence or None
        Optional (Lx, Ly) index arrays selecting a subset of pixels.

    Returns
    -------
    numpy.ndarray
        The full array when ``slic`` is None; otherwise the selected pixels
        for a single band, or a (bands, npix) stack for multi-band files.

    Raises
    ------
    IOError
        If the file cannot be opened or reports no raster bands.
    """
    g = gdal.Open(fname)
    if g is None:
        # gdal.Open returns None instead of raising on failure.
        raise IOError("Unable to open raster file: %s" % fname)
    if slic is None:
        return g.ReadAsArray()
    Lx, Ly = slic
    if g.RasterCount == 1:
        return g.ReadAsArray()[Lx, Ly]
    if g.RasterCount > 1:
        return g.ReadAsArray()[:, Lx, Ly]
    raise IOError("Raster file has no bands: %s" % fname)
def ScaleExtent(data, shape):  # used for unifine different array,
    """Upsample ``data`` by integer pixel repetition so it covers ``shape``.

    ``re`` is the zoom factor inferred from the row counts; when the repeated
    array does not exactly match the target, the remainder is padded from the
    tail of the repeated array.
    """
    re = int(shape[0] / (data.shape[0]))
    a = np.repeat(np.repeat(data, re, axis=1), re, axis=0)
    if re * (data.shape[0] - shape[0]) != 0:
        extended = np.zeros(shape)
        extended[:re * (data.shape[0]), :re * (data.shape[0])] = a
        # NOTE(review): the second index of `a` below is a scalar, not a
        # slice — this looks like a missing ':' (compare the first index);
        # as written it broadcasts a single column. Confirm intent.
        extended[re * (data.shape[0]):, re * (data.shape[0]):] = a[re * (data.shape[0]) - shape[0]:, re * (data.shape[0]) - shape[0]]
        return extended
    else:
        return a
#bands = [2,3,4,8,13,11,12]
def get_kk(angles):
    """Build reciprocal Ross-Thick / Li-Sparse BRDF kernels.

    Parameters
    ----------
    angles : sequence
        (view zenith, solar zenith, relative azimuth) angles, in the units
        expected by ``kernels.Kernels``.
    """
    vza, sza, raa = angles
    kk = kernels.Kernels(vza, sza, raa,
                         RossHS=False, MODISSPARSE=True,
                         RecipFlag=True, normalise=1,
                         doIntegrals=False, LiType='Sparse', RossType='Thick')
    return kk
def qa_to_ReW(modisQAs, bands):
    """Convert packed MODIS BRDF QA nibbles into relative weights.

    Each band's 4-bit QA value q is mapped to ``magic**q`` (magic is about
    the golden-ratio conjugate), and weights below ``magic**4`` are zeroed.
    """
    magic = 0.618034
    # modisQAs[3][0] — presumably the QA file path for this tile/date;
    # TODO confirm against the caller's data structure.
    modis = r_modis(modisQAs[3][0])
    # Extract seven 4-bit QA fields, then keep only the requested bands.
    QA = np.array([np.right_shift(np.bitwise_and(modis, np.left_shift(15, i * 4)), i * 4) for i in np.arange(0, 7)])[bands,]
    relative_W = magic**QA
    relative_W[relative_W < magic**4] = 0
    return relative_W
def get_rs(modisQAs, modis_filenames, angles, bands=range(7)):
    """Reconstruct reflectance from MODIS BRDF kernel parameters.

    Combines the isotropic, volumetric (Ross) and geometric (Li) kernel
    weights per band, masks pixels with out-of-range parameters, and pairs
    the result with QA-derived relative weights.

    Returns
    -------
    list
        [brdf (masked array), relative_weights]
    """
    kk = get_kk(angles)
    k_vol = kk.Ross
    k_geo = kk.Li
    br = np.array([r_modis(modis_filenames[i][0]) for i in bands])
    # Values above 32766 are treated as invalid/fill — presumably the MCD43
    # fill value 32767; confirm against the product spec.
    mask = (br[:, 0, :, :] > 32766) | (br[:, 1, :, :] > 32766) | (br[:, 2, :, :] > 32766)
    rw = qa_to_ReW(modisQAs, bands)  # corresponding relative weights
    brdf = br[:, 0, :, :] + (br[:, 1, :, :].T * k_vol).T + (br[:, 2, :, :].T * k_geo).T
    brdf = ma.array(brdf, mask=mask)
    return [brdf, rw]
def get_brdf_six(fname, angles, bands=(7,), flag=None, Linds=None):
    """Compute scaled BRDF reflectance and QA from an MCD43 HDF file.

    Parameters
    ----------
    fname : str
        Path to the MCD43 HDF file (expanded into GDAL subdataset names).
    angles : sequence
        (vza, sza, raa) geometry passed to the kernel builder.
    bands : sequence of int
        MODIS band numbers to read (default: band 7 only).
    flag : int or None
        QA threshold; only used together with ``Linds``.
    Linds : sequence or None
        Optional (Lx, Ly) pixel indices to read instead of the full grid.

    Returns
    -------
    list
        [brdf * 0.001, qa] normally, or [brdf * 0.001, qa <= flag mask]
        when both ``Linds`` and ``flag`` are given.
    """
    temp1 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:BRDF_Albedo_Parameters_Band%d'
    temp2 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:BRDF_Albedo_Band_Mandatory_Quality_Band%d'
    kk = get_kk(angles)
    k_vol = kk.Ross
    k_geo = kk.Li
    # Use `is None` — `== None` invokes __eq__ and is non-idiomatic.
    if Linds is None:
        br = np.array([r_modis(temp1 % (fname, band)) for band in bands])
        qa = np.array([r_modis(temp2 % (fname, band)) for band in bands])
        # isotropic + volumetric*k_vol + geometric*k_geo kernel combination
        brdf = br[:, 0, :, :] + (br[:, 1, :, :].T * k_vol).T + (br[:, 2, :, :].T * k_geo).T
        return [brdf * 0.001, qa]
    Lx, Ly = Linds
    br = np.array([r_modis(temp1 % (fname, band), slic=[Lx, Ly]) for band in bands])
    qa = np.array([r_modis(temp2 % (fname, band), slic=[Lx, Ly]) for band in bands])
    brdf = br[:, 0] + (br[:, 1].T * k_vol).T + (br[:, 2].T * k_geo).T
    if flag is None:
        return [brdf * 0.001, qa]
    mask = (qa <= flag)
    return [brdf * 0.001, mask]
983,128 | 72a0f0bb655f64e3fa1c36ff9a9cd32c15c532dd | #!/usr/bin/env python3
import sys
from subprocess import run, CalledProcessError
try:
import click
except ImportError:
print("Unable to import required dependency: click", file=sys.stderr)
sys.exit(2)
raise AssertionError
from click import ClickException
if sys.version_info[:2] < (3, 9):
print("Unsupported python version:", '.'.join(sys.version_info[:3]), file=sys.stderr)
print("Requires at least Python 3.9")
sys.exit(2)
raise AssertionError
from dataclasses import dataclass
@dataclass
class ExampleData:
    """Metadata needed to build and run one cargo example."""
    # Cargo package ('-p' flag) the example lives in.
    package: str
    # Extra command-line arguments passed to the example binary.
    args: list[str]


# Known examples, keyed by their normalized (underscore) names.
EXAMPLES = {
    "binary_trees": ExampleData(package='zerogc-simple', args=['12']),
    "binary_trees_parallel": ExampleData(package='zerogc-simple', args=['12']),
}
def normalize_example(name):
    """Canonicalize an example name: dashes become underscores."""
    return '_'.join(name.split('-'))
def print_seperator():
    """Print a short horizontal rule surrounded by blank lines."""
    # Equivalent to three print() calls: blank line, rule, blank line.
    print(f"\n{'-' * 8}\n")
@click.command()
@click.option('--all', is_flag=True, help="Run **all** the examples")
@click.option('--example', '-e', 'examples', multiple=True, help="The name of the example to run")
@click.option('--list', '-l', 'list_examples', is_flag=True, help="List available examples")
@click.option('--release', is_flag=True, help="Compile code in release mode")
def run_examples(list_examples: bool, examples: list[str], all: bool, release: bool):
    """Compile and run the selected cargo examples.

    Raises ClickException on conflicting flags, unknown example names, or
    cargo build/run failures. With no selection flags, lists the examples.
    """
    if all:
        if examples:
            raise ClickException("Should not specify explicit examples along with `-all`")
        else:
            examples = sorted(EXAMPLES.keys())
    if list_examples and examples:
        raise ClickException("Should not specify any examples along with '--list'")
    if not examples:
        # Imply '--list' if nothing else is specified
        list_examples = True
    if list_examples:
        click.echo("Listing available examples: [Type --help for more info]")
        for example in EXAMPLES.keys():
            click.echo(f"  {example}")
        sys.exit()
    # Normalize all names
    examples = list(map(normalize_example, examples))
    extra_cargo_args = []
    if release:
        extra_cargo_args += ['--release']
    for example_name in examples:
        if example_name not in EXAMPLES:
            # BUG FIX: the message was missing the f-prefix, so it printed
            # the literal text "{example_name}".
            raise ClickException(f"Invalid example name: {example_name}")
    for example_name in examples:
        example = EXAMPLES[example_name]
        print(f"Compiling example: {example_name}")
        try:
            run(["cargo", "build", "--example", example_name, '-p', example.package, *extra_cargo_args], check=True)
        except CalledProcessError:
            raise ClickException(f"Failed to compile {example_name}")
        print_seperator()
    for index, example_name in enumerate(examples):
        example = EXAMPLES[example_name]
        # BUG FIX: missing f-prefix printed the literal "{example_name}".
        print(f"Running example: {example_name}")
        try:
            run(["cargo", "run", "--example", example_name, '-p', example.package, *extra_cargo_args, '--', *example.args], check=True)
        except CalledProcessError:
            raise ClickException(f"Failed to run example: {example_name}")
        if index + 1 != len(examples):
            print_seperator()
if __name__ == "__main__":
run_examples()
|
983,129 | 6894db611a1b26a40eefd05854e95583e032feb4 | class Pair:
def __init__(self, x, y):
self.a = x
self.b = y
def __eq__(self, p):
if p == None:
return False
elif p.a == self.a and p.b == self.b:
return True
else:
return False
|
983,130 | f25d1fbf0d4e431a69e1b48be08f49ecc26b1740 | import sys
sys.path.append('/home/hari/Desktop/Projects/Thesis_Project/SPFlow_clone/SPFlow/src/')
import unittest
import logging
import numpy as np
from spn.algorithms.SPMNHelper import split_on_decision_node
from spn.algorithms.SPMNHelper import column_slice_data_by_scope
from spn.algorithms.SPMNHelper import get_ds_context
from spn.algorithms.SPMN import SPMNParams
from spn.algorithms.SPMNRework import SPMN
logging.basicConfig(level=logging.DEBUG)
class TestSPMN(unittest.TestCase):
    """Unit tests for SPMN helper routines: scope-based column slicing,
    decision-node splitting, and dataset-context construction."""

    def setUp(self):
        # Build a 10-row synthetic dataset whose columns follow partial_order:
        # X0,X1,x2 | D0 | X3,X4,X5 | D1 | X6,U.
        feature_names = ['X0', 'X1', 'x2', 'D0', 'X3', 'X4', 'X5', 'D1', 'X6', 'U']
        # NOTE(review): partial_order references 'X7', which is absent from
        # feature_names above — confirm whether that is intentional.
        partial_order = [['X0', 'X1', 'x2'], ['D0'], ['X3', 'X4', 'X5'], ['D1'], ['X6', 'X7', 'U']]
        decision_nodes = ['D0', 'D1']
        utility_node = ['U']
        util_to_bin = False
        self.spmn = SPMN(partial_order, decision_nodes, utility_node, feature_names, util_to_bin)
        x012_data = np.arange(30).reshape(10, 3)
        # Two decision values, five rows each, stacked into a 10x1 column.
        d0_data = np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]).reshape(10, 1)
        x345_data = np.arange(30, 60).reshape(10, 3)
        d1_data = np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]).reshape(10, 1)
        x6u_data = np.arange(60, 80).reshape(10, 2)
        self.data = np.concatenate((x012_data, d0_data, x345_data, d1_data, x6u_data), axis=1)

    def test_column_slice_data_by_scope_data10x7_dataScope3456789_sliceScope4678(self):
        """Slicing a 10x7 sub-array by a 4-element scope keeps 4 columns."""
        data = self.data[:, 3:]
        logging.debug(f'initial data is {data}')
        data_scope = [3, 4, 5, 6, 7, 8, 9]
        slice_scope = [4, 6, 7, 8]
        slice_data = column_slice_data_by_scope(data, data_scope, slice_scope)
        logging.debug(f'sliced data is {slice_data}')
        self.assertEqual((10, 4), slice_data.shape, msg=f'sliced data shape should be (10, 4),'
                         f' instead it is {slice_data.shape}')

    def test_split_on_decision_node_decVal01_data10x3(self):
        """Splitting on the D1 column yields two 5-row clusters and values [0, 1]."""
        data = self.data[:, 7:]
        # Shuffle rows to ensure the split does not rely on row order.
        np.random.shuffle(data)
        logging.debug(f'initial data is {data}')
        clusters, dec_vals = split_on_decision_node(data)
        logging.debug(f'clustered data is {clusters}')
        self.assertEqual((5, 2), clusters[0].shape, msg=f'clustered data shape should be (4, 3),'
                         f' instead it is {clusters[0].shape}')
        self.assertEqual((5, 2), clusters[1].shape, msg=f'clustered data shape should be (4, 3),'
                         f' instead it is {clusters[0].shape}')
        self.assertListEqual([0, 1], dec_vals.tolist(), msg=f'decision values should be [0, 1]'
                             f' instead it is {dec_vals.tolist()}')

    def test_get_ds_context(self):
        """Context built for a 5-column scope exposes 5 meta-types and domains."""
        data = self.data[:, 4:9]
        num_of_cols = data.shape[1]
        logging.debug(f'data {data}')
        scope = [4, 5, 6, 7, 8]
        feature_names = self.spmn.params.feature_names
        util_to_bin = self.spmn.params.util_to_bin
        utility_node = self.spmn.params.utility_node
        params = SPMNParams(utility_node=utility_node, feature_names=feature_names, util_to_bin=util_to_bin)
        ds_context = get_ds_context(data, scope, params)
        meta_types = ds_context.get_meta_types_by_scope(scope)
        domains = ds_context.get_domains_by_scope(scope)
        logging.debug(f'meta types {meta_types}')
        logging.debug(f'domains {domains}')
        self.assertEqual(num_of_cols, len(meta_types))
        self.assertEqual(num_of_cols, len(domains))
if __name__ == '__main__':
unittest.main()
|
983,131 | 025e72e2c06dea9237cd0c053b60a35e98e8e9af | raw_folder = "raw_input/"
pre_folder = "preprocessed_input/"
post_folder = "processed_input/"
huge_folder = "huge/"
huge_file = "huge.txt"
flow_st = "Date flow start"
nf_r = "nfdump -r "
old_header = ["Date flow start", "Duration", "Proto", "Src IP Addr:Port",
"Dst IP Addr:Port", "Packets", "Bytes", "Flows"]
no_space_header = ["Date_flow_start", "Duration", "Proto", "Src_IP_Addr:Port",
"Dst_IP_Addr:Port", "Packets", "Bytes", "Flows"]
new_header = ["Date_flow_start", "Hour_flow_start", "Duration", "Proto", "Src_IP_Addr:Port",
"Dst_IP_Addr:Port", "Packets", "Bytes", "Flows"] |
983,132 | 0756bfb114df1ba5aa4ea395060b878a0823e5cf | from django.apps import AppConfig
class CookiecuttersConfig(AppConfig):
    """Django application configuration for the ``cookiecutters`` app."""
    name = 'cookiecutters'
983,133 | 955ca19a2008489b5a6f2af8d3417496cdce74ef | __author__ = 'trunghieu11'
def main():
    # Python 2 source (print statement / raw_input). Reads "n k" from stdin.
    n, k = map(int, raw_input().split())
    # Integer division in Python 2: true when k is within the lower half of n.
    if n / 2 >= k:
        print min(k + 1, n)
    else:
        print max(1, k - 1)
if __name__ == '__main__':
main() |
983,134 | a1771a655859fa730f3c741bbbdbf0b30a74808d | import numpy as np
from collections import defaultdict
from itertools import permutations
from copy import deepcopy
import random, cvxpy as cvx
############# utils ##################
def agg_preferences(prefs_list):
    """
    Aggregate a list of pairwise preferences

    Take a list of pairwise preferences and aggregate them into a dictionary.
    This is the data structure that the other functions in this package use.

    Note
    ----
    The item IDs passed to this function don't strictly need to be integers starting at zero,
    but algorithms that don't take an initial ordering will require that they be when aggregated.
    IDs need to be comparable to each other (e.g. have a method to establish ID1<ID2).

    Parameters
    ----------
    prefs_list : list of tuples
        List containing preferences between to items as tuples (ItemId1, ItemId2),
        With ItemId1 being preferred to ItemId2.

    Returns
    -------
    defaultdict
        Dictionary with keys as (ItemId1, ItemId2) with ItemId1<ItemId2,
        and values counting the number of times ItemId1 was preferred over ItemId2,
        minus the number of times ItemId2 was preffered over ItemId1.
    """
    tallies = defaultdict(int)
    for winner, loser in prefs_list:
        # Keys are always canonically ordered (smaller ID first); the sign
        # of the count records which direction the preference went.
        if winner < loser:
            tallies[(winner, loser)] += 1
        else:
            tallies[(loser, winner)] -= 1
    return tallies
def eval_ordering(ordering, prefs):
    """
    Score an ordering

    Evaluate the number of satisfied preferences minus the number of violated preferences
    under the rank implied by an ordering.

    Parameters
    ----------
    ordering : list
        Ordering to be evaluated, with the items in ranked order.
    prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences')

    Returns
    -------
    int
        Number of satisfied preferences minus violated preferences of the ordering
    """
    total = 0
    n = len(ordering)
    # Walk every pair (earlier item, later item) implied by the ranking.
    for hi in range(1, n):
        late = ordering[hi]
        for lo in range(hi):
            early = ordering[lo]
            # Preference keys are canonically ordered: smaller ID first.
            if early < late:
                total += prefs.get((early, late), 0)
            else:
                total -= prefs.get((late, early), 0)
    return total
############# helpers ##################
#Generate all the possible pairs, will be used with minconflict
def _get_pairs_indices(n):
for i in range(n-1):
for j in range(i+1,n):
yield i,j
#Give a relative order between 2 items and see how many constrains will it satisy minus how many will it violate
def _score_pref(e1_name,e2_name,prefs_dict):
global e1_name1,e2_name1,prefs_dict1
e1_name1=e1_name
e2_name1=e2_name
prefs_dict1=prefs_dict
if e1_name<e2_name:
if (e1_name,e2_name) in prefs_dict:
return prefs_dict[e1_name,e2_name]
return np.random.rand()-0.5
else:
if (e2_name,e1_name) in prefs_dict:
return -prefs_dict[e2_name,e1_name]
return np.random.rand()-0.5
#Swap a pair of items in a list
def _swap(lst,pair_tuple):
lst[pair_tuple[0]],lst[pair_tuple[1]]=lst[pair_tuple[1]],lst[pair_tuple[0]]
#Examine the net effect of having a pair as it is vs. the other way around
def _pair_net_effect(lst, pair_ind, prefs_dict):
    """Score contribution of two positions, as-is vs. swapped.

    Returns (score, rev_score): the summed preference scores of every pair
    involving positions pair_ind, for the current list and for a copy with
    those two positions exchanged. Only pairs touching the two positions are
    scored — the rest of the ordering contributes identically to both.

    Note: _score_pref injects random noise for unseen pairs, so repeated
    calls are not deterministic unless the RNG is seeded.
    """
    lst2 = deepcopy(lst)
    e1_ind, e2_ind = pair_ind
    # Canonicalize so e1_ind is the earlier position.
    if e1_ind > e2_ind:
        e1_ind, e2_ind = e2_ind, e1_ind
    lst2[e1_ind], lst2[e2_ind] = lst2[e2_ind], lst2[e1_ind]
    score = 0
    rev_score = 0
    # Positions strictly before e1_ind.
    for p1 in range(e1_ind):
        score += _score_pref(lst[p1], lst[e1_ind], prefs_dict)
        rev_score += _score_pref(lst2[p1], lst2[e1_ind], prefs_dict)
        score += _score_pref(lst[p1], lst[e2_ind], prefs_dict)
        rev_score += _score_pref(lst2[p1], lst2[e2_ind], prefs_dict)
    # Positions strictly between e1_ind and e2_ind.
    for p2 in range(e1_ind + 1, e2_ind):
        score += _score_pref(lst[e1_ind], lst[p2], prefs_dict)
        rev_score += _score_pref(lst2[e1_ind], lst2[p2], prefs_dict)
        score += _score_pref(lst[p2], lst[e2_ind], prefs_dict)
        rev_score += _score_pref(lst2[p2], lst2[e2_ind], prefs_dict)
    # Positions strictly after e2_ind.
    for p3 in range(e2_ind + 1, len(lst)):
        score += _score_pref(lst[e1_ind], lst[p3], prefs_dict)
        rev_score += _score_pref(lst2[e1_ind], lst2[p3], prefs_dict)
        score += _score_pref(lst[e2_ind], lst[p3], prefs_dict)
        rev_score += _score_pref(lst2[e2_ind], lst2[p3], prefs_dict)
    # Finally the pair itself.
    score += _score_pref(lst[e1_ind], lst[e2_ind], prefs_dict)
    rev_score += _score_pref(lst2[e1_ind], lst2[e2_ind], prefs_dict)
    return (score, rev_score)
############# algorithms ##################
def greedy_order(dict_prefs, list_els):
    """
    Greedy-order

    Sort the items according to how many times each item is preferred over any other items.

    Note
    ----
    This is implemented here to serve as a handy comparison point, but this heuristic is very simple
    and you can make a much faster implementation with a different data structure than the dict of preferences.
    The time complexity of this implementation is O(items^2).

    Parameters
    ----------
    list_els : list
        Items to be ordered
        (e.g. list_els = [i for i in range(nitems)],
        assuming they are enumerated by integers starting at zero)
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences')

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    ordering = list()
    els = deepcopy(list_els)
    # Repeatedly emit the remaining item with the highest net preference.
    while els != []:
        best_score = float("-infinity")
        for e1 in els:
            # Net preference of e1 over every other remaining item; unseen
            # pairs contribute random noise via _score_pref, so the result
            # is not deterministic unless the RNG is seeded.
            score_el = 0
            for e2 in els:
                if e1 == e2:
                    continue
                score_el += _score_pref(e1, e2, dict_prefs)
            if score_el > best_score:
                best_score = score_el
                best_el = e1
        ordering.append(best_el)
        els.remove(best_el)
    return ordering
def _kwiksort(list_els, dict_prefs):
    """Single randomized Kwik-Sort pass.

    Quick-sort-style recursion: pick a random pivot, partition the remaining
    items by their (noisy) pairwise preference against it, and recurse on
    each side. Items preferred over the pivot go left (earlier).
    """
    if list_els == []:
        return []
    pivot = np.random.choice(list_els)
    left = []
    right = []
    for el in list_els:
        if el == pivot:
            continue
        else:
            # Negative score means the pivot is preferred over el.
            if _score_pref(el, pivot, dict_prefs) < 0:
                right.append(el)
            else:
                left.append(el)
    left = _kwiksort(left, dict_prefs)
    right = _kwiksort(right, dict_prefs)
    return left + [pivot] + right
def kwiksort(dict_prefs, list_els, runs=10, random_seed=None):
    """
    Kwik-Sort algorithm

    Sort the items with a similar logic as Quick-Sort.
    As there is randomization in the choice of pivots,
    the algorithm is run for multiple times and the best result is returned.
    Time complexity is O(runs * items * log(items)).

    Parameters
    ----------
    list_els : list
        Items to be ordered
        (e.g. list_els = [i for i in range(nitems)],
        assuming they are enumerated by integers starting at zero)
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences')
    runs : int
        Number of times to run the algorithm
    random_seed : int
        Initial random seed to use

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    best_score = float("-infinity")
    if random_seed is not None:
        np.random.seed(random_seed)
    # NOTE: requires runs >= 1 — with runs == 0, best_order below is unbound.
    for run in range(runs):
        ordering = _kwiksort(list_els, dict_prefs)
        score = eval_ordering(ordering, dict_prefs)
        if score > best_score:
            best_score = score
            best_order = ordering
    return best_order
def pagerank(dict_prefs, nitems, eps_search=20):
    """
    PageRank applied to pairwise preferences

    The PageRank algorithm is applied by constructing a transition matrix from preferences,
    in such a way that items that are preferred over a given item have 'links' to them.
    Then some small regularization is applied (small probability of any other item being preferred over each item),
    and the regularization that provides the best ordering is returned.

    Note
    ----
    This is a naive implementation with dense matrices, initially filled by iterating over the preferences dict,
    and it's not meant for web-scale applications.
    If the number of items is very large (e.g. >= 10^4), you'd be better off implementing this
    with more suitable data structures.

    Parameters
    ----------
    nitems : int
        Number of items to be ordered
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences').
        Elements must be enumarated as integers starting at zero
    eps_search : int
        Length of search grid for epsilon parameter in (0, 0.5]

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    prefs_mat = np.zeros((nitems, nitems))
    for k, v in dict_prefs.items():
        if v == 0:
            continue
        elif v > 0:
            # v > 0 means k[0] preferred over k[1]; store the count with the
            # winning item as the column so column-normalization below turns
            # each column into a transition distribution.
            prefs_mat[k[1], k[0]] += v
        else:
            prefs_mat[k[0], k[1]] -= v
    prefs_mat_orig = prefs_mat.copy()
    eps_grid = list(.5**np.logspace(0, 1, eps_search))
    # BUG FIX: the original initialized `best = -10^5`, which is bitwise XOR
    # (evaluates to -13), not -100000 — any ordering scoring below -13 was
    # silently discarded and best_order could remain None.
    best = float("-infinity")
    best_order = None
    for eps in eps_grid:
        prefs_mat = prefs_mat_orig.copy()
        for i in range(nitems):
            # Regularize (every item gets a small chance of being preferred),
            # then normalize the column to sum to one.
            prefs_mat[:, i] += eps
            tot = np.sum(prefs_mat[:, i])
            prefs_mat[:, i] = prefs_mat[:, i] / tot
        pr = np.ones((nitems, 1)) / nitems
        # Fixed 30 power iterations; no convergence test.
        for i in range(30):
            pr = prefs_mat.dot(pr)
        lst_pagerank = list(np.argsort(pr.reshape(-1)))
        score_this_order = eval_ordering(lst_pagerank, dict_prefs)
        if score_this_order > best:
            best = score_this_order
            best_order = deepcopy(lst_pagerank)
    return best_order
def cvx_relaxation(dict_prefs, nitems):
    """
    Linear relaxation of the optimization problem

    Models the problem as assigning a score to each item, with a hinge loss with arbitrary margin such that,
    for each preference between two items:
        Loss(ItemPref,ItemNonPref)=pos(#prefs * (Score_ItemPref - Score_ItemNonPref) + 1)
    where pos(.) is the positive part of a number (x*(x>0))

    Note
    ----
    The problem is modeled using cvxpy and solved with its default SDP solver in your computer
    (This is most likely to be ECOS or SC)

    Parameters
    ----------
    nitems : int
        Number of items to be ordered
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences').
        Elements must be enumarated as integers starting at zero

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    # One scalar score per item; lower score = earlier in the ordering
    # (the final argsort is ascending).
    r = cvx.Variable(nitems)
    obj = 0
    for k, v in dict_prefs.items():
        # v > 0: k[0] preferred over k[1]; v < 0: the reverse, with weight -v.
        # Both branches build the hinge loss pos(weight*(loser - winner) + 1).
        if v > 0:
            obj += cvx.pos(v*r[k[0]] - v*r[k[1]] + 1)
        else:
            obj += cvx.pos(-v*r[k[1]] + v*r[k[0]] + 1)
    prob = cvx.Problem(cvx.Minimize(obj))
    prob.solve()
    return list(np.argsort(np.array(r.value).reshape(-1)))
def minconflict(dict_prefs, initial_guess):
    """
    Local search with min-conflict metaheuristic

    At each iteration, swaps the pair of items that bring the highest score improvement
    (number of satisfied preferences minus violated preferences).
    Stops when no further improvement is possible.
    Time complexity is O(iterations * items^3), thus not suitable for large problems.

    Parameters
    ----------
    initial_guess : list
        Initial ordering of the items.
        If you didn't have any ranking criteria or prior ordering beforehand,
        you can get a starting ordering by just shuffling the list of items to be ordered,
        or even use the order induced by some other algorithm from here (e.g. greedy-order)
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences').
        Elements must be enumarated as integers starting at zero

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    ordering = deepcopy(initial_guess)
    while True:
        # Find the single swap with the largest positive net effect.
        best = 0
        best_swap = None
        pairs_indices = _get_pairs_indices(len(ordering))
        for pair_ind in pairs_indices:
            score_as_is, score_rev = _pair_net_effect(ordering, pair_ind, dict_prefs)
            improvement = (score_rev - score_as_is)
            if improvement > best:
                best = improvement
                best_swap = pair_ind
        if best_swap is None:
            # Local optimum: no swap improves the score.
            break
        else:
            _swap(ordering, best_swap)
    return ordering
def random_swaps(dict_prefs, initial_guess, iterations=50000, repetitions=3, random_seed=None):
    """
    Local search by random swaps

    At each iteration, takes two items and swaps them if that improves the ordering.
    Time complexity is O(repetitions * iterations * items + repetitions * items^2)

    Parameters
    ----------
    initial_guess : list
        Initial ordering of the items.
        If you didn't have any ranking criteria or prior ordering beforehand,
        you can get a starting ordering by just shuffling the list of items to be ordered,
        or even use the order induced by some other algorithm from here (e.g. greedy-order)
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences').
        Elements must be enumarated as integers starting at zero
    iterations : int
        Number of iterations under each run.
        This is the total number of trials regardless of whether the random pair ends up being swapped
    repetitions : int
        Number of times to repeat the procedure.
        Orderings are evaluated after the number of iterations is over, and the best one is returned
    random_seed : int
        Initial random seed to use

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    # BUG FIX: the original initialized `best = 0`, so when every repetition
    # scored <= 0 `best_ordering` was never assigned and the final return
    # raised UnboundLocalError. Start at -inf so the first repetition always
    # sets it.
    best = float("-infinity")
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)
    for rep in range(repetitions):
        ordering = deepcopy(initial_guess)
        for it in range(iterations):
            candidates_ind = random.sample(range(len(ordering)), 2)
            score_as_is, score_rev = _pair_net_effect(ordering, candidates_ind, dict_prefs)
            # Accept only improving swaps (hill climbing).
            if score_rev > score_as_is:
                _swap(ordering, candidates_ind)
        score = eval_ordering(ordering, dict_prefs)
        if score > best:
            best = score
            best_ordering = deepcopy(ordering)
    return best_ordering
def metropolis_hastings(dict_prefs, initial_guess, iterations=50000, explore_fact=2, random_seed=None):
    """
    Local search with Metropolis-Hasting metaheuristic

    At each iterations, choose a pair of items at random.
    If swapping them improves the ordering, it swaps them.
    If not, it swaps them with a probability inversely proportional to the score decrease of swapping the items.
    Time complexity is O(iterations * items)

    Parameters
    ----------
    initial_guess : list
        Initial ordering of the items.
        If you didn't have any ranking criteria or prior ordering beforehand,
        you can get a starting ordering by just shuffling the list of items to be ordered,
        or even use the order induced by some other algorithm from here (e.g. greedy-order)
    dict_prefs : defaultdict
        Aggregated preferences (see function 'agg_preferences').
        Elements must be enumarated as integers starting at zero
    iterations : int
        Number of iterations under each run.
        This is the total number of trials regardless of whether the random pair ends up being swapped
    explore_fact : float
        Parameter for acceptance probability.
        Pairs that don't improve the ordering are swapped with probability:
            p(swap) = explore_fact^score_decrease
    random_seed : int
        Initial random seed to use

    Returns
    -------
    list
        Ordered list according to this heuristic
    """
    ordering = deepcopy(initial_guess)
    # BUG FIX: the original only assigned best_ordering when the running
    # score first exceeded 0; if that never happened, the final return raised
    # UnboundLocalError. Fall back to the initial guess.
    best_ordering = deepcopy(ordering)
    best = 0
    current_score = 0
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)
    for it in range(iterations):
        candidates_ind = random.sample(range(len(ordering)), 2)
        score_as_is, score_rev = _pair_net_effect(ordering, candidates_ind, dict_prefs)
        if score_rev > score_as_is:
            # Improving swap: always accept and track the best state seen.
            _swap(ordering, candidates_ind)
            current_score += (score_rev - score_as_is)
            if current_score > best:
                best = current_score
                best_ordering = deepcopy(ordering)
        else:
            # Worsening swap: accept with probability explore_fact^decrease
            # (decrease <= 0, so the probability is in (0, 1]).
            criterion = (explore_fact)**(score_rev - score_as_is)
            if np.random.random() <= criterion:
                _swap(ordering, candidates_ind)
                current_score += (score_rev - score_as_is)
    return best_ordering
|
983,135 | aec57a0b68279040439792b8cd6872b90052b11c | from django.db import models
class Argument(models.Model):
    """A named argument bound to an emoji character for a given ChatType.

    NOTE(review): the previous docstring described "a type of Reaction on a
    message" — it appears copy-pasted from a sibling model. Based on the
    fields here, this model maps a character (emoji) to an argument name,
    scoped per chat type, with (character, chat_type) unique together.
    """
    # Slug used as the argument's identifier when attached to a message.
    name = models.SlugField(
        max_length=255,
        help_text="The name of the argument that will be attached to a \
        message.")
    # The emoji/character that triggers this argument.
    character = models.CharField(max_length=100)
    chat_type = models.ForeignKey(
        'ChatType', on_delete=models.CASCADE)

    def __str__(self):
        return ':{0}: = {1} for chat type {2}'.format(
            self.character,
            self.name,
            self.chat_type
        )

    class Meta:
        # A character may only be mapped once per chat type.
        unique_together = ('character', 'chat_type')
|
983,136 | 77d30bdd4b126e64c0beb82009441842d689e810 | import re
import pytest
from .._util import *
def test_ProtocolError():
    """ProtocolError stringifies to its message; status hint defaults to 400."""
    try:
        raise ProtocolError("foo")
    except ProtocolError as e:
        assert str(e) == "foo"
        assert e.error_status_hint == 400
    # Explicit error_status_hint overrides the default.
    try:
        raise ProtocolError("foo", error_status_hint=418)
    except ProtocolError as e:
        assert str(e) == "foo"
        assert e.error_status_hint == 418
def test_validate():
    """validate() raises ProtocolError on mismatch, else returns named groups."""
    my_re = re.compile(br"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
    with pytest.raises(ProtocolError):
        validate(my_re, b"0.")
    groups = validate(my_re, b"0.1")
    assert groups == {"group1": b"0", "group2": b"1"}
def test_Sentinel():
    """Sentinel objects repr as their name and compare/hash consistently."""
    S = Sentinel("S")
    assert repr(S) == "S"
    assert S == S
    assert S in {S}
def test_bytesify():
    """bytesify() coerces bytes-like and ASCII str to bytes, rejects the rest."""
    assert bytesify(b"123") == b"123"
    assert bytesify(bytearray(b"123")) == b"123"
    assert bytesify("123") == b"123"
    # Non-ASCII text and non-string types are rejected.
    with pytest.raises(UnicodeEncodeError):
        bytesify(u"\u1234")
    with pytest.raises(TypeError):
        bytesify(10)
|
983,137 | 076035d8aad96efbe2f8edf4a516ea6b80128280 | """empty message
Revision ID: 4d7e882e5f43
Revises: None
Create Date: 2015-01-09 10:18:03.465142
"""
# revision identifiers, used by Alembic.
revision = '4d7e882e5f43'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema: notification, device, relay, contact,
    sensor_type, users (with unique email index), relay_log and sensor
    tables. Parent tables are created before the tables that reference them.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('notification',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=160), nullable=True),
    sa.Column('subject', sa.String(length=100), nullable=True),
    sa.Column('created_ts', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('device',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(length=120), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('relay',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(length=40), nullable=True),
    sa.Column('switch_on_text', sa.String(length=16), nullable=True),
    sa.Column('switch_off_text', sa.String(length=16), nullable=True),
    sa.Column('arduino_pin', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('contact',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=40), nullable=True),
    sa.Column('phone', sa.String(length=40), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('enable_sms_warnings', sa.Boolean(), nullable=True),
    sa.Column('enable_email_warnings', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('sensor_type',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('unit', sa.Unicode(length=5), nullable=True),
    sa.Column('description', sa.String(length=120), nullable=True),
    sa.Column('name', sa.String(length=120), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('last_seen', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    # relay_log references relay; sensor references device and sensor_type.
    op.create_table('relay_log',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('relay_id', sa.Integer(), nullable=True),
    sa.Column('created_ts', sa.DateTime(), nullable=True),
    sa.Column('from_state', sa.Integer(), nullable=True),
    sa.Column('to_state', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['relay_id'], ['relay.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('sensor',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('sensor_code', sa.String(length=5), nullable=True),
    sa.Column('description', sa.String(length=40), nullable=True),
    sa.Column('save_to_rrd_db', sa.Boolean(), nullable=True),
    sa.Column('rrd_db_path', sa.String(), nullable=True),
    sa.Column('emit_every', sa.Integer(), nullable=True),
    sa.Column('max_possible_value', sa.Float(precision=2, decimal_return_scale=2), nullable=True),
    sa.Column('max_warning_value', sa.Float(precision=2, decimal_return_scale=2), nullable=True),
    sa.Column('min_possible_value', sa.Float(precision=2, decimal_return_scale=2), nullable=True),
    sa.Column('min_warning_value', sa.Float(precision=2, decimal_return_scale=2), nullable=True),
    sa.Column('observable_measurements', sa.Integer(), nullable=True),
    sa.Column('observable_alarming_measurements', sa.Integer(), nullable=True),
    sa.Column('warning_wait_minutes', sa.Integer(), nullable=True),
    sa.Column('enable_warnings', sa.Boolean(), nullable=True),
    sa.Column('type_id', sa.Integer(), nullable=True),
    sa.Column('device_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['device_id'], ['device.id'], ),
    sa.ForeignKeyConstraint(['type_id'], ['sensor_type.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the tables created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    # Child tables go first so their foreign keys (relay_log -> relay,
    # sensor -> device / sensor_type) are released before the referenced
    # tables are dropped; the users index must be dropped before 'users'.
    op.drop_table('sensor')
    op.drop_table('relay_log')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # The tables below are created in a part of upgrade() outside this
    # view — presumably in the same dependency-safe order. TODO confirm.
    op.drop_table('sensor_type')
    op.drop_table('contact')
    op.drop_table('relay')
    op.drop_table('device')
    op.drop_table('notification')
    ### end Alembic commands ###
|
983,138 | 6f5b56cbcb476fe5ec1e9c3ef9314b9bfae8ff64 | import utils.mySqlConn as mySqlConn
import pandas as pd
import matplotlib.pyplot as plt
# Report: total settled expenditure ("Valor Liquidado") per superior organ
# and budget program, with small slices collapsed into a single "Outros"
# bucket, rendered as a pie chart.
query = '''select O.NM_ORGAO_SUPERIOR AS 'Orgão Superior',
P.NM_PROGRAMA_ORCAMENTARIO AS 'Programa Orçamentário',
sum(F.VLR_LIQUIDADO) AS 'Valor Liquidado'
from TBL_FATO F,
TBL_DIMENSAO_ORGAO O,
TBL_DIMENSAO_PROGRAMA P
where O.PK_ORGAO = F.FK_ORGAO
AND P.PK_PROGRAMA = F.FK_PROGRAMA
And VLR_LIQUIDADO > 0
group by O.NM_ORGAO_SUPERIOR, P.NM_PROGRAMA_ORCAMENTARIO
order by sum(F.VLR_LIQUIDADO) DESC'''

df = pd.read_sql(query, con=mySqlConn.getConnection())

# Rows at or below the threshold are relabelled "Outros" in both grouping
# columns (the mask depends only on 'Valor Liquidado', so it can be reused).
below_threshold = df['Valor Liquidado'] <= 700000000000.00
df.loc[below_threshold, 'Orgão Superior'] = 'Outros'
df.loc[below_threshold, 'Programa Orçamentário'] = 'Outros'

df = df.groupby(['Orgão Superior', 'Programa Orçamentário']).agg({'Valor Liquidado': sum})
df.plot(kind='pie', y='Valor Liquidado',
        shadow=False, label='', legend=True, autopct='%1.1f%%', startangle=149,
        title='Execução da despesa por Órgão Superior e Programa Orçamentário', figsize=(15, 10))
print(df)
plt.show()
|
983,139 | 47edff34942ab98ae223fc9b6d9d403095ddc2fd | import time
import opentracing
from nose.tools import assert_equals
class TestOTSpan:
    """Behavioural tests for the OpenTracing tracer's span objects."""

    def setUp(self):
        """ Clear all spans before a test run """
        recorder = opentracing.tracer.recorder
        recorder.clear_spans()

    def tearDown(self):
        """ Do nothing for now """
        return None

    def test_span_interface(self):
        """The span exposes the full OpenTracing API surface."""
        span = opentracing.tracer.start_span("blah")
        assert hasattr(span, "finish")
        assert hasattr(span, "set_tag")
        assert hasattr(span, "tags")
        assert hasattr(span, "operation_name")
        assert hasattr(span, "set_baggage_item")
        assert hasattr(span, "get_baggage_item")
        assert hasattr(span, "context")
        assert hasattr(span, "log")

    def test_span_ids(self):
        """span_id/trace_id are hex strings within the unsigned 64-bit range."""
        count = 0
        while count <= 1000:
            count += 1
            span = opentracing.tracer.start_span("test_span_ids")
            context = span.context
            assert 0 <= int(context.span_id, 16) <= 18446744073709551615
            assert 0 <= int(context.trace_id, 16) <= 18446744073709551615

    def test_span_fields(self):
        """Operation name and tags round-trip through the span."""
        span = opentracing.tracer.start_span("mycustom")
        assert_equals("mycustom", span.operation_name)
        assert span.context
        span.set_tag("tagone", "string")
        span.set_tag("tagtwo", 150)
        assert_equals("string", span.tags['tagone'])
        assert_equals(150, span.tags['tagtwo'])

    def test_span_queueing(self):
        """Finished spans accumulate on the recorder queue."""
        recorder = opentracing.tracer.recorder
        count = 1
        while count <= 20:
            count += 1
            span = opentracing.tracer.start_span("queuethisplz")
            span.set_tag("tagone", "string")
            span.set_tag("tagtwo", 150)
            span.finish()
        assert_equals(20, recorder.queue_size())

    def test_sdk_spans(self):
        """A finished custom span is queued as a fully-populated SDK span."""
        recorder = opentracing.tracer.recorder
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag("tagone", "string")
        span.set_tag("tagtwo", 150)
        span.set_tag('span.kind', "entry")
        time.sleep(0.5)
        span.finish()
        spans = recorder.queued_spans()
        # Bug fix: the original "assert 1, len(spans)" asserted the constant
        # 1 (always true) with len(spans) as the failure message, so the
        # length was never actually checked.
        assert_equals(1, len(spans))
        sdk_span = spans[0]
        assert_equals('sdk', sdk_span.n)
        assert_equals(None, sdk_span.p)
        assert_equals(sdk_span.s, sdk_span.t)
        assert sdk_span.ts
        assert sdk_span.ts > 0
        assert sdk_span.d
        assert sdk_span.d > 0
        assert_equals("py", sdk_span.ta)
        assert sdk_span.data
        assert sdk_span.data.sdk
        assert_equals('entry', sdk_span.data.sdk.Type)
        assert_equals('custom_sdk_span', sdk_span.data.sdk.name)
        assert sdk_span.data.sdk.custom
        assert sdk_span.data.sdk.custom.tags

    def test_span_kind(self):
        """span.kind tags map onto SDK span types and numeric kinds."""
        recorder = opentracing.tracer.recorder
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag('span.kind', "consumer")
        span.finish()
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag('span.kind', "server")
        span.finish()
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag('span.kind', "producer")
        span.finish()
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag('span.kind', "client")
        span.finish()
        span = opentracing.tracer.start_span("custom_sdk_span")
        span.set_tag('span.kind', "blah")
        span.finish()
        spans = recorder.queued_spans()
        # Bug fix: "assert 5, len(spans)" was a no-op (see test_sdk_spans).
        assert_equals(5, len(spans))
        # consumer/server -> entry, producer/client -> exit,
        # anything else -> intermediate.
        span = spans[0]
        assert_equals('entry', span.data.sdk.Type)
        span = spans[1]
        assert_equals('entry', span.data.sdk.Type)
        span = spans[2]
        assert_equals('exit', span.data.sdk.Type)
        span = spans[3]
        assert_equals('exit', span.data.sdk.Type)
        span = spans[4]
        assert_equals('intermediate', span.data.sdk.Type)
        span = spans[0]
        assert_equals(1, span.k)
        span = spans[1]
        assert_equals(1, span.k)
        span = spans[2]
        assert_equals(2, span.k)
        span = spans[3]
        assert_equals(2, span.k)
        span = spans[4]
        assert_equals(3, span.k)
|
983,140 | 47d73fad0d79a72999e0b8238e4f49791d3af431 | #!/usr/bin/env
import rospy
import baxter_interface
# Manual smoke test of Baxter's left gripper: start a ROS node and dump
# the gripper's current parameter and state readings.
# NOTE(review): Python 2 print statements; requires a live ROS/Baxter stack.
rospy.init_node("nuevo")
gripper_izq = baxter_interface.Gripper('left')
#gripper_izq.calibrate()
print gripper_izq.parameters()
print gripper_izq.gripping()
print gripper_izq.force()
983,141 | cb4a0050b512cd18bcf4fefe54963337d4e6054b | from integration_tests.api_test_case import ApiTestCase
from recipe.models import Recipes, Comments
class TagsTest(ApiTestCase):
    """Integration tests for the recipe comments endpoints."""

    def create_recipe(self):
        """Create a draft recipe plus one seed comment; return the API payload."""
        url = '/0/recipes'
        data = {
            'commensals': 1,
            'private': 1,
            'draft': 1,
            'name': 'Recipe',
        }
        headers = self.login()
        resp = self.client.post(url, data=data, **headers)
        ret = resp.data['recipe']
        recipe = Recipes.objects.get(pk=ret['id'])
        # The comment is created only for its database side effect, so the
        # return value is deliberately not bound (the original kept an
        # unused local).
        Comments.objects.create(recipe=recipe, chef=self.user, comment='The Comment')
        return ret

    def test_get_comments(self):
        """
        Test get comments
        """
        recipe = self.create_recipe()
        url = '/0/recipes'
        url += '/%i/comments' % recipe['id']
        # Anonymous access must be rejected.
        resp = self.client.get(url)
        self.assertPermissionDenied(resp)
        headers = self.login()
        resp = self.client.get(url, **headers)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('comments', resp.data)
        comments = resp.data['comments']
        self.assertEqual(len(comments), 1)
        self.assertIn('id', comments[0])
        self.assertIn('comment', comments[0])
        self.assertIn('chef', comments[0])
        self.assertIn('id', comments[0]['chef'])
        self.assertIn('name', comments[0]['chef'])

    def test_add_comment(self):
        """
        Test add comment
        """
        recipe = self.create_recipe()
        url = '/0/recipes'
        url += '/%i/comments' % recipe['id']
        data = {'comment': 'the comment'}
        # Anonymous posting must be rejected.
        resp = self.client.post(url, data=data)
        self.assertPermissionDenied(resp)
        headers = self.login()
        resp = self.client.post(url, data=data, **headers)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('comment', resp.data)
        comment = resp.data['comment']
        self.assertIn('id', comment)
        self.assertIn('comment', comment)
|
983,142 | 2a432ebfb143695ca92f089e49e1cda849775e9c | # Generated by Django 2.2.3 on 2019-08-01 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add German ('_de') variants of the translatable
    keyword/name fields on badge, emoji, emoji group and language."""

    dependencies = [
        ('openbook_common', '0014_auto_20190709_1919'),
    ]

    operations = [
        migrations.AddField(
            model_name='badge',
            name='keyword_description_de',
            field=models.CharField(blank=True, max_length=64, null=True, unique=True, verbose_name='keyword_description'),
        ),
        migrations.AddField(
            model_name='emoji',
            name='keyword_de',
            field=models.CharField(max_length=16, null=True, verbose_name='keyword'),
        ),
        migrations.AddField(
            model_name='emojigroup',
            name='keyword_de',
            field=models.CharField(max_length=32, null=True, verbose_name='keyword'),
        ),
        migrations.AddField(
            model_name='language',
            name='name_de',
            field=models.CharField(max_length=64, null=True, verbose_name='name'),
        ),
    ]
|
983,143 | c7a221071282f994181580552fc86026476d7d30 |
# Factorial examples: spelled out, iterative, and recursive.
factorial_6 = 1 * 2 * 3 * 4 * 5 * 6
#print(factorial_6)
factorial_7 = 1 * 2 * 3 * 4 * 5 * 6 * 7

# "Write a factorial function" (original comment was in Russian)
def fact(a):
    """Return a! computed iteratively."""
    count = 1
    for i in range(1, a + 1):
        count *= i
    return count

print(fact(6))

# Recursion (this definition shadows the iterative one above)
def fact(a):
    """Return a! computed recursively."""
    if a == 0:
        return 1
    # Bug fix: the recursive step must multiply by a; the original
    # "return fact(a-1)" made the function always return 1.
    return a * fact(a - 1)

fact(6)
# There is a recursion limit - somewhere around ~1000
|
983,144 | 2558fdde4ff6ff7fd607806f1a4ff2f4efe5370b | import os
import subprocess
import time
if __name__ == '__main__':
    # Forward the Proxmox credentials from the environment into the
    # TF_VAR_* variables Terraform reads.
    # NOTE(review): os.environ.get() returns None for a missing variable,
    # and assigning None into os.environ raises TypeError — confirm that
    # hostname/username/password are always exported by the caller.
    os.environ["TF_VAR_proxmox_url"] = f"https://{os.environ.get('hostname')}/api2/json"
    os.environ["TF_VAR_proxmox_username"] = os.environ.get("username")
    os.environ["TF_VAR_proxmox_password"] = os.environ.get("password")
    print("Provisioning vms")
    # Blocks until terraform exits; exit status is not checked.
    subprocess.Popen("terraform apply -auto-approve -var-file=config.tfvar".split(' '), env=os.environ).wait()
    # Wait for deployment to stabilize
    print("Waiting for deployment to stabilize")
    time.sleep(60)
    print("Wait over")
    """
    Create cluster
    """
    subprocess.Popen("ansible-playbook -u root -i inventory.ini install.yml".split(' '), env=os.environ).wait()
    subprocess.Popen("bash create_cluster.sh".split(' '), env=os.environ).wait()
|
983,145 | d135a360f45ee7c282d1f8b8516396cd7e9fbe17 | from proveniencia import NotaProveniencia
# Build the provenance note object and trigger its retrieval.
nota = NotaProveniencia()
nota.getProveniencia()
# Workflow: unzip raw reads, count sequenced indices, demultiplex paired
# reads with Bayexer, and run FastQC on every read file.

# Pseudo-target listing every final output the pipeline should produce.
rule targets:
    input:
        expand("reads/{read}.fastq", read=config["reads"]),
        expand("reads/{sample}_index.count.txt", sample=config["samples"]),
        expand("reads/{sample}.demultiplexed/{iname}.read1.fastq", sample=config["samples"], iname=config["index_names"]),
        expand("reads/{sample}.demultiplexed/{iname}.read2.fastq", sample=config["samples"], iname=config["index_names"]),
        expand("reads/QC/{read}_fastqc.html", read=config["reads"])

# Decompress a gzipped fastq, keeping the original archive.
rule unzip:
    input:
        "reads/{read}.fastq.gz"
    output:
        "reads/{read}.fastq"
    shell:
        "gunzip -c {input} > {output}"

rule index_count:
    """Count occurence of each sequenced index.
    If input is fastq file of original reads use
    sed -n '1~4p' | cut -d: -f10 | ...
    to extract the index sequences from the sequence IDs.
    """
    input:
        "reads/{sample}_index.fastq"
    output:
        "reads/{sample}_index.count.txt"
    shell:
        " sed -n '2~4p' {input}"  # every 4th line starting at 2 = the sequence lines
        " | sort | uniq -c | sort -nr" # get unique indices sorted by count
        " > {output}"

# Split paired reads per index with Bayexer, driven by the index sheet.
rule demultiplex:
    input:
        read1 = "reads/{sample}.R1.fastq",
        read2 = "reads/{sample}.R2.fastq",
        iread = "reads/{sample}_index.fastq",
        isheet = "reads/index_sheet.csv"
    output:
        expand("reads/{{sample}}.demultiplexed/{iname}.read1.fastq", iname=config["index_names"]),
        expand("reads/{{sample}}.demultiplexed/{iname}.read2.fastq", iname=config["index_names"])
    params:
        outdir = "{sample}.demultiplexed/"
    shell:
        # "rm -r {params.outdir};" # needed because Bayexer complains if dir exists BUG
        "Bayexer -i {input.read1} {input.read2} -j {input.iread} -o {params.outdir} -x {input.isheet}"

# Per-file quality report.
rule fastqc:
    input:
        "reads/{read}.fastq"
    output:
        "reads/QC/{read}_fastqc.html"
    shell:
        "fastqc -o ./reads/QC/ {input}"
|
983,147 | 327e12ff0d2e20ba075efb31bd1133d4020ea81d | from PIL import Image
def join(paths, flag='horizontal'):
    """Concatenate the images at *paths* into one montage and save it.

    flag='horizontal' pastes the images left-to-right into horizontal.png;
    flag='vertical' pastes them top-to-bottom into vertical.png.
    (Generalization: the flag parameter previously accepted only
    'horizontal' and silently did nothing otherwise.)
    """
    imgs = [Image.open(path) for path in paths]
    if flag == 'horizontal':
        width = sum(img.size[0] for img in imgs)
        # Canvas height follows the last image, matching the original
        # behaviour; assumes all inputs share one height — TODO confirm.
        height = imgs[-1].size[1]
        joint = Image.new('RGB', (width, height))
        x_axis = 0
        for img in imgs:
            joint.paste(img, (x_axis, 0))
            x_axis += img.size[0]
        joint.save('horizontal.png')
    elif flag == 'vertical':
        # Symmetric layout: canvas width follows the last image.
        width = imgs[-1].size[0]
        height = sum(img.size[1] for img in imgs)
        joint = Image.new('RGB', (width, height))
        y_axis = 0
        for img in imgs:
            joint.paste(img, (0, y_axis))
            y_axis += img.size[1]
        joint.save('vertical.png')

paths = ['train.jpg', 'valid.jpg', 'test.jpg']
join(paths)
|
983,148 | 0c9c3a0f728f17501110d37336eaa9d4219d20d8 | from src.articles.domain.entities import credential, category, article, tag, \
encryptor
from src.articles.domain.values import password
def tag_factory():
    """Return the four sample tags shared by the article tests."""
    names = ('verbos', 'vocabulario', 'subtantivo', 'gramatica')
    return [tag.Tag(name) for name in names]
def test_orm_can_save_article(session):
    """A persisted article can be read back by title."""
    sample = article.Article(
        title='An article',
        description='A great description',
        content='This is a useful article',
        tags=tag_factory(),
        category=category.Category.GUIDE,
    )
    session.add(sample)
    session.commit()
    saved = session.query(article.Article).first()
    assert saved.title == 'An article'
def test_orm_deletes_a_tag_used_in_article(session):
    """Deleting one tag removes it from the article's tag collection."""
    sample = article.Article(
        title='An article',
        description='A great description',
        content='This is a useful article',
        tags=tag_factory(),
        category=category.Category.GUIDE,
    )
    session.add(sample)
    session.commit()
    doomed = session.query(tag.Tag).first()
    session.delete(doomed)
    session.commit()
    remaining = session.query(article.Article).first().tags
    assert len(remaining) == 3
def test_orm_saves_date_of_article(session):
    """Persisting an article stamps its created_on value."""
    sample = article.Article(
        title='An article',
        description='A great description',
        content='This is a useful article',
        tags=tag_factory(),
        category=category.Category.GUIDE,
    )
    session.add(sample)
    session.commit()
    assert session.query(article.Article).first().created_on
def test_can_save_credential(session):
    """A credential round-trips through the session unchanged."""
    cred = credential.Credential(username='tserewara')
    cred.set_password('Password1')
    session.add(cred)
    session.commit()
    assert session.query(credential.Credential).first() == cred
def test_can_verify_credential_retrieved(session):
    """The password still verifies after a database round-trip."""
    cred = credential.Credential(username='tserewara')
    cred.set_password('Password1')
    session.add(cred)
    session.commit()
    fetched = session.query(credential.Credential).first()
    assert fetched.verify_password('Password1')
def test_returns_true_when_object_is_the_same_after_retrieving(session):
    """Retrieved credential equals the original and keeps its Password value object."""
    cred = credential.Credential(username='tserewara')
    cred.set_password('Password1')
    session.add(cred)
    session.commit()
    fetched = session.query(credential.Credential).first()
    assert fetched == cred
    assert isinstance(fetched._password, password.Password)
|
983,149 | f8ad5332a41bde0b44f5b24167d76a0893ed0eb3 | #!/usr/bin/env python3
import argparse
import urllib.parse
import sys
# Percent-encode the URL given on the command line (spaces become '+').
arg_parser = argparse.ArgumentParser(description="Encode a URL")
arg_parser.add_argument("url", type=str, nargs=1, help="url to encode")
cli_args = arg_parser.parse_args()
print(urllib.parse.quote_plus(str(cli_args.url[0])))
|
983,150 | 364fc9b65ec17f4f200cd4be0246f95410c3581c | import binder.configurable
import django.dispatch
from django.db import models
# http://djangosnippets.org/snippets/1054/
class DocumentType(models.Model):
    """Lookup table for the kinds of documents that can be filed."""

    class Meta:
        ordering = ('name',)

    # Unique so duplicate type names cannot be created.
    name = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        return self.name
class Document(models.Model):
    """A filed document: an uploaded file and/or an external hyperlink,
    with authorship, confidentiality and soft-delete bookkeeping."""

    class Meta:
        ordering = ('title',)
        permissions = (
            ('view_document', "Can view documents using read-only form"),
        )

    title = models.CharField(max_length=255, unique=True)
    document_type = models.ForeignKey(DocumentType)
    # Optional: clean() enforces that either file or hyperlink is present.
    file = models.FileField(upload_to='documents', blank=True)
    notes = models.TextField(verbose_name="Description")
    authors = models.ManyToManyField(binder.configurable.UserModel,
        related_name="documents_authored")
    external_authors = models.CharField(max_length=255, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    # Older Django accepted verify_exists; newer versions raise TypeError,
    # so fall back to the plain URLField.
    try:
        hyperlink = models.URLField(blank=True, verify_exists=False)
    except TypeError: # https://github.com/andrewebdev/django-adzone/issues/12
        hyperlink = models.URLField(blank=True)
    uploader = models.ForeignKey(binder.configurable.UserModel,
        related_name="documents_uploaded", null=True)
    confidential = models.BooleanField("CONFIDENTIAL DO NOT SHARE OUTSIDE ATA")
    deleted = models.BooleanField()
    # Hook other apps can subscribe to for extra validation in clean().
    on_validate = django.dispatch.Signal(providing_args=['instance'])

    def __unicode__(self):
        return "Document<%s>" % self.title

    def get_authors(self):
        # Comma-separated author names (used as an admin column, per
        # short_description below).
        return ', '.join([u.full_name for u in self.authors.all()])
    get_authors.short_description = 'Authors'

    def clean(self):
        """Require file-or-hyperlink and a title, then fire on_validate."""
        from django.core.exceptions import ValidationError
        models.Model.clean(self)
        if not self.file and not self.hyperlink:
            raise ValidationError('You must either attach a file ' +
                'or provide a hyperlink')
        if not self.title:
            # NOTE(review): the check is for a missing title, but the
            # message mentions attaching a file — confirm intended wording.
            raise ValidationError('You must either attach a file ' +
                'or provide a title')
        try:
            self.on_validate.send(sender=Document, instance=self)
        except ValidationError as e:
            # print "on_validate raised a ValidationError: %s" % e
            raise e

    @models.permalink
    def get_absolute_url(self):
        """
        The URL used in search results to link to the "document" found:
        we use this to point to the read-only view page.
        """
        return ('admin:documents_document_readonly', [str(self.id)])
|
983,151 | 85f3ea90b801065304d7243dbbb01828e5479886 | # Generated by Django 3.0.3 on 2020-04-16 10:58
from django.db import migrations, models
import goats.models
class Migration(migrations.Migration):
    """Auto-generated: add 'cold_protection' (callable random default)
    and an 'is_inside' flag to the Goat model."""

    dependencies = [
        ('goats', '0011_auto_20200403_1103'),
    ]

    operations = [
        migrations.AddField(
            model_name='goat',
            name='cold_protection',
            # Default is the callable goats.models.random_int, so each
            # row gets its own drawn value.
            field=models.IntegerField(default=goats.models.random_int),
        ),
        migrations.AddField(
            model_name='goat',
            name='is_inside',
            field=models.BooleanField(auto_created=True, default=False),
        ),
    ]
|
983,152 | 743a95e34155092712f0be52f302b98aa1d4f32d | from django.http import HttpResponse
from django.shortcuts import render
from django.conf import settings
from . import forms
from .src import twitter_word_count
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def get_tweets(request):
    """AJAX endpoint: validate the posted form, fetch the word counts for
    the requested Twitter user and render them; answer 400 otherwise."""
    if not (request.method == 'POST' and request.is_ajax()):
        failure = HttpResponse('Petición no válida')
        failure.status_code = 400
        return failure
    form = forms.get_tweets_form(request.POST)
    if not form.is_valid():
        failure = HttpResponse('El formulario no es valido')
        failure.status_code = 400
        return failure
    username = form.cleaned_data['username']
    counter = twitter_word_count.twitter_word_count(settings.API_TWITTER)
    words = counter.get_words_of_tweets(username)
    form.save()
    return render(request, 'tweets_list.html', {'words': words})
|
983,153 | b7a503e032d5593b20fab78b744098eef66d35a8 | # -*- coding: UTF-8 -*-
#接受5个参数
#如果单文件对比,则第一个是输入文件,第二个是输出文件,第三个是排除个数,第四个是基准字段,最后一个参数为signle
#如果是两文件对比,则第一个参数是旧文件,第二个是新文件,第三个是输出文件,第四个是排除个数,第五个是基准字段
import linecache;
import sys;
def huanbi(str1, str2, paichu1, jizhun1):
    """Ring-compare two '|'-delimited records.

    str1 / str2: previous and current record; each is expected to end
    with a trailing delimiter (the last character is chopped before
    splitting). paichu1 is the number of leading fields copied verbatim
    from str2; jizhun1 is the 1-based index of the key field, which must
    match between the two records (otherwise a message is printed and
    the process exits). Returns the copied fields followed by the
    per-field differences (current - previous, values rounded to 5
    places), each suffixed with '|', or "error" when the records do not
    have the same number of fields.
    """
    fields_old = str1[:-1].split('|')
    fields_new = str2[:-1].split('|')
    keep = int(paichu1)
    key = int(jizhun1) - 1
    if fields_old[key] != fields_new[key]:
        print("多行文件环比中,两个文件相同行的基准字段不同,请重新确认")
        exit(1)
    if len(fields_old) != len(fields_new):
        return "error"
    result = ''
    for value in fields_new[:keep]:
        result = result + value + '|'
    for idx in range(keep, len(fields_old)):
        delta = round(float(fields_new[idx]), 5) - round(float(fields_old[idx]), 5)
        result = result + str(delta) + '|'
    return result
if __name__ == '__main__':
    # CLI entry point (see header comments above the file). Exactly 5
    # arguments are required: single-file mode compares each line with
    # the next one; two-file mode compares line N of the old file with
    # line N of the new file. Results are appended to the output file.
    argcount=len(sys.argv[1:]);
    if argcount == 5:
        # NOTE(review): "signle" looks like a typo for "single", but it
        # is the established CLI contract — confirm before renaming.
        if sys.argv[5] == "signle":
            inputfile=sys.argv[1];
            outputfile=sys.argv[2];
            paichu=int(sys.argv[3]);
            jizhun=int(sys.argv[4]);
            linecount = len(open(inputfile,'rU').readlines());
            hang=int(0);
            # Walk consecutive line pairs (hang = row index).
            while hang<linecount-1:
                lasthang=int(hang);
                newhang=int(hang)+1;
                str1=str(linecache.getlines(inputfile)[lasthang]);
                str1=str1.strip('\n');
                str2=str(linecache.getlines(inputfile)[newhang]);
                str2=str2.strip('\n');
                outfile=open(outputfile,"a");
                newstr=huanbi(str1,str2,paichu,jizhun);
                outfile.write(newstr+"\n");
                outfile.close();
                hang=hang+1;
            exit(0);
        else:
            lastfile=sys.argv[1];
            newfile=sys.argv[2];
            outputfile=sys.argv[3];
            paichu=int(sys.argv[4]);
            jizhun=int(sys.argv[5]);
            count1 = len(open(lastfile,'rU').readlines());
            count2 = len(open(newfile,'rU').readlines());
            # Both files must have the same number of rows.
            if count1 != count2:
                print("多行文件环比操作的两个文件,行数不同");
                exit(1);
            hang=int(0);
            while hang<count2:
                str1=str(linecache.getlines(lastfile)[hang]);
                str1=str1.strip('\n');
                str2=str(linecache.getlines(newfile)[hang]);
                str2=str2.strip('\n');
                outfile=open(outputfile,"a");
                newstr=huanbi(str1,str2,paichu,jizhun);
                outfile.write(newstr+"\n");
                outfile.close();
                hang=hang+1;
            exit(0);
    else:
        print("python程序环比计算huanbi,收到的参数个数不为5个");
        exit(1);
|
983,154 | f7026a428620c144804420db179db25b12e356ff | from gpiozero import MotionSensor
from picamera import PiCamera
import time
from datetime import datetime
from fractions import Fraction
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import os
import config
# PIR-triggered security camera: on motion, capture a still, hand it to
# the external face-recognition script, and e-mail the recognized names.
print("BEGIN")
camera = PiCamera()
camera.resolution = (1920, 1080)
# camera.rotation = 180
print("Starting camera...")
out_path = "motion_captures/"
num = 0
# PIR motion sensor wired to GPIO pin 4.
pir = MotionSensor(4)
print("Waiting 30s while motion detector learns environment")
time.sleep(30)
while True:
    print("Starting motion detection...")
    pir.wait_for_motion()
    print("Motion detected!" + str(num))
    num += 1
    time.sleep(2)
    # Build a filesystem-safe timestamped filename from datetime.now().
    img_name = str(datetime.now())
    img_name = img_name.replace(" ", "-")
    img_name = img_name.replace(":", "-")
    img_name = img_name.replace(".", "-")
    img_name += ".jpg"
    camera.capture(out_path + img_name)
    print("Captured: {}".format(img_name))
    print("Sending image to recognize_faces_image.py...")
    # Run the recognizer as a subprocess and read its stdout (the names).
    CLICommand = 'python /home/pi/Development/edgar/face-recognition-opencv/recognize_faces_image2.py --encodings /home/pi/Development/edgar/face-recognition-opencv/encodings.pickle --image "/home/pi/Development/edgar/motion_captures/{imgName}" --detection-method hog'.format(
        imgName=img_name)
    f = os.popen(CLICommand)
    names = f.read()
    # Sending Email Notification (credentials come from config.py)
    sender_email = config.sender_email
    receiver_email = config.receiver_email
    password = config.password
    message = MIMEMultipart("alternative")
    message["Subject"] = "Person Detected"
    message["From"] = sender_email
    message["To"] = receiver_email
    # Create the plain-text and HTML version of your message
    text = names
    # Turn these into plain/html MIMEText objects
    part1 = MIMEText(text, "plain")
    # Add HTML/plain-text parts to MIMEMultipart message
    # The email client will try to render the last part first
    message.attach(part1)
    # Create secure connection with server and send email
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message.as_string())
    print("names returned: ")
    print(names)
    time.sleep(10)
# Unreachable: the while True loop above never exits.
print("END")
|
983,155 | b8a07be35121873f0413fdb2692c4160afc49761 | '''Filter to clean and normalize csv headers.'''
import csv
import sys
# Canonical names for known header variants (after normalization).
HEADER_MAP = {
    'regionname': 'region_name',
    'leacode': 'lea_code',
    'schoolcode': 'school_code',
    'charterschool': 'charter_school',
    'schooltype': 'school_type',
    'gradespan': 'grade_span',
    'totalresident': 'total_resident_children',
    'resident_children': 'total_resident_children',
    'schoolserved': 'school_served',
    'eligibility_programmodel': 'eligibility_model',
    'eligibility_program_model': 'eligibility_model',
    'eligibilityjustification': 'eligibility_justification',
    'programjustification': 'program_justification',
    'served1st_year': 'served_1st_year',
    'served1st_yearcomment': 'served_1st_year_comment',
    'year': 'school_year',
    'schoolyear': 'school_year',
    'number_studentsservedtas': 'served_tas_count',
    'number_students_servedtas': 'served_tas_count',
    'gradesservedtas': 'served_tas_grade',
    'grades_served_tas': 'served_tas_grade',
    'low_income_students': 'number_low_income_students',
    'number_lowincome': 'number_low_income_students',
    'percent': 'percent_low_income_students',
    'percent_lowincome': 'percent_low_income_students',
}

def clean(row):
    """Normalize raw csv header cells to canonical snake_case names."""
    normalized = []
    for cell in row:
        # Flatten newlines/dashes, spell out symbols, then snake_case.
        cell = cell.replace('\n', ' ').replace('-', ' ')
        cell = cell.replace('%', ' percent ').replace('#', ' number ')
        cell = '_'.join(cell.split()).lower()
        normalized.append(HEADER_MAP.get(cell, cell))
    return normalized
if __name__ == '__main__':
    # Filter mode: read csv from stdin, write csv with a cleaned header
    # row to stdout; data rows pass through untouched.
    reader = csv.reader(sys.stdin)
    writer = csv.writer(sys.stdout)
    header = next(reader)
    writer.writerow(clean(header))
    for row in reader:
        writer.writerow(row)
|
983,156 | a33019ed39879c76bf00969434c4e28f63e8c75d | import csv, vobject
filename = "contacts.CSV"
delimiter=","
# array get - return empty string if None
def ag(a, i):
if a[i] == None:
return ""
else:
return a[i]
def create_vcard(c):
    # Slice one Outlook-CSV contact row `c` into grouped field lists,
    # walking the row with a running field index `fi`; each section below
    # consumes a fixed number of columns. Despite the name, no vCard is
    # produced yet — the collected data is only printed.
    #
    # NOTE(review): from the "Other fax" section onward the original
    # running-total comments did not match the arithmetic (e.g. "fi += 2"
    # was annotated "fi = 44" when it actually yields 43, and the final
    # skip of 12 lands on 89, not 80). The numeric comments below were
    # corrected to the real values; whether the *offsets themselves*
    # match the Outlook export layout still needs verification.
    print c
    # field index
    fi = 0
    # Name ( Title, First, Middle, Last, Suffix)
    name = [None] * 5
    for i in range(0, 5):
        if c[fi + i] is not None and len(c[fi + i]) > 0:
            name[i] = c[i + fi]
    # Organisaion/business ( company, department, job title)
    # NOTE(review): 4 slots are allocated but only 3 are ever filled.
    fi += 5 # fi = 5
    org = [None] * 4
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            org[i] = c[i + fi]
    # Work address (street, street2, street3, city, state, postcode, country)
    fi += 3 # fi = 8
    work_addr = [None] * 7
    for i in range(0, 7):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            work_addr[i] = c[i + fi]
    # Home address (street, street2, street3, city, state, postcode, country)
    fi += 7 # fi = 15
    home_addr = [None] * 7
    for i in range(0, 7):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            home_addr[i] = c[i + fi]
    # Other address
    fi += 7 # fi = 22
    other_addr = [None] * 7
    for i in range(0, 7):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            other_addr[i] = c[i + fi]
    fi += 7 # fi = 29
    # Assistant's phone (ignore)
    fi += 1 # fi = 30
    # Business fax, phone, phone 2
    work_ph = [None] * 3
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            work_ph[i] = c[i + fi]
    fi += 3 # fi = 33
    # Callback, car phone, company main phone (ignore)
    fi += 3 # fi = 36
    # Home fax, phone, phone 2
    home_ph = [None] * 3
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            home_ph[i] = c[i + fi]
    fi += 3 # fi = 39
    # ISDN? skip
    fi +=1 # fi = 40
    # Mobile phone (one index)
    # NOTE(review): mobile starts as [None] but is reassigned to a plain
    # string when present — downstream consumers see two shapes.
    mobile = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        mobile = c[fi]
    fi += 1 # fi = 41
    #Other fax*2, pager (skip)
    # NOTE(review): the section names 3 skipped columns but only
    # advances by 2 — likely the source of the off-by-one drift below.
    fi += 2 # fi = 43
    # Primary phone
    primary_ph = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        primary_ph = c[fi]
    fi += 1 # fi = 44
    # radio phone, TTY/TDD, Telex, Account, Anniversary, Assistant's name, billing info (skip)
    fi += 7 # fi = 51
    # Birthday
    birthday = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        birthday = c[fi]
    fi += 1 # fi = 52
    #Business PO box, categories, children, directory server (skip)
    fi += 4 # fi = 56
    #Email address, type, display name
    email1 = [None] * 3
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            email1[i] = c[i + fi]
    fi += 3 # fi = 59
    #Email 2
    email2 = [None] * 3
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            email2[i] = c[i + fi]
    fi += 3 # fi = 62
    # Email 3
    email3 = [None] * 3
    for i in range(0, 3):
        if c[i + fi] is not None and len(c[i + fi]) > 0:
            email3[i] = c[i + fi]
    fi += 3 # fi = 65
    # Gender
    gender = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        gender = c[fi]
    fi += 1 # fi = 66
    # gov ID number, hobby (skip)
    fi += 2 # fi = 68
    # Home PO box
    home_pobox = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        home_pobox = c[fi]
    fi += 1 # fi = 69
    # Initials
    initials = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        initials = c[fi]
    fi += 1 # fi = 70
    # Internet free busy, keywords, language, location, managers name, mileage (skip)
    fi += 6 # fi = 76
    # Notes
    notes = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        notes = c[fi]
    fi += 1 # fi = 77
    # Office loc, org ID number, other pO box, priority, private, referred by, sensetivity, spouse, user 1, user 2, user 3, user 4 (skip)
    fi += 12 # fi = 89 (original comment said 80 — verify against the CSV layout)
    # Web page
    webpage = [None]
    if c[fi] is not None and len(c[fi]) > 0:
        webpage = c[fi]
    all_info = [name, org, work_addr, home_addr,
        other_addr, work_ph, home_ph, mobile, primary_ph, birthday,
        email1, email2, email3, gender, home_pobox, initials, notes, webpage]
    print(all_info)
    #print(name)
def convert_to_vcard(csv_filename):
    # Read csv_filename and feed each contact row to create_vcard().
    # The first row is treated as the header: its de-quoted column names
    # are accumulated in `rows` (which therefore holds header names, not
    # rows). Every subsequent row is unpacked into a flat list of
    # de-quoted values and handed to create_vcard().
    rows = []
    with open(csv_filename, 'rb') as csvfile:
        reader = csv.reader(csvfile, delimiter=delimiter, quotechar='"', quoting=csv.QUOTE_ALL, skipinitialspace=True)
        row_index = 0
        # Each row
        for row in reader:
            # Get the column names
            column_index = 0
            if row_index == 0:
                name_index = 0
                for name in row:
                    print str(name_index) + " " + name
                    rows.append(name.replace("\"", ""))
                    name_index += 1
            # Start reading contacts per row
            else:
                #print
                #print "NEW CONTACT: ---------"
                # see https://en.wikipedia.org/wiki/VCard and https://github.com/eventable/vobject
                contact_details = [None] * len(rows)
                contact_col_i = 0
                # For each contact's detail column values
                for column in row:
                    # NOTE(review): a fresh vCard is created per COLUMN and
                    # never used — looks like dead code; confirm intent.
                    j = vobject.vCard()
                    col_no_qt = column.replace("\"", "")
                    #print ( rows[contact_col_i] + ": " + col_no_qt)
                    # Add to contacts array
                    contact_details[contact_col_i] = col_no_qt
                    contact_col_i += 1
                #print "END NEW CONTACT"
                create_vcard(contact_details)
            row_index += 1
    pass
if __name__ == "__main__":
    # Entry point: convert the hard-coded CSV export (see `filename`).
    convert_to_vcard(filename)
983,157 | 5a3ba5ce1f4c1e52bd19d4adce7e825ca01b73e1 | # SETTINGS PAGE
'''
Tunable options exposed on the settings page:

Number of stars - int (0, 1000000)
Background Color - int[index of array of options] (black, white, blue, yellow, )
Star Color - int[index of array of options] (black, white, blue, yellow, )
Rotate Text - int (-180, 180)
Curve Text - int (0, 45)
Size Text - int
Connect Constellation Stars - bool
Image Size
'''
# Allowed range for the star-count control.
Star_Limit = (0, 1000000) #Unknown Limits
# Named RGB backgrounds the user can pick from.
Background_Colors = {
    "Night Sky": (7, 11, 15),
    "Black": (0, 0, 0),
    "White": (255, 255, 255),
    "Red": (255, 0, 0),
    "Green": (0, 255, 0),
    "Blue": (0, 0, 255)
}
# Star colours, ordered hot-to-cool in appearance (blue ... red).
Star_Colors = {
    "Blue": (154, 175, 255),
    "Blue-White": (202, 215, 255),
    "White": (248, 247, 255),
    "Yellow-White": (255, 244, 234),
    "Yellow": (255, 242, 161),
    "Orange": (255, 196, 111),
    "Red": (255, 96, 96)
}
# Degrees of text rotation / curvature, and font-size bounds.
Rotation_Limit = (-180, 180) #Unknown Limits
Curve_Limit = (-45, 45) #Unknown Limits
Text_Limit = (8, 40) #Unknown Limits
# Whether to draw lines between constellation stars by default.
Show_Constellation_Lines = False
# Output image dimensions (pixels) and options panel width.
Starfield_Image_Size = (800, 800)
Options_Width = 200
983,158 | 122efd3f2e6c04d1fdb7921b5749a621652fa147 |
def inflect(verb, person, number):
    """Conjugate an Italian -are/-ere/-ire infinitive and prefix the
    matching pronoun.

    Looks up the pronoun, the ending-specific part and the common part
    in the module-level tables ``pronomi``, ``verbi_spec`` and
    ``verbi_com`` (indexed by person/number).

    Raises:
        ValueError: if the verb does not end in are/ere/ire. (The
        original code left ``spec`` unbound in that case and crashed
        with an unrelated NameError.)
    """
    ending_index = {"are": 0, "ere": 1, "ire": 2}
    ending = verb[-3:]
    if ending not in ending_index:
        raise ValueError("unsupported infinitive ending: %r" % verb)
    prn = pronomi[person][number]
    base = verb[:-3]
    spec = verbi_spec[person][number][ending_index[ending]]
    com = verbi_com[person][number]
    return (prn + " " + base + spec + com)
|
983,159 | 29ab6fa982897107759b447dd37daef6adb14ecc | #!/usr/bin/python
"""Usage:
ruleparser.py -i <file_to_parse> -s <string_to_parse>... [-c] [--dir][--vim][--scan <target>]
ruleparser.py -i <file_to_parse> [-o <file_output>] -s <string_to_parse>... [-c] [--dir][--vim]
ruleparser.py -i <file_to_parse> -s <string_to_parse>... [-c][--dir][--vim]
ruleparser.py -i <file_to_parse> --dir
ruleparser.py --scan <nmap_option>...
ruleparser.py -h
Options:
-i <file_to_parse> specify file containing rules to be parsed
-s <string_to_parse> string(s) to match inside the filename
-c display number of matching rules based on input string
-o <file_output> specify output file
--dir display directory location of <file_to_parse>
-h
"""
#The same structure will be used for the main script to provide command-line interface
#Options to be added:
# --scan -> enable scan using Nmap and Xprobe
# --target -> remote IP address/range to be scanned
import os,re,subprocess
from docopt import docopt
if __name__ == '__main__':
    # Parse the command line (usage is in the module docstring) and run
    # the requested filtering actions over the Snort-style rule file.
    arguments = docopt(__doc__, version='0.0.1')
    print(arguments)
    result_dict = arguments  # single parse; the original called docopt twice
    try:
        rules_list = []
        string_array = result_dict['<string_to_parse>']
        # One alternation pattern built from all search strings.
        regex_pattern = '|'.join(string_array)
        string_pattern = re.compile('.*(%s).*' % regex_pattern)
        # Bug fix: docopt stores the VALUE of "-i <file_to_parse>" (a
        # string) under '-i', so the original "== True" comparison was
        # always False and this whole branch was dead. Same for '-o'
        # and '--scan' below; plain truthiness handles flags too.
        if result_dict['-i']:
            file_input = result_dict['<file_to_parse>']
            file_input_read = open(file_input, "r")
            if (result_dict['-s'] is not None) or (result_dict['<string_to_parse>'] != []):
                # Keep only 'alert' rules matching any search string.
                for line in file_input_read:
                    if re.search(string_pattern, line) and line.startswith("alert", 0, 5):
                        rules_list.append(line.strip('\n'))
                print(rules_list)
                if result_dict['-c']:
                    print("Number of matching rule(s): " + str(len(rules_list)) + " ")
                if result_dict['--dir']:
                    print(os.path.abspath(file_input))
                if result_dict['-o']:
                    output = result_dict['<file_output>']
                    new_file = open(output, "w+")
                    for rules in rules_list:
                        new_file.write(rules + "\n")
                    print(new_file)
                    new_file.close()
                if result_dict['--vim']:
                    subprocess.Popen("vim " + result_dict['<file_to_parse>'] + "", shell=True)
                # TODO: parse nmap options from the command line and add
                # the leading "-" (e.g. ss A -> -sS -A); currently the
                # scan target/options are hard-coded.
                if result_dict['--scan']:
                    subprocess.Popen("nmap -sS -A -p1-65535 -oN nmap_scan 127.0.0.1 > /dev/null", shell=True)
            file_input_read.close()
    except IOError:
        print("Invalid filename")
|
983,160 | 4f3ab9a8bdefdbd0f051d6de0376e2ca29e2f16c | from django.contrib.auth.models import User
from django.db import connection
from django.db import models
from django.utils.html import format_html
from .utils import Decimal, quantize
class InfoModel(models.Model):
    """Abstract base with the contact fields shared by Customer and Shop."""
    address = models.CharField('联系地址', max_length=100, blank=True, null=True)  # contact address (optional)
    phone_number = models.CharField('联系电话', max_length=20)  # contact phone
    add_date = models.DateField('添加日期', auto_now_add=True)  # set once on creation

    class Meta:
        abstract = True  # never gets its own table
class Customer(InfoModel):
    """A customer who purchases goods."""
    user_name = models.CharField('客户姓名', max_length=15)  # customer name

    def __str__(self):
        # "name: phone" keeps admin dropdowns unambiguous.
        return self.user_name + ': ' + self.phone_number

    class Meta:
        verbose_name = '客户'
        verbose_name_plural = '客户'
class ModelServiceMixin(object):
    """Mixin with small introspection helpers for model classes."""

    def _get_my_fields(self):
        """Return the names of every concrete field declared on this model."""
        field_names = []
        for field in self._meta.fields:
            field_names.append(field.name)
        return field_names
class Category(models.Model):
    """Goods category; the self-referencing FK allows a category tree."""
    remark = models.TextField('描述信息', blank=True, null=True)
    name = models.CharField('类别名称', max_length=20)
    add_date = models.DateField('添加日期', auto_now_add=True)
    # Optional parent category; top-level categories leave this null.
    super_category = models.ForeignKey("Category", verbose_name='所属分类', null=True, blank=True,
                                       related_name='parent_category')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = '类别'
        verbose_name_plural = '类别'
        ordering = ['add_date']
def set_user_name_verbose_name(class_name, parents, attributes):
    """Metaclass-style class factory that relabels the ``user_name`` field.

    If *attributes* contains a ``user_name`` field, its verbose name is
    rewritten to the supplier label before the class is built with ``type``.
    """
    field = attributes.get('user_name')
    if field:
        field.verbose_name = '供货商姓名'
    return type(class_name, parents, attributes)
class Shop(InfoModel):
    """A supplier the store buys goods from."""
    user_name = models.CharField('供货商姓名', max_length=15)  # supplier contact name
    shop_name = models.CharField('供货商名称', max_length=20, unique=True)  # unique supplier name
    shop_address = models.CharField('供货商地址', max_length=100)

    def __str__(self):
        return self.shop_name

    class Meta:
        verbose_name = '供货商'
        verbose_name_plural = '供货商'
        ordering = ['shop_name']
from datetime import datetime
class GoodsManager(models.Manager):
    """Custom manager computing stock-value statistics with raw SQL."""

    def remain_statistic(self):
        """Return remaining-stock value per year for the last five years.

        Years with no matching ``store_goods`` rows are skipped.

        Returns:
            dict: ``{'value': [{'name': ..., 'data': [...]}], 'x': years}``
            where ``data[i]`` is ``sum(remain * average_price)`` rounded to
            two decimals for ``years[i]``.
        """
        current_year = datetime.now().year
        # Parameterized LIKE patterns instead of str.format interpolation,
        # consistent with SellRecordManager below.
        check_count_sql = ("select count(id) from store_goods as t "
                           "where t.update_date like %s;")
        statistic_sql = ("select sum(t.remain * t.average_price) "
                         "from store_goods as t where t.update_date like %s;")
        with connection.cursor() as cursor:
            ret = []
            years = []
            for year in range(current_year - 4, current_year + 1):
                pattern = '{}%'.format(year)
                cursor.execute(check_count_sql, [pattern])
                if cursor.fetchall()[0][0] == 0:
                    continue  # no goods updated that year
                cursor.execute(statistic_sql, [pattern])
                rows = cursor.fetchall()
                ret.append(round(rows[0][0], 2))
                years.append(year)
        return {'value': [{'name': '统计到小数点后两位', 'data': ret}], 'x': years}
class Goods(ModelServiceMixin, models.Model):
    """A stock item with purchase/sale prices and remaining quantity."""
    goods_name = models.CharField('商品名称', max_length=15, unique=True)
    average_price = models.DecimalField('进价', default=0, max_digits=10, decimal_places=2)  # purchase price
    last_price = models.DecimalField('售价', default=0, max_digits=10, decimal_places=2)  # selling price
    unit_name = models.CharField('单位', max_length=10)  # unit of measure
    updater = models.ForeignKey(User, editable=False, verbose_name='添加人')
    update_date = models.DateField('更新日期', auto_now_add=True)
    recent_sell = models.DateField('最近售出日期', blank=True, null=True)
    is_delete = models.BooleanField('下架', default=False)  # off-shelf / soft-delete flag
    category = models.ManyToManyField(Category, verbose_name='所属类别')
    shop = models.ForeignKey(Shop, verbose_name='供应商名称', blank=True, null=True)
    remain = models.DecimalField('数目', max_digits=6, decimal_places=2, default=0)  # quantity in stock
    last_time = models.DateField('有效期', blank=True, null=True)  # expiry date
    # Statistics queries go through the custom manager; regular CRUD
    # through the default `objects` manager.
    statistic_objects = GoodsManager()
    objects = models.Manager()

    def __str__(self):
        return self.goods_name

    class Meta:
        verbose_name = '商品'
        verbose_name_plural = '商品'
        ordering = ['last_time', 'goods_name', 'update_date']

    @property
    def count(self):
        # NOTE(review): `num` is not a declared field; presumably it is
        # attached by a queryset annotation or set by the caller before this
        # property is read — confirm, otherwise this raises AttributeError.
        if self.num:
            return quantize(self.num * self.last_price)
        return Decimal(0)

    def sell_amount(self):
        # Remaining stock valued at the selling price.
        return quantize(self.remain * self.last_price)

    def in_amount(self):
        # Remaining stock valued at the purchase price.
        return quantize(self.remain * self.average_price)

    def own_amount(self):
        # Potential profit on the remaining stock.
        return self.sell_amount() - self.in_amount()

    sell_amount.short_description = '销售总价'
    in_amount.short_description = '进货总价'
    own_amount.short_description = '利润'
class GoodsAddRecord(models.Model):
    """Audit record of a stock increase."""
    goods = models.ForeignKey(Goods, verbose_name='商品名称', related_name='record_goods')
    shop = models.ForeignKey(Shop, verbose_name='供应商', related_name='record_shop')
    number = models.DecimalField('数目', max_digits=6, decimal_places=2)  # quantity added
    remark = models.TextField('说明信息', blank=True, null=True)
    updater = models.ForeignKey(User, verbose_name='操作员')
    date = models.DateTimeField('日期', auto_now_add=True)
    # Optional new purchase price recorded with this delivery.
    new_price = models.DecimalField('新进价', blank=True, null=True, max_digits=10, decimal_places=2)

    class Meta:
        verbose_name = '增加库存记录'
        verbose_name_plural = '增加库存记录'
        ordering = ['goods', 'number']

    def __str__(self):
        return "%s--%s" % (self.shop, self.goods)
class ReturnRecord(models.Model):
    """Goods-return record."""
    # Why the goods came back: operator mistake vs. a real return.
    TYPE_IN_CHOICES = (
        (0, '操作失误'),
        (1, '退货'),
    )
    customer = models.ForeignKey(Customer, verbose_name='退货用户', blank=False, null=True, related_name='return_customer')
    goods = models.ForeignKey(Goods, verbose_name='商品名称', related_name='return_goods')
    shop = models.ForeignKey(Shop, verbose_name='供货商名称', related_name='return_shop')
    amount = models.DecimalField('数目', max_digits=6, decimal_places=2)  # quantity returned
    type = models.IntegerField('退送原因', choices=TYPE_IN_CHOICES)
    updater = models.ForeignKey(User, verbose_name='操作员')
    date = models.DateTimeField('日期', auto_now_add=True)
    remark = models.TextField('说明信息', blank=True, null=True)
    # Optional corrected price applied when the return is processed.
    reset_price = models.DecimalField('重置价格', blank=True, null=True, max_digits=10, decimal_places=2)

    class Meta:
        verbose_name = '退货处理'
        verbose_name_plural = '退货处理'
        ordering = ['goods', 'amount']

    def __str__(self):
        return "%s--%s--%s" % (self.shop, self.goods, self.amount)
class TransferGoods(models.Model):
    """Direct supplier-to-seller transaction that never enters stock."""
    from_shop = models.ForeignKey(Shop, related_name='from_shop', verbose_name='供应商')
    to_shop = models.ForeignKey(Shop, related_name='to_name', verbose_name='销售商')
    goods = models.ForeignKey(Goods, verbose_name='商品名称')
    change_num = models.DecimalField('数目', max_digits=6, decimal_places=2)  # transferred quantity
    from_price = models.DecimalField('进价', default=0, max_digits=10, decimal_places=2)  # buy price
    to_price = models.DecimalField('售价', default=0, max_digits=10, decimal_places=2)  # sell price
    updater = models.ForeignKey(User, verbose_name='操作人员')
    date = models.DateTimeField('日期', auto_now_add=True)
    remark = models.TextField('说明信息', blank=True, null=True)

    class Meta:
        verbose_name = '直接交易记录'
        verbose_name_plural = '直接交易记录'
        ordering = ['goods', 'change_num']

    def __str__(self):
        return "%s--%s--%s--%s" % (self.from_shop, self.to_shop, self.goods, self.change_num)
class Report(models.Model):
    """Invoice/receipt template settings printed with orders."""
    title = models.CharField('标题', max_length=100, default='志平电子配件销售清单')
    alias = models.CharField('模板名字', max_length=20, default='默认模板')  # template display name
    ad = models.TextField('广告语')  # advertising slogan on the printout
    phone = models.CharField('电话号码', max_length=50, default='7566409 13755519477')
    address = models.CharField('地址', max_length=50, default='芦溪县凌云南路太阳城B栋良友旁')
    remark = models.TextField('附加信息', blank=True, null=True,
                              default='银行卡号: 6222.0215 0400 3618 261\n中国银行: 6216 6165 0600 0292 464')
    date = models.DateTimeField('日期', auto_now_add=True)
    tag = models.BooleanField('默认模板', default=True)  # whether this is the default template

    def __str__(self):
        return self.alias

    class Meta:
        verbose_name = '设置清单'
        verbose_name_plural = '设置清单'
        ordering = ['-date']
class Order(models.Model):
    """A sales order: aggregate price/profit for one customer purchase."""
    customer = models.ForeignKey(Customer, verbose_name='客户名称')
    all_price = models.DecimalField('总价', default=0, max_digits=10, decimal_places=2)  # order total
    all_profit = models.DecimalField('总利润', default=0, max_digits=10, decimal_places=2)  # order profit
    is_delete = models.BooleanField('是否取消订单', default=False)  # cancelled flag
    updater = models.ForeignKey(User, verbose_name='操作人员')
    date = models.DateTimeField('日期', auto_now_add=True)
    report = models.ForeignKey(Report, verbose_name='清单模板')

    def __str__(self):
        # BUG FIX: Order has no `name` field, so the previous
        # `return self.name` raised AttributeError whenever the order was
        # rendered (admin lists, logs).  Identify it by customer instead.
        return str(self.customer)

    class Meta:
        verbose_name = '订单记录'
        verbose_name_plural = '订单记录'
        ordering = ['-date']
class ArrearsPrice(models.Model):
    """Outstanding-debt record for a customer."""
    arrears_price = models.DecimalField('欠款额', max_digits=10, decimal_places=2)  # amount owed
    customer = models.ForeignKey(Customer, verbose_name='客户姓名')
    is_arrears = models.BooleanField('清除欠款', default=False)  # 是否欠款 — True once the debt is cleared
    date = models.DateTimeField('日期', auto_now_add=True)

    def __str__(self):
        return str(self.arrears_price)

    class Meta:
        verbose_name = '欠款记录'
        verbose_name_plural = '欠款记录'
        ordering = ['-date']
class SellRecordManager(models.Manager):
    """Raw-SQL aggregation helpers over the store_goodssellrecord table."""

    def month_statistic(self, year):
        """Per-month sale count / revenue / cost for *year* (a string).

        Returns a list of dicts with keys count/month/sells/averages.
        """
        with connection.cursor() as cursor:
            sql_str = "select count(t.sell_num) as count, substr(t.date,6,2) as month, " \
                      "sum(t.sell_price * t.sell_num) as sell_total, sum(t.average_price * t.sell_num ) as average_total " \
                      "from store_goodssellrecord as t where t.date like %s group by substr(t.date,1,7);"
            # Parameterized LIKE 'YYYY%' restricts rows to the requested year.
            cursor.execute(sql_str, [year + '%'])
            result_list = []
            for row in cursor.fetchall():
                p = {'count': row[0], 'month': row[1], 'sells': row[2], 'averages': row[3]}
                result_list.append(p)
            return result_list

    def year_statistic(self):
        """Sale count / revenue / cost aggregated per year over all history."""
        with connection.cursor() as cursor:
            sql_str = "select count(t.sell_num) as count, substr(t.date,1,4) as year, " \
                      "sum(t.sell_price * t.sell_num) as sell_total, sum(t.average_price * t.sell_num ) as average_total " \
                      "from store_goodssellrecord as t group by substr(t.date,1,4);"
            cursor.execute(sql_str)
            result_list = []
            for row in cursor.fetchall():
                p = {'count': row[0], 'year': row[1], 'sells': row[2], 'averages': row[3]}
                result_list.append(p)
            return result_list

    def day_statistic(self, year):
        """Per-day sale count and profit for *year* (a string)."""
        with connection.cursor() as cursor:
            sql_str = "select count(t.sell_num) as count, substr(t.date,1,10) as date, " \
                      "sum((t.sell_price - t.average_price)* t.sell_num) as profit_total " \
                      "from store_goodssellrecord as t where t.date like %s group by substr(t.date,1,10);"
            cursor.execute(sql_str, [year + '%'])
            result_list = []
            for row in cursor.fetchall():
                p = {'count': row[0], 'date': row[1], 'profits': row[2]}
                result_list.append(p)
            return result_list
class RecordHistory(models.Model):
    """One saved historical order, grouping its GoodsSellRecord line items."""
    date = models.DateTimeField('日期', null=False, blank=False)
    customer = models.ForeignKey(Customer, verbose_name='客户姓名', related_name='record_customer', null=False, blank=False)
    report = models.ForeignKey(Report, related_name='record_report', null=True, blank=False)
    arrears = models.ForeignKey(ArrearsPrice, related_name='record_arrears', null=True, blank=True)

    def view_record(self):
        # Admin list column: link to this order's detail page.
        return format_html('<a href="/store/view_record/{}"><i class="icon-eye-open"></i></a>'.format(self.id))

    view_record.short_description = '查看订单'

    def __str__(self):
        return self.customer.user_name

    class Meta:
        verbose_name = '历史订单查看'
        verbose_name_plural = '历史订单查看'
        ordering = ['-date']
class GoodsSellRecord(models.Model):
    """Record of goods sold; one row per order line item."""
    goods = models.ForeignKey(Goods, verbose_name='商品名称', related_name='goods')
    sell_num = models.DecimalField('数目', max_digits=6, decimal_places=2)  # quantity sold
    average_price = models.DecimalField('进价', null=True, blank=True, max_digits=10, decimal_places=2)  # cost at sale time
    sell_price = models.DecimalField('售价', null=True, blank=True, max_digits=10, decimal_places=2)  # price at sale time
    customer = models.ForeignKey(Customer, verbose_name='客户姓名', related_name='customer', null=True, blank=True)
    remark = models.TextField('描述信息', blank=True, null=True)
    updater = models.ForeignKey(User, verbose_name='操作人员', related_name='admin')
    date = models.DateTimeField('日期', auto_now_add=True)
    arrears = models.ForeignKey(ArrearsPrice, verbose_name='欠款额', related_name='arrears', null=True, blank=True)
    record = models.ForeignKey(RecordHistory, related_name='report_many_record', null=True, blank=False)
    # def account_actions(self, obj):
    #     return format_html(
    #         '<a class="button" href="{}">Deposit</a> '
    #         '<a class="button" href="{}">Withdraw</a>',
    #         reverse('admin:account-deposit', args=[obj.pk]),
    #         reverse('admin:account-withdraw', args=[obj.pk]),
    #     )
    #
    # account_actions.short_description = 'Account Actions'
    # account_actions.allow_tags = True
    # Statistics queries through the custom manager; CRUD through `objects`.
    statistic_objects = SellRecordManager()
    objects = models.Manager()

    @property
    def profit(self):
        """Profit for this line: quantity * (sale price - cost).

        NOTE(review): assumes sell_price and average_price are set — both
        fields are nullable; confirm callers guarantee this.
        """
        profit = self.sell_num * (self.sell_price - self.average_price)
        return profit

    @property
    def receivable(self):
        """Gross amount for this line: quantity * sale price."""
        receivable = self.sell_num * self.sell_price
        return receivable

    class Meta:
        verbose_name = '销售记录'
        verbose_name_plural = '销售记录'
        ordering = ['-date', 'goods', 'sell_num']

    def __str__(self):
        return self.goods.goods_name
|
983,161 | 4009890194f386d800bbfa869bdbfeeef2d58e50 | """Utility functions for continuous animation of mobjects."""
from __future__ import annotations
__all__ = [
"assert_is_mobject_method",
"always",
"f_always",
"always_redraw",
"always_shift",
"always_rotate",
"turn_animation_into_updater",
"cycle_animation",
]
import inspect
from collections.abc import Callable
import numpy as np
from manim.constants import DEGREES, RIGHT
from manim.mobject.mobject import Mobject
from manim.opengl import OpenGLMobject
def assert_is_mobject_method(method):
    """Assert that *method* is a bound method of a Mobject/OpenGLMobject."""
    assert inspect.ismethod(method)
    mobject = method.__self__
    assert isinstance(mobject, (Mobject, OpenGLMobject))
def always(method, *args, **kwargs):
    """Call ``method(*args, **kwargs)`` on every frame.

    *method* must be a bound mobject method; the call is installed as an
    updater on that mobject, which is returned.
    """
    assert_is_mobject_method(method)
    mobject = method.__self__
    func = method.__func__
    # Re-bind through the unbound function so the updater's mobject argument
    # is used as `self`.
    mobject.add_updater(lambda m: func(m, *args, **kwargs))
    return mobject
def f_always(method, *arg_generators, **kwargs):
    """
    More functional version of always, where instead
    of taking in args, it takes in functions which output
    the relevant arguments.
    """
    assert_is_mobject_method(method)
    mobject = method.__self__
    func = method.__func__

    def updater(mob):
        # Re-evaluate every generator each frame to get fresh arguments.
        args = [arg_generator() for arg_generator in arg_generators]
        func(mob, *args, **kwargs)

    mobject.add_updater(updater)
    return mobject
def always_redraw(func: Callable[[], Mobject]) -> Mobject:
    """Redraw the mobject constructed by a function every frame.

    This function returns a mobject with an attached updater that
    continuously regenerates the mobject according to the
    specified function.

    Parameters
    ----------
    func
        A function without (required) input arguments that returns
        a mobject.

    Returns
    -------
    Mobject
        The initially constructed mobject, which morphs into ``func()``'s
        fresh output on every frame.

    Examples
    --------
    .. manim:: TangentAnimation

        class TangentAnimation(Scene):
            def construct(self):
                ax = Axes()
                sine = ax.plot(np.sin, color=RED)
                alpha = ValueTracker(0)
                point = always_redraw(
                    lambda: Dot(
                        sine.point_from_proportion(alpha.get_value()),
                        color=BLUE)
                )
                tangent = always_redraw(
                    lambda: TangentLine(
                        sine,
                        alpha=alpha.get_value(),
                        color=YELLOW,
                        length=4)
                )
                self.add(ax, sine, point, tangent)
                self.play(alpha.animate.set_value(1), rate_func=linear, run_time=2)
    """
    mob = func()
    # `become` mutates mob in place, so the returned reference stays valid
    # while its appearance tracks func() frame by frame.
    mob.add_updater(lambda _: mob.become(func()))
    return mob
def always_shift(mobject, direction=RIGHT, rate=0.1):
    """Attach an updater that moves *mobject* along *direction* at *rate*
    units per second, then return the mobject."""

    def _unit(vec):
        # Guard against the zero vector, which cannot be normalized.
        length = np.linalg.norm(vec)
        return vec if length == 0 else vec / length

    def _shift_updater(mob, dt):
        # Normalize every frame in case `direction` is mutated externally.
        mob.shift(dt * rate * _unit(direction))

    mobject.add_updater(_shift_updater)
    return mobject
def always_rotate(mobject, rate=20 * DEGREES, **kwargs):
    """Attach an updater that spins *mobject* continuously at *rate*
    radians per second, then return the mobject."""

    def _rotate_updater(mob, dt):
        mob.rotate(dt * rate, **kwargs)

    mobject.add_updater(_rotate_updater)
    return mobject
def turn_animation_into_updater(animation, cycle=False, **kwargs):
    """
    Add an updater to the animation's mobject which applies
    the interpolation and update functions of the animation

    If cycle is True, this repeats over and over.  Otherwise,
    the updater will be popped upon completion
    """
    mobject = animation.mobject
    # Let this updater drive the mobject instead of the animation machinery.
    animation.suspend_mobject_updating = False
    animation.begin()
    animation.total_time = 0

    def update(m, dt):
        run_time = animation.get_run_time()
        time_ratio = animation.total_time / run_time
        if cycle:
            # Wrap around so the animation restarts from alpha == 0.
            alpha = time_ratio % 1
        else:
            alpha = np.clip(time_ratio, 0, 1)
            if alpha >= 1:
                # One-shot mode: finalize the animation and detach this
                # updater so it no longer runs.
                animation.finish()
                m.remove_updater(update)
                return
        animation.interpolate(alpha)
        animation.update_mobjects(dt)
        animation.total_time += dt

    mobject.add_updater(update)
    return mobject
def cycle_animation(animation, **kwargs):
    """Loop *animation* forever by installing a cycling updater."""
    return turn_animation_into_updater(animation, cycle=True, **kwargs)
|
983,162 | d5215c449617e7c87fb2addfb9a670a5699cea79 | import time
start_time = time.time()
from datetime import datetime
import pandas as pd
# How long the heavyweight imports took.
lib_load_time = time.time() - start_time
print("Libraries loading time: %s s." % lib_load_time)

start_time = time.time()
bitstamp = pd.read_csv("data/bitstamp.csv")
file_load_time = time.time() - start_time
print("File loading time: %s s." % file_load_time)

start_time = time.time()
# Unix seconds -> pandas datetime column used for grouping below.
bitstamp['Datetime'] = pd.to_datetime(bitstamp['Timestamp'], unit='s')
preprocess_exec_time = time.time() - start_time
print("Preprocessing time: %s s." % preprocess_exec_time)

# Repeat the groupby N times and report the mean wall-clock time per run.
N = 10
start_time = time.time()
for i in range(N):
    df = bitstamp.groupby([bitstamp.Datetime.dt.year, bitstamp.Datetime.dt.month]).agg({'Close': 'mean', 'High': 'max'})
exec_time = (time.time() - start_time) / N
print("Processing time: %s s." % exec_time)

# One log row per task; appended to logs/log.csv (created on first run).
task_names = ["Library loading"] + ["File loading"] + ["Preprocessing"] + ["Processing"]
task_types = ["Loading"] + ["Execution"] * 3
task_exec_times = [lib_load_time, file_load_time, preprocess_exec_time, exec_time]
new_df = pd.DataFrame({"DateTime":[datetime.now().isoformat()]*len(task_names), "Language":["Python"]*len(task_names), "Dataset":["Bitstamp"]*len(task_names), "TaskNames":task_names, "TaskTypes":task_types, "TaskExecTimes":task_exec_times})
try:
    out_df = pd.concat([pd.read_csv("logs/log.csv"), new_df], sort=False)
except Exception as e:
    # First run: no existing log file to append to.
    out_df = new_df
out_df.to_csv("logs/log.csv", index=False)
|
983,163 | ed776aba88d01cbbec7d44b0fa37e98f9aba87fa | import numpy as np
class PrimitiveObject(object):
    """Container for a primitive matrix and the names of its columns."""

    def save_primitive_matrix(self, primitive_mtx):
        """Store *primitive_mtx* and record its number of primitives (columns)."""
        self.primitive_mtx = primitive_mtx
        self.discrete_primitive_mtx = primitive_mtx  # same object, not a copy
        self.num_primitives = np.shape(self.primitive_mtx)[1]

    def save_primitive_names(self, names):
        """Store the per-primitive names.

        Raises:
            Exception: if ``len(names)`` does not match ``num_primitives``.
        """
        self.primitive_names = names
        if len(self.primitive_names) != self.num_primitives:
            # BUG FIX: the original constructed the Exception but never
            # raised it, silently accepting a mismatched name list.
            raise Exception('Incorrect number of Primitive Names')
def bike_human_nums(object_names):
    """Classify an image caption by its person/bicycle counts.

    *object_names* is a " , "-separated label string whose first entry is a
    header and is skipped.  Returns 2 when persons == bicycles (both > 0),
    1 when there are more persons than bicycles, and 0 otherwise (including
    when either class is absent).
    """
    labels = object_names.split(' , ')[1:]
    person_words = ('person', 'man', 'woman', 'girl', 'boy', 'people')
    bicycle_words = ('cycle', 'bike', 'bicycle')
    num_person = sum(1 for label in labels
                     if any(word in label for word in person_words))
    num_bicycles = sum(1 for label in labels
                       if any(word in label for word in bicycle_words))
    if num_bicycles == 0 or num_person == 0:
        return 0
    if num_person == num_bicycles:
        return 2
    # More bicycles than persons -> 0, more persons than bicycles -> 1.
    return 0 if num_person < num_bicycles else 1
def bike_human_distance(object_names, object_x, object_y):
    """Return the smallest euclidean distance between any detected person
    and any detected bicycle, or -1 if either class is absent.

    Parameters
    ----------
    object_names : str
        " , "-separated object labels; the first entry is a header and is
        skipped.
    object_x, object_y : sequence of float
        Coordinates aligned with the labels after the header is dropped.
    """
    import itertools
    names = object_names.split(' , ')[1:]
    person_position = []
    bicycle_position = []
    for i in range(len(names)):
        name = names[i]
        if ('person' in name) or ('man' in name) or ('woman' in name) or ('girl' in name) or ('boy' in name) or ('people' in name):
            person_position.append([object_x[i], object_y[i]])
        if ('cycle' in name) or ('bike' in name) or ('bicycle' in name):
            bicycle_position.append([object_x[i], object_y[i]])
    if not person_position or not bicycle_position:
        return -1
    # BUG FIX: the original iterated *inside* each (person, bike) pair,
    # which unpacked individual [x, y] coordinates as (coord1, coord2) and
    # computed |x - y| per point instead of the person-to-bike distance.
    dists = []
    for person, bike in itertools.product(person_position, bicycle_position):
        dists.append(np.linalg.norm(np.subtract(person, bike)))
    return np.min(dists)
def bike_human_size(object_names, object_area):
    """Return the largest absolute area difference between any detected
    bicycle and any detected person, or -1 if either class is absent.

    Parameters
    ----------
    object_names : str
        " , "-separated object labels; the first entry is a header and is
        skipped.
    object_area : sequence of float
        Areas aligned with the labels after the header is dropped.
    """
    names = object_names.split(' , ')[1:]
    person_area = []
    bicycle_area = []
    for i in range(len(names)):
        name = names[i]
        if ('person' in name) or ('man' in name) or ('woman' in name) or ('girl' in name) or ('boy' in name) or ('people' in name):
            person_area.append(object_area[i])
        if ('cycle' in name) or ('bike' in name) or ('bicycle' in name):
            bicycle_area.append(object_area[i])
    if not person_area or not bicycle_area:
        return -1
    person_arr = np.asarray(person_area)
    area_diff = -1
    for bike in bicycle_area:
        # BUG FIX: the original called np.max(a, b), which treats the second
        # positional argument as `axis` and raised on every iteration; the
        # bare except swallowed it, so the function always returned -1.
        area_diff = max(area_diff, np.max(np.abs(bike - person_arr)))
    return area_diff
983,164 | 70340603d2a466b46266c865f25d504525f25f94 | # =============================================================================
# # 교호작용 기법(interaction)
# =============================================================================
- 변수간의 결합*
- 설명변수끼리의 상호곱 변수 생성, 그 중 의미있는 변수를 추출하기 위함
- 적은 설명변수로 많은 설명변수로의 확장 가능 (설명변수 추가 없이)
- 초기 변수 연구 단계에서 고려되는 기법
=> 수업 목표 : "의미있는 교호작용(변수간 결합)을 찾는 것이 목적"
아파트 가격 <= 지하주차장 면적 * 평균강수량
아파트 가격 <= 지하주차장 면적 + ... +
run profile1
from sklearn.preprocessing import PolynomialFeatures
# interaction - iris data
# 1.데이터 로딩
df_iris = iris()
# 2.데이터 분리
train_x,test_x,train_y,test_y = train_test_split(df_iris['data'],
df_iris['target'],
random_state=0)
# 3. interaction 모델 생성
m_poly = PolynomialFeatures(degree=2) # degree=2, 2차원
# 4. 모델에 데이터 학습 -> 설명변수 변환기준 찾기
#fitting : (확장된 모델알려준다)변수명 추출
m_poly.fit(train_x)
# 5. 실 데이터 변환 (교호작용 기준에 맞게 설명변수 변환)
# transform : 확장된 형식에 맞춰 리폼(실데이터에 대한 변환)
train_x_poly = m_poly.transform(train_x)
# 6. 변환된 변수 형태 확인
m_poly.get_feature_names() # 실제 변수명X
m_poly.get_feature_names(설명변수) # 설명변수명 출력
m_poly.get_feature_names(df_iris['feature_names']) # 실제 변수명으로 추출
['sepal length', 'sepal width', 'petal length', 'petal width']
fitting
y x1 x2 => y x1 x2 x1^2 x2^2 x1x2
1 2 1 2 1 4 2
2 3 2 3 4 9 6
transform : 확장된 형식에 맞춰 리폼(실데이터에 대한 변환)
fitting : (확장된 모델알려준다)변수명 추출
# 7. 확장된 데이터셋으로 knn모델 적용
m_knn=knn(n_neighbors=3)
m_knn.fit(train_x, train_y)
m_knn.score(test_x, test_y) # 4개 설명 변수 예측값 - 97.37%
# 다시보기
m_knn2=knn(n_neighbors=3)
m_knn2.fit(train_x_poly, train_y)
test_x_poly = m_poly.transform(test_x)
m_knn2.score(test_x_poly, test_y) # 확장된 설명변수 예측값 - 97.37%
# 8. 확장된 설명변수 중 의미있는 교호작용 추출
m_rf = rf()
m_rf.fit(train_x_poly, train_y)
df_iris_poly_col = m_poly.get_feature_names(df_iris['feature_names'])
s1 = Series(m_rf.feature_importances_, index = df_iris_poly_col)
s1.sort_values(ascending=False)
|
983,165 | b126f477701cde4d3ea46c2284f4e88dfb3002a4 | version https://git-lfs.github.com/spec/v1
oid sha256:88870e25e36ee073ad0411a803e459e7f51df82517dd5e223467df65b4804c8b
size 1953
|
983,166 | 7e83ef4796ae054e05bc069a933969df1918c440 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 08:18:56 2019
@author: CHARLES
"""
from PyQt4.QtCore import Qt, SIGNAL, QDate
from PyQt4.QtGui import QPlainTextEdit, QTreeWidgetItem, QTreeWidget, QWidget, QTextDocument, QTextCursor, QImage, QFileDialog, QFont, QPixmap, QTabWidget, QComboBox, QRadioButton, QDateEdit, QTextEdit, QCheckBox, QHBoxLayout, QGroupBox, QGridLayout, QDialog, QApplication, QPushButton, QLineEdit, QFormLayout, QLabel, QVBoxLayout, QSizePolicy
from connect import Db
from datetime import datetime
import time
class FormClassSubject(QDialog):
    """Dialog that assigns a student's class and adds/removes the student's
    subjects for one school term."""

    def __init__(self, student, term, parent=None):
        """*student* and *term* are database ids; the per-term tables are
        named by suffixing the term id (e.g. ``student_class<term>``)."""
        super(FormClassSubject, self).__init__(parent)
        self.student = student
        self.term = term
        # Per-term table names.
        self.db_class = 'student_class'+str(self.term)
        self.db_subject = 'student_subject'+str(self.term)
        student = self.pullData('students', 1, {'id':self.student})
        term = self.pullData('terms', 1, {'id':self.term})
        session = self.pullData('session', 1 , {'id':term['sessionID']})
        subjects = self.pullData('datas', '' , {'pubID':3})  # all available subjects
        student_class = self.pullData(self.db_class, 1 , {'studentID':self.student})
        student_subject = self.pullData(self.db_subject, '' , {'studentID':self.student})
        subjects_arr = self.convert_arr(student_subject)  # subject ids already taken
        self.session = str(str(session['name'])+" "+str(term['name']+" Term")).title()
        self.fullname = str(str(student['surname'])+" "+str(student['firstname'])).title()
        fullnameLbl = QLabel(self.fullname)
        fullnameLbl.setFont(QFont("Candara", 14, QFont.Bold))
        termLbl = QLabel(self.session)
        termLbl.setFont(QFont("Candara", 12, QFont.Bold))
        classLbl = QLabel('Select Class')
        self.classCombo = QComboBox()
        pullClass = self.pullData('datas', '', {'pubID':1})  # class groups
        # Maps combo-box index -> class-unit id (itemData is not used).
        self.class_arr = {}
        ko = 0
        for r in pullClass:
            pullClassUnit = self.pullData('datas', '', {'subID':r['id']})
            for f in pullClassUnit:
                self.classCombo.addItem(str(r['abbrv']).upper()+" "+str(f['abbrv']).upper())
                self.class_arr[ko] = f['id']
                ko += 1
        # Tree 1: tick subjects to add to the student.
        tree = QTreeWidget()
        tree.setHeaderLabel("Select Subjects")
        self.sub_arr = {}
        parent = QTreeWidgetItem(tree)
        parent.setText(0, "Subjects")
        parent.setFlags(parent.flags() | Qt.ItemIsTristate | Qt.ItemIsUserCheckable)
        if subjects and len(subjects) > 0:
            for val in subjects:
                child = QTreeWidgetItem(parent)
                child.setFlags(child.flags() | Qt.ItemIsUserCheckable)
                child.setText(0, str(val['name']).upper())
                self.sub_arr[val['id']] = child
                # Pre-check subjects the student already has.
                if int(val['id']) in subjects_arr:
                    child.setCheckState(0, Qt.Checked)
                else:
                    child.setCheckState(0, Qt.Unchecked)
                ko += 1  # NOTE(review): leftover counter, has no effect here
        # Tree 2: untick currently-assigned subjects to remove them.
        tree1 = QTreeWidget()
        tree1.setHeaderLabel("Remove Subjects")
        self.sub1_arr = {}
        parent1 = QTreeWidgetItem(tree1)
        parent1.setText(0, "Subjects")
        parent1.setFlags(parent1.flags() | Qt.ItemIsTristate | Qt.ItemIsUserCheckable)
        if student_subject and len(student_subject) > 0:
            for val in student_subject:
                st_nam = self.pullData('datas', 1, {'id':val['subjectID']})
                child = QTreeWidgetItem(parent1)
                child.setFlags(child.flags() | Qt.ItemIsUserCheckable)
                child.setText(0, str(st_nam['name']).upper())
                self.sub1_arr[st_nam['id']] = child
                child.setCheckState(0, Qt.Checked)
                ko += 1  # NOTE(review): leftover counter, has no effect here
        h_box = QHBoxLayout()
        h_box.addWidget(classLbl)
        h_box.addWidget(self.classCombo)
        self.pb = QPushButton()
        self.pb.setObjectName("close")
        self.pb.setText("Close")
        self.pb1 = QPushButton()
        self.pb1.setObjectName("Submit")
        self.pb1.setText("Submit")
        self.connect(self.pb1, SIGNAL("clicked()"), lambda: self.button_click())
        self.connect(self.pb, SIGNAL("clicked()"), lambda: self.button_close())
        h_box1 = QHBoxLayout()
        h_box1.addWidget(self.pb)
        h_box1.addWidget(self.pb1)
        self.v_box = QVBoxLayout()
        self.v_box.addWidget(fullnameLbl)
        self.v_box.addWidget(termLbl)
        self.v_box.addLayout(h_box)
        self.v_box.addWidget(tree)
        self.v_box.addWidget(tree1)
        self.v_box.addLayout(h_box1)
        if student_class and student_class['classID'] > 0 :
            # Preselect the student's stored class in the combo box.
            # NOTE(review): dict.keys()[...] indexing is Python 2 only.
            stID = self.class_arr.keys()[self.class_arr.values().index(student_class['classID'])]
            self.classCombo.setCurrentIndex(stID)
        self.setLayout(self.v_box)
        self.setWindowTitle("Class")

    def pullData(self, db, sid, arr):
        """Select rows from table *db* matching *arr*; a truthy *sid* returns
        a single row dict, otherwise a list of rows."""
        g = Db()
        data = g.selectn(db, '', sid, arr)
        return data

    def convert_arr(self, arr):
        """Flatten the rows in *arr* into a list of their subjectID values."""
        ar = []
        for a in arr:
            ar.append(a['subjectID'])
        return ar

    def button_close(self):
        # Close without saving.
        self.reject()

    def button_click(self):
        """Persist the selected class and the subject additions/removals."""
        subject = self.getValue()  # [ids to add, ids to remove]
        #clasz = self.classCombo.itemData(self.classCombo.currentIndex())
        clasz = self.class_arr[self.classCombo.currentIndex()]
        ## set class
        g = Db()
        sel = g.selectn(self.db_class, '', 1, {'studentID':self.student})
        if sel and sel['id'] > 0:
            if int(sel['classID']) == clasz:
                pass  # class unchanged, nothing to write
            else:
                g.update(self.db_class, {'classID':clasz} , {'id': sel['id']})
        else:
            g.insert(self.db_class, {'studentID':self.student, 'classID':clasz})
        if len(subject[0]) > 0:
            # Insert newly ticked subjects, skipping ones already present.
            for a in subject[0]:
                sel = g.selectn(self.db_subject, '', 1, {'studentID':self.student, 'subjectID':a})
                if sel and int(sel['id']) > 0:
                    pass
                else:
                    g.insert(self.db_subject, {'studentID':self.student, 'subjectID':a})
        if len(subject[1]) > 0:
            # Delete subjects that were unticked in the removal tree.
            for a in subject[1]:
                g.delete(self.db_subject, {'studentID':self.student, 'subjectID':a})
        ## set subject
        self.accept()

    def getValue(self):
        """Return [checked ids in the add tree, unchecked ids in the remove
        tree]."""
        k1 = []
        for i in self.sub_arr:
            if self.sub_arr[i].checkState(0) == Qt.Checked:
                k1.append(i)
        k2 = []
        for i in self.sub1_arr:
            if self.sub1_arr[i].checkState(0) == Qt.Unchecked:
                k2.append(i)
        return [k1, k2]
class FormStudentMedical(QDialog):
    """Dialog to add or edit a student's medical (ailment/treatment) record
    for one school session."""

    def __init__(self, student, term, edit=None, parent=None):
        """*student*/*term* are database ids.  *edit*, when given, is a
        "<sessionID>_<rowID>" string selecting an existing record to edit."""
        super(FormStudentMedical, self).__init__(parent)
        self.student = student
        self.term = term
        # Per-term table names (kept for parity with the other forms).
        self.db_class = 'student_class'+str(self.term)
        self.db_subject = 'student_subject'+str(self.term)
        student = self.pullData('students', 1, {'id':self.student})
        term = self.pullData('terms', 1, {'id':self.term})
        session = self.pullData('session', 1 , {'id':term['sessionID']})
        self.session = str(str(session['name'])+" "+str(term['name']+" Term")).title()
        self.fullname = str(str(student['surname'])+" "+str(student['firstname'])).title()
        self.sessionID = session['id']  # suffix for the school_medicals table
        fullnameLbl = QLabel(self.fullname)
        fullnameLbl.setFont(QFont("Candara", 14, QFont.Bold))
        termLbl = QLabel(self.session)
        termLbl.setFont(QFont("Candara", 12, QFont.Bold))
        ailmentLbl = QLabel('Ailment/Allergies')
        treatmentLbl = QLabel('Treatment/Medication')
        self.ailmentData = QPlainTextEdit()
        self.treatmentData = QPlainTextEdit()
        self.pb = QPushButton()
        self.pb.setObjectName("close")
        self.pb.setText("Close")
        self.pb1 = QPushButton()
        self.pb1.setObjectName("Add")
        self.pb1.setText("Add")
        self.pb2 = QPushButton()
        self.pb2.setObjectName("Edit")
        self.pb2.setText("Edit")
        self.connect(self.pb1, SIGNAL("clicked()"), lambda: self.button_click())
        self.connect(self.pb2, SIGNAL("clicked()"), lambda: self.button_edit())
        self.connect(self.pb, SIGNAL("clicked()"), lambda: self.button_close())
        self.dateLbl = QLabel('Choose Date:')
        currentDate = QDate()
        self.dateData = QDateEdit()
        self.dateData.setDate(currentDate.currentDate())
        self.dateData.setCalendarPopup(True)
        h_box = QHBoxLayout()
        h_box.addWidget(self.dateLbl)
        h_box.addWidget(self.dateData)
        h_box1 = QHBoxLayout()
        h_box1.addWidget(self.pb)
        h_box1.addWidget(self.pb1)
        h_box1.addWidget(self.pb2)
        self.v_box = QVBoxLayout()
        self.v_box.addWidget(fullnameLbl)
        self.v_box.addWidget(termLbl)
        self.v_box.addLayout(h_box)
        self.v_box.addWidget(ailmentLbl)
        self.v_box.addWidget(self.ailmentData)
        self.v_box.addWidget(treatmentLbl)
        self.v_box.addWidget(self.treatmentData)
        self.v_box.addLayout(h_box1)
        # Edit mode preloads the record and shows the Edit button;
        # add mode shows the Add button instead.
        if edit and len(edit) > 0:
            self.edit = edit
            self.editRow(edit)
            self.pb1.hide()
            self.pb2.show()
        else:
            self.edit = None
            self.pb1.show()
            self.pb2.hide()
        self.setLayout(self.v_box)
        self.setWindowTitle("Medical Report Form")

    def pullData(self, db, sid, arr):
        """Select rows from table *db* matching *arr*; a truthy *sid* returns
        a single row dict, otherwise a list of rows."""
        g = Db()
        data = g.selectn(db, '', sid, arr)
        return data

    def convert_arr(self, arr):
        # NOTE(review): unused in this dialog; copied from FormClassSubject.
        ar = []
        for a in arr:
            ar.append(a['subjectID'])
        return ar

    def editRow(self, a):
        """Load record "<sessionID>_<rowID>" into the text fields."""
        e = a.split('_')
        g = Db()
        self.mainrow = e[1]  # row id within the per-session table
        self.mainses = e[0]  # session id -> table suffix
        db = 'school_medicals'+str(e[0])
        data = g.selectn(db, '', 1, {'id':e[1]})
        if data and len(data) > 0:
            self.ailmentData.clear()
            self.ailmentData.insertPlainText(str(data['ailment']))
            self.treatmentData.clear()
            self.treatmentData.insertPlainText(str(data['treatment']))

    def button_close(self):
        # Close without saving.
        self.reject()

    def button_click(self):
        """Insert a new medical record for the current session."""
        ailment = self.ailmentData.toPlainText()
        treatment = self.treatmentData.toPlainText()
        _date = self.dateData.date().toPyDate()
        _date = time.mktime(_date.timetuple())  # stored as a unix timestamp
        db = 'school_medicals'+str(self.sessionID)
        if len(ailment) > 0 and len(treatment) > 0:
            arr ={}
            arr['studentID'] = self.student
            arr['ailment'] = ailment
            arr['treatment'] = treatment
            arr['datepaid'] = _date
            g = Db()
            g.insert(db, arr)
        ## set subject
        self.getValue()

    def button_edit(self):
        """Update the record previously loaded by editRow()."""
        ailment = self.ailmentData.toPlainText()
        treatment = self.treatmentData.toPlainText()
        _date = self.dateData.date().toPyDate()
        _date = time.mktime(_date.timetuple())  # stored as a unix timestamp
        db = 'school_medicals'+str(self.mainses)
        if len(ailment) > 0 and len(treatment) > 0:
            arr ={}
            arr['ailment'] = ailment
            arr['treatment'] = treatment
            arr['datepaid'] = _date
            g = Db()
            g.update(db, arr, {'id':self.mainrow})
        ## set subject
        self.getValue()

    def getValue(self):
        # Close the dialog with "accepted" status.
        self.accept()
class FormStudentConduct(QDialog):
    """Dialog for recording (or editing) a positive-conduct entry.

    :param student: id of the student the entry belongs to.
    :param term: id of the term; used to resolve the session.
    :param edit: optional composite key "<sessionID>_<rowID>" of an
        existing record in ``school_conducts<sessionID>``; when given the
        form is pre-filled and the Edit button replaces the Add button.

    BUG FIXES relative to the original:
      * the row-loading method was defined as ``editrow`` while
        ``__init__`` called ``self.editRow`` -> AttributeError in edit mode;
      * that method populated non-existent ``ailmentData``/``treatmentData``
        widgets with 'ailment'/'treatment' columns (copied from the medical
        form) instead of ``actionData``/``reactionData`` with
        'action'/'reaction';
      * ``button_edit`` updated table ``school_conducts<current sessionID>``
        with ``{'id': self.edit}`` (the whole "ses_row" string), so the
        UPDATE never matched; it now uses the parsed session and row ids,
        mirroring the medical form.
    """

    def __init__(self, student, term, edit=None, parent=None):
        super(FormStudentConduct, self).__init__(parent)
        self.student = student
        self.term = term
        self.db_class = 'student_class' + str(self.term)
        self.db_subject = 'student_subject' + str(self.term)
        # Resolve display data for the header labels.
        student = self.pullData('students', 1, {'id': self.student})
        term = self.pullData('terms', 1, {'id': self.term})
        session = self.pullData('session', 1, {'id': term['sessionID']})
        self.session = str(str(session['name']) + " " + str(term['name'] + " Term")).title()
        self.fullname = str(str(student['surname']) + " " + str(student['firstname'])).title()
        self.sessionID = session['id']
        fullnameLbl = QLabel(self.fullname)
        fullnameLbl.setFont(QFont("Candara", 14, QFont.Bold))
        termLbl = QLabel(self.session)
        termLbl.setFont(QFont("Candara", 12, QFont.Bold))
        actionLbl = QLabel('Action')
        reactionLbl = QLabel('Award/Prize etc.')
        issuerLbl = QLabel('Issuer')
        self.actionData = QPlainTextEdit()
        self.reactionData = QPlainTextEdit()
        self.staffData = QLineEdit()
        self.staffData.setPlaceholderText('Staff Name or Deparment or Organisation')
        self.pb = QPushButton()
        self.pb.setObjectName("close")
        self.pb.setText("Close")
        self.pb1 = QPushButton()
        self.pb1.setObjectName("Add")
        self.pb1.setText("Add")
        self.pb2 = QPushButton()
        self.pb2.setObjectName("Edit")
        self.pb2.setText("Edit")
        self.connect(self.pb1, SIGNAL("clicked()"), lambda: self.button_click())
        self.connect(self.pb2, SIGNAL("clicked()"), lambda: self.button_edit())
        self.connect(self.pb, SIGNAL("clicked()"), lambda: self.button_close())
        self.dateLbl = QLabel('Choose Date:')
        currentDate = QDate()
        self.dateData = QDateEdit()
        self.dateData.setDate(currentDate.currentDate())
        self.dateData.setCalendarPopup(True)
        h_box = QHBoxLayout()
        h_box.addWidget(self.dateLbl)
        h_box.addWidget(self.dateData)
        h_box2 = QHBoxLayout()
        h_box2.addWidget(issuerLbl)
        h_box2.addWidget(self.staffData)
        h_box1 = QHBoxLayout()
        h_box1.addWidget(self.pb)
        h_box1.addWidget(self.pb1)
        h_box1.addWidget(self.pb2)
        self.v_box = QVBoxLayout()
        self.v_box.addWidget(fullnameLbl)
        self.v_box.addWidget(termLbl)
        self.v_box.addLayout(h_box)
        self.v_box.addWidget(actionLbl)
        self.v_box.addWidget(self.actionData)
        self.v_box.addWidget(reactionLbl)
        self.v_box.addWidget(self.reactionData)
        self.v_box.addLayout(h_box2)
        self.v_box.addLayout(h_box1)
        if edit and len(edit) > 0:
            self.edit = edit
            self.editRow(edit)
            self.pb1.hide()
            self.pb2.show()
        else:
            self.edit = None
            self.pb1.show()
            self.pb2.hide()
        self.setLayout(self.v_box)
        self.setWindowTitle("Conduct Report Form")

    def pullData(self, db, sid, arr):
        """Return the record(s) in table *db* matching the filter *arr*."""
        g = Db()
        return g.selectn(db, '', sid, arr)

    def convert_arr(self, arr):
        """Extract the ``subjectID`` value from every row in *arr*."""
        return [row['subjectID'] for row in arr]

    def editRow(self, a):
        """Pre-fill the form from record "<sessionID>_<rowID>" given in *a*."""
        parts = a.split('_')
        self.mainses = parts[0]
        self.mainrow = parts[1]
        g = Db()
        db = 'school_conducts' + str(self.mainses)
        data = g.selectn(db, '', 1, {'id': self.mainrow})
        if data and len(data) > 0:
            self.actionData.clear()
            self.actionData.insertPlainText(str(data['action']))
            self.staffData.setText(str(data['staffname']))
            self.reactionData.clear()
            self.reactionData.insertPlainText(str(data['reaction']))

    def button_close(self):
        """Dismiss the dialog without saving."""
        self.reject()

    def button_click(self):
        """Insert a new conduct record for the current session."""
        action = self.actionData.toPlainText()
        reaction = self.reactionData.toPlainText()
        staff = self.staffData.text()
        # Store the chosen date as a Unix timestamp.
        _date = time.mktime(self.dateData.date().toPyDate().timetuple())
        db = 'school_conducts' + str(self.sessionID)
        if len(action) > 0 and len(reaction) > 0:
            arr = {
                'studentID': self.student,
                'action': action,
                'reaction': reaction,
                'datepaid': _date,
                'staffname': staff,
                'state': 0,  # 0 marks a positive-conduct entry (1 = misconduct)
            }
            Db().insert(db, arr)
            self.getValue()

    def button_edit(self):
        """Update the record loaded via editRow (uses mainses/mainrow)."""
        action = self.actionData.toPlainText()
        reaction = self.reactionData.toPlainText()
        staff = self.staffData.text()
        _date = time.mktime(self.dateData.date().toPyDate().timetuple())
        db = 'school_conducts' + str(self.mainses)
        if len(action) > 0 and len(reaction) > 0:
            arr = {
                'action': action,
                'reaction': reaction,
                'datepaid': _date,
                'staffname': staff,
            }
            Db().update(db, arr, {'id': self.mainrow})
            self.getValue()

    def getValue(self):
        """Accept the dialog so the caller can refresh its view."""
        self.accept()
class FormStudentMisconduct(QDialog):
    """Dialog for recording (or editing) a misconduct entry.

    :param student: id of the student the entry belongs to.
    :param term: id of the term; used to resolve the session.
    :param edit: optional composite key "<sessionID>_<rowID>" of an
        existing record in ``school_conducts<sessionID>``; when given the
        form is pre-filled and the Edit button replaces the Add button.

    BUG FIXES relative to the original:
      * the row-loading method was defined as ``editrow`` while
        ``__init__`` called ``self.editRow`` -> AttributeError in edit mode;
      * that method populated non-existent ``ailmentData``/``treatmentData``
        widgets instead of ``actionData``/``reactionData``;
      * ``button_edit`` updated table ``school_conducts<current sessionID>``
        with ``{'id': self.edit}`` (the whole "ses_row" string), so the
        UPDATE never matched; it now uses the parsed session and row ids.
    """

    def __init__(self, student, term, edit=None, parent=None):
        super(FormStudentMisconduct, self).__init__(parent)
        self.student = student
        self.term = term
        self.db_class = 'student_class' + str(self.term)
        self.db_subject = 'student_subject' + str(self.term)
        # Resolve display data for the header labels.
        student = self.pullData('students', 1, {'id': self.student})
        term = self.pullData('terms', 1, {'id': self.term})
        session = self.pullData('session', 1, {'id': term['sessionID']})
        self.session = str(str(session['name']) + " " + str(term['name'] + " Term")).title()
        self.fullname = str(str(student['surname']) + " " + str(student['firstname'])).title()
        self.sessionID = session['id']
        fullnameLbl = QLabel(self.fullname)
        fullnameLbl.setFont(QFont("Candara", 14, QFont.Bold))
        termLbl = QLabel(self.session)
        termLbl.setFont(QFont("Candara", 12, QFont.Bold))
        actionLbl = QLabel('Action')
        reactionLbl = QLabel('Corrective/Punitive Measure')
        issuerLbl = QLabel('Issuer')
        self.actionData = QPlainTextEdit()
        self.reactionData = QPlainTextEdit()
        self.staffData = QLineEdit()
        self.staffData.setPlaceholderText('Staff Name or Deparment or Organisation')
        self.pb = QPushButton()
        self.pb.setObjectName("close")
        self.pb.setText("Close")
        self.pb1 = QPushButton()
        self.pb1.setObjectName("Add")
        self.pb1.setText("Add")
        self.pb2 = QPushButton()
        self.pb2.setObjectName("Edit")
        self.pb2.setText("Edit")
        self.connect(self.pb1, SIGNAL("clicked()"), lambda: self.button_click())
        self.connect(self.pb2, SIGNAL("clicked()"), lambda: self.button_edit())
        self.connect(self.pb, SIGNAL("clicked()"), lambda: self.button_close())
        self.dateLbl = QLabel('Choose Date:')
        currentDate = QDate()
        self.dateData = QDateEdit()
        self.dateData.setDate(currentDate.currentDate())
        self.dateData.setCalendarPopup(True)
        h_box = QHBoxLayout()
        h_box.addWidget(self.dateLbl)
        h_box.addWidget(self.dateData)
        h_box2 = QHBoxLayout()
        h_box2.addWidget(issuerLbl)
        h_box2.addWidget(self.staffData)
        h_box1 = QHBoxLayout()
        h_box1.addWidget(self.pb)
        h_box1.addWidget(self.pb1)
        h_box1.addWidget(self.pb2)
        self.v_box = QVBoxLayout()
        self.v_box.addWidget(fullnameLbl)
        self.v_box.addWidget(termLbl)
        self.v_box.addLayout(h_box)
        self.v_box.addWidget(actionLbl)
        self.v_box.addWidget(self.actionData)
        self.v_box.addWidget(reactionLbl)
        self.v_box.addWidget(self.reactionData)
        self.v_box.addLayout(h_box2)
        self.v_box.addLayout(h_box1)
        if edit and len(edit) > 0:
            self.edit = edit
            self.editRow(edit)
            self.pb1.hide()
            self.pb2.show()
        else:
            self.edit = None
            self.pb1.show()
            self.pb2.hide()
        self.setLayout(self.v_box)
        self.setWindowTitle("Misconduct Report Form")

    def pullData(self, db, sid, arr):
        """Return the record(s) in table *db* matching the filter *arr*."""
        g = Db()
        return g.selectn(db, '', sid, arr)

    def convert_arr(self, arr):
        """Extract the ``subjectID`` value from every row in *arr*."""
        return [row['subjectID'] for row in arr]

    def editRow(self, a):
        """Pre-fill the form from record "<sessionID>_<rowID>" given in *a*."""
        parts = a.split('_')
        self.mainses = parts[0]
        self.mainrow = parts[1]
        g = Db()
        db = 'school_conducts' + str(self.mainses)
        data = g.selectn(db, '', 1, {'id': self.mainrow})
        if data and len(data) > 0:
            self.actionData.clear()
            self.actionData.insertPlainText(str(data['action']))
            self.staffData.setText(str(data['staffname']))
            self.reactionData.clear()
            self.reactionData.insertPlainText(str(data['reaction']))

    def button_close(self):
        """Dismiss the dialog without saving."""
        self.reject()

    def button_click(self):
        """Insert a new misconduct record for the current session."""
        action = self.actionData.toPlainText()
        reaction = self.reactionData.toPlainText()
        staff = self.staffData.text()
        # Store the chosen date as a Unix timestamp.
        _date = time.mktime(self.dateData.date().toPyDate().timetuple())
        db = 'school_conducts' + str(self.sessionID)
        if len(action) > 0 and len(reaction) > 0:
            arr = {
                'studentID': self.student,
                'action': action,
                'reaction': reaction,
                'datepaid': _date,
                'staffname': staff,
                'state': 1,  # 1 marks a misconduct entry (0 = positive conduct)
            }
            Db().insert(db, arr)
            self.getValue()

    def button_edit(self):
        """Update the record loaded via editRow (uses mainses/mainrow)."""
        action = self.actionData.toPlainText()
        reaction = self.reactionData.toPlainText()
        staff = self.staffData.text()
        _date = time.mktime(self.dateData.date().toPyDate().timetuple())
        db = 'school_conducts' + str(self.mainses)
        if len(action) > 0 and len(reaction) > 0:
            arr = {
                'action': action,
                'reaction': reaction,
                'datepaid': _date,
                'staffname': staff,
            }
            Db().update(db, arr, {'id': self.mainrow})
            self.getValue()

    def getValue(self):
        """Accept the dialog so the caller can refresh its view."""
        self.accept()
983,167 | cc7025fb03138cba668972177affeb6396bbf29a | IDs = ['rmyxgdlihczskunpfwbgqoeybv', 'rmyxgdlksczskunpfwbjqkeatv', 'rmybgdxibczskunpfwbjqoeatv', 'rmyxgdlirczskuopfwbjqzeatv', 'rmyxedlrhczskunpfwbyqoeatv', 'rmyxfdlicczskunpfwbxqoeatv', 'rmyxgvlihkzskunpfwbsqoeatv', 'rmyxgdaihczvkunpfwblqoeatv', 'nmyxgolihczskunpfwbjqieatv', 'rhyxgdcihczskunifwbjqoeatv', 'rmfxgdlihczskunpfwbvqgeatv', 'smyxgdlihczskunsiwbjqoeatv', 'rmyxgdcihcxskunpfwbrqoeatv', 'rmyxgdlihczckuiqfwbjqoeatv', 'rmyxxdwihczskunifwbjqoeatv', 'rkzxgdlihczskunpfwhjqoeatv', 'rmypgdlihczskunpfwbrqoeafv', 'rmyxgplihczvkunpkwbjqoeatv', 'rqyxgdlihdzskjnpfwbjqoeatv', 'rmyxgdlihczskqnpswbjqoeaov', 'mcyxgdlihczmkunpfwbjqoeatv', 'rmyxgdlohczspunpowbjqoeatv', 'tmyxgdlihczskunpfwbeqoeltv', 'rmyxgdlibccskunpfwbjqoegtv', 'rmyxgdlehczsaunpfwboqoeatv', 'rmaxgdlihczseunpfwbjqojatv', 'rmyxgdlijczskynpfwbjboeatv', 'kmlxgdlilczskunpfwbjqoeatv', 'rmsxgdlshczskenpfwbjqoeatv', 'rmbxgdlihcmskgnpfwbjqoeatv', 'rayxgdlihczskunpfwbjqoeaef', 'umyxgdlisczskunpfdbjqoeatv', 'rmyxgdlihczskunsfwbjqieatg', 'rmbxgdlihczhkunpfwbjqoeamv', 'rmyxgdlihczskeypfwbjqxeatv', 'rmyxgkrihczskunptwbjqoeatv', 'rmyxgdlihczskunpawbjqoexiv', 'rmyxgdlihcrskqnpfwbjqceatv', 'rmyxgblihczskjnpfwbjqieatv', 'rmyggdlidczskunofwbjqoeatv', 'rmyxgdlghczskunphwbjqomatv', 'rmqxgdbihczskunpfnbjqoeatv', 'rvyxgdlihczsgunpfwbjqoeanv', 'royxgdlnhczskqnpfwbjqoeatv', 'rmyxgdlihczskugpfwbkqreatv', 'rmyxfdlihczskunppwejqoeatv', 'rqyxgdlipczskunpfwbjqoeqtv', 'rmyxgdlicczskunpnwbjqotatv', 'rmyxodlihczskxnpfwijqoeatv', 'rmyxrdyihczskunpftbjqoeatv', 'rmtxgdyihwzskunpfwbjqoeatv', 'tmyxcdliiczskunpfwbjqoeatv', 'rmyxgdlihczskmnpfwbjjoeadv', 'rmyxgdnihczskunpqwbjqojatv', 'bmyxgdlihczskcnpfwboqoeatv', 'rmysgdlihcyskudpfwbjqoeatv', 'rmyxgdtihczsmuupfwbjqoeatv', 'rmyxgdlihczssunpffbjqolatv', 'rmyogdlihczsklnpfwbjqoxatv', 'rmyxgjlihczskunpfwsjqoyatv', 'rmyxgalshczskunpfwbuqoeatv', 'rmyfgdlihczskunqfwbiqoeatv', 'tmyxgdlihczskunotwbjqoeatv', 'rmyxpdzihczskuopfwbjqoeatv', 
'rmyfgdlihczskunpfrbgqoeatv', 'rmyxgdlwhczskhnofwbjqoeatv', 'rmyxgdlihczsmudpfrbjqoeatv', 'rmyxgdlihczokanpfwbjqooatv', 'rmyxrdlihczskunppwjjqoeatv', 'rmyxgdjihczskwnpowbjqoeatv', 'mmyxgdlihczikunpfwbjqoeamv', 'rmyxgflihczshunpwwbjqoeatv', 'rmytghlihczskunpfwbjqoeatk', 'rmyxgdlipczmbunpfwbjqoeatv', 'rmyxgdlihczkkonpfwbjqomatv', 'rmfxgslihczskunpfwujqoeatv', 'dmyxgdlihczykunqfwbjqoeatv', 'rmyxgalihcbskunpgwbjqoeatv', 'rmyxgdlinczqkunpfwbjqopatv', 'rmyxgdlihwzslunplwbjqoeatv', 'rmypgdlihczskdtpfwbjqoeatv', 'rmsxgdxieczskunpfwbjqoeatv', 'rmyxgdlihczskwnpfxrjqoeatv', 'rmyxgdlihzzskunpflbjpoeatv', 'rslxgdlihczsnunpfwbjqoeatv', 'rmyxgdlmcczskunpfwbjqoealv', 'fmkxgdbihczskunpfwbjqoeatv', 'rmyxgdiigczxkunpfwbjqoeatv', 'rjyxgnlqhczskunpfwbjqoeatv', 'ymyxgolihczskunpfmbjqoeatv', 'hmyxgdlihczskuncfwbjqoejtv', 'rmyxgqlihczzkunpfwbjqojatv', 'rmgfgdlihczskunpfwbjgoeatv', 'rmyxgdlfhczskunpfwbjqweaxv', 'rmoxtdlihczskunpfwdjqoeatv', 'ruyxgdlihczskunpfmbjnoeatv', 'rmnxgflehczskunpfwbjqoeatv', 'rmyugdlihczskunpfwfjroeatv', 'rmyxddbihczskunpfwbjqoeutv', 'rmyxgdlipczskunofbbjqoeatv', 'gmyxgdlihczskunpfkbjroeatv', 'rmyxgdllhcpskunpfwbjqqeatv', 'rmyxgdlihchskunpfwbjqoelcv', 'mmyxldlihczskuncfwbjqoeatv', 'ryyxgdlxhczskcnpfwbjqoeatv', 'rmyxpdlihczskyntfwbjqoeatv', 'rmhxgdlibczskwnpfwbjqoeatv', 'rmyxgdlihczskunpfwojbkeatv', 'qmyxgdlihczskunpfwbjqoyatm', 'rmyxgdlzhczskunpfwbjqoealr', 'rmyegdliqczskunpfgbjqoeatv', 'umyxgdlihczsvunpfwbfqoeatv', 'rmyxgdoihfzskunpfmbjqoeatv', 'rmyxgdlihcdskanpmwbjqoeatv', 'rmyxgdyihczskunpfrbjqoeaov', 'rcyxgdlihczskuegfwbjqoeatv', 'rmyxgdlihgwskunpfwbjkoeatv', 'rpyxgdlihmzskunpfwbjqoeatp', 'rmyxgdlihhzskunpfwbjaoeapv', 'rmyxgdsrhczskunpflbjqoeatv', 'rmrxgdlihczskunpvwbjqoeabv', 'rmcxgylihczskunpfwbjyoeatv', 'rmkxgdlyhczsounpfwbjqoeatv', 'rmyxgdqihczskunmfwbjqoratv', 'rmyxgdlihczskunpfibjqofath', 'rmyxgdliqczskunpqwbjqoeaev', 'rmhxgdlizcjskunpfwbjqoeatv', 'rmyxgdlfhcwskunpfwbjqoeaqv', 'rmyxgdlchclskunpfwbdqoeatv', 'rmyxgdluhczswunpfwbjqoeatt', 
'rmyxgdlzqczskunpfwbjqoeatq', 'rmdxgdlihszskunpfwbwqoeatv', 'rmyxgdlihszsvunpfwbjqueatv', 'rmyxgdlhhczskunpffbjaoeatv', 'rmrxgdlphczskunpfwbjqreatv', 'hmyngdxihczskunpfwbjqoeatv', 'rmyxgdlizczpkunpfwbyqoeatv', 'rmyxbdlihyzskunlfwbjqoeatv', 'rmyxgdlipczsqunnfwbjqoeatv', 'rmyxgdlihcsskunpfxbjqoaatv', 'rmyxgdljhcznkunpfwbjqfeatv', 'rmaxgdlihczspunpfwbjqoqatv', 'rsyxgdlihczskunpfwbjqoehcv', 'rmyxgjlicczskunpfwbjqoeitv', 'rwymgvlihczskunpfwbjqoeatv', 'rmyxgdlipfzskunpfwbjqweatv', 'rmyxgglihczskunpgwbjqoealv', 'royxgdlihczskhnpfwbyqoeatv', 'rmyxgdlihczskvnpfabkqoeatv', 'rmyxgdlihczskunpfwhjwzeatv', 'jlyxgdlihczskunpfwbjqzeatv', 'rmyxgdlihccskunpfwwjqopatv', 'rmyxgxlihczskuupfwbjqoeahv', 'rmyxgdcihcbskungfwbjqoeatv', 'tmyxgdlihczskunpfwbjmoeftv', 'rkyxgdlioczskmnpfwbjqoeatv', 'rmyxgdlrhczskulpfwbjaoeatv', 'rmysgdlihczikunphwbjqoeatv', 'rmyxgdlihczskuvpfwbjqoeyty', 'fmyxgdlihczscunpfqbjqoeatv', 'rfyxgdlihzzrkunpfwbjqoeatv', 'rmyxgdlikczskunpfwbjqolath', 'rmyxqdlihjzskunpfwbjqoeamv', 'rmuxodiihczskunpfwbjqoeatv', 'rmyygdliucuskunpfwbjqoeatv', 'rmyxgdliwczskuppawbjqoeatv', 'rmyxgdlihczskunprwbjqgehtv', 'imyvgdlihczskunpfwbjqouatv', 'rgyxgdluhczskunpflbjqoeatv', 'rmgxgdlihczsdunpfwwjqoeatv', 'gdyxgdlihczskunpfwbjqoeavv', 'rmyxgdlihczskunpfwljjoektv', 'rmexgdlihczskunpfwxjqoeytv', 'rmyxqdlihcyskuwpfwbjqoeatv', 'rmyxgdlihczskunpfiyjqcebtv', 'amyngdlihczskunpfwbjqseatv', 'rmzxgdlihczykubpfwbjqoeatv', 'rmyxgdlihczhkuopfwbjsoeatv', 'rmyxgdlihczskunpfwbaqowztv', 'rmgxgdlihczslunpfwbjeoeatv', 'rmytgdlzhczskunrfwbjqoeatv', 'rmyxgdtihczskunafobjqoeatv', 'rmyxgdlihczskuflfbbjqoeatv', 'rmdxgdlihczskunpfwbjqoealj', 'rbyxgdlihczskuppdwbjqoeatv', 'rmyxhdiihcwskunpfwbjqoeatv', 'rmmggdlfhczskunpfwbjqoeatv', 'rmbxgblihczskuypfwbjqoeatv', 'rmyxgslihczsjunpjwbjqoeatv', 'rmyxgdlohczsaunpfwbjboeatv', 'rmaxgdhihczskunpfwbjooeatv', 'rmyxidlihczskunpfgbuqoeatv', 'rmyxgdlihfzckznpfwbjqoeatv', 'rmaqgdpihczskunpfwbjqoeatv', 'rmyvgdlirczskunpfobjqoeatv', 'rmdxgdlihczlkunpxwbjqoeatv', 
'rmyxgdlihczseunpfwbjvdeatv', 'rmyxgdlihczskuhpfwbjqneath', 'rmyxrdlihciskunpfwbjqoratv', 'rmyxgdmihczsqunpftbjqoeatv', 'rmyxgdlbhczskulpfbbjqoeatv', 'rmoxgdlihczskunpfwbjqoeesv', 'rmyxgdlihczskuijfwejqoeatv', 'rmyxgdlihczskunpfwnkqoxatv', 'rmyxgdvihmzskuupfwbjqoeatv', 'rkyxedlihczskunpfcbjqoeatv', 'rmyxgdjihczskunprwbjqieatv', 'omyxgqgihczskunpfwbjqoeatv', 'rmyxydlihczskunpfwkjqoentv', 'rmbxgdlicczskunpfwbjqteatv', 'emyxgdlihczskugpfwbjqneatv', 'dmyxgflihczskunpfwbjqjeatv', 'umyxgdlihczskunpfwbjloextv', 'rmyxgdlihczsbunpfwbyqpeatv', 'rmyxgdrihczsvunpcwbjqoeatv', 'qmyxgdlihcwsknnpfwbjqoeatv', 'ymyxgdlihczskunpfsbjqowatv', 'rmyxgdlbhczskunpnvbjqoeatv', 'rmyxfdlixczskunpfwbjqoertv', 'rmyygdlihszrkunpfwbjqoeatv', 'rmyxgxlihcpskunpfwbjqoeanv', 'rmyxgdlihczskjnpfwbjqoprtv', 'rmyxgdlisczfkunpfwbjqoeath', 'rmyxgdlihczskunpfkbjqoeaji', 'rmyxgylihczskunpfwbfqoeatl', 'rmsxgdbihczskunpfwtjqoeatv', 'smyxgdlihczskunpfwbjqcwatv', 'rmyxgdlihczskunppjljqoeatv', 'rmyxgdlihczskulpfdbjooeatv', 'rmyxgdlihczskunpfibjqcebtv', 'rmyxadlihczskunpgwbjyoeatv', 'rmyxgdlihczdkunpvwbjqoeytv', 'rmyxgdlihcvskunpfwbjxohatv', 'rmyxgplihczskunpfgbjqoeauv', 'rmyxgdlihcysrunmfwbjqoeatv', 'rmyygdlihczskunpfwbjqvewtv', 'rmyxgdlihczsmunpfwdjnoeatv', 'rmyxgdbibczskunpfwbjuoeatv', 'rmyfgdlihczskubpfwbjqoeatp', 'rmyxgdlihczskuopfzijqoeatv', 'rmyqgdlihczskunpwwbjqoeanv', 'imyxgdlihczskunpfwbjqoqytv', 'rmyxgdlixcoskbnpfwbjqoeatv', 'rmyxgrlihccskunpfwbjqteatv', 'rdyxgdlihcpskunpfwbjqoratv', 'rmyxgdlihkzskunpfwbjmoeatj', 'rmyxgslihczskcnpfjbjqoeatv', 'rmyxgdlihczsqunqfwdjqoeatv', 'rjyxgdlyhczbkunpfwbjqoeatv', 'rmyxudlihczjkunpfwbjqzeatv']
# Checksum over the box IDs (Advent of Code 2018, day 2 style):
# count IDs containing some letter exactly twice and IDs containing some
# letter exactly three times, then multiply the two counts.
num_of_two = 0
num_of_three = 0
x = 0  # 1-based index of the current word, for the progress printout
for word in IDs:
    x = x + 1
    two = False    # this word already counted towards num_of_two
    three = False  # this word already counted towards num_of_three
    for letter in word:
        # (redundant `if letter in word` guard removed -- letter is
        # always drawn from word)
        if word.count(letter) == 2 and not two:
            print(str(x) + ' num word. ' + letter + ' occurs 2 times in ' + word)
            num_of_two = num_of_two + 1
            two = True
        if word.count(letter) == 3 and not three:
            print(str(x) + ' num word. ' + letter + ' occurs 3 times in ' + word)
            num_of_three = num_of_three + 1
            three = True
        if two and three:
            break
print('The number 2 occured: ' + str(num_of_two) + ' times')
print('The number 3 occured: ' + str(num_of_three) + ' times')
# BUG FIX: the checksum is twos * threes; the original multiplied
# num_of_three by itself.
total = num_of_two * num_of_three
print(total)
|
983,168 | b373b0eb9bcc1233cf3b57bc2cf064442ffa4440 | '''
Created on Sep 22, 2019
@author: anwarul azim
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
# Open the demo shop, go to the Women category and tick the first
# layered-navigation category filter.
driver = webdriver.Chrome()
driver.get('http://automationpractice.com/')
driver.maximize_window()
driver.find_element(By.LINK_TEXT, 'Women').click()
category_filter = driver.find_element(By.XPATH, ".//*[@id='layered_category_8']")
category_filter.click()
|
983,169 | fc3a1cba773c82ed73ebae9a0d3c73fc9692c484 | """
URL <-> resource conversion.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jun 28, 2011.
"""
from pyparsing import ParseException
from pyramid.compat import url_unquote
from pyramid.compat import urlparse
from pyramid.threadlocal import get_current_registry
from pyramid.traversal import find_resource
from pyramid.traversal import traversal_path
from everest.compat import parse_qsl
from everest.interfaces import IResourceUrlConverter
from everest.querying.base import EXPRESSION_KINDS
from everest.querying.filterparser import parse_filter
from everest.querying.interfaces import IFilterSpecificationVisitor
from everest.querying.interfaces import IOrderSpecificationVisitor
from everest.querying.orderparser import parse_order
from everest.querying.refsparser import parse_refs
from everest.resources.interfaces import ICollectionResource
from everest.resources.interfaces import IMemberResource
from everest.resources.interfaces import IResource
from everest.resources.utils import get_root_collection
from zope.interface import implementer # pylint: disable=E0611,F0401
from zope.interface import providedBy as provided_by # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['ResourceUrlConverter',
'UrlPartsConverter',
]
@implementer(IResourceUrlConverter)
class ResourceUrlConverter(object):
    """
    Performs URL <-> resource instance conversion.

    See http://en.wikipedia.org/wiki/Query_string for information on characters
    supported in query strings.
    """

    def __init__(self, request):
        # The request is needed for access to app URL, registry, traversal.
        self.__request = request

    def url_to_resource(self, url):
        """
        Returns the resource that is addressed by the given URL.

        :param str url: URL to convert
        :return: member or collection resource
        :raises ValueError: If traversal resolves the path to something
            that is neither a member nor a collection resource.
        :note: If the query string in the URL has multiple values for a
            query parameter, the last definition in the query string wins.
        """
        parsed = urlparse.urlparse(url)
        parsed_path = parsed.path # namedtuple problem pylint: disable=E1101
        # Resolve the path part through Pyramid traversal from the root.
        rc = find_resource(self.__request.root, traversal_path(parsed_path))
        if ICollectionResource in provided_by(rc):
            # In case we found a collection, we have to filter, order, slice.
            parsed_query = parsed.query # namedtuple problem pylint: disable=E1101
            # dict() keeps only the last value of a repeated query parameter.
            params = dict(parse_qsl(parsed_query))
            filter_string = params.get('q')
            if not filter_string is None:
                rc.filter = \
                    UrlPartsConverter.make_filter_specification(filter_string)
            order_string = params.get('sort')
            if not order_string is None:
                rc.order = \
                    UrlPartsConverter.make_order_specification(order_string)
            start_string = params.get('start')
            size_string = params.get('size')
            # Slicing requires *both* start and size to be present.
            if not (start_string is None or size_string is None):
                rc.slice = \
                    UrlPartsConverter.make_slice_key(start_string, size_string)
        elif not IMemberResource in provided_by(rc):
            raise ValueError('Traversal found non-resource object "%s".' % rc)
        return rc

    def resource_to_url(self, resource, quote=False):
        """
        Returns the URL for the given resource.

        :param resource: Resource to create a URL for.
        :param bool quote: If set, the URL returned will be quoted.
        :raises TypeError: If the given object is not a resource.
        :raises ValueError: If the given resource is floating (i.e., has
            the parent attribute set to `None`)
        """
        ifc = provided_by(resource)
        if not IResource in ifc:
            raise TypeError('Can not generate URL for non-resource "%s".'
                            % resource)
        elif resource.__parent__ is None:
            raise ValueError('Can not generate URL for floating resource '
                             '"%s".' % resource)
        if ICollectionResource in ifc:
            # Encode the collection's filter/order/slice state back into
            # query parameters.
            query = {}
            if not resource.filter is None:
                query['q'] = \
                    UrlPartsConverter.make_filter_string(resource.filter)
            if not resource.order is None:
                query['sort'] = \
                    UrlPartsConverter.make_order_string(resource.order)
            if not resource.slice is None:
                query['start'], query['size'] = \
                    UrlPartsConverter.make_slice_strings(resource.slice)
            if query != {}:
                options = dict(query=query)
            else:
                options = dict()
            if not resource.is_root_collection:
                # For nested collections, we check if the referenced root
                # collection is exposed (i.e., has the service as parent).
                # If yes, we return an absolute URL, else a nested URL.
                root_coll = get_root_collection(resource)
                if not root_coll.has_parent:
                    url = self.__request.resource_url(resource)
                else:
                    url = self.__request.resource_url(root_coll, **options)
            else:
                url = self.__request.resource_url(resource, **options)
        else:
            if not resource.is_root_member:
                # For nested members, we check if the referenced root
                # collection is exposed (i.e., has the service as parent).
                # If yes, we return an absolute URL, else a nested URL.
                root_coll = get_root_collection(resource)
                if not root_coll.has_parent:
                    url = self.__request.resource_url(resource)
                else:
                    par_url = self.__request.resource_url(root_coll)
                    url = "%s%s/" % (par_url, resource.__name__)
            else:
                url = self.__request.resource_url(resource)
        if not quote:
            # By default hand back the unquoted form of the generated URL.
            url = url_unquote(url)
        return url
class UrlPartsConverter(object):
    """
    Helper class providing functionality to convert parts of a URL to
    specifications and vice versa.
    """

    @classmethod
    def make_filter_specification(cls, filter_string):
        """
        Converts the given CQL filter expression into a filter specification.
        """
        try:
            return parse_filter(filter_string)
        except ParseException as err:
            raise ValueError('Expression parameters have errors. %s' % err)

    @classmethod
    def make_filter_string(cls, filter_specification):
        """
        Converts the given filter specification to a CQL filter expression.
        """
        reg = get_current_registry()
        visitor_factory = reg.getUtility(IFilterSpecificationVisitor,
                                         name=EXPRESSION_KINDS.CQL)
        vst = visitor_factory()
        filter_specification.accept(vst)
        return str(vst.expression)

    @classmethod
    def make_order_specification(cls, order_string):
        """
        Converts the given CQL sort expression to a order specification.
        """
        try:
            return parse_order(order_string)
        except ParseException as err:
            raise ValueError('Expression parameters have errors. %s' % err)

    @classmethod
    def make_order_string(cls, order_specification):
        """
        Converts the given order specification to a CQL order expression.
        """
        reg = get_current_registry()
        visitor_factory = reg.getUtility(IOrderSpecificationVisitor,
                                         name=EXPRESSION_KINDS.CQL)
        vst = visitor_factory()
        order_specification.accept(vst)
        return str(vst.expression)

    @classmethod
    def make_slice_key(cls, start_string, size_string):
        """
        Converts the given start and size query parts to a slice key.

        :return: slice key
        :rtype: slice
        """
        try:
            offset = int(start_string)
        except ValueError:
            raise ValueError('Query parameter "start" must be a number.')
        if offset < 0:
            raise ValueError('Query parameter "start" must be zero or '
                             'a positive number.')
        try:
            count = int(size_string)
        except ValueError:
            raise ValueError('Query parameter "size" must be a number.')
        if count < 1:
            raise ValueError('Query parameter "size" must be a positive '
                             'number.')
        return slice(offset, offset + count)

    @classmethod
    def make_slice_strings(cls, slice_key):
        """
        Converts the given slice key to start and size query parts.
        """
        first = slice_key.start
        return (str(first), str(slice_key.stop - first))

    @classmethod
    def make_refs_options(cls, refs_string):
        """
        Converts the given CQL resource references string to a dictionary of
        attribute representer options.
        """
        try:
            return parse_refs(refs_string)
        except ParseException as err:
            raise ValueError('Refs string has errors. %s' % err)
|
983,170 | 4312d200e0540128cca265e40414e2cbb9d55a23 | """
1、 The histogram of OVI column density logN for HVCs.
2、最终得到的图片为:snapshot_155.hdf5,多谱线拟合, VLSR>100km s,b包含仪器, N vs Doppler b,Sembach OVI HVCs Column density histgram only fitting value.pdf
"""
import h5py
import numpy as np
import math
import scipy.interpolate
#import select_snapshot_number
from decimal import *
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from astropy.modeling import models, fitting

# Physical constants in CGS units.
PROTONMASS = 1.67262178e-24
MSUN = 1.989e33
MPC = 3.085678e24
KPC = 3.085678e21
ECHARGE = 4.80320425e-10 # 3.0e9 ?
EMASS = 9.10938215e-28
CLIGHT = 2.99792458e10
kB=1.3806505e-16 # Boltzmann constant in CGS units: k = (1.38e-23)*(e3)*(e4)
h=4.1356676969e-15 # Planck constant, in eV*s

# OVI column densities (logN) of the Sembach HVC sample, used as the
# observational comparison histogram.
OVI_N_paper=np.array([13.80, 13.57, 14.24, 13.76, 13.55, 13.91, 14.06, 13.85, 14.38, 14.05, 13.83, 14.14, 13.81, 14.29, 14.41, 14.44, 14.47, 14.18, 14.22, 14.22, 14.13, 14.20, 14.00, 14.19, 14.16, 13.24, 13.72, 13.87, 14.05, 13.23, 13.88, 13.30, 13.87, 14.14, 13.67, 13.44, 13.67, 13.72, 13.06, 14.28, 14.17, 13.92, 13.96, 14.14, 14.12, 13.88, 14.08, 14.59, 14.28, 14.35, 14.18, 13.64, 13.58, 13.81, 13.83, 13.51, 14.19, 14.28, 14.30, 13.75, 13.97, 13.75, 13.96, 14.12, 13.28, 13.97, 14.10, 13.98, 13.68, 13.78, 14.31, 14.13, 14.25, 13.85, 14.44, 14.47, 13.17, 13.52, 14.33, 14.18, 13.95, 13.45, 13.42, 13.92])
plt.style.use('classic')
plt.ylim(0,0.2)
plt.xlabel("HVCs log$N$(cm$^{-2}$)", fontsize = 20)
plt.ylabel("Frequency", fontsize = 20)
n, bins, patche = plt.hist(OVI_N_paper, 15, facecolor='blue', alpha=0.6, density=True,label = 'Sembach Samples') # n = bar heights, bins = left bin edges (one more entry than n)
print(np.array(OVI_N_paper).min())# 13.06
print(np.array(OVI_N_paper).max())# 14.59
# Rescale the bar heights so they sum to 1 (frequency, not density).
for item in patche:
    item.set_height(item.get_height()/sum(n))
#hl=plt.legend(loc='upper right',frameon=False,fontsize='xx-large')
# On drawing vertical lines see: https://www.cnblogs.com/onemorepoint/p/7484210.html
median_Sembach=np.median(OVI_N_paper)
print(median_Sembach) #13.97
plt.vlines(median_Sembach, 0, 0.7, colors = "blue", linestyles = "dotted",label='Sembach Median')
#hl=plt.legend(loc='upper right',frameon=False,fontsize='xx-large')
# Simulated HVC column densities from the multi-line fit of snapshot 155.
data_HVCs=np.loadtxt("snapshot_155.hdf5,多谱线拟合, VLSR>100km s,b包含仪器, N vs Doppler b.txt")
log_OVI_N_HVCs=data_HVCs[:,1]
n, bins, patche = plt.hist(log_OVI_N_HVCs, 15, facecolor='green', alpha=0.8, density=True, label = 'Our Simulation') # n = bar heights, bins = left bin edges (one more entry than n)
print(np.array(log_OVI_N_HVCs).min())# 13.267167470758285
print(np.array(log_OVI_N_HVCs).max())# 14.750482028822288
# Rescale the bar heights so they sum to 1 (frequency, not density).
for item in patche:
    item.set_height(item.get_height()/sum(n))
#hl=plt.legend(loc='upper right',frameon=False,fontsize='xx-large')
# On drawing vertical lines see: https://www.cnblogs.com/onemorepoint/p/7484210.html
median_ours=np.median(log_OVI_N_HVCs)
print(median_ours) #13.728546572134842
plt.vlines(median_ours, 0, 0.7, colors = "green", linestyles = "dashed",label='Our Simulation Median',alpha=1)
hl=plt.legend(loc='upper left',frameon=False,fontsize='large')
plt.title("multiple $N$, Our median logN=%.2f cm$^{-2}$, Sembach median logN=%.2f cm$^{-2}$"%(median_ours, median_Sembach), fontsize='medium')
plt.savefig("snapshot_155.hdf5,多谱线拟合, VLSR>100km s,b包含仪器, N vs Doppler b,Sembach OVI HVCs Column density histgram only fitting value.pdf",format='pdf', dpi=1000)
|
983,171 | 264a255696acdb5b70a343ddca20bfda80b65fde | import sys
import numpy as np
from matplotlib import pyplot
from takemusic.common import fileio
# Read the input wave file (mono) named on the command line.
samplerate, sound = fileio.read_wave(sys.argv[1], nchannels=1)
# Analysis parameters.
nsamples_per_sec = 100  # analysis frames per second
slidewidth = int(samplerate/nsamples_per_sec)  # hop size in samples
windowsize = 2048  # FFT window length in samples
cutfreq = 120 #in hz
cutpoint = int(cutfreq*windowsize/samplerate)  # FFT bin index for cutfreq
threshold = 0.1  # bass-energy ratio above which a frame is flagged
count_min = 15   # minimum frames between flagged events (debounce)
point = 0        # current sample offset into the sound
bass_count = [0] * nsamples_per_sec  # histogram of gaps (frames) between events
count = 0        # frames since the last flagged event
bass_list = []   # NOTE(review): unused; only the commented-out plot below reads it
energy_ratio_max = 0.1
energy_ratio_min = 0.001
energy_cofficient = 1.0/energy_ratio_min
r = int(energy_ratio_max/energy_ratio_min)
energy_ratio_list = [0] * r  # histogram of observed bass-energy ratios
while(point < sound.shape[0]-windowsize):
    # Magnitude spectrum of the current window.
    ftdata = np.abs(np.fft.fft(sound[point:point+windowsize]))
    all_energy = np.sum(ftdata)
    if(all_energy == 0.0):
        # Silent window -- skip ahead to avoid division by zero.
        point += slidewidth
        continue
    # Energy in the bins below the cut-off frequency.
    bass = np.sum(ftdata[:cutpoint])
    sys.stdout.write("{:<20} ".format(bass))
    energy_ratio = float(bass)/all_energy
    if(energy_ratio > threshold and count > count_min):
        # Bass spike (presumably a beat): record the gap since the last one.
        sys.stdout.write(" * {}".format(count))
        if(count < nsamples_per_sec):
            bass_count[count] += 1
        count = 0
    if(energy_ratio < energy_ratio_max):
        energy_ratio_list[int(energy_ratio*energy_cofficient)] += 1
    sys.stdout.write('\n')
    count += 1
    point += slidewidth
#print("on beats:{}".format(float(energy_ratio_sum_on_beats)/on_beats_count))
# Most common gap between flagged events, in frames.
print("max:{}".format(np.argmax(bass_count)))
# Top plot: distribution of bass-energy ratios; bottom: gap histogram.
pyplot.subplot(211)
pyplot.plot(np.arange(len(energy_ratio_list))/energy_cofficient,
            energy_ratio_list)
pyplot.subplot(212)
pyplot.plot(np.arange(len(bass_count)), bass_count)
#b = bass_list[int(sys.argv[2]):int(sys.argv[3])]
#pyplot.plot(range(len(b)), b)
pyplot.show()
|
983,172 | 0f5ece86b15f28b9ff1a95f285feb8fe6bff052f | from app import db
from app.tujuanex.models import User
from flask import Blueprint, jsonify, redirect, request, url_for
from flask_jwt_extended import (create_access_token, get_jwt_identity,
jwt_required)
auth = Blueprint('auth', __name__)


@auth.route("/login", methods=['GET', 'POST'])
def login():
    """Validate credentials from a JSON body and return a JWT access token."""
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    username = request.json.get("username", None)
    password = request.json.get("password", None)
    account = User.query.filter_by(username=username).first()
    # Same message for unknown user and wrong password, so the endpoint
    # does not reveal which usernames exist.
    if account is None or not account.verify_password(password):
        return jsonify({"msg": "Bad username or password"}), 401
    return jsonify(access_token=create_access_token(identity=username)), 200
@auth.route("/register", methods=['GET', 'POST'])
def register():
    """Create a new user account from a JSON body.

    Expects ``username``, ``password`` and ``email``.  ``phone`` is read
    but currently not persisted.

    FIXES: the two duplicate-account responses previously returned an
    implicit HTTP 200 (inconsistent with every other error path) and a
    failed commit left the SQLAlchemy session in a dirty state.
    """
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    # get the username and password from the json body
    username = request.json.get("username", None)
    password = request.json.get("password", None)
    email = request.json.get("email", None)
    # NOTE(review): unused -- confirm whether User should store a phone number.
    phone_number = request.json.get("phone", None)
    if not username:
        return jsonify({"msg": "Missing username parameter"}), 400
    if not password:
        return jsonify({"msg": "Missing password parameter"}), 400
    if not email:
        return jsonify({"msg": "Missing email parameter"}), 400
    user = User.query.filter_by(username=username).first()
    if user is not None:
        return jsonify({"msg": "username already exists"}), 409
    user = User.query.filter_by(email=email).first()
    if user is not None:
        return jsonify({"msg": "email already registered"}), 409
    # register the user
    user = User(username=username, email=email)
    user.password = password
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        # Roll back so the session remains usable after a failed commit.
        db.session.rollback()
        return jsonify({"msg": str(e)}), 500
    return jsonify({"msg": "user created successfully"}), 201
@auth.route("/forgot-password", methods=['GET', 'POST'])
def forgot_password():
    """Start a password reset for the account matching the posted email.

    Token generation and the actual email delivery are not implemented yet.
    """
    if request.method != 'POST':
        return jsonify({"msg": "invalid request method"}), 401
    email = request.json.get("email", None)
    if not email:
        return jsonify({"msg": "email is required"}), 500
    user = User.query.filter_by(email=email).first()
    if not user:
        return jsonify({"msg": "user does not exists"}), 404
    return jsonify({"msg": "an email with password reset link has been sent to you"})
@auth.route("/reset-password/<token>",methods=['GET','POST'])
def reset_password(token):
    """Complete a password reset identified by *token* (stub).

    NOTE(review): the token is not validated and no password is changed
    yet -- this endpoint currently always reports success.
    """
    #TODO Check the token if its valid
    user_id = token #get user id based on token
    return jsonify({"msg":"password reset successfully"})
|
983,173 | ccfdc3c6da4a1beb3b5fb8f8a584b362daea5860 | #!/usr/bin/python
'''
Implement USB HIDAPI interface to Dream Cheeky line of USB controlled
missile launchers.
10/29/2020
'''
import hid
import os
import time
# Dream Cheeky USB Missile Launcher vendor and product ID
USB_VID = 0x0A81
USB_PID = 0x0701
# constants
WRITE_VALUE = 0x01  # first byte of every write (presumably the HID report ID - TODO confirm)
# command byte (second byte of each write) selecting the motion
ROTATE_CW_CMD = 0x08
ROTATE_CCW_CMD = 0x04
ELEV_UP_CMD = 0x02
ELEV_DOWN_CMD = 0x01
FIRE_CMD = 0x10
# received status from device - bit flags in the single status byte
STATUS_OK = 0x00
STATUS_LIMIT_ELEV_DOWN = 0x01
STATUS_LIMIT_ELEV_UP = 0x02
STATUS_LIMIT_ELEV = (STATUS_LIMIT_ELEV_UP | STATUS_LIMIT_ELEV_DOWN)
STATUS_LIMIT_ROTATE_CCW = 0x04
STATUS_LIMIT_ROTATE_CW = 0x08
STATUS_LIMIT_ROTATE = (STATUS_LIMIT_ROTATE_CCW | STATUS_LIMIT_ROTATE_CW)
STATUS_FIRE_DONE = 0x10  # set by the device when a fire cycle completes
# continue command - keeps the current motion running
CONTINUE_CMD = 0x40
# stop command - halts the current motion
STOP_CMD = 0x20
def printStatus(status):
    """Print the name of the first limit flag set in *status*, if any.

    Mirrors the original elif chain: only the highest-priority flag
    (elevation before rotation) is reported.
    """
    flag_labels = (
        (STATUS_LIMIT_ELEV_DOWN, 'STATUS_LIMIT_ELEV_DOWN'),
        (STATUS_LIMIT_ELEV_UP, 'STATUS_LIMIT_ELEV_UP'),
        (STATUS_LIMIT_ROTATE_CCW, 'STATUS_LIMIT_ROTATE_CCW'),
        (STATUS_LIMIT_ROTATE_CW, 'STATUS_LIMIT_ROTATE_CW'),
    )
    for mask, label in flag_labels:
        if status & mask:
            print(label)
            break
def checkLimit(device, checkStatus):
    """
    Poll one status byte from the device and report whether it is safe
    to keep driving the motor.

    Args:
        device: open hidapi device handle (non-blocking)
        checkStatus: bitmask of limit flags that should halt the motion

    Returns:
        False when a flagged limit was reported; True otherwise,
        including when the non-blocking read returned no data.
    """
    report = device.read(1)
    if not report:
        return True
    print(report)
    limits = report[0]
    printStatus(limits)
    return not (checkStatus & limits)
def execute(device, cmd, status, runTime):
    """
    Drive one motion command for up to *runTime* seconds, stopping early
    when the device reports the corresponding motor limit.

    Args:
        device: open hidapi device handle
        cmd: motion command byte (ROTATE_*/ELEV_* constants)
        status: limit bitmask to check after every CONTINUE write
        runTime: maximum motion time in seconds
    """
    device.write([WRITE_VALUE, cmd])
    deadline = time.time() + runTime
    while time.time() < deadline:
        device.write([WRITE_VALUE, CONTINUE_CMD])
        if not checkLimit(device, status):
            print('WARN execute check limit reached')
            break
    # always send an explicit stop, whether we timed out or hit a limit
    device.write([WRITE_VALUE, STOP_CMD])
def fireMissile(device, runTime = 0.1):
    """
    Fire ze missile.

    Primes the launcher with a burst of FIRE/CONTINUE writes, then keeps
    sending CONTINUE until the device reports STATUS_FIRE_DONE or
    *runTime* seconds elapse.

    Args:
        device: open hidapi device handle
        runTime: maximum number of seconds to wait for the fire-done status
    """
    device.write([WRITE_VALUE, FIRE_CMD])
    device.write([WRITE_VALUE, CONTINUE_CMD])
    time.sleep(0.01)
    # 7 runs - presumably pumps/primes the launcher mechanism; TODO confirm
    for _ in range(0, 7):
        device.write([WRITE_VALUE, FIRE_CMD])
        device.write([WRITE_VALUE, CONTINUE_CMD])
        device.write([WRITE_VALUE, CONTINUE_CMD])
        time.sleep(0.01)
    startTime = time.time()
    while (time.time() - startTime) < runTime:
        device.write([WRITE_VALUE, CONTINUE_CMD])
        d = device.read(1)
        if d:
            print(d)
            status = d[0]
            if status & STATUS_FIRE_DONE:
                print('fire status exit found')
                # was the magic number 0x20 - use the named stop command
                device.write([WRITE_VALUE, STOP_CMD])
                break
        time.sleep(0.01)
    # flush a few trailing CONTINUEs so the device settles
    for _ in range(0, 10):
        device.write([WRITE_VALUE, CONTINUE_CMD])
def rotateCW(device, runTime = 0.1):
    """
    Rotate the launcher clockwise direction.

    NOTE(review): this sends ROTATE_CCW_CMD while rotateCCW sends
    ROTATE_CW_CMD - possibly the device's notion of CW is mirrored from
    the user's; confirm against hardware before "fixing" the swap.
    """
    return execute(device, ROTATE_CCW_CMD, STATUS_LIMIT_ROTATE, runTime)
def rotateCCW(device, runTime = 0.1):
    """
    Rotate the launcher counter clockwise direction.

    NOTE(review): sends ROTATE_CW_CMD (see the matching note on
    rotateCW) - presumed deliberate for this hardware; TODO confirm.
    """
    return execute(device, ROTATE_CW_CMD, STATUS_LIMIT_ROTATE, runTime)
def elevateUp(device, runTime = 0.1):
    """
    Tilt the launcher upward for up to *runTime* seconds, stopping early
    at the upper elevation limit.

    Args:
        device: open hidapi device handle
        runTime: maximum motion time in seconds
    """
    return execute(device, ELEV_UP_CMD, STATUS_LIMIT_ELEV_UP, runTime)
def elevateDown(device, runTime = 0.1):
    """
    Elevate the launcher by moving DOWN direction.

    Args:
        device: open hidapi device handle
        runTime: maximum motion time in seconds
    """
    return execute(device, ELEV_DOWN_CMD, STATUS_LIMIT_ELEV_DOWN, runTime)
def init():
    """
    Open a non-blocking hidapi connection to the launcher.

    Returns:
        an open hid.device handle, or None when the device could not be
        reached.
    """
    try:
        handle = hid.device()
        handle.open(USB_VID, USB_PID)
        handle.set_nonblocking(1)
        return handle
    except IOError as ex:
        print('ERROR: could not establish connection to device')
        print(ex)
        return None
def exit(h):
    """
    Close the USB connection to the device.

    Args:
        h: open hid.device handle

    Returns:
        True on success, False when closing raised an IOError.
    """
    # NOTE: this shadows the builtin exit() at module level by design of
    # the original API; kept for interface compatibility.
    try:
        h.close()
        return True
    except IOError as ex:
        print('ERROR: could not close hidapi device connection')
        print(ex)
        return False
def testMove():
    """Smoke-test the four movement helpers on the hard-coded device,
    printing its identification strings first."""
    try:
        print("Opening the device")
        dev = hid.device()
        dev.open(USB_VID, USB_PID)
        for label, value in (("Manufacturer", dev.get_manufacturer_string()),
                             ("Product", dev.get_product_string()),
                             ("Serial No", dev.get_serial_number_string())):
            print("%s: %s" % (label, value))
        # enable non-blocking mode
        dev.set_nonblocking(1)
        # exercise each axis for one second
        print("Rotate CW")
        rotateCW(dev, 1.0)
        print("Rotate CCW")
        rotateCCW(dev, 1.0)
        print("Elevate Up")
        elevateUp(dev, 1.0)
        print("Elevate Down")
        elevateDown(dev, 1.0)
        time.sleep(2)
        print("Closing the device")
        dev.close()
    except IOError as ex:
        print(ex)
        print("You probably don't have the hard coded device. Update the hid.device line")
        print("in this script with one from the enumeration list output above and try again.")
    print("Done")
def testFire():
    """Smoke-test fireMissile on the hard-coded device, printing its
    identification strings first."""
    try:
        print("Opening the device")
        dev = hid.device()
        dev.open(USB_VID, USB_PID)
        for label, value in (("Manufacturer", dev.get_manufacturer_string()),
                             ("Product", dev.get_product_string()),
                             ("Serial No", dev.get_serial_number_string())):
            print("%s: %s" % (label, value))
        # enable non-blocking mode
        dev.set_nonblocking(1)
        # allow up to ten seconds for the fire sequence to complete
        fireMissile(dev, 10.0)
        print("Closing the device")
        dev.close()
    except IOError as ex:
        print(ex)
        print("You probably don't have the hard coded device. Update the hid.device line")
        print("in this script with one from the enumeration list output above and try again.")
    print("Done")
def listUsbHidDevices():
    """
    Print every attached USB HID device as sorted "key : value" lines,
    one blank line between devices.
    """
    for info in hid.enumerate():
        for key in sorted(info):
            print("%s : %s" % (key, info[key]))
        print()
# Script entry point: enumerate HID devices, then run the movement smoke
# test.  Actually firing is left commented out on purpose.
if __name__ == "__main__":
    listUsbHidDevices()
    testMove()
    # testFire()
983,174 | 4e674ca95ef50ac924ef673de3ecf33b7554910b | #!/usr/bin/env python
import time
import roslib; roslib.load_manifest('ur_modern_driver')
import rospy
import actionlib
import sys
import subprocess
import os
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from math import pi
# UR joint order expected by the follow_joint_trajectory controller
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
Q1 = [1.2563,-1.3635,2.2785,-2.4885,-1.5416,2.8602]
Q2 = [1.2563,-1.3629,2.2787,-2.4892,-1.5416,2.8602]
Q3 = [1.2563,-1.3623,2.2789,-2.49,-1.5416,2.8602]
Q4 = [1.2563,-1.3618,2.2791,-2.4907,-1.5416,2.8602]
Q5 = [1.2563,-1.3612,2.2793,-2.4915,-1.5416,2.8602]
Q6 = [1.2563,-1.3606,2.2795,-2.4922,-1.5416,2.8603]
Q7 = [1.2563,-1.3601,2.2797,-2.493,-1.5416,2.8603]
Q8 = [1.2563,-1.3595,2.2798,-2.4937,-1.5416,2.8603]
Q9 = [1.2563,-1.3589,2.28,-2.4945,-1.5416,2.8603]
Q10 = [1.2563,-1.3584,2.2802,-2.4952,-1.5416,2.8603]
Q11 = [1.2563,-1.3578,2.2804,-2.496,-1.5416,2.8603]
Q12 = [1.2563,-1.3573,2.2806,-2.4967,-1.5416,2.8603]
Q13 = [1.2563,-1.3567,2.2808,-2.4975,-1.5416,2.8603]
Q14 = [1.2563,-1.3561,2.281,-2.4983,-1.5416,2.8603]
Q15 = [1.2563,-1.3556,2.2811,-2.499,-1.5416,2.8603]
Q16 = [1.2563,-1.355,2.2813,-2.4998,-1.5416,2.8603]
Q17 = [1.2563,-1.3544,2.2815,-2.5005,-1.5416,2.8603]
Q18 = [1.2563,-1.3539,2.2817,-2.5013,-1.5416,2.8603]
Q19 = [1.2563,-1.3533,2.2819,-2.502,-1.5416,2.8603]
Q20 = [1.2563,-1.3527,2.2821,-2.5028,-1.5416,2.8603]
Q21 = [1.2563,-1.3521,2.2822,-2.5035,-1.5416,2.8603]
Q22 = [1.2563,-1.3516,2.2824,-2.5043,-1.5416,2.8603]
Q23 = [1.2563,-1.351,2.2826,-2.505,-1.5416,2.8603]
Q24 = [1.2563,-1.3504,2.2828,-2.5058,-1.5416,2.8603]
Q25 = [1.2563,-1.3499,2.283,-2.5065,-1.5416,2.8603]
Q26 = [1.2563,-1.3493,2.2831,-2.5073,-1.5416,2.8603]
Q27 = [1.2563,-1.3487,2.2833,-2.508,-1.5416,2.8603]
Q28 = [1.2563,-1.3482,2.2835,-2.5088,-1.5416,2.8603]
Q29 = [1.2563,-1.3476,2.2837,-2.5095,-1.5416,2.8603]
Q30 = [1.2563,-1.347,2.2839,-2.5103,-1.5416,2.8603]
Q31 = [1.2563,-1.3464,2.284,-2.511,-1.5416,2.8603]
Q32 = [1.2563,-1.3459,2.2842,-2.5118,-1.5416,2.8603]
Q33 = [1.2563,-1.3453,2.2844,-2.5125,-1.5416,2.8603]
Q34 = [1.2563,-1.3447,2.2846,-2.5133,-1.5416,2.8603]
Q35 = [1.2563,-1.3441,2.2848,-2.514,-1.5416,2.8603]
Q36 = [1.2563,-1.3436,2.2849,-2.5148,-1.5416,2.8603]
Q37 = [1.2563,-1.343,2.2851,-2.5155,-1.5416,2.8603]
Q38 = [1.2563,-1.3424,2.2853,-2.5163,-1.5416,2.8603]
Q39 = [1.2563,-1.3418,2.2855,-2.517,-1.5416,2.8603]
Q40 = [1.2563,-1.3413,2.2856,-2.5178,-1.5416,2.8603]
Q41 = [1.2563,-1.3407,2.2858,-2.5185,-1.5416,2.8603]
Q42 = [1.2563,-1.3401,2.286,-2.5193,-1.5416,2.8603]
Q43 = [1.2563,-1.3395,2.2862,-2.52,-1.5416,2.8603]
Q44 = [1.2563,-1.3389,2.2863,-2.5208,-1.5416,2.8603]
Q45 = [1.2563,-1.3384,2.2865,-2.5215,-1.5416,2.8603]
Q46 = [1.2563,-1.3378,2.2867,-2.5223,-1.5416,2.8603]
Q47 = [1.2563,-1.3372,2.2868,-2.523,-1.5416,2.8603]
Q48 = [1.2563,-1.3366,2.287,-2.5238,-1.5416,2.8603]
Q49 = [1.2563,-1.3361,2.2872,-2.5246,-1.5416,2.8603]
Q50 = [1.2563,-1.3355,2.2874,-2.5253,-1.5416,2.8603]
Q51 = [1.2563,-1.3349,2.2875,-2.5261,-1.5416,2.8603]
Q52 = [1.2563,-1.3343,2.2877,-2.5268,-1.5416,2.8603]
Q53 = [1.2563,-1.3337,2.2879,-2.5276,-1.5416,2.8603]
Q54 = [1.2563,-1.3331,2.288,-2.5283,-1.5416,2.8603]
Q55 = [1.2563,-1.3326,2.2882,-2.5291,-1.5416,2.8603]
Q56 = [1.2563,-1.332,2.2884,-2.5298,-1.5416,2.8603]
Q57 = [1.2563,-1.3314,2.2885,-2.5306,-1.5416,2.8603]
Q58 = [1.2563,-1.3308,2.2887,-2.5313,-1.5416,2.8603]
Q59 = [1.2563,-1.3302,2.2889,-2.5321,-1.5416,2.8603]
Q60 = [1.2563,-1.3296,2.289,-2.5328,-1.5416,2.8603]
Q61 = [1.2563,-1.3291,2.2892,-2.5336,-1.5416,2.8603]
Q62 = [1.2564,-1.3285,2.2894,-2.5343,-1.5416,2.8603]
Q63 = [1.2564,-1.3279,2.2895,-2.5351,-1.5416,2.8603]
Q64 = [1.2564,-1.3273,2.2897,-2.5358,-1.5416,2.8603]
Q65 = [1.2564,-1.3267,2.2899,-2.5366,-1.5416,2.8603]
Q66 = [1.2564,-1.3261,2.29,-2.5373,-1.5416,2.8603]
Q67 = [1.2564,-1.3255,2.2902,-2.5381,-1.5416,2.8603]
Q68 = [1.2564,-1.3249,2.2904,-2.5388,-1.5416,2.8603]
Q69 = [1.2564,-1.3244,2.2905,-2.5396,-1.5416,2.8603]
Q70 = [1.2564,-1.3238,2.2907,-2.5403,-1.5416,2.8603]
Q71 = [1.2564,-1.3232,2.2909,-2.5411,-1.5416,2.8603]
Q72 = [1.2564,-1.3226,2.291,-2.5418,-1.5416,2.8603]
Q73 = [1.2564,-1.322,2.2912,-2.5426,-1.5416,2.8603]
Q74 = [1.2564,-1.3214,2.2913,-2.5433,-1.5416,2.8603]
Q75 = [1.2564,-1.3208,2.2915,-2.5441,-1.5416,2.8603]
Q76 = [1.2564,-1.3202,2.2917,-2.5448,-1.5416,2.8603]
Q77 = [1.2564,-1.3196,2.2918,-2.5456,-1.5416,2.8603]
Q78 = [1.2564,-1.319,2.292,-2.5464,-1.5416,2.8603]
Q79 = [1.2564,-1.3185,2.2921,-2.5471,-1.5416,2.8603]
Q80 = [1.2564,-1.3179,2.2923,-2.5479,-1.5416,2.8603]
Q81 = [1.2564,-1.3173,2.2925,-2.5486,-1.5416,2.8603]
Q82 = [1.2564,-1.3167,2.2926,-2.5494,-1.5416,2.8603]
Q83 = [1.2564,-1.3161,2.2928,-2.5501,-1.5416,2.8603]
Q84 = [1.2564,-1.3155,2.2929,-2.5509,-1.5416,2.8603]
Q85 = [1.2564,-1.3149,2.2931,-2.5516,-1.5416,2.8603]
Q86 = [1.2564,-1.3143,2.2933,-2.5524,-1.5416,2.8603]
Q87 = [1.2564,-1.3137,2.2934,-2.5531,-1.5416,2.8603]
Q88 = [1.2564,-1.3131,2.2936,-2.5539,-1.5416,2.8603]
Q89 = [1.2564,-1.3125,2.2937,-2.5546,-1.5416,2.8603]
Q90 = [1.2564,-1.3119,2.2939,-2.5554,-1.5416,2.8603]
Q91 = [1.2564,-1.3113,2.294,-2.5561,-1.5416,2.8603]
Q92 = [1.2564,-1.3107,2.2942,-2.5569,-1.5416,2.8603]
Q93 = [1.2564,-1.3101,2.2943,-2.5576,-1.5416,2.8603]
Q94 = [1.2564,-1.3095,2.2945,-2.5584,-1.5416,2.8603]
Q95 = [1.2564,-1.3089,2.2946,-2.5591,-1.5416,2.8603]
Q96 = [1.2564,-1.3083,2.2948,-2.5599,-1.5416,2.8603]
Q97 = [1.2564,-1.3077,2.2949,-2.5606,-1.5416,2.8603]
Q98 = [1.2564,-1.3071,2.2951,-2.5614,-1.5416,2.8603]
Q99 = [1.2564,-1.3065,2.2953,-2.5621,-1.5416,2.8603]
Q100 = [1.2564,-1.3059,2.2954,-2.5629,-1.5416,2.8603]
Q101 = [1.2564,-1.3053,2.2956,-2.5636,-1.5416,2.8603]
# global action client, initialised in main()
client = None
def move():
    """Send the 101-waypoint trajectory (Q1..Q101) to the action server.

    The first trajectory point re-states the robot's current joint
    positions so the motion starts smoothly; the waypoints are then
    spaced 0.1 s apart starting at t=5 s, so Q101 is reached at t=15 s.
    Blocks until the action server reports a result.
    """
    global joints_pos
    g = FollowJointTrajectoryGoal()
    g.trajectory = JointTrajectory()
    g.trajectory.joint_names = JOINT_NAMES
    try:
        # seed the trajectory with the current pose to avoid a jump
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        points = [JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6,
                                       time_from_start=rospy.Duration(0.0))]
        # Q1..Q101 were previously listed out one line each; generate the
        # identical schedule (5.0, 5.1, ..., 15.0 s) in a loop instead.
        waypoints = [globals()['Q%d' % i] for i in range(1, 102)]
        for idx, q in enumerate(waypoints):
            points.append(JointTrajectoryPoint(
                positions=q, velocities=[0]*6,
                time_from_start=rospy.Duration(round(5.0 + 0.1 * idx, 1))))
        g.trajectory.points = points
        client.send_goal(g)
        client.wait_for_result()
    except KeyboardInterrupt:
        # stop the robot cleanly before propagating Ctrl-C
        client.cancel_goal()
        raise
def main():
    """Initialise the ROS node, connect to the arm_controller trajectory
    action server and run the scripted motion once."""
    global client
    try:
        rospy.init_node("simple_move", anonymous=True, disable_signals=True)
        client = actionlib.SimpleActionClient('arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
        print "Waiting for server..."
        client.wait_for_server()
        print "Connected to server"
        move()
        print "Trajectory finished"
    except KeyboardInterrupt:
        # signals are disabled above, so shut ROS down explicitly on Ctrl-C
        rospy.signal_shutdown("KeyboardInterrupt")
        raise
if __name__ == '__main__': main()
|
983,175 | bab5a83a3c9bbbdeed7f9ae6f855232cca02ccba | #!/usr/bin/python
import Image, ImageDraw, tempfile, random
def _show(im):
    """Save *im* to a temporary PNG file and open it with the default
    image viewer (Python 2 - uses the file() builtin)."""
    path = tempfile.mkstemp('.png')[1]
    im.save(file(path, 'w'))
    Image.open(path).show()
if __name__=="__main__":
    # Scatter up to `attempts` random horizontal/vertical line segments on
    # a binary canvas, rejecting candidates whose endpoints fall within
    # 3 px of an already-drawn line, then display and save the result.
    w = 800
    h = 600
    attempts = 1000
    mux = w/2
    muy = h/2
    sigma = 60
    rand = random.Random()
    im = Image.new('1', (w,h), 1)
    d = ImageDraw.Draw(im)
    for _ in range(attempts):
        tooclose = 0
        #x = round(rand.gauss(mux, sigma))
        #y = round(rand.gauss(muy, sigma))
        x = rand.randint(0, w-1)
        y = rand.randint(0, h-1)
        horizontal = rand.randint(0, 1)
        length = int(rand.gauss(100, 20))
        # NOTE(review): the %w / %h wrapping makes these ranges empty when
        # an endpoint is near the border, so edge lines skip the check.
        if horizontal:
            for j in range((x-3)%w, (x+3)%w):
                if im.getpixel((j, y)) == 0: tooclose = 1
            for j in range((x+length-3)%w, (x+length+3)%w):
                if im.getpixel((j, y)) == 0: tooclose = 1
        else:
            for j in range((y-3)%h, (y+3)%h):
                if im.getpixel((x, j)) == 0: tooclose = 1
            # BUG FIX: this range was ((y+len-3)%h, (y+len-3)%h) - always
            # empty, so the far endpoint of vertical lines was never checked
            for j in range((y+length-3)%h, (y+length+3)%h):
                if im.getpixel((x, j)) == 0: tooclose = 1
        if not tooclose:
            if horizontal:
                d.line([(x,y), (x+length, y)])
            else:
                d.line([(x,y), (x, y+length)])
    _show(im)
    im.save(file('g.png','w'))
|
983,176 | 347936ad1a4d1cdc043c51d02cf3b47e6f429496 | # -*- coding: utf-8 -*-
# Created by Roy Ledermüller with PyCharm Community Edition
# Created: 05.05.2017
# Project name: CAE-PA
# Tested with Python 3.5
from enum import Enum
from opcua import Client, ua, Node
import os, sys, time, json
import main.settings
import xml.etree.ElementTree as et
from main.xmlmodels import *
import threading
class OpcService:
    """
    Wraps one service node of an OPC UA server module.

    Mirrors the service's CurrentState via a data-change subscription and
    derives the set of state-machine commands (self.Methods) that are
    legal in the current state.
    """
    def __init__(self, node, client):
        # :param node: opcua Node of the service (child of 1:ServiceList)
        # :param client: connected opcua Client, used for the subscription
        self.node = node
        self.stateNode = self.node.get_child(["1:CurrentState"])
        self.client = client
        self.commands = node.get_child(["1:Commands"])
        self.name = str(node.get_display_name().Text.decode("utf-8", "ignore"))
        # map state nodes to OpcState values (STATE_MAP presumably comes in
        # via the * import from main.xmlmodels - TODO confirm).
        # NOTE(review): keys are NodeIds while setState() indexes the map
        # with the CurrentState *value* - this only works if CurrentState
        # holds NodeId values; verify against the server model.
        self.stateMap = {}
        for state in node.get_child(["1:States"]).get_children():
            self.stateMap[state.nodeid] = STATE_MAP[state.get_display_name().Text]
        # subscribe (200 ms publishing interval) so setState() runs on
        # every CurrentState change
        self.StateHandler = StateChangeHandler(self)
        self.sub = client.create_subscription(200, self.StateHandler)
        self.handle = self.sub.subscribe_data_change(self.stateNode)
        # cache the service parameters by display name for setParam()
        self.parameterNode = self.node.get_child(["1:ParameterList"])
        self.parameters = {}
        for p in self.parameterNode.get_children():
            self.parameters[p.get_display_name()] = p
    def setParam(self, name, value):
        # NOTE(review): keys were stored as get_display_name() results, so
        # *name* must be the same type (not a plain str) - confirm callers
        self.parameters[name].set_value(value)
    @property
    def State(self):
        # last state pushed by the subscription handler
        return self.__state
    def setState(self, nodeval):
        """Update the mirrored state and recompute the legal commands."""
        self.__state = self.stateMap[nodeval]
        # rebuild the list of commands valid in the new state
        # (RUN_STATES/NORMAL_STATES/ACTIVE_STATES/OpcState/OpcMethod are
        # presumably star-imported from main.xmlmodels - TODO confirm)
        self.Methods = []
        if self.__state in RUN_STATES:
            self.Methods.append(OpcMethod.HOLD)
        if self.__state in NORMAL_STATES:
            self.Methods.append(OpcMethod.ABORT)
        if self.__state in ACTIVE_STATES:
            self.Methods.append(OpcMethod.STOP)
        if self.__state == OpcState.IDLE:
            self.Methods.append(OpcMethod.START)
        elif self.__state == OpcState.RUNNING:
            self.Methods.append(OpcMethod.PAUSE)
        elif self.__state == OpcState.PAUSED:
            self.Methods.append(OpcMethod.RESUME)
        elif self.__state == OpcState.HELD:
            self.Methods.append(OpcMethod.UNHOLD)
        elif self.__state == OpcState.ABORTED:
            self.Methods.append(OpcMethod.CLEAR)
        elif self.__state == OpcState.ABORTED or self.__state == OpcState.STOPPED or self.__state == OpcState.COMPLETE:
            # NOTE(review): the ABORTED test here is dead code - ABORTED is
            # caught by the elif above, so it never offers RESET
            self.Methods.append(OpcMethod.RESET)
    def StateChange(self):
        """Pull CurrentState once and refresh the mirror (manual re-sync)."""
        nodeval = self.stateNode.get_value()
        self.setState(nodeval)
    def callMethod(self, method):
        # invoke the named state-machine command on the Commands node
        self.commands.call_method("1:"+method)
    def getMethods(self):
        # commands legal in the current state, as computed by setState()
        return self.Methods
class OpcClient(Client):
    """
    Default Client-> connects to a Server-Module with Services.

    Connects on construction and builds an OpcService wrapper for every
    child of <type>/ServiceList on the server.
    """
    def __init__(self, adress, type):
        '''
        :param adress: Server Adress (full opc.tcp endpoint URL)
        :param type: module/object name under 0:Objects (note: parameter
            shadows the builtin ``type``; kept for interface compatibility)
        '''
        super(OpcClient, self).__init__(adress)
        self.connect()
        self.root = self.get_root_node()
        self.ServiceList = {} # dict of OpcService wrappers, keyed by service name
        self.opcName = type
        # wrap every advertised service of this module
        for service in self.root.get_child(["0:Objects","1:"+type,"1:ServiceList"]).get_children():
            obj = OpcService(service, self)
            self.ServiceList[obj.name] = obj
    def getService(self, serviceName):
        # look up a wrapped service by its display name; raises KeyError
        # when the server does not offer it
        return self.ServiceList[serviceName]
    def __del__(self):
        # NOTE(review): disconnecting in __del__ can raise during
        # interpreter shutdown; an explicit close method would be safer
        self.disconnect()
class OpcPlant:
    """
    The OpcPlant contains all the Modules with their Services.
    """
    def __init__(self, nodes):
        # one connected OpcClient per module description; each entry in
        # *nodes* is a dict with 'name', 'adress' and 'port'
        self.parts = {
            cfg['name']: OpcClient("opc.tcp://" + cfg['adress'] + ":" + cfg['port'], cfg['name'])
            for cfg in nodes
        }
class StateChangeHandler(object):
    """
    opcua subscription handler that forwards CurrentState data-change
    notifications to the owning OpcService.

    These callbacks run on the client's receiving thread - keep them
    cheap; no slow or network operations in here.
    """
    def __init__(self, service):
        # the OpcService whose state mirror we keep up to date
        self.service = service
    def datachange_notification(self, node, val, data):
        # push the raw node value; the service maps it to an OpcState
        self.service.setState(val)
# Lifecycle states a recipe can be in while the handler executes it.
RecipeState = Enum('RecipeState', [('WAIT', 1), ('RUN', 2), ('FAILED', 3),
                                   ('ABORTED', 4), ('COMPLETED', 5)])
# Commands that can be issued against a running recipe.
RecipeCommand = Enum('RecipeCommand', [('START', 1), ('STOP', 2), ('PAUSE', 3)])
class RecipeElementThread(threading.Thread):
    """
    Executes a single recipe service element: calls its OPC UA method and
    polls the service state until it reaches a complete state, leaves the
    valid-state set, or the parent times out.

    The parent (RecipeTreeThread.executeService) waits on *condition*;
    this thread notifies it exactly once before returning.
    """
    def __init__(self,stdout, node, condition):
        # :param stdout: normally sys.stdout (kept but unused here)
        # :param node: XmlRecipeServiceInstance to execute; its .state is
        #     updated as the element progresses (RecipeElementState is
        #     presumably star-imported from main.xmlmodels - TODO confirm)
        # :param condition: threading.Condition the parent waits on
        threading.Thread.__init__(self)
        self.stdout = stdout
        self.stderr = None
        self.node = node
        self.node.state = RecipeElementState.WAITING
        self.condition = condition
    def run(self):
        print('start Element Thread, '+self.node.name+':'+self.node.methodName)
        # refuse to start unless the service is in a valid start state
        if self.node.opcServiceNode.State not in self.node.type.start:
            self.condition.acquire()
            self.node.state = RecipeElementState.ABORTED
            self.condition.notify() # wake the parent block handler thread
            self.condition.release()
            return
        self.node.state = RecipeElementState.RUNNING
        print('call Method')
        self.node.opcServiceNode.callMethod(self.node.methodName)
        # any state outside running/start/complete means the service failed
        validStates = self.node.type.running + self.node.type.start + self.node.type.complete
        # NOTE(review): busy-wait polling of the mirrored service state
        while self.node.opcServiceNode.State not in self.node.type.complete:
            stateCopy = self.node.opcServiceNode.State
            if stateCopy not in validStates:
                self.condition.acquire()
                print(self.node.opcServiceNode.State in self.node.type.complete)
                print(self.node.opcServiceNode.State in self.node.type.running)
                self.node.state = RecipeElementState.ABORTED
                self.condition.notify()
                self.condition.release() # wake the parent block handler thread
                return
        self.condition.acquire()
        self.node.state = RecipeElementState.COMPLETED
        self.condition.notify()
        self.condition.release() # wake the parent block handler thread
        print('end Element Thread, ' + self.node.name)
class RecipeTreeThread(threading.Thread):
    """
    Separated Thread iterating through the Block Elements.
    Waits till all parallel elements are finished.

    Walks an XmlRecipeBlock tree: serial blocks are executed in order,
    parallel blocks spawn one child RecipeTreeThread per child node, and
    service instances run via a RecipeElementThread with a timeout.
    """
    def __init__(self, stdout, recipeRoot, condition):
        """
        :param stdout: normally sys.stdout
        :param recipeRoot: Parent XmlNode
        :param condition: Thread.Condition to wait until a Thread is finished
        """
        threading.Thread.__init__(self)
        self.stdout = stdout
        self.stderr = None
        self.root = recipeRoot
        self.condition = condition
    def run(self):
        # execute the whole subtree, record the outcome on the root node,
        # then notify whoever waits on our condition
        print('start Tree Thread, '+self.root.name)
        self.root.state = RecipeElementState.RUNNING
        if self.executeServiceTree(self.root):
            self.root.state = RecipeElementState.COMPLETED
        else:
            self.root.state = RecipeElementState.ABORTED
        print('end Tree Thread, '+self.root.name)
        self.condition.acquire() # lock
        self.condition.notify()
        self.condition.release() # unlock
    def executeService(self, serviceNode):
        # Run one service element and wait (bounded by serviceNode.timeout)
        # for it to signal completion.
        # NOTE(review): returns False on failure but None (not True) on
        # success; callers check serviceNode.state instead of the return
        # value - confirm before relying on the result.
        # NOTE(review): `logger` is not defined in the visible imports -
        # presumably star-imported from main.xmlmodels; TODO confirm.
        condition = threading.Condition()
        condition.acquire() # lock
        re = RecipeElementThread(sys.stdout, serviceNode, condition)
        re.start()
        condition.wait(serviceNode.timeout) # unlock, relock on notify or timeout
        condition.release()
        if serviceNode.state != RecipeElementState.COMPLETED:
            serviceNode.state = RecipeElementState.ABORTED
            if re.is_alive():
                # woken by timeout, not by the element thread
                logger.error('TimeOut Thrown, MethodCall went too long')
                # TODO Set Kill instructions
                # re.stop()
            else:
                logger.error('Service State has not a valid CompleteState for the Method')
            return False
    def executeServiceTree(self, parentNode):
        """
        :param parentNode: XmlRecipeBlock
        :return: True, if all went good
        """
        # for ParallelBlocks, could be optimized if started directly
        # but Threadhandling differs (see executeService)
        if isinstance(parentNode, XmlRecipeServiceInstance):
            # degenerate case: the "tree" is a single service instance
            self.executeService(parentNode)
            if parentNode.state == RecipeElementState.ABORTED:
                self.state = parentNode.state
                return False
            else:
                return True
        for elementId in parentNode.sortList:
            node = parentNode.childs[elementId]
            if isinstance(node, XmlRecipeBlock):
                if node.blockType == 'ParallelerBlock':
                    # one child tree thread per parallel branch; the shared
                    # condition is notified once per finishing branch
                    threadCount = len(node.childs)
                    condition = threading.Condition()
                    condition.acquire() # lock
                    threads = []
                    for p in node.childs.values():
                        thread = RecipeTreeThread(self.stdout, p, condition)
                        threads.append(thread)
                        thread.start()
                    #wait for every block finished
                    while threadCount > 0:
                        condition.wait() # unlock, lock if awaken
                        threadCount -=1
                    # all Threads finished, check normal Complete
                    condition.release()
                    for thread in threads:
                        if thread.root.state != RecipeElementState.COMPLETED:
                            logger.error('Child Thread failed')
                            node.state = RecipeElementState.ABORTED
                            return False
                    node.state = RecipeElementState.COMPLETED
                if node.blockType == 'SeriellerBlock':
                    # recurse: serial blocks run their children in order
                    runningNormal = self.executeServiceTree(node)
                    if runningNormal == False:
                        logger.error('Serial-Block failed')
                        node.state = RecipeElementState.ABORTED
                        return False
                    node.state = RecipeElementState.COMPLETED
            elif isinstance(node, XmlRecipeServiceInstance):
                self.executeService(node)
                if node.state == RecipeElementState.ABORTED:
                    return False
        return True
class RecipeRootThread(threading.Thread):
    """
    Extra Thread to separate Blocking from Non-Blocking.
    Server -> RecipeHandler -> starts Root Thread -> response
    Root Thread waits until Recipe is finished. Simply start a Tree Thread.
    """
    def __init__(self,stdout, recipe):
        # :param stdout: normally sys.stdout (kept but unused here)
        # :param recipe: parsed XmlRecipeParser whose runBlock is executed
        threading.Thread.__init__(self)
        self.stdout = stdout
        self.recipeParser = recipe
    def run(self):
        # execute the recipe's root block in a tree thread and block until
        # it signals completion through the condition variable
        runBlockNode = self.recipeParser.recipe.runBlock
        print('start Root Recipe Thread, '+self.recipeParser.recipe.name)
        condition = threading.Condition()
        condition.acquire() # lock
        thread = RecipeTreeThread(self.stdout, runBlockNode, condition)
        thread.start()
        condition.wait() # unlock, relock if awaken
        condition.release() # unlock
        # tell the singleton handler this recipe is done
        RecipeHandler.instance.finishRecipe()
        print('end Root Recipe Thread, ' + self.recipeParser.recipe.name)
class RecipeFileObject:
    """Lightweight handle on a recipe XML file; parsing is deferred
    until getParsed() is called."""
    def __init__(self, filename):
        # path of the recipe XML file
        self.fileName = filename
    def getParsed(self, topology, plant):
        """Parse the file against *topology* and *plant*; returns the
        XmlRecipeParser."""
        return XmlRecipeParser(self.fileName, topology, plant)
class TopologyFileObject:
    """Lightweight handle on a topology XML file; parsing happens on demand."""
    def __init__(self, filename):
        # Path to the topology file; read by getParsed().
        self.fileName = filename
    def getParsed(self, plant):
        """Parse the file for the given plant; returns the XmlTopologyParser."""
        return XmlTopologyParser(self.fileName, plant)
class RecipeHandler:
    """
    Singleton facade for recipe and topology management.

    Owns the parsed topologies, parses and starts recipes, and tracks the
    currently running recipe. ``RecipeHandler.anlage`` (the OpcPlant) must be
    set before the first instantiation. Attribute access on a
    ``RecipeHandler()`` object is delegated to the one shared
    ``__RecipeHandler`` instance.
    """
    anlage = None    # plant instance used to construct the real handler
    instance = None  # the shared __RecipeHandler singleton

    def __init__(self):
        # Lazily create the real handler on first construction.
        if not RecipeHandler.instance:
            RecipeHandler.instance = RecipeHandler.__RecipeHandler(RecipeHandler.anlage)

    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the singleton.
        # Bug fix: the original was declared ``def __getattr__(name):`` without
        # ``self``; Python invokes it as (self, name), so every delegated
        # lookup raised TypeError instead of forwarding to the instance.
        return getattr(RecipeHandler.instance, name)

    class __RecipeQueueThread(threading.Thread):
        """Debug helper that runs queued recipe element threads one by one."""

        def __init__(self, stdout, recipeQueue):
            threading.Thread.__init__(self)
            self.stdout = stdout
            self.stderr = None
            self.recipeQueue = recipeQueue

        def run(self):
            """
            Executes all ThreadElements one after another.
            Was just for debugging.
            :return:
            """
            for element in self.recipeQueue:
                element.start()
                element.join(timeout=element.timeout)
                # Bug fix: the original followed the timed join with
                # ``while element.isAlive(): pass`` which burned a full CPU
                # core; a second blocking join waits without spinning.
                element.join()
                if element.service.State in element.type.complete:
                    self.state = RecipeElementState.COMPLETED
                else:
                    self.state = RecipeElementState.ABORTED

    class __RecipeHandler:

        def parseRecipe(self, filename):
            """
            Loads a recipe from the recipe directory with the given filename
            and returns the parsed result.
            :param filename: Name of a file in the recipe directory
            :return: parsed XmlRecipeParser
            """
            return RecipeFileObject(os.path.join(main.settings.RECIPE_DIR, filename)).getParsed(self.actualTopology, self.anlage)

        def __init__(self, anlage):
            self.recipes = []
            self.actualRecipeThread = None  # RecipeRootThread of the running recipe
            self.completeRecipe = None      # thread of the most recently finished recipe
            self.anlage = anlage
            # Collect every topology file; the first one becomes active if valid.
            self.topologyId = 0
            self.topologies = []
            for file in os.listdir(main.settings.TOPOLOGY_DIR):
                self.topologies.append(TopologyFileObject(os.path.join(main.settings.TOPOLOGY_DIR, file)))
                self.topologyId += 1
            parsedTopology = self.topologies[0].getParsed(self.anlage)
            if parsedTopology.isValid:
                self.actualTopology = parsedTopology
            else:
                logger.error("Invalid Topology")
            self.active = False

        def startRecipeFromFilename(self, filename):
            """
            Creates a RecipeFileObject and starts it with a RecipeRootThread,
            if the file parses as valid and no other recipe is active.
            :param filename: Name of a file in the recipe directory
            :return: True if the recipe was started, otherwise False
                     (self.message then explains why)
            """
            recipe = RecipeFileObject(os.path.join(main.settings.RECIPE_DIR, filename)).getParsed(self.actualTopology, self.anlage)
            if not recipe.isValid:
                self.message = recipe.message
                return False
            if self.actualRecipeThread is not None:
                self.message = 'Rezept läuft noch.'
                return False
            thread = RecipeRootThread(sys.stdout, recipe)
            self.actualRecipeThread = thread
            thread.start()
            return True

        def finishRecipe(self):
            """
            Callback invoked by RecipeRootThread when all recipe steps are
            finished or the recipe was aborted; clears the active slot.
            :return:
            """
            self.completeRecipe = self.actualRecipeThread
            self.actualRecipeThread = None

        def getServices(self, recipeNode):
            """
            Gets a list with all service calls from a given recipe tree.
            Order is not guaranteed and parallel blocks are serialized.
            Just for debugging.
            :param recipeNode: block node whose children are inspected
            :return: (possibly nested) list of service instances
            """
            services = []
            for child in recipeNode.childs.values():
                if isinstance(child, XmlRecipeBlock):
                    services.append(self.getServices(child))
                elif isinstance(child, XmlRecipeServiceInstance):
                    services.append(child)
            return services

        def startRecipeWithQueue(self, recipeElements):
            """
            Starts a queue thread with the given list.
            :param recipeElements: List of RecipeElementThreads
            :return: None
            """
            thread = RecipeHandler.__RecipeQueueThread(sys.stdout, recipeElements)
            thread.start()

        # TODO: upload files with json and check xml syntax (not the topology),
        # and don't re-read the directory on every GET request (persist the lists).

        def saveUploadedRecipe(self, file):
            """
            Copies the uploaded file from the temp dir to the recipe dir.
            :param file: Uploaded file handle
            :return:
            """
            filename = str(file)
            with open(os.path.join(main.settings.RECIPE_DIR, filename), 'wb+') as destination:
                for chunk in file.chunks():
                    destination.write(chunk)

        def saveUploadedTopology(self, file):
            """
            Copies the uploaded file from the temp dir to the topology dir.
            :param file: Uploaded file handle
            :return:
            """
            filename = str(file)
            with open(os.path.join(main.settings.TOPOLOGY_DIR, filename), 'wb+') as destination:
                for chunk in file.chunks():
                    destination.write(chunk)
983,177 | 4f05e84d65679b2a22f2dfcb178116012664028a | # Generated by Django 2.0.5 on 2020-02-15 22:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Introduces the Artist model and repoints Song.artist from (presumably)
    # a plain field to a nullable FK on Artist. Do not edit applied migrations.
    dependencies = [
        ('rhyme', '0018_add_export_columns_to_song'),
    ]
    operations = [
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=63, unique=True)),
                ('genre', models.CharField(max_length=63)),
            ],
        ),
        # CASCADE: deleting an Artist also deletes its Songs.
        migrations.AlterField(
            model_name='song',
            name='artist',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='rhyme.Artist'),
        ),
    ]
|
983,178 | 9d054959918eaf72d7c9d8da678804662eab83ca | from molmass import Formula
# Interactive helper around molmass.Formula. Depending on how many
# space-separated fields are entered it prints: molar mass (1 field),
# moles from mass (2), mole fractions of a 2-gas mixture (4), or ideal-gas
# partial pressures given T in Celsius and V in litres (6). "quit" exits.
RawData = input("1st gas Formula, mass(g), 2nd Formula, mass(g), T(C), V(L): ")
while(RawData != "quit"):
    SplitData = RawData.split(" ")
    if(len(SplitData) == 1):
        # Formula only: print its molar mass (g/mol).
        F1 = Formula(SplitData[0])
        print(F1.mass)
    elif(len(SplitData) == 2):
        # Formula + mass: convert mass to moles.
        F1 = Formula(SplitData[0])
        M1 = float(SplitData[1])
        mol = M1/F1.mass
        print("Mol: ", mol)
    elif(len(SplitData) == 4):
        # Two gases: total moles and mole fractions.
        F1 = Formula(SplitData[0])
        M1 = float(SplitData[1])
        F2 = Formula(SplitData[2])
        M2 = float(SplitData[3])
        mol1 = M1/F1.mass
        mol2 = M2/F2.mass
        molT = mol1 + mol2
        print(F1.formula, F2.formula, "total: ",molT)
        print(F1.formula, "ratio: ",mol1/molT," mol 1: ",mol1)
        print(F2.formula, "ratio: ",mol2/molT," mol 2: ",mol2)
    elif(len(SplitData) == 6):
        # Two gases + temperature (C -> K) and volume (L): P = nRT/V in atm,
        # plus partial pressures by mole fraction.
        F1 = Formula(SplitData[0])
        M1 = float(SplitData[1])
        F2 = Formula(SplitData[2])
        M2 = float(SplitData[3])
        T1 = float(SplitData[4]) + 273.15
        V1 = float(SplitData[5])
        mol1 = M1/F1.mass
        mol2 = M2/F2.mass
        molT = mol1 + mol2
        # 0.0820574 is the gas constant R in L*atm/(mol*K).
        P = molT * 0.0820574 * T1 / (V1)
        print(F1.formula, F2.formula, "total: ",molT,"\n")
        print(F1.formula, "ratio: ",mol1/molT," mol 1: ",mol1)
        print(F1.formula, "P1 ratio: ",mol1/molT * P)
        print(F2.formula, "ratio: ",mol2/molT," mol 2: ",mol2)
        print(F2.formula, "P2 ratio: ",mol2/molT * P)
        print("Total Pressure: ", P)
    RawData = input("1st gas Formula, mass, 2nd Formula, mass: ")
983,179 | 9abfc57c197606c9990d55c818109b5cd508864c | class Human:
def__init__ (self, sex ='male')
self._sex = male
    def eat(self):
        """Print a fixed eating message."""
        print("eating food")
    def talk(self):
        """Print a fixed talking-habit message."""
        print("Likes talking while eating")
    def define_sex(self, sex):
        """Setter for the protected ``_sex`` attribute."""
        self._sex = sex
    def get_sex(self):
        """Getter for the protected ``_sex`` attribute."""
        return self._sex
def main():
    """Demo entry point: build a default Human and report its sex."""
    person = Human()
    print(person.get_sex())
if __name__ == '__main__':
    # Run the demo only when executed directly, not on import.
    main()
|
983,180 | 3a827de300cfd876775c4049035751295ed45e37 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 00:04:13 2017
@author: Seth Guberman - sguberman@gmail.com
"""
def mpos(delim=', ', infile='mpos/mobyposi.txt', outfile='mpos.txt'):
    """
    Reformat the original Moby parts-of-speech file into plain ascii text.

    Generalized: the previously hard-coded input/output paths are now
    parameters with the old values as defaults (backward compatible).

    delim: (str) delimiter to use in the output file
    infile: (str) path to the original Moby parts-of-speech file
    outfile: (str) path of the reformatted output file
    """
    print('Reformatting parts of speech file...')
    with open(infile, 'rb') as orig, open(outfile, 'w') as mod:
        orig_bytes = orig.read()
        count = 0
        skipped = 0
        for line in orig_bytes.split(b'\r'):  # original file has \r line endings
            try:
                # Split on the original 0xD7 delimiter and decode to str.
                word, tag = (x.decode() for x in line.split(b'\xd7'))
                mod.write('{}{}{}\n'.format(word, delim, tag))
                count += 1
            except UnicodeDecodeError:  # weird character in entry
                skipped += 1
            except ValueError:  # line without exactly one delimiter
                pass
    print('...Reformatted {} entries, skipped {}.'.format(count, skipped))
def mhyph(delim='`', infile='mhyph/mhyph.txt', outfile='mhyph.txt'):
    """
    Reformat the original Moby hyphenates file to plain ascii text.

    Generalized: the previously hard-coded input/output paths are now
    parameters with the old values as defaults (backward compatible).

    delim: (str) delimiter to use in the output file
    infile: (str) path to the original Moby hyphenation file
    outfile: (str) path of the reformatted output file
    """
    print('Reformatting original hyphenates file...')
    with open(infile, 'rb') as orig, open(outfile, 'w') as mod:
        orig_bytes = orig.read()
        count = 0
        skipped = 0
        for line in orig_bytes.split(b'\r\n'):
            try:
                # 0xA5 separates syllables in the original encoding.
                syllables = (x.decode() for x in line.split(b'\xa5'))
                mod.write('{}\n'.format(delim.join(syllables)))
                count += 1
            except UnicodeDecodeError:  # weird character in entry
                skipped += 1
            except ValueError:  # weird line in file
                pass
    print('...Reformatted {} entries, skipped {}.'.format(count, skipped))
if __name__ == '__main__':
    # Convert both Moby source files with default paths when run as a script.
    mpos()
    mhyph()
|
983,181 | 10b86f6a21ff40e677abbc9ee76c70717867a4d2 | # Game codes here -- @nirdteq
#ceo: profnird
#co-founder: charles
#Developer: YourNameHere
#resources & reference : https://pygame-zero.readthedocs.io/en/stable/
#color RGB reference : colorspire.com/rgb-color-wheel/
#-----------------------------------------------------------
# Onboarding banner shown when the script starts.
print("Welcome")
print("Do well to leave comment on every stage to help others")
print("files management is very important,\n Your project will be used later times")
print("Be the master of the craft solve problems and obey guidance too")
# The game window is created automatically by Pygame Zero (pgzrun).
# Discuss your interface with your guide (nird) and partner.
# Get images, files and the game description in order before you start dev.
# ------------------------------------------------------------------------
# Start developing the game from here. This is a basic window design.
# WIDTH/HEIGHT are special Pygame Zero globals that size the window.
WIDTH = 500
HEIGHT = 500
# draw() is called by Pygame Zero every frame; `screen` is a pgzero builtin.
def draw():
    # Fill the whole window with an RGB yellow-ish color (225, 222, 0).
    screen.fill((225, 222, 0))
|
983,182 | 73fd7f2c6e7fc9595d2723d7c0b881f8e56d8724 | from typing import List
from base.tree.tree_node import TreeNode
class Solution:
    """Count the number of distinct node values ("colors") in a binary tree."""

    def numColor(self, root: 'TreeNode') -> int:
        """Return how many distinct values appear in the tree rooted at ``root``."""
        self.colors = []
        self.num_color(root)
        return len(set(self.colors))

    def num_color(self, root: 'TreeNode') -> None:
        """Pre-order traversal accumulating node values into ``self.colors``."""
        if root:
            # Bug fix: the original guard was ``if root.val:`` which silently
            # dropped a legitimate value of 0 (falsy). Compare against None so
            # only missing values are skipped.
            if root.val is not None:
                self.colors.append(root.val)
            if root.left:
                self.num_color(root.left)
            if root.right:
                self.num_color(root.right)
if __name__ == "__main__":
    # Smoke test: the tree [1,3,2,1,null,2] has distinct values {1, 2, 3},
    # so this should print 3.
    func = Solution().numColor
    root = TreeNode.from_strs("[1,3,2,1,null,2]")
    print(func(root))
983,183 | 5a7059178987a97ecbd124193cbf40807cc45b60 | from django.contrib import admin
from .models import UserSurvey, WasteTracking, VolunteerTracking
# Register your models here.
# Expose the survey/tracking models in the Django admin with default options.
admin.site.register(UserSurvey)
admin.site.register(WasteTracking)
admin.site.register(VolunteerTracking)
983,184 | a192c806cc2629e938b5ba212e28ebce1beb3445 | from ..common_files.common_infos import CommonInfos
class InfosForProcess(CommonInfos):
    """
    Plain data container bundling the global simulation state a worker
    process needs. The odd-looking parameter annotations are free-form
    description strings used by the original author as inline docs.
    NOTE(review): CommonInfos.__init__ is never invoked here - confirm the
    base class needs no initialization.
    """
    def __init__(self,
                 T: 'global transmissibility matrix without boundary conditions',
                 pms: 'global multiscale presure',
                 g_flux_grav_faces,
                 gids: 'global gids',
                 g_faces: 'global_faces',
                 g_neig_internal_faces: 'all neig internal faces',
                 remaped_internal_faces,
                 solver
                 ):
        # Straight attribute capture; no validation or copying is performed.
        self.T = T
        self.pms = pms
        self.g_flux_grav_faces = g_flux_grav_faces
        self.gids = gids
        self.g_faces = g_faces
        self.g_neig_internal_faces = g_neig_internal_faces
        self.remaped_internal_faces = remaped_internal_faces
        self.solver = solver
|
983,185 | b9747fbc4ba45f612744b347361ff89c00ed4637 | T = int(raw_input())
# Google Code Jam style driver (Python 2: raw_input/xrange/print statements).
# For each of T cases, reads K, C, S and prints "Case #i:" followed by the
# numbers 1..K on the same line. C and S are read but unused here.
for i in xrange(1,T+1):
    K,C,S = map(int,raw_input().split(' '))
    print "Case #%d:" %(i),
    for j in xrange(1,K+1):
        print j,
    print ""
|
983,186 | 524a5a12240caba270782c94accb05b8566edb52 | import sys, pickle, pymc, numpy
from matplotlib import pyplot
def simTarget(outFile, outPng):
    """Fit a hierarchical Bernoulli model of per-base target frequency via MCMC.

    Builds 7 synthetic base groups with hand-coded hit/miss counts, samples the
    posterior of the frequency hypermean, saves a histogram to ``outPng`` and
    pickles the trace to ``outFile``.
    """
    # 7 bases
    N = 7
    # Uniform hyperpriors on the group-level mean and precision.
    hyperPrior_mu = pymc.Uniform("target_mu_hyper", 0, 1)
    hyperPrior_precision = pymc.Uniform("target_precision_hyper",
                                        0.0001, 100)
    # Observed hit/miss data, one concatenated boolean vector for all bases.
    values = numpy.array( 100*[False] + 5*[True]
                            + 50*[False] + 5*[True]
                            + 2000*[False] + 5*[True]
                            + 100*[False] + 7*[True]
                            + 100*[False] + 8*[True]
                            + 100*[False] + 10*[True]
                            + 1000*[False] + 90*[True] )
    # idx maps each observation to its base (group sizes match `values` above).
    idx = [0]*105 + [1]*55 + [2]*2005 + [3]*107 \
            + [4]*108 + [5]*110 + [6]*1090
    target_freq = pymc.Normal('target_freq', mu=hyperPrior_mu,
                              tau=hyperPrior_precision, size=N)
    observations = pymc.Bernoulli("obs", target_freq[idx], observed=True,
                                  value=values)
    model = pymc.Model([hyperPrior_mu, hyperPrior_precision,
                        target_freq, observations])
    mcmc = pymc.MCMC(model)
    mcmc.sample(iter=100000, burn=15000)
    # Posterior samples of the hypermean only.
    t = mcmc.trace("target_mu_hyper")[:]
    fig = pyplot.figure()
    pyplot.title("Posterior distribution of target freq mu")
    # NOTE(review): `normed=True` was removed in modern matplotlib (use density=True).
    pyplot.hist(t, bins=25,
                histtype="stepfilled", normed=True)
    pyplot.savefig(outPng)
    pyplot.close(fig)
    # NOTE(review): text-mode 'w' with pickle is Python 2 era; Python 3 needs 'wb'.
    with open(outFile, 'w') as fout:
        pickle.dump(t, fout)
if __name__ == '__main__':
    # Usage: script.py <traceOutFile> <histogramPng>
    targetTraceFile, outPng = sys.argv[1:]
    simTarget(targetTraceFile, outPng)
|
983,187 | c671714b74fe2a6e9ae7cca0887d07ea46df33c3 | #!/usr/bin/env python
from shapely.geometry import Point
from functools import partial
import pyproj
from shapely.ops import transform
# Python 2 demo: reproject a lon/lat point into UTM coordinates.
point1 = Point(9.0, 50.0)
print point1
# Partially-applied transform from WGS84 (EPSG:4326) to UTM zone 32N (EPSG:32632).
project = partial(
    pyproj.transform,
    pyproj.Proj(init='epsg:4326'),
    pyproj.Proj(init='epsg:32632'))
point2 = transform(project, point1)
print point2
|
983,188 | e1c60066ab556e48020668cff984769d019c8a09 | from flask_wtf import FlaskForm
from flask_pagedown.fields import PageDownField
from wtforms import FileField, StringField, SubmitField, TextAreaField, BooleanField, SelectField
from flask_wtf.file import FileRequired, FileAllowed
from wtforms.validators import DataRequired, Email, Length, Regexp, ValidationError
from ..models import User
from .. import uploaded_files, photos
"""
Main forms of Filestagram
"""
class FilterForm(FlaskForm):
    """Search form: a single required query string of 1-255 characters."""
    query = StringField(validators=[DataRequired(), Length(1, 255)])
    submit = SubmitField("Search")
class PostForm(FlaskForm):
    """
    FlaskForm for uploading images, including a file field that only accepts images,
    and a body that describes the image.
    """
    # Upload restricted to the `photos` upload set (images only) and required.
    file = FileField(validators=[FileAllowed(photos, 'Images only!'),
                               FileRequired('File was empty!')])
    # Markdown-enabled caption text.
    body = PageDownField(validators=[DataRequired()],render_kw = {"placeholder":"Leave your thought"})
    submit = SubmitField("Submit")
class PostFormFile(FlaskForm):
    """
    FlaskForm for uploading general files; the file field accepts everything
    allowed by the `uploaded_files` upload set.
    """
    file = FileField(validators=[FileAllowed(uploaded_files, 'Certain types only!'),
                               FileRequired('File was empty!')])
    submit = SubmitField("Upload files")
class CommentForm(FlaskForm):
    """
    FlaskForm for commenting on users' posts; the body field supports markdown.
    """
    body = PageDownField(validators=[DataRequired()],render_kw = {"placeholder":"Leave your comment"})
    submit = SubmitField("Comment")
class NameForm(FlaskForm):
    """
    FlaskForm for user names.
    NOTE(review): the field label asks for a name but the validators require a
    valid e-mail address (Email()) - confirm which is actually intended.
    """
    name = StringField("What's your name?",validators=[DataRequired(), Email()],render_kw = {"placeholder": "Enter User Name"})
    submit = SubmitField("Submit")
class EditProfileForm(FlaskForm):
    """
    FlaskForm for editing user profiles: optional profile image, location,
    and a personal introduction. All fields are optional.
    """
    photo = FileField(validators=[FileAllowed(photos, "Only image supported")])
    location = StringField("Location", render_kw = {"placeholder": "Location"})
    about_me = TextAreaField("About me", render_kw = {"placeholder": "Introduce yourself"})
    submit = SubmitField("Save Changes")
|
983,189 | 183c11f1db8f580e48de0a89ccbfea4dd5349f90 | # chat_server.py
import sys
import socket
import select
import os.path
import os
HOST = ''
SOCKET_Dict = {} # maps client socket -> [username, conversation-status code string]
groups = {} # maps group name -> list of member usernames (creator first)
SOCKET_LIST = [] # all currently active sockets (including the server socket)
Block = {} # maps client socket -> list of usernames that client has blocked
RECV_BUFFER = 4096
PORT = 9009
def chat_server():
    """Accept clients and dispatch their slash-commands forever (Python 2).

    Each connecting client first sends its username. Afterwards every payload
    is either a command (/help/, /block/, /ban/, /unblock/, /exit/, /pm/,
    /create/, /invite/, /group/) or a chat message routed according to the
    client's stored conversation-status code in SOCKET_Dict.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind((HOST, PORT))
    server_socket.listen(10)
    """nome = server_socket.recv(RECV_BUFFER)
    SOCKET_Dict.setDefault(server_socket, nome)
    """
    SOCKET_LIST.append(server_socket)
    os.system('clear')
    print "Chat server started on port " + str(PORT)
    while 1:
        # Poll (timeout 0) for readable sockets.
        ready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0)
        for sock in ready_to_read:
            if sock == server_socket:
                # New connection: first payload is the username.
                sockfd, addr = server_socket.accept()
                nome = sockfd.recv(RECV_BUFFER)
                Block.setdefault(sockfd, [])
                SOCKET_LIST.append(sockfd)
                print "Client (%s) connected" % nome
                # Tell the newcomer who is already online.
                for a in SOCKET_Dict.keys():
                    sockfd.send("[" + SOCKET_Dict[a][0] + "] is online\n")
                SOCKET_Dict.setdefault(sockfd, [nome, ''])
                broadcast(server_socket, sockfd, "[%s] is online\n" % nome)
            else:
                try:
                    data = sock.recv(RECV_BUFFER)
                    if("/help/" in data):
                        help(sock, 0)
                    elif("/block/" in data):
                        data = data.split("/")
                        if (len(data) != 4):
                            help(sock, 0)
                        for a in SOCKET_Dict.keys():
                            if (data[2] == SOCKET_Dict[a][0]):
                                block(sock, data[2], 0)
                    elif("/ban/" in data):
                        data = data.split("/")
                        if (len(data) != 5):
                            help(sock, 0)
                        else:
                            ban(sock, data[2], data[3])
                    elif("/unblock/" in data):
                        data = data.split("/")
                        if (len(data) != 4):
                            help(sock, 0)
                        for a in SOCKET_Dict.keys():
                            if (data[2] == SOCKET_Dict[a][0]):
                                block(sock, data[2], 1)
                    elif("/exit/" in data):
                        # Clear the conversation-status code for this client.
                        SOCKET_Dict[sock][1] = ("")
                        sock.send("Saiu da conversacao anterior.\n")
                    elif ("/pm/" in data):
                        data = data.split("/")
                        if(len(data) != 4):
                            help(sock, 0)  # expected shape: /pm/<user>/
                        for a in SOCKET_Dict.keys():
                            if (data[2] == SOCKET_Dict[a][0]):
                                # Enter PM mode with that user and replay history.
                                SOCKET_Dict[sock][1] = ("/" + data[1] + "/" + data[2] + "/")
                                read(server_socket, sock, data[2])
                    elif ("/create/" in data):
                        data = data.split("/")
                        if(len(data) != 4):
                            help(sock, 0)  # expected shape: /create/<group>/
                        elif(data[2] in groups.keys()):
                            help(sock, 2)  # group name already taken
                        else:
                            create(sock, data[2])
                    elif ("/invite/" in data):
                        data = data.split("/")
                        if(len(data) != 5):
                            help(sock, 0)  # expected shape: /invite/<group>/<user>/
                        else:
                            invite(sock, data[2], data[3])
                    elif ("/group/" in data):
                        data = data.split("/")
                        # Only members may enter the group conversation.
                        if (SOCKET_Dict[sock][0] in (groups[data[2]])):
                            SOCKET_Dict[sock][1] = ("/" + data[1] + "/" + data[2] + "/")
                            readg(server_socket, sock, data[2])
                        else:
                            sock.send("Nao tem permissao para aceder a esse grupo.\n")
                    elif("/pm/" in SOCKET_Dict[sock][1]):
                        # No command: deliver as a private message in the open PM.
                        private(server_socket, sock, data)
                    elif("/group/" in SOCKET_Dict[sock][1]):
                        # No command: deliver to the group the client has open.
                        codigo = SOCKET_Dict[sock][1].split("/")
                        if(SOCKET_Dict[sock][0] in groups[codigo[2]]):
                            codigo = SOCKET_Dict[sock][1].split("/")
                            messageg(sock,codigo[2], data)
                    else:
                        help(sock,0)
                # NOTE(review): bare except hides all errors (incl. disconnects)
                # by just sending the help text.
                except:
                    help(sock,0)
                    continue
    # Unreachable: the `while 1` loop above never terminates normally.
    server_socket.close()
def readg(server_socket, sock, nomeg):
    """Send the stored history of group `nomeg` (file '<group>.txt') to `sock`."""
    s = nomeg +".txt"
    if(os.path.isfile(s)):
        fich=open(s, 'r')
    else:
        sock.send("O grupo nao esta criado.\n")
        return
    fich.seek(0)
    sock.send(fich.read())
    fich.close()
def block(sock, nome, flag): # flag = 0 -> block | flag = 1 -> unblock | flag = 2 -> return False if blocked
    """Manage `sock`'s block list; with flag 2, check blocking in both directions."""
    if (flag == 0):
        Block[sock].append(nome)
        sock.send("O user " + nome + " foi bloqueado.\n")
    elif (flag == 1):
        Block[sock].remove(nome)
        sock.send("O user " + nome + " foi desbloqueado.\n")
    elif (flag == 2):
        if(nome in Block[sock]):
            sock.send("Tens esse user bloqueado, try /unblock/" + nome + "/.\n")
            return False
        nome1 = SOCKET_Dict[sock][0]
        # Find the socket that belongs to `nome` to check the reverse direction.
        # NOTE(review): if `nome` is offline, sock1 is unbound -> NameError below.
        for a in SOCKET_Dict.keys():
            if (nome in SOCKET_Dict[a]):
                sock1 = a
        if (nome1 in Block[sock1]):
            sock.send("Foste bloquado por esse user. \n")
            return False
        return True
def ban(sock, nomeg, nome):
    """Remove `nome` from group `nomeg`; only the creator (first member) may ban."""
    if (SOCKET_Dict[sock][0] == groups[nomeg][0]):
        if (nome in groups[nomeg]):
            sock.send("O user " + nome + " foi removido!\n")
            groups[nomeg].remove(nome)
    else:
        sock.send("Nao podes remover users.\n")
def messageg(sock, nomeg, message):
    """Append `message` to the group log file and fan it out to online members.

    Members currently inside this group's conversation receive the message
    itself; other members only get a notification.
    NOTE(review): `fich` is never closed on this code path.
    """
    fich=open(nomeg+".txt",'a')
    fich.write("[" + SOCKET_Dict[sock][0] + "] " + message)
    for no in groups[nomeg]:
        for aux in SOCKET_Dict.keys():
            if(SOCKET_Dict[aux][0] == no):
                if(("/group/" + nomeg + "/") == SOCKET_Dict[aux][1] and aux != sock):
                    try:
                        aux.send("[" + SOCKET_Dict[sock][0] + "] " + message)
                    except:
                        aux.close()
                        # broken socket, remove it
                        if aux in SOCKET_Dict:
                            SOCKET_Dict.pop(aux)
                            SOCKET_LIST.remove(aux)
                elif(aux != sock):
                    try:
                        aux.send("Tem notificacoes do grupo [" + nomeg + "].\n")
                    except:
                        aux.close()
                        # broken socket, remove it
                        if aux in SOCKET_Dict:
                            SOCKET_Dict.pop(aux)
                            SOCKET_LIST.remove(aux)
def create(sock, nomeg):
    """Create group `nomeg`: start its history file and register the creator."""
    fich = open(nomeg + ".txt", 'w')
    # Creator is stored as the first member (used by ban() as the owner check).
    groups.setdefault(nomeg,[SOCKET_Dict[sock][0]])
    fich.write("\t\tCONVERSA " + nomeg + "\n")
    sock.send("O grupo " + nomeg + " foi criado com sucesso.\n")
    print("O grupo "+nomeg + " foi criado.\n")
    fich.close()
def invite(sock, nomeg, conv):
    """Add user `conv` to group `nomeg`; intended for existing members only.

    NOTE(review): the error branches below do not return, so execution falls
    through to the add-loop even after reporting an error.
    """
    if(nomeg not in groups.keys()):
        help(sock,1)
    elif(conv in groups[nomeg]):
        return
    elif(SOCKET_Dict[sock][0] not in groups[nomeg]):
        help(sock,1)
    for a in SOCKET_Dict.keys():
        if (SOCKET_Dict[a][0] == conv):
            sock.send("O user " + conv + " foi adicionado com sucesso.\n")
            groups[nomeg].append(conv)
            a.send("Foste adicionado ao grupo " + nomeg + ".\n")
def broadcast (server_socket, sock, message):
    """Send `message` to every client except the server socket and the sender.

    NOTE(review): the loop variable shadows the imported `socket` module.
    """
    for socket in SOCKET_Dict.keys():
        if socket != server_socket and socket != sock :
            try :
                socket.send(message)
            except :
                # Broken connection: drop the socket from the bookkeeping.
                socket.close()
                if socket in SOCKET_Dict:
                    SOCKET_Dict.pop(socket)
                    SOCKET_LIST.remove(socket)
def help(sock ,erro):
    """Send command help (erro=0) or an error notice (erro=1/2) to `sock`.

    NOTE(review): shadows the builtin help(); the big string below relies on
    Python 2 treating '\\u'/'\\D' as literal backslashes.
    """
    if (erro == 0):
        sock.send("-"*10 + "HELP" + "-"*10 + "\n Comecar conversa privada: /pm/<user>/\nCriar grupo: /create/<nomegrupo>/\nAdicionar ao grupo: /invite/<nomegrupo>/<user>/\nIniciar conversa de Grupo: /group/<nomegrupo>/\nBloquear\Desbloquear user: /block\unblock/<user>/\nBanir user de grupo: /ban/<nomegrupo>/<user>/\nSair da conversacao anterior: /exit/\n\nNOTA: Confirme se colocou os <user>'s corretamente!\n")
    elif(erro ==1):
        sock.send("-"*10 + "ERRO" + "-"*10 + "\n" + "Esse user nao existe" + ".\n")
    elif(erro ==2):
        sock.send("-"*10 + "ERRO" + "-"*10 + "\n" + "Esse nome de grupo ja foi utilizado" + ".\n")
def read(server_socket, sock, nome):
    """Send the PM history between `sock`'s user and `nome` back to `sock`.

    History lives in '<userA><userB>MsgPrivada.txt'; both name orders are tried.
    """
    s1 = SOCKET_Dict[sock][0] + nome+ "MsgPrivada.txt"
    s2 = nome + SOCKET_Dict[sock][0] + "MsgPrivada.txt"
    if(os.path.isfile(s1)):
        fich=open(s1, 'r')
    elif(os.path.isfile(s2)):
        fich=open(s2, 'r')
    else:
        sock.send("Nao tem mensagens desse user.\n")
        return
    fich.seek(0)
    sock.send(fich.read())
    fich.close()
def private (server_socket, sock, message):
    """Deliver `message` as a private message within the sender's open PM.

    The recipient is taken from the sender's status code ('/pm/<user>/').
    Blocked pairs (either direction) are rejected by block(..., 2). Messages
    are persisted to the pair's history file; recipients with the PM open get
    the text, others get a notification.
    NOTE(review): unlike broadcast(), a broken socket is removed from
    SOCKET_Dict but not from SOCKET_LIST here.
    """
    codigo = SOCKET_Dict[sock][1].split("/")
    nomerecetor = codigo[2]
    if (block(sock,nomerecetor,2)):
        # Reuse an existing history file regardless of name order.
        s1 = SOCKET_Dict[sock][0] + nomerecetor + "MsgPrivada.txt"
        s2 = nomerecetor + SOCKET_Dict[sock][0] + "MsgPrivada.txt"
        if(os.path.isfile(s1)):
            fich=open(s1, 'a')
        elif(os.path.isfile(s2)):
            fich=open(s2, 'a')
        else:
            fich=open(s1,'a')
        for socket in SOCKET_Dict.keys():
            if(nomerecetor == SOCKET_Dict[socket][0]):
                if socket != server_socket and socket != sock:
                    if(("/pm/" + SOCKET_Dict[sock][0] + "/") == SOCKET_Dict[socket][1] ):
                        try:
                            socket.send("[" + SOCKET_Dict[sock][0] + "] " + message)
                            fich.write("[" + SOCKET_Dict[sock][0] + "] " + message)
                        except:
                            socket.close()
                            if socket in SOCKET_Dict:
                                SOCKET_Dict.pop(socket)
                    else:
                        try :
                            socket.send("Tem uma pm do user " + SOCKET_Dict[sock][0] + "\n")
                            fich.write("[" + SOCKET_Dict[sock][0] + "] " + message)
                        except :
                            socket.close()
                            if socket in SOCKET_Dict:
                                SOCKET_Dict.pop(socket)
        fich.close()
if __name__ == "__main__":
    # chat_server() loops forever; sys.exit is only reached if it returns.
    sys.exit(chat_server())
|
983,190 | f42217fdbabe7b65165ee0b3fbc109534c9ab489 | from __future__ import division, absolute_import, print_function
__all__ = ['styled', 'styled_print']
# noinspection PyUnresolvedReferences
from six.moves.html_parser import HTMLParser
from blessings import Terminal
import six
import sys
term = Terminal()
# Default mapping from pseudo-HTML tag names to terminal escape sequences,
# used by MyHTMLParser when the caller does not supply a custom mapping.
default_styles = {
    'err': term.red,
    'ref': term.yellow,
    'path': term.yellow,
    'rev': term.bold,
    'version': term.bold,
    'cmd': term.cyan + term.underline,  # 'sub': term.cyan,
    'echo': term.yellow,
}
class MyHTMLParser(HTMLParser):
    """Parses pseudo-HTML markup, rewriting known tags as terminal styles.

    The rendered output accumulates in ``self.s``. Unknown tags are ignored.
    """
    def __init__(self, style, styles = None):
        HTMLParser.__init__(self)
        self.s = ''  # accumulated output text
        self.style = style  # falsy disables emission of escape codes
        self.styles = styles if styles else default_styles
        self.style_stack = []  # escape codes of the currently open known tags
    # noinspection PyUnusedLocal
    def handle_starttag(self, tag, attrs):
        if tag in self.styles:
            self.style_stack.append(self.styles[tag])
    def handle_endtag(self, tag):
        # NOTE(review): a close tag without a matching open tag raises
        # IndexError here (pop on an empty stack).
        if tag in self.styles:
            self.style_stack.pop()
    def handle_data(self, data):
        if self.style:
            self.apply()
        self.s += data
    def apply(self):
        # Reset, then re-emit every currently active style.
        # NOTE(review): iterating set(...) makes emission order arbitrary;
        # harmless for simple color codes but not strictly deterministic.
        self.s += term.normal
        for style in set(self.style_stack):
            self.s += style
from punic.config import config
def styled(s, style = None, styles = None):
    """Render pseudo-HTML markup in ``s`` as ANSI-styled terminal text.

    :param s: markup string, e.g. ``'<err>failed</err>'``
    :param style: ``None`` -> follow ``config.color``; any other value is
        interpreted as a boolean on/off switch for styling.
    :param styles: optional tag -> escape-code mapping (defaults to
        ``default_styles``)
    :return: the rendered string; terminated with ``term.normal`` when styled
    """
    if style is None:
        style = config.color
    else:
        # Bug fix: the original unconditionally set style = True here, so an
        # explicit style=False could never disable styling.
        style = bool(style)
    parser = MyHTMLParser(style=style, styles = styles)
    parser.feed(s)
    return parser.s + (term.normal if style else '')
def styled_print(message, sep=' ', end='\n', file=sys.stdout, flush=False, style = None, styles = None, *args):
    """print()-style helper that routes the joined text through styled().

    NOTE(review): because *args comes after the keyword parameters, extra
    positional arguments fill sep/end/file/... instead of being appended to
    the message - callers must pass everything after `message` by keyword.
    """
    args = [message] + list(args)
    s = sep.join([six.text_type(arg) for arg in args]) + end
    s = styled(s, style = style, styles = styles)
    file.write(s)
    if flush:
        file.flush()
# '<head>***</head> Checkout out <title>SwiftLogging</title> at "<version>v1.0.1</version>"')
#
# # instantiate the parser and fed it some HTML
|
983,191 | 1b291a4e00adcd523e8c4f4aee9ae93158a8e4f6 | #!/usr/bin/env python
import os
import unittest
def collect():
    """Discover and return a TestSuite of every test module beside this file."""
    here = os.path.abspath(os.path.dirname(__file__))
    return unittest.defaultTestLoader.discover(here)
if __name__ == '__main__':
    # Python 2 script: pick the DB backend via PEEWEE_TEST_BACKEND (default sqlite),
    # then hand off to the `tests` module's unittest entry point.
    backend = os.environ.get('PEEWEE_TEST_BACKEND') or 'sqlite'
    print 'RUNNING PEEWEE TESTS WITH [%s]' % backend
    print '=============================================='
    unittest.main(module='tests')
|
983,192 | ba3e522b53fe807fb975b1e0a9af85c578ce168a | N, R = *map(int, input().split()),
# Read N circles (one "x y r" line each) and print each circle's equation.
# NOTE(review): R parsed from the first input line is never used below.
crcs = [[float(x) for x in input().split()] for _ in range(N)]
for crc in crcs:
    x, y, r = crc
    print("(x - %.6f)^2 + (y - %.6f)^2 = %.6f^2" % (x, y, r))
|
983,193 | ed9a9cbe0e02857a4037e5a815f8f08201212226 | from django import forms
from django.core.exceptions import ValidationError
class ListOfDictField(forms.Field):
    """Form field validating a list (or tuple) of dicts.

    Subclasses must define an inner ``Form`` class; each incoming item is
    cleaned by one instance of it.
    """
    def validate_to_python(self, value):
        """
        Validate and clean data.

        :param value: ``None`` or a list/tuple of dicts
        :return: list of cleaned dicts (empty list for ``None``)
        :raises ValidationError: if the container or any item is malformed
        """
        super(ListOfDictField, self).validate(value)
        if value is None:  # idiom fix: identity test instead of `== None`
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError('Must be a list or tuple, got {0}'.format(type(value).__name__))
        cleaned = []
        for index, dct in enumerate(value):
            if not isinstance(dct, dict):
                # Bug fix: the original reported type(value) (always the
                # container type) instead of the offending item's type.
                raise ValidationError('Item {0}: Must be a list of dicts, got {1}'.format(index, type(dct).__name__))
            form = self.Form(dct)
            if form.is_valid():
                cleaned.append(form.cleaned_data)
            else:
                errors = form.errors.as_text()
                raise ValidationError('Item {0}: Invalid format:\n{1}'.format(index, errors))
        return cleaned

    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.
        Raises ValidationError for any errors.
        """
        value = self.validate_to_python(value)
        self.run_validators(value)
        return value
class DictField(forms.Field):
    """Form field validating a single dict against a nested ``Form`` class.

    Subclasses must define an inner ``Form`` class used to clean the value.
    """
    def validate_to_python(self, value):
        """
        Validate and clean data.

        :param value: ``None`` or a dict
        :return: the cleaned dict (empty dict for ``None``)
        :raises ValidationError: if the value is not a dict or fails the Form
        """
        super(DictField, self).validate(value)
        if value is None:  # idiom fix: identity test instead of `== None`
            return {}
        if not isinstance(value, dict):
            raise ValidationError('Must be a dict, got {0}'.format(type(value).__name__))
        form = self.Form(value)
        if form.is_valid():
            return form.cleaned_data
        else:
            errors = form.errors.as_text()
            raise ValidationError(errors)

    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.
        Raises ValidationError for any errors.
        """
        value = self.validate_to_python(value)
        self.run_validators(value)
        return value
|
983,194 | a2dddbb4952e3d48571b907a8b1be77e765b88aa | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
    """Element-wise division of two lists over the first `list_length` slots.

    Positions that cannot be divided contribute 0 and print a diagnostic:
    "division by 0", "out of range" (missing element), or "wrong type".
    Returns the list of quotients.
    """
    results = []
    for position in range(list_length):
        quotient = 0
        try:
            quotient = my_list_1[position] / my_list_2[position]
        except ZeroDivisionError:
            print("division by 0")
        except IndexError:
            print("out of range")
        except TypeError:
            print("wrong type")
        finally:
            results.append(quotient)
    return results
|
983,195 | 6203e6a22e2a3418cbafa4cf26a9dbe2140c7648 | #!/usr/bin/python
# cd /home/tegwyn/ultrasonic_classifier/ && echo whales | sudo -S python3 lora_sender.py
# cd /home/tegwyn/ultrasonic_classifier/ && python3 lora_sender.py
# cd /home/tegwyn/ultrasonic_classifier/ && chmod 775 lora_sender.py
"""
================================================
ABElectronics ADC Pi 8-Channel ADC demo
Requires python smbus to be installed
run with: python demo_readvoltage.py
================================================
Initialise the ADC device using the default addresses and sample rate,
change this value if you have changed the address selection jumpers
Sample rate can be 12,14, 16 or 18
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import time
import os
import time
import busio
import digitalio
import board
import adafruit_si7021
from adafruit_tinylora.adafruit_tinylora import TTN, TinyLoRa
import colorama
from colorama import Fore, Back, Style
import sys
import pandas as pd
import os.path
try:
from ADCPi import ADCPi
except ImportError:
print("Failed to import ADCPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append('..')
from ADCPi import ADCPi
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
# ANSI terminal escape sequences used for colored status output below.
end = "\n"
RED = "\x1b[1;31m"
BLUE='\e[44m'  # NOTE(review): '\e' is not a Python escape; this is a literal backslash+e, not ESC
F_LightGreen = "\x1b[92m"
F_Green = "\x1b[32m"
F_LightBlue = "\x1b[94m"
B_White = "\x1b[107m"
NC = "\x1b[0m" # No Color
Blink = "\x1b[5m"
def main():
    '''
    Read the ADC/Si7021 sensors and the classifier's CSV output, then send one
    species' detection counts (plus temperature, humidity and battery voltage)
    to The Things Network over LoRa. Successive runs rotate through the
    detected species via helpers/species_iteration.txt so a lost packet only
    drops one species' data.
    '''
    adc = ADCPi(0x68, 0x69, 12)
    # read from adc channels and print to screen (scale factors calibrate the dividers)
    batteryPackRead = float(adc.read_voltage(1))*3.9194
    switcherOutRead = float(adc.read_voltage(2))*1.1937
    # NOTE(review): dead assignment - immediately overwritten by the list below.
    bat_species_val = 2
    bat_species_val = [0,0,0,0,0,0,0,0]
    total_audio_events = [0,0,0,0,0,0,0,0]
    df = pd.read_csv("/home/tegwyn/ultrasonic_classifier/From_R_01.csv")
    print("")
    print(df)
    print("")
    n = len(df.columns) # get the number of columns.
    number_of_species_detected = n - 1 # One of the columns is labelled 'BLANK'.
    for index, row in df[:1].iterrows(): # we check only 1 row in the dataframe
        for i in range(1,n):
            bat_name = row.index[i]
            print(bat_name)
            total_audio_events[i-1] = df[bat_name].sum()
            print(total_audio_events[i-1])
            # convert bat name to a number between 0 and 65,536:
            # These must be in aphabetic order ??
            if bat_name == "C_PIP":
                bat_species_val[i-1] = 17
            if bat_name == "HOUSE_KEYS":
                bat_species_val[i-1] = 26
            if bat_name == "NOCTULA":
                bat_species_val[i-1] = 32
            if bat_name == "PLECOTUS":
                bat_species_val[i-1] = 35
            if bat_name == "RHINO_HIPPO":
                bat_species_val[i-1] = 71
            if bat_name == "S_PIP":
                bat_species_val[i-1] = 92
            if bat_name == "NATTERERI":
                bat_species_val[i-1] = 97
    print("")
    # NOTE(review): pin/i2c/spi created here are re-created again below and unused.
    pin = digitalio.DigitalInOut(board.D4)
    # print("Digital IO ok!")
    # Try to create an I2C device
    i2c = busio.I2C(board.SCL, board.SDA)
    # print("I2C ok!")
    # Try to create an SPI device
    spi = busio.SPI(board.SCLK, board.MOSI, board.MISO)
    # print("SPI ok!")
    # print("done!\n")
    """
    Using TinyLoRa with a Si7021 Sensor.
    """
    # Board LED
    led = digitalio.DigitalInOut(board.D4)
    led.direction = digitalio.Direction.OUTPUT
    # Create library object using our bus i2c port for si7021
    i2c = busio.I2C(board.SCL, board.SDA)
    sensor = adafruit_si7021.SI7021(i2c)
    # Create library object using our bus SPI port for radio
    spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    # Dragino LoRa GPS hat settings:
    # Rasperry Pi 4:
    # cs = digitalio.DigitalInOut(board.D25)
    # irq = digitalio.DigitalInOut(board.D7)
    # rst = digitalio.DigitalInOut(board.D17)
    # Jetson Nano:
    cs = digitalio.DigitalInOut(board.D25)
    irq = digitalio.DigitalInOut(board.D8)
    rst = digitalio.DigitalInOut(board.D17)
    # TTN Device Address, 4 Bytes, MSB
    devaddr = bytearray([0x26, 0x01, 0x15, 0x30])
    # { 0x26, 0x01, 0x15, 0x30 }
    # TTN Network Key, 16 Bytes, MSB
    nwkey = bytearray([0x4D, 0xE2, 0x25, 0x50, 0xB5, 0x5D, 0x26, 0xE9,
                       0x34, 0x73, 0x61, 0x07, 0x5A, 0x64, 0x21, 0xA7])
    # { 0x4D, 0xE2, 0x25, 0x50, 0xB5, 0x5D, 0x26, 0xE9, 0x34, 0x73, 0x61, 0x07, 0x5A, 0x64, 0x21, 0xA7 }
    # TTN Application Key, 16 Bytess, MSB
    app = bytearray([0x73, 0xF6, 0xE1, 0x23, 0x26, 0x0A, 0x34, 0x66,
                     0x19, 0x8E, 0x27, 0x2A, 0x81, 0xC4, 0x8B, 0xA1])
    # { 0x73, 0xF6, 0xE1, 0x23, 0x26, 0x0A, 0x34, 0x66, 0x19, 0x8E, 0x27, 0x2A, 0x81, 0xC4, 0x8B, 0xA1 }
    ttn_config = TTN(devaddr, nwkey, app, country='EU')
    lora = TinyLoRa(spi, cs, irq, rst, ttn_config)
    # Set spreading factor:
    lora.set_datarate("SF12BW125")
    # Data Packet to send to TTN
    data = bytearray(10)
    # while True:
    temp_val = sensor.temperature
    humid_val = sensor.relative_humidity
    print (time.strftime("%Y-%d-%b-%H:%M"))
    print('Temperature: %0.2f C' % temp_val)
    print('relative humidity: %0.1f %%' % humid_val)
    print('Bat species array: ', bat_species_val)
    print('Bat audio events array: ', total_audio_events)
    print('Battery pack volts: ', batteryPackRead)
    print("")
    # Encode float as int (two decimal places preserved as x100)
    temp_val = int(temp_val * 100)
    humid_val = int(humid_val * 100)
    batteryPackRead = int(batteryPackRead * 100)
    file = "/home/tegwyn/ultrasonic_classifier/helpers/species_iteration.txt"
    if os.path.isfile(file):
        with open(file, "r") as fp:
            species_iteration = fp.read()
            fp.close()
    # We dont just send all the bat detection data in one go, instead we iterate continuously over the no. of species.
    # This overcomes the problem of data getting lost if transmission fails.
    print("current species iteration = ",species_iteration)
    print("total number_of species_detected = ", number_of_species_detected)
    # Encode payload as bytes: five big-endian 16-bit values
    # (temp, humidity, species code, event count, battery).
    data[0] = (temp_val >> 8) & 0xff
    print("data 0: ",data[0])
    data[1] = temp_val & 0xff
    print("data 1: ",data[1])
    data[2] = (humid_val >> 8) & 0xff
    print("data 2: ",data[2])
    data[3] = humid_val & 0xff
    print("data 3: ",data[3])
    data[4] = (bat_species_val[int(species_iteration)] >> 8) & 0xff
    print("data 4: ",data[4])
    data[5] = bat_species_val[int(species_iteration)] & 0xff
    print("data 5: ",data[5])
    data[6] = (total_audio_events[int(species_iteration)] >> 8) & 0xff
    print("data 6: ",data[6])
    data[7] = total_audio_events[int(species_iteration)] & 0xff
    print("data 7: ",data[7])
    data[8] = (batteryPackRead >> 8) & 0xff
    print("data 8: ",data[8])
    data[9] = batteryPackRead & 0xff
    print("data 9: ",data[9])
    # Advance (and wrap) the species index for the next invocation.
    if int(species_iteration) < (number_of_species_detected -1):
        species_iteration = str(int(species_iteration) +1)
    else:
        species_iteration = "0"
    file = "/home/tegwyn/ultrasonic_classifier/helpers/species_iteration.txt"
    f= open(file, "w+")
    f.write(species_iteration)
    f.close()
    # Send data packet
    # print('Sending packet...')
    sys.stderr.write(F_LightBlue+ "Sending packet..." + '\x1b[0m' + end)
    lora.send_data(data, len(data), lora.frame_counter)
    # print('Packet Sent!')
    sys.stderr.write(RED+ str(data) + '\x1b[0m' + end)
    sys.stderr.write(F_LightBlue+ "..... Packet Sent!\n" + '\x1b[0m' + end)
    led.value = True
    lora.frame_counter += 1
    time.sleep(2)
    led.value = False
    # NOTE(review): bare `exit` is a no-op expression; use exit() or `return`
    # if early termination was intended.
    exit
if __name__ == "__main__":
    # Run one sense-and-transmit cycle when executed directly.
    main()
|
983,196 | 44547228ba51311c1bef6bddf10394e187fc186e | import io
import signal
import subprocess
import sys
import time
import traceback
global cl
class Colorizer(object):
    """Wrap strings in ANSI escape sequences for colored terminal output.

    Each color helper returns the input surrounded by the matching escape
    code, followed by a reset code and a trailing newline.
    """

    RED = "\033[31;1m"
    GREEN = "\033[32;1m"
    YELLOW = "\033[33;1m"
    CYAN = "\033[36;1m"
    RESET = "\033[0m"
    NEWLINE = "\n"

    @classmethod
    def _colorize(cls, string, color):
        # Resolve the escape code by attribute name, e.g. "RED" -> cls.RED.
        prefix = getattr(cls, color)
        return "".join((prefix, string, cls.RESET, cls.NEWLINE))

    @classmethod
    def red(cls, string):
        return cls._colorize(string, "RED")

    @classmethod
    def green(cls, string):
        return cls._colorize(string, "GREEN")

    @classmethod
    def yellow(cls, string):
        return cls._colorize(string, "YELLOW")

    @classmethod
    def cyan(cls, string):
        return cls._colorize(string, "CYAN")
class FakeStdout(io.StringIO):
    """Stand-in for sys.stdout that also supports sys.stdout.buffer.

    Plain io.StringIO is not enough because real stdout exposes a binary
    ``buffer`` attribute; tests that write bytes need that as well.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mirrors sys.stdout.buffer for binary writes.
        self.buffer = io.BytesIO()

    def getvalue(self):
        """Return everything written, mimicking sys.stdout semantics.

        If the bytes written to ``self.buffer`` do not decode as text,
        return the raw bytes unchanged (e.g. a binary file echoed to
        stdout). Otherwise append the decoded buffer contents to the text
        written directly to this stream, so callers see one combined value
        just as they would when reading real stdout output.
        """
        raw = self.buffer.getvalue()
        try:
            decoded = raw.decode()
        except UnicodeDecodeError:
            # Binary content: hand back the bytes as-is.
            return raw
        return super().getvalue() + decoded
def run_command(
    args,
    expected_exit_code=0,
    max_output_chars=1024,
    env=None,
    include_stderr=False,
    binary=False,
    force_subprocess=False,
    cwd=None,
):
    """Run a CodaLab CLI command and return its right-stripped output.

    :param args: command tokens; args[0] is the CLI executable, args[1:] are
        the arguments handed to the CLI itself.
    :param expected_exit_code: exit code the command is expected to produce.
        If we don't care about the exit code, set `expected_exit_code` to None.
    :param max_output_chars: truncation limit for the output echoed to stdout.
    :param env: environment for the subprocess (subprocess mode only).
    :param include_stderr: if True, merge stderr into the captured output.
    :param binary: if True, capture raw bytes instead of utf-8 text.
    :param force_subprocess: if True, spawn a real subprocess instead of
        invoking BundleCLI in-process (slower but more faithful).
    :param cwd: working directory for the subprocess.
    :raises AssertionError: if expected_exit_code is not None and the actual
        exit code differs.
    """
    # We import the following imports here because codalab_service.py imports TestModule from
    # this file. If we kept the imports at the top, then anyone who ran codalab_service.py
    # would also have to install all the dependencies that BundleCLI and CodaLabManager use.
    from codalab.lib.bundle_cli import BundleCLI
    from codalab.lib.codalab_manager import CodaLabManager

    def sanitize(string, max_chars=256):
        # Sanitize and truncate output so it can be printed on the command line.
        # Don't print out binary.
        if isinstance(string, bytes):
            string = '<binary>'
        if len(string) > max_chars:
            string = string[:max_chars] + ' (...more...)'
        return string

    print(">>", *map(str, args), sep=" ", end="\t")
    sys.stdout.flush()
    try:
        kwargs = dict(env=env)
        if not binary:
            kwargs = dict(kwargs, encoding="utf-8")
        if include_stderr:
            kwargs = dict(kwargs, stderr=subprocess.STDOUT)
        if cwd:
            kwargs = dict(kwargs, cwd=cwd)
        if not force_subprocess:
            # In this case, run the Codalab CLI directly, which is much faster
            # than opening a new subprocess to do so.
            stderr = io.StringIO()  # Not used; we just don't want to redirect cli.stderr to stdout.
            stdout = FakeStdout()
            cli = BundleCLI(CodaLabManager(), stdout=stdout, stderr=stderr)
            try:
                cli.do_command(args[1:])
                exitcode = 0
            except SystemExit as e:
                exitcode = e.code
            output = stdout.getvalue()
        else:
            output = subprocess.check_output([a.encode() for a in args], **kwargs)
            exitcode = 0
    except subprocess.CalledProcessError as e:
        output = e.output
        exitcode = e.returncode
    except Exception:
        output = traceback.format_exc()
        exitcode = 1
    if expected_exit_code is not None and exitcode != expected_exit_code:
        colorize = Colorizer.red
        extra = ' BAD'
    else:
        colorize = Colorizer.cyan
        extra = ''
    print(
        colorize(" (exit code %s, expected %s%s)" % (exitcode, expected_exit_code, extra)).strip(),
        end="\t",
    )
    print(sanitize(output, max_output_chars).strip())
    sys.stdout.flush()
    # BUGFIX: the original asserted `expected_exit_code == exitcode`
    # unconditionally, which contradicted the documented contract that
    # expected_exit_code=None means "don't care about the exit code".
    assert (
        expected_exit_code is None or expected_exit_code == exitcode
    ), f'Exit codes don\'t match: got {exitcode}, expected {expected_exit_code}'
    return output.rstrip()
def cleanup(cl, tag, should_wait=True):
    '''
    Removes all bundles and worksheets with the specified tag.

    :param cl: str
        Path to CodaLab command line.
    :param tag: str
        Specific tag use to search for bundles and worksheets to delete.
    :param should_wait: boolean
        Whether to wait for a bundle to finish running before deleting (default is true).
    :return:
    '''
    print('Cleaning up bundles and worksheets tagged with {}...'.format(tag))

    # Delete tagged bundles, 1000 per search, until the query comes back empty.
    bundles_removed = 0
    while True:
        uuids = run_command([cl, 'search', 'tags=%s' % tag, '.limit=1000', '--uuid-only'])
        if not uuids:
            break
        for uuid in uuids.split('\n'):
            if should_wait:
                # Wait until the bundle finishes and then delete it
                run_command([cl, 'wait', uuid])
            run_command([cl, 'rm', uuid, '--force'])
            bundles_removed += 1

    # Delete tagged worksheets the same way.
    worksheets_removed = 0
    while True:
        uuids = run_command([cl, 'wsearch', 'tag=%s' % tag, '.limit=1000', '--uuid-only'])
        if not uuids:
            break
        for uuid in uuids.split('\n'):
            run_command([cl, 'wrm', uuid, '--force'])
            worksheets_removed += 1

    print('Removed {} bundles and {} worksheets.'.format(bundles_removed, worksheets_removed))
class Timer:
    """
    Class that uses signal to interrupt functions while they're running
    if they run for longer than timeout_seconds.
    Can also be used to time how long functions take within its context manager.
    Used for the timing tests.
    """

    def __init__(self, timeout_seconds=1, handle_timeouts=True, uuid=None):
        """
        A class that can be used as a context manager to ensure that code within that context manager times out
        after timeout_seconds time and which times the execution of code within the context manager.

        Parameters:
            timeout_seconds (float): Amount of time before execution in context manager is interrupted for timeout
            handle_timeouts (bool): If True, arm a SIGALRM-based timeout; if False, only time the execution.
            uuid (str): Uuid of bundles running within context manager.
        """
        self.handle_timeouts = handle_timeouts
        self.timeout_seconds = timeout_seconds
        # BUGFIX: the original discarded the uuid argument (self.uuid = None),
        # so timeout messages never mentioned which bundle was running.
        self.uuid = uuid

    def handle_timeout(self, signum, frame):
        # SIGALRM handler: surface the timeout as an exception in the main thread.
        timeout_message = "Timeout ocurred"
        if self.uuid:
            timeout_message += " while waiting for %s to run" % self.uuid
        raise TimeoutError(timeout_message)

    def time_elapsed(self):
        return time.time() - self.start_time

    def __enter__(self):
        self.start_time = time.time()
        if self.handle_timeouts:
            signal.signal(signal.SIGALRM, self.handle_timeout)
            signal.setitimer(signal.ITIMER_REAL, self.timeout_seconds, self.timeout_seconds)
        # BUGFIX: return self so `with Timer(...) as t:` binds the timer
        # (the original returned None).
        return self

    def __exit__(self, type, value, traceback):
        # NOTE: this shadows the time_elapsed() method with a float attribute
        # after the context exits, preserving the original class's behavior.
        self.time_elapsed = time.time() - self.start_time
        if self.handle_timeouts:
            # BUGFIX: the original cancelled the interval timer immediately
            # after arming it in __enter__ (setitimer(..., 0, 0)), so the
            # timeout could never fire; the timer must be disarmed only here.
            signal.setitimer(signal.ITIMER_REAL, 0, 0)
            signal.alarm(0)
|
983,197 | 716d8868a4182dbf2f469028b6155444079e0311 | # -*- coding: utf-8 -*-
# @Time : 2020/9/16 0016 22:18
# @Author : zhengwei
# @File : 18.py
# @Software: PyCharm
class Solution:
    def fourSum(self, nums, target):
        """Return all unique quadruplets in nums summing to target.

        Classic sorted two-pointer approach: fix the two smallest elements
        with nested loops, then scan the remaining suffix from both ends,
        skipping duplicates at every position.
        """
        nums = sorted(nums)
        size = len(nums)
        results = []
        for i in range(size):
            # Skip duplicate choices for the first element.
            if i != 0 and nums[i] == nums[i - 1]:
                continue
            for j in range(i + 1, size):
                # Skip duplicate choices for the second element.
                if j != i + 1 and nums[j] == nums[j - 1]:
                    continue
                lo, hi = j + 1, size - 1
                while lo < hi:
                    total = nums[i] + nums[j] + nums[lo] + nums[hi]
                    if total > target:
                        hi -= 1
                    elif total < target:
                        lo += 1
                    else:
                        results.append([nums[i], nums[j], nums[lo], nums[hi]])
                        hi -= 1
                        lo += 1
                        # Skip duplicates for the fourth and third elements.
                        while hi > lo and nums[hi] == nums[hi + 1]:
                            hi -= 1
                        while hi > lo and nums[lo] == nums[lo - 1]:
                            lo += 1
        return results
# Smoke test: find all unique quadruplets in the list that sum to -7.
test = Solution()
result = test.fourSum([-1,-5,-5,-3,2,5,0,4], -7)
print(result)
|
983,198 | e489c7d2ee9ebe97b0bef9f41398cf330e31f759 | #!/usr/bin/python
from PIL import Image
# Blend each frame in [881, 8663] 50/50 with its stylized counterpart.
# Frame files are zero-padded to 7 digits, e.g. 0000881.jpg.
for n in range(881,8664):
    f = "{:07d}.jpg".format(n)
    print(f)
    # Both inputs are converted to RGBA because Image.blend requires
    # matching modes; the result is converted back to RGB for saving as JPEG.
    o = Image.open("original/{}".format(f)).convert('RGBA')
    g = Image.open("el-greco/{}".format(f)).convert('RGBA')
    x = Image.blend(o, g, alpha=0.5)
    x = x.convert('RGB')
    x.save("staging/{}".format(f))
|
983,199 | 478546ba299769e8b87b463f05b74300fb0645ce | from Lib.base_functions import *
from config import *
from elements import mix
def __mix_create():
    """Create combo ("mix") products through the web UI, one per index."""
    element_click('link_text', mix['mix_link_text'])
    sleep(0.5)
    element_click('link_text', mix['mix_define_link_text'])
    sleep(1)
    for repeat in range(start_mum, end_num):
        element_click('link_text', mix['new_link_text'])
        sleep(1)
        # Name each combo uniquely with its 1-based index and the run timestamp.
        element_send_keys('xpath', mix['name_xpath'], '合成品-' + str(repeat + 1) + '-' + str(run_time))
        element_click('xpath', mix['sort_xpath'])
        element_click('link_text', mix['sort_item_link_text'])
        element_click('xpath', mix['unit_xpath'])
        element_click('link_text', mix['unit_item_link_text'])
        element_send_keys('xpath', mix['price_xpath'], '26')
        element_click('xpath', mix['cinema_xpath'])
        element_click('xpath', mix['cinema_check_xpath'])
        element_click('link_text', mix['cinema_ok_btn_link_text'])
        element_click('xpath', mix['save_btn_xpath'])
        sleep(0.3)
        element_click('xpath', mix['sure_btn_xpath'])
        sleep(0.3)
        element_click('xpath', mix['know_btn_xpath'])
        sleep(1)
def __mix_edit():
    """Open each existing combo product's edit form and re-save it."""
    element_click('link_text', mix['mix_link_text'])
    sleep(0.5)
    element_click('link_text', mix['mix_define_link_text'])
    sleep(1)
    for repeat in range(start_mum, end_num):
        # Page through the 10-per-page listing to reach this item's page.
        for _ in range(int(repeat / 10)):
            element_click('class', mix['next_page_class'])
            sleep(0.3)
        sleep(0.5)
        edit_btn = b.find_elements_by_link_text(mix['edit_btn_link_text'])
        print(edit_btn)
        print('repeat: ' + str(repeat) + ' repeat%10: ' + str(repeat % 10))
        # With the workflow switch on, always click the first edit button;
        # otherwise pick this item's row within the current page.
        if is_workflow_on:
            edit_btn[0].click()
        else:
            edit_btn[repeat % 10].click()
        sleep(1)
        element_click('xpath', mix['save_btn_xpath'])
        sleep(0.3)
        element_click('xpath', mix['know_btn_xpath'])
        sleep(1)
def __mix_match_edit():
    """Edit each combo's material list: add one material with quantity 1."""
    element_click('link_text', mix['mix_link_text'])
    sleep(0.5)
    element_click('link_text', mix['mix_match_link_text'])
    sleep(1)
    for repeat in range(start_mum, end_num):
        # Page through the 10-per-page listing to reach this item's page.
        for _ in range(int(repeat / 10)):
            element_click('class', mix['next_page_class'])
            sleep(0.3)
        sleep(0.5)
        edit_btn = b.find_elements_by_link_text(mix['edit_btn_link_text'])
        print(edit_btn)
        print('repeat: ' + str(repeat) + ' repeat%10: ' + str(repeat % 10))
        # With the workflow switch on, always click the first edit button;
        # otherwise pick this item's row within the current page.
        if is_workflow_on:
            edit_btn[0].click()
        else:
            edit_btn[repeat % 10].click()
        sleep(1)
        element_click('xpath', mix['add_material_xpath'])
        sleep(0.5)
        element_click('xpath', mix['checkbox_xpath'])
        element_click('link_text', mix['material_ok_btn_link_text'])
        sleep(0.5)
        element_send_keys('xpath', mix['material_num_xpath'], '1')
        element_click('xpath', mix['material_save_btn_xpath'])
        sleep(0.3)
        element_click('xpath', mix['know_btn_xpath'])
        sleep(1)
def mix_create():
    """End-to-end flow: log in, open goods management, create combos, finish."""
    login()
    goto_goods_management()
    __mix_create()
    finish()
def mix_edit():
    """End-to-end flow: log in, open goods management, re-save combos, finish."""
    login()
    goto_goods_management()
    __mix_edit()
    finish()
def mix_match_edit():
    """End-to-end flow: log in, open goods management, edit combo materials, finish."""
    login()
    goto_goods_management()
    __mix_match_edit()
    finish()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.