index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
992,600 | b590b925684fa75d9d136910d8f573472d13eefb | import tensorflow as tf
import random
import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.contrib.learn.python.learn.datasets.mnist import extract_images, extract_labels
tf.set_random_seed(777)
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
from keras.utils import np_utils
from PIL import Image
from numpy import *
#해당 주석에서 말하는 단계는 카톡에 올려준 사진을 기준으로 한다!
#입력 이미지 shape(28x28)
img_rows, img_cols = 28,28
path1 = 'C:/Users/gang3/Desktop/JPG-PNG-to-MNIST-NN-Format/JPG-PNG-to-MNIST-NN-Format/training-images/' #path of folder of images
path2 = 'C:/Users/gang3/Desktop/JPG-PNG-to-MNIST-NN-Format/JPG-PNG-to-MNIST-NN-Format/training-images-resized/' #path of folder to save images
listing = os.listdir(path1)
num_samples=size(listing)
print('데이터 전처리 시작~')
for file in listing:
im = Image.open(path1 + '/' + file)
img = im.resize((img_rows,img_cols))
gray = img.convert('L')
gray.save(path2 +'/' + file, "bmp")
print('데이터 전처리중~ing'+file)
imlist = os.listdir(path2)
im1 = array(Image.open('C:/Users/gang3/Desktop/JPG-PNG-to-MNIST-NN-Format/JPG-PNG-to-MNIST-NN-Format/training-images' + '/'+ imlist[0])) # open one image to get size
m,n = im1.shape[0:2]#height,width
imnbr = len(imlist)
immatrix = array([array(Image.open('C:/Users/gang3/Desktop/JPG-PNG-to-MNIST-NN-Format/JPG-PNG-to-MNIST-NN-Format/training-images-resized'+ '/' + im2)).flatten()
for im2 in imlist],'f')
#글자 레이블 세팅
# One label per training image; np.ones means any index NOT covered by the
# ranges below silently keeps label 1 (ㄲ) — see the NOTE at the bottom.
label = np.ones((num_samples,), dtype=int)
# Label ranges follow the on-disk ordering of the resized training images.
label[0:7996] = 0        # ㄱ
label[7997:12825] = 1    # ㄲ
label[12826:21145] = 2   # ㄴ
label[21146:21370] = 3   # ㄷ
label[21371:21545] = 4   # ㄸ
label[21546:21710] = 5   # ㄹ
label[21711:21804] = 6   # ㅁ
label[21805:21960] = 7   # ㅂ
label[21961:22104] = 8   # ㅅ
label[22105:22151] = 9   # ㅆ
label[22152:22407] = 10  # ㅇ
label[22408:22487] = 11  # ㅍ
label[22488:22544] = 12  # ㅋ
label[22545:22600] = 13  # ㅌ
label[22601:22706] = 14  # ㅎ
label[22707:26780] = 15  # ㅏ
label[26781:33868] = 16  # ㅓ
label[33869:33938] = 17  # ㅔ
label[33939:33987] = 18  # ㅖ
label[33988:38722] = 19  # ㅗ
label[38723:38895] = 20  # ㅘ
# BUG FIX: was label[39986:38953] = 21 — a reversed (empty) slice that never
# assigned class 21, leaving those samples at the np.ones default of 1.
# The start index now continues from the previous range's end.
label[38896:38953] = 21  # ㅛ
label[38954:43355] = 22  # ㅜ
label[43356:43491] = 23  # ㅠ
label[43492:43657] = 24  # ㅡ
label[43658:43725] = 25  # ㅢ
label[43726:43774] = 26  # ㅣ
# NOTE(review): each slice's exclusive end leaves a one-sample gap before the
# next range starts (e.g. index 7996 is never assigned) — confirm whether the
# boundaries were meant to be inclusive.
data,Label = shuffle(immatrix,label, random_state=2)
train_data = [data,Label]
img=immatrix[167].reshape(img_rows,img_cols)
print('데이터 작업 완료!')
#batch_size to train
batch_size = 32
# 10가지 중에서 무엇인지 one-hot encoding으로 출력
nb_classes = 27# number of epochs to train
nb_epoch = 10
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
(X, y) = (train_data[0],train_data[1])
# STEP 1: split X and y into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)
X_train = X_train.reshape(X_train.shape[0],img_rows*img_cols)
X_test = X_test.reshape(X_test.shape[0],img_rows*img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
#학습률
learning_rate = 0.001
#전체 학습 횟수
training_epochs =50
#학습 할 때 얼만큼의 데이터만큼 잘라서 학습할지 정한다.
batch_size = 100
#Dropout의 크기를 정하기 위한 변수
keep_prob = tf.placeholder(tf.float32)
print('학습 레이어 세팅\n');
# 비트맵 이미지의 Input Layer
X = tf.placeholder(tf.float32, [None, 784])
#비트맵 이미지를 재조정(인자 설명 : 몇개의 이미지 미정, 가로 28,세로28, 색깔 1개)
X_img = tf.reshape(X, [-1, 28, 28, 1])
Y = tf.placeholder(tf.float32, [None, 27])
#1st Trial
#필터의 크기 : 3x3, 1=색깔, 32=필터 개수, stddeb=미분률?
#W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
#
##1단계 작업 시작
##첫번째 Conv Layer
#L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
#L1 = tf.nn.relu(L1)
#
##첫번째 Max_pooling Layer-->비트맵 값중에서 가장 큰것을 색출-->이미지 크기 줄이고
##Subsampling하는 효과!
##ksize=2x2
##strides=2x2이기 때문에 이미지의 크기가 반으로 줄어들게 된다.
#L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
# strides=[1, 2, 2, 1], padding='SAME')
##dropout 적용
#L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
## Conv -> (?, 28, 28, 32)
## Pool -> (?, 14, 14, 32)
##1단계 처리 완료 결과!(윗부분)
#
#
#
#
##2단계 시작->현재 이미지 상태 (n개, 14,14,32개 필터)
##필터 : 3x3, 32는 1단계에서의 필터 개수와 동일해야 한다. 64는 현재 정하는 필터의 개수
#W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
#
#
##두번째 Conv Layer
#L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
#L2 = tf.nn.relu(L2)
##두번째 MaxPooling Layer, 필터:2x2, 간격이동:2칸씩==>이미지의 크기 2배 줄어든다.
#L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
# strides=[1, 2, 2, 1], padding='SAME')
#L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
##두번째 처리 결과
## Conv ->(?, 14, 14, 64)
## Pool ->(?, 7, 7, 64)
#
#
#
## L3 ImgIn shape=(?, 7, 7, 64)
##3단계 시작
##필터 : 3x3, 64는 2단계에서의 필터 개수와 동일해야 한다.
##128은 현재 정하는 필터의 개수
#W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
#
##3단계 Conv Layer
#L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
#L3 = tf.nn.relu(L3)
##3단게 MaxPooling Layer, 필터크기 :2x2, 간격이동 :2칸씩-->이미지의 크기 2배로 축소
#L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
# 1, 2, 2, 1], padding='SAME')
#L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
#
#
#W3_3 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01))
#
##3단계 Conv Layer
#L3_3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
#L3_3 = tf.nn.relu(L3)
##3단게 MaxPooling Layer, 필터크기 :2x2, 간격이동 :2칸씩-->이미지의 크기 2배로 축소
#L3_3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
# 1, 2, 2, 1], padding='SAME')
#L3_3 = tf.nn.dropout(L3, keep_prob=keep_prob)
#
#
##현재 3단계 처리 완료된 상황의 픽셀을 벡터형으로 늘어 놓기(Fully Connected Layer 1)
#L3_flat = tf.reshape(L3, [-1, 256 * 4 * 2])
#
##입력 노드 개수 : 128*4*4, 출력 노드 개수 :625개
#W4 = tf.Variable(tf.random_normal([256 * 4 * 2, 625],stddev=0.01))
#
##bias도 출력노드 개수와 같이 설정
#b4 = tf.Variable(tf.random_normal([625]))
#L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
#L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
#L4_flat=tf.reshape(L4,[-1,625])
##Fully Connected Layer 1 완료
#
#
##Fully Connected Layer 2 시작
#W5 = tf.Variable(tf.random_normal([625, 124],stddev=0.01))
#
#b5 = tf.Variable(tf.random_normal([124]))#bias=출력 노드 개수
#L5=tf.nn.relu(tf.matmul(L4_flat,W5)+b5)
#L5 = tf.nn.dropout(L5, keep_prob=keep_prob)
#L5_flat=tf.reshape(L5,[-1,124])
#
#W7 = tf.Variable(tf.random_normal([124,62],stddev=0.01))
#
#b7 = tf.Variable(tf.random_normal([62]))#bias=출력 노드 개수
#L7=tf.nn.relu(tf.matmul(L5_flat,W7)+b7)
#
#
##Fully Connected Layer 3
#W6=tf.Variable(tf.random_normal([62,27],stddev=0.01))
#b6=tf.Variable(tf.random_normal([27]))
#logits = tf.matmul(L7, W6) + b6
#2nd Trial
# ---- Layer 1: Conv(3x3, 1->32) + ReLU + 2x2 max-pool + dropout ----
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
L1 = tf.nn.relu(L1)
# 2x2 pooling with stride 2 halves the spatial size (subsampling).
L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1], padding='SAME')
# dropout applied; the keep probability is fed via `keep_prob` at run time
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
# Conv -> (?, 28, 28, 32)
# Pool -> (?, 14, 14, 32)
# ---- Layer 2: Conv(3x3, 32->64) + ReLU + 2x2 max-pool + dropout ----
# Input filter depth (32) must match layer 1's output depth.
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1], padding='SAME')
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
# Conv ->(?, 14, 14, 64)
# Pool ->(?, 7, 7, 64)
# ---- Layer 3: Conv(3x3, 64->128) + ReLU + 2x2 max-pool + dropout ----
W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
L3 = tf.nn.relu(L3)
# SAME padding, stride 2: 7 -> ceil(7/2) = 4
L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
                    1, 2, 2, 1], padding='SAME')
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
# ---- Layer 4: Conv(3x3, 128->256) + ReLU + 2x2 max-pool + dropout ----
W3_3 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01))
L3_3 = tf.nn.conv2d(L3, W3_3, strides=[1, 1, 1, 1], padding='SAME')
L3_3 = tf.nn.relu(L3_3)
# 4 -> ceil(4/2) = 2, so the feature map ends up (?, 2, 2, 256)
L3_3 = tf.nn.max_pool(L3_3, ksize=[1, 2, 2, 1], strides=[
                      1, 2, 2, 1], padding='SAME')
L3_3 = tf.nn.dropout(L3_3, keep_prob=keep_prob)
# Flatten (?, 2, 2, 256) -> (?, 1024) for the fully connected layers.
L3_flat = tf.reshape(L3_3, [-1, 256 * 2 * 2])
# FC1: 256*2*2 = 1024 inputs -> 625 outputs
W4 = tf.Variable(tf.random_normal([256 * 2 * 2, 625], stddev=0.01))
# bias sized to match the output node count
b4 = tf.Variable(tf.random_normal([625]))
L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
L4_flat = tf.reshape(L4, [-1, 625])  # no-op reshape; matmul already yields (?, 625)
# FC2: 625 -> 124
W5 = tf.Variable(tf.random_normal([625, 124], stddev=0.01))
b5 = tf.Variable(tf.random_normal([124]))  # bias = output node count
L5 = tf.nn.relu(tf.matmul(L4_flat, W5) + b5)
L5 = tf.nn.dropout(L5, keep_prob=keep_prob)
L5_flat = tf.reshape(L5, [-1, 124])  # no-op reshape, kept as in the original graph
# FC3: 124 -> 62
W7 = tf.Variable(tf.random_normal([124, 62], stddev=0.01))
b7 = tf.Variable(tf.random_normal([62]))  # bias = output node count
L7 = tf.nn.relu(tf.matmul(L5_flat, W7) + b7)
# Output layer: 62 -> 27 class logits (no softmax here; applied inside the loss)
W6 = tf.Variable(tf.random_normal([62, 27], stddev=0.01))
b6 = tf.Variable(tf.random_normal([27]))
logits = tf.matmul(L7, W6) + b6
# Softmax cross-entropy against the one-hot labels Y, averaged over the batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=Y, logits=logits))
# Adam optimizer (generally preferred here over plain gradient descent).
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Create the session and initialise all variables before training.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#모델 학습 시키기
#위에서 정한 Epoch값 만큼 전체 데이터 순환한다.
print('이미지 학습 시작')
# NOTE(review): `training_epochs` (=50) is defined above but unused here —
# confirm whether 100 or training_epochs was intended.
for epoch in range(100):
    avg_cost = 0
    # total_batch = (number of training samples / mini-batch size)
    total_batch = int(X_train.shape[0] / batch_size)
    for i in range(total_batch):
        # BUG FIX: the original fed a single sample per step
        # (np.array([X_train[i].tolist()])), so only the first `total_batch`
        # samples were ever trained on. Feed a real mini-batch slice instead.
        batch_x = X_train[i * batch_size:(i + 1) * batch_size]
        batch_y = Y_train[i * batch_size:(i + 1) * batch_size]
        # keep_prob 0.5 -> dropout keeps 50% of activations during training
        feed_dict = {X: batch_x, Y: batch_y, keep_prob: 0.5}
        # run one optimisation step and fetch the batch cost
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        # accumulate the mean cost over this epoch
        avg_cost += c / total_batch
    # progress report for the current epoch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('학습 완료!')
# Evaluate accuracy on the held-out test set:
# argmax over logits vs. the one-hot Y gives predicted vs. true class indices.
# keep_prob: 1 ==> dropout disabled (all weights used) at evaluation time.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('학습 정확도:', sess.run(accuracy, feed_dict={X: X_test, Y:Y_test, keep_prob: 1}))
##테스트용 데이터 중에서 하나 랜덤으로 선택한다.
#r = random.randint(0, len(Y_test) - 1)
##선택한 값 출력
#print("선택한 레이블 값: ", sess.run(tf.argmax(X_test[r:r + 1], 1)))
##학습 모델이 예측한 값 출력
##여기에서는 keep_prob:1==>모든 weight를 사용한다.
#print("모델이 예측한 값: ", sess.run(
# tf.argmax(logits, 1), feed_dict={X: Y_test[r:r + 1], keep_prob: 1}))
#for i in range(10):
# n=np.random.randint(647)
# plt.imshow(X_test[n].reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()
# print('정답 인덱스 : ',sess.run(tf.argmax(logits, 1), feed_dict={X: X_test[n:n + 1],Y:Y_test[n:n+1], keep_prob: 1}))
# print('\n')
#
#numbers1="[0] [1] [2] [3] [4] [5]"
#char1=" ㄱ ㄲ ㄴ ㄷ ㄸ ㄹ"
#numbers2="[6] [7] [8] [9] [10] [11]"
#char2=" ㅁ ㅂ ㅅ ㅆ ㅇ ㅊ"
#numbers3="[12] [13] [14] [15] [16] [17]"
#char3=" ㅋ ㅌ ㅍ ㅎ ㅏ ㅑ"
#numbers4="[18] [19] [20] [21] [22] [23]"
#char4=" ㅓ ㅔ ㅖ ㅗ ㅘ ㅚ"
#numbers5="[24] [25] [26] [27] [28] [29]"
#char5=" ㅛ ㅜ ㅠ ㅡ ㅢ ㅣ"
#
#
#for i in range(3):
# n=np.random.randint(len(X_test))
# plt.imshow(X_test[n].reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()
# print('<<Labels List>>')
# print(numbers1+'\n'+char1)
# print(numbers2+'\n'+char2)
# print(numbers3+'\n'+char3)
# print(numbers4+'\n'+char4)
# print(numbers5+'\n'+char5)
# print('정답 인덱스 : ',sess.run(tf.argmax(logits, 1), feed_dict={X: X_test[n:n + 1],Y:Y_test[n:n+1], keep_prob: 1}))
# print('\n')
|
992,601 | 80ac3d434e9905c9ef8ead496829296c56cf712a | #!/usr/bin/env python3
import random
import requests
import argparse
from requests import Request
from xml.etree import ElementTree as ET
from Crypto.PublicKey import RSA
from Crypto.Util import number
from Crypto.Cipher import PKCS1_OAEP
from common import e64bs, e64s, d64s, d64b, d64sb, hexlify
########################################
class Soapifier(object):
    """Wraps CT-KIP payloads in the RSA SOAP envelope and unwraps replies."""

    soap_env_tmpl = '''<?xml version="1.0" encoding="UTF-8"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<soapenv:Body>
<{0} xmlns="http://ctkipservice.rsasecurity.com">
<AuthData >{2}</AuthData>
<ProvisioningData>{3}</ProvisioningData>
<{1}>{4}</{1}>
</{0}>
</soapenv:Body>
</soapenv:Envelope>'''

    def __init__(self, url, auth):
        self.url = url
        self.auth = auth

    def make_ClientRequest(self, action, provisioning_data, body):
        """Build a POST Request carrying a base64-wrapped ClientRequest."""
        envelope = self.soap_env_tmpl.format(
            'ClientRequest', 'Request', self.auth,
            e64s(provisioning_data), e64s(body))
        headers = {
            'Authorization': self.auth,
            'SOAPAction': action,
            'content-type': 'application/vnd.otps.ctk-kip',
        }
        return Request('POST', self.url, data=envelope, headers=headers)

    def parse_ServerResponse(self, response):
        """Return the (ProvisioningData, Response) XML trees of a ServerResponse.

        Raises RuntimeError(faultcode, faultstring) on a SOAP Fault.
        """
        root = ET.fromstring(response.content)
        fault = root.find('.//{http://schemas.xmlsoap.org/soap/envelope/}Fault')
        if fault is not None:
            raise RuntimeError(fault.find('faultcode').text,
                               fault.find('faultstring').text)
        assert root.tag == '{http://schemas.xmlsoap.org/soap/envelope/}Envelope'
        wrapper = root.find('.//{http://ctkipservice.rsasecurity.com}ServerResponse')
        # AuthData element is present but not currently validated
        # (original kept the check commented out):
        #assert ad.text == self.auth == response.headers.get('Authorization')
        ad = wrapper.find('{http://ctkipservice.rsasecurity.com}AuthData')
        prov = wrapper.find('{http://ctkipservice.rsasecurity.com}ProvisioningData')
        inner = wrapper.find('{http://ctkipservice.rsasecurity.com}Response')
        return (ET.fromstring(d64s(''.join(prov.itertext()))),
                ET.fromstring(d64s(''.join(inner.itertext()))))
########################################
pd='''<?xml version="1.0"?><ProvisioningData><Version>5.0.2.440</Version><Manufacturer>RSA Security Inc.</Manufacturer><FormFactor/></ProvisioningData>'''
req1_tmpl='''<ClientHello xmlns="http://www.rsasecurity.com/rsalabs/otps/schemas/2005/11/ct-kip#" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Version="1.0"><SupportedKeyTypes xmlns=""><Algorithm xsi:type="xsd:anyURI">http://www.rsasecurity.com/rsalabs/otps/schemas/2005/09/otps-wst#SecurID-AES</Algorithm></SupportedKeyTypes><SupportedEncryptionAlgorithms xmlns=""><Algorithm xsi:type="xsd:anyURI">http://www.w3.org/2001/04/xmlenc#rsa-1_5</Algorithm></SupportedEncryptionAlgorithms><SupportedMACAlgorithms xmlns=""><Algorithm xsi:type="xsd:anyURI">http://www.rsasecurity.com/rsalabs/otps/schemas/2005/11/ct-kip#ct-kip-prf-aes</Algorithm></SupportedMACAlgorithms></ClientHello>'''
req2_tmpl='''<?xml version="1.0" encoding="UTF-8"?><ClientNonce xmlns="http://www.rsasecurity.com/rsalabs/otps/schemas/2005/11/ct-kip#" Version="1.0" SessionID="{session_id}"><EncryptedNonce xmlns="">{encrypted_client_nonce}</EncryptedNonce><Extensions xmlns="" xmlns:ds="http://www.w3.org/2000/09/xmldsig#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Extension xmlns="" xmlns:ct-kip="http://www.rsasecurity.com/rsalabs/otps/schemas/2005/12/ct-kip#" xmlns:ds="http://www.w3.org/2000/09/xmldsig#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><Data>{server_nonce}</Data></Extension></Extensions></ClientNonce>'''
def main():
    """CLI entry point: run the CT-KIP provisioning handshake end to end."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='count')
    parser.add_argument('url')
    parser.add_argument('activation_code')
    opts = parser.parse_args()
    client = CtKipClient(opts.url, opts.activation_code, opts.verbose)
    session_id, server_nonce, pubk = client.startService()
    print("Got server nonce and RSA pubkey:\n{}\n{}".format(
        hexlify(server_nonce), pubk.exportKey('PEM').decode()))
    key_id, token_id, key_exp, mac = client.serverFinished(session_id, server_nonce)
    print("Got key ID, token ID, key expiration date, and MAC:"
          "\nKeyID: {}\nTokenID: {}\nExpiration: {}\nMAC: {}".format(
              key_id, token_id, key_exp, mac))
class CtKipClient(object):
    """Client for the RSA CT-KIP token-provisioning exchange."""

    def __init__(self, url, activation_code, verbose=0):
        self.s = requests.session()
        self.s.headers['user-agent'] = 'HTTPPOST'
        self.soap = Soapifier(url, activation_code)
        self.server_pubkey = None  # set by startService()
        self.verbose = verbose

    def startService(self):
        """Send StartService; return (session_id, server_nonce, server RSA pubkey)."""
        # send initial request
        req1 = self.soap.make_ClientRequest('StartService', pd, req1_tmpl)
        # get session ID, server key, and server nonce in response
        raw_res1 = self.s.send(self.s.prepare_request(req1))
        if self.verbose:
            print(raw_res1.text)
        pd_res1, res1 = self.soap.parse_ServerResponse(raw_res1)
        if self.verbose:
            print(res1)
        session_id = res1.attrib['SessionID']
        # reconstruct the server's RSA public key from its XML-DSig parts
        k = res1.find('.//{http://www.w3.org/2000/09/xmldsig#}RSAKeyValue')
        mod = number.bytes_to_long(d64sb(k.find(
            '{http://www.w3.org/2000/09/xmldsig#}Modulus').text))
        exp = number.bytes_to_long(d64sb(k.find(
            '{http://www.w3.org/2000/09/xmldsig#}Exponent').text))
        pubk = RSA.construct((mod, exp))
        pl = res1.find('.//Payload')
        server_nonce = d64sb(pl.find('Nonce').text)
        self.server_pubkey = pubk
        return (session_id, server_nonce, pubk)

    def serverFinished(self, session_id, server_nonce, client_none=None):
        """Send ServerFinished; return (key_id, token_id, key_exp, mac).

        ``client_none`` (name kept for backward compatibility) may supply a
        16-byte client nonce; otherwise a random one is generated.
        """
        # BUG FIX: the original generated a nonce only when client_none was
        # None but then used `client_nonce` unconditionally, so passing an
        # explicit nonce raised NameError. Honour the caller-supplied value.
        if client_none is None:
            client_nonce = random.getrandbits(16 * 8).to_bytes(16, byteorder='big')
        else:
            client_nonce = client_none
        # encrypt the client nonce under the server's RSA public key
        cipher = PKCS1_OAEP.new(self.server_pubkey)
        encrypted_client_nonce = cipher.encrypt(client_nonce)
        print("Generated client nonce:\n\tplaintext: {}\n\tencrypted: {}".format(
            hexlify(client_nonce), hexlify(encrypted_client_nonce)))
        # send second request
        req2_filled = req2_tmpl.format(
            session_id=session_id, encrypted_client_nonce=e64bs(encrypted_client_nonce), server_nonce=e64bs(server_nonce))
        if self.verbose:
            print(req2_filled)
        req2 = self.soap.make_ClientRequest('ServerFinished', pd, req2_filled)
        raw_res2 = self.s.send(self.s.prepare_request(req2))
        if self.verbose:
            print(raw_res2)
        pd_res2, res2 = self.soap.parse_ServerResponse(raw_res2)
        if self.verbose:
            print(res2)
        # get stuff from response
        # NOTE(review): key_id is read from <TokenID> and token_id from
        # <KeyID> — the names look swapped relative to the XML tags. Kept
        # as-is to preserve existing caller expectations; confirm intent.
        key_id = d64b(res2.find('TokenID').text)
        token_id = d64b(res2.find('KeyID').text)
        key_exp = res2.find('KeyExpiryDate').text
        mac = d64b(res2.find('Mac').text)
        return (key_id, token_id, key_exp, mac)
if __name__ == "__main__":
main()
|
992,602 | 6ce2d8574efefdb435c784ad97c145769de08900 | from logging import getLogger
from bugyocloudclient import BugyoCloudClient, BugyoCloudClientError
from bugyocloudclient.config import CONTENT_ENCODING
from bugyocloudclient.models.authinfo import AuthInfo
from bugyocloudclient.utils.urlproducer import produce_url
from requests import Response
logger = getLogger(__name__)
class Authenticate(object):
    """Performs the Bugyo Cloud login (認証します)."""

    def call(self, client: BugyoCloudClient, token: str, auth_info: AuthInfo) -> str:
        """POST the login form and return the RedirectURL from the response.

        Raises on HTTP errors (via raise_for_status) and raises
        BugyoCloudClientError when the response carries no RedirectURL.
        """
        url = self.__get_url(client)
        data = self.__create_data(token, auth_info)
        logger.debug('Trying to POST. url=%s data=%s', url, data)
        resp = client.session.post(url=url, data=data)
        resp.raise_for_status()
        return self.__parse_response(resp)

    # BUG FIX: return annotation was `-> Response`, but this builds and
    # returns the login form payload dict.
    def __create_data(self, token: str, auth_info: AuthInfo) -> dict:
        """Build the login form payload expected by the Bugyo Cloud endpoint."""
        return {
            'btnLogin': None,
            'OBCID': auth_info.login_id,
            'Password_d1': None,
            'Password_d2': None,
            'Password_d3': None,
            'Password': auth_info.password,
            '__RequestVerificationToken': token,
            'X-Requested-With': 'XMLHttpRequest'
        }

    # BUG FIX: return annotation was `-> None`, but this returns the
    # RedirectURL string (or raises).
    def __parse_response(self, response: Response) -> str:
        """Extract 'RedirectURL' from the JSON response; raise if absent."""
        json = response.json()
        if 'RedirectURL' in json:
            return json['RedirectURL']
        else:
            content = response.content
            logger.critical('Response is not to be expected. content=%s', content)
            raise BugyoCloudClientError('Response is not to be expected.')

    def __get_url(self, client: BugyoCloudClient) -> str:
        """Resolve the endpoint URL keyed by this class's name."""
        key = type(self).__name__
        return produce_url(key, client.param)
|
992,603 | 96d7a7c34dc7d23f6764ffcceffe0db0f3b884e0 | # Import dependencies
import pandas as pd
#import unidecode
# Import data
wine_data_df = pd.read_csv("Data/winemag-data-130k-v2.csv")
print(wine_data_df.shape)
wine_data_df.head()
## Select and keep only US data
# Only keep rows where country = US
US_wine_data_df = wine_data_df.loc[wine_data_df["country"] == "US"]
print(US_wine_data_df.shape)
US_wine_data_df.head()
# Drop columns that are not useful: Unnamed: 0, country, taster_name, taster_twitter_handle
US_wine_data_df = US_wine_data_df.drop(columns=["Unnamed: 0", "designation", "region_2","country","taster_name", "taster_twitter_handle"], axis=1)
# Keep California, Washington, and Oregon
WestCoast_wine_data = US_wine_data_df.loc[US_wine_data_df.province.isin(["California","Washington", "Oregon"])]
print(WestCoast_wine_data.shape)
WestCoast_wine_data.head()
## Evaluate data and clean
WestCoast_wine_data_title = WestCoast_wine_data
# Remove the region within the title
WestCoast_wine_data_title ['title'] = WestCoast_wine_data_title['title'].str.replace(r"\(.*\)","")
# Remove the state from region
WestCoast_wine_data_title ['region_1'] = WestCoast_wine_data_title['region_1'].str.replace(r"\(.*\)","")
# Create a region list
region_list = list(WestCoast_wine_data_title['region_1'])
print(len(region_list))
region_list
# Look at dataframe info again.
WestCoast_wine_data_title.info()
# Drop rows with NaN. Max rows US =50259
# "price" only has 50046 rows.
WestCoast_wine_data_title = WestCoast_wine_data_title.dropna()
print(WestCoast_wine_data_title.shape)
WestCoast_wine_data_title.head(20)
## Binning Variety, Region
variety_counts = WestCoast_wine_data_title.variety.value_counts()
variety_counts
# Visualize the value counts of variety
variety_counts.plot.density()
replace_variety = list(variety_counts[variety_counts <= 300].index)
# Replace in dataframe
for variety in replace_variety:
WestCoast_wine_data_title.variety = WestCoast_wine_data_title.variety.replace(variety,"Other")
# Check to make sure binning was successful
WestCoast_wine_data_title.variety.value_counts()
# Remove varieties where variety count <= 300
WestCoast_wine_data_title = WestCoast_wine_data_title[WestCoast_wine_data_title.variety != "Other"]
print(WestCoast_wine_data_title.shape)
WestCoast_wine_data_title.head()
# Determine value_counts for region binning
region_counts = WestCoast_wine_data_title.region_1.value_counts()
list(region_counts)
# Visualize the value counts of variety
region_counts.plot.density()
# Reduce regions list using same cut-off that was used for machine learning model.
replace_region = list(region_counts[region_counts <= 300].index)
# Replace in dataframe
for region in replace_region:
WestCoast_wine_data_title.region_1 = WestCoast_wine_data_title.region_1.replace(region,"Other")
# Check to make sure binning was successful
WestCoast_wine_data_title.region_1.value_counts()
# Remove regions where region count <= 300
wine_data_df = WestCoast_wine_data_title[WestCoast_wine_data_title.region_1 != "Other"]
print(wine_data_df.shape)
wine_data_df.head()
## Categorize Wines
# Create wine categories/types column
wine_data_df["type"] = wine_data_df["variety"]
# Categorize varieties
rose = ["Rosé"]
red = ["Pinot Noir", "Cabernet Sauvignon", "Syrah", "Red Blend", "Zinfandel", "Merlot","Bordeaux-style Red Blend",
"Cabernet Franc", "Rhône-style Red Blend", "Petite Sirah", "Malbec", "Grenache", "Sangiovese", "Tempranillo"]
white = ["Chardonnay", "Sauvignon Blanc","Riesling","Pinot Gris","Viognier", "Sparkling Blend", "Gewürztraminer",
"Pinot Grigio", "White Blend"]
wine_data_df = wine_data_df.replace({"type": white},"White")
wine_data_df = wine_data_df.replace({"type": rose},"Pink")
wine_data_df = wine_data_df.replace({"type": red},"Red")
wine_data_df.head()
# Import Dependencies for Database
from config import password
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import psycopg2
db_string = f"postgresql+psycopg2://postgres:" + password + "@127.0.0.1:5434/WineEnthusiast"
engine = create_engine(db_string)
wine_data_df.to_sql(name='us_wine', con=engine, method='multi') |
992,604 | 84df62a0d8d4e994c01b0ce2982fa066c38e46b9 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the "moadmin" app: a module catalogue plus a server
    # inventory, all mapped onto pre-existing database table names via db_table.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ModuleList',
            fields=[
                # NOTE: db_column is a bytes literal (b'ID') — a Python-2-era
                # artefact of this generated migration; kept verbatim.
                ('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'ID')),
                ('module_name', models.CharField(max_length=100)),
                ('module_fun_desc', models.CharField(max_length=500)),
                ('module_fun_ext', models.CharField(max_length=5000)),
            ],
            options={
                'db_table': 'module_list',
            },
        ),
        migrations.CreateModel(
            name='ServerAppCateg',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'ID')),
                ('app_categ_name', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'server_app_categ',
            },
        ),
        migrations.CreateModel(
            name='ServerFunCateg',
            fields=[
                ('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'ID')),
                ('server_categ_name', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'server_fun_categ',
            },
        ),
        migrations.CreateModel(
            name='ServerList',
            fields=[
                # server_name doubles as the primary key for this table.
                ('server_name', models.CharField(max_length=50, serialize=False, primary_key=True)),
                ('server_extip', models.CharField(max_length=45)),
                ('server_intip', models.CharField(max_length=45)),
                ('server_os', models.CharField(max_length=50)),
                ('server_app_id', models.ForeignKey(to='moadmin.ServerAppCateg')),
            ],
            options={
                'db_table': 'server_list',
            },
        ),
        # FK added after ServerAppCateg/ServerFunCateg both exist.
        migrations.AddField(
            model_name='serverappcateg',
            name='server_categ_id',
            field=models.ForeignKey(to='moadmin.ServerFunCateg'),
        ),
    ]
|
992,605 | 5567592a9218f43f82ffc5fcdf2f1aec17be622d | from aux_funcs import *
CF=pickle.load(open(datadir+'OSNAP2016recovery/pickles/xarray/CF_M_2014-2016_hourlyTSD_1903.pickle','rb'))
dat=io.loadmat(datadir+'OSNAP2016recovery/LS/LSgridded_TS.mat')
dat
eddy_stats=io.loadmat(datadir+'OSNAP2016recovery/Eddies/LS_eddy_stats.mat')
eddy_stats
eddy_date = array([datetime.datetime.fromordinal(int(matlab_datenum)) + datetime.timedelta(days=matlab_datenum%1) - datetime.timedelta(days = 366) for matlab_datenum in eddy_stats['time'][0]])
loc_cycl=eddy_stats['loc_cycl']
shape(loc_cycl)
dat.keys()
dat['date']=array([datetime.datetime.fromordinal(int(matlab_datenum)) + datetime.timedelta(days=matlab_datenum%1) - datetime.timedelta(days = 366) for matlab_datenum in dat['time'][0]])
ii=5
nnind=(~isnan(dat['D'][:,ii,3000]))
datemat=tile(dat['date'],[sum(nnind),1])
dlist=[datetime.datetime(2014,9,1),datetime.datetime(2015,2,1),datetime.datetime(2015,9,1),datetime.datetime(2016,3,1),datetime.datetime(2016,8,1)]
matplotlib.rcParams['ps.fonttype'] = 42
rcParams['mathtext.fontset'] = 'cm'
Dmat=dat['D'][nnind,ii,:]
Dbot=Dmat[-1,:]
Dmat[-1,isnan(Dbot)]=nanmean(Dbot)
sum(isnan(Dmat[-1,:]))
figure(figsize=(16,4))
contourf(datemat,-Dmat,dat['T'][nnind,ii,:],cmap=cm.RdYlBu_r,levels=arange(1.5,6.1,0.1),extend='both')
colorbar(ticks=range(0,7,1),label='Potential temperature [$^\circ$ C]')
contour(datemat,-Dmat,dat['R'][nnind,ii,:],colors='k',levels=arange(27.6,27.9,0.05))
plot(eddy_date[(loc_cycl[-2,:]>0)==True],0*ones(sum(loc_cycl[-2,:]>0)),'ko')
ylabel('depth [m]')
title('LS'+str(ii+1))
for dd in range(len(dlist[:-1])):
xlim(dlist[dd],dlist[dd+1])
savefig('../figures/Eddies/hov/Tdenhov_abs_LS'+str(ii+1)+'_d'+str(dd)+'.png',bbox_inches='tight',dpi=300)
savefig('../figures/Eddies/hov/Tdenhov_abs_LS'+str(ii+1)+'_d'+str(dd)+'.pdf',bbox_inches='tight')
# savefig('../figures/Eddies/hov/Tdenhov_abs_LS'+str(ii+1)+'_d'+str(dd)+'.ps',bbox_inches='tight')
# def np64ToDatetime(DA):
# return [datetime.datetime.utcfromtimestamp((dd-np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')) for dd in DA]
#
# dcheck=array(np64ToDatetime(dat[6].date))
#
# datemat=tile(dcheck,[8,1])
# for ii in range(6,9):
# #
# # figure(figsize=(16,4))
# # for jj in range(len(dat[ii].dpvec)-2):
# # scatter(dcheck,dat[ii].depth[jj,:].T,c=(dat[ii].ptmp[jj,:].values-mean(dat[ii].ptmp[jj,:]).values),s=2,vmin=-1,vmax=1,cmap=cm.RdYlBu_r);
# # plot(eddy_date[(loc_cycl[ii-5,:]>0)==True],-2000*ones(sum(loc_cycl[ii-5,:]>0)),'k.')
# # colorbar(label='Potential temperature anomaly [$^\circ$ C]')
# # title('CF'+str(ii))
# # savefig('../figures/Eddies/hov/Thov_anom_CF'+str(ii)+'.png',bbox_inches='tight',dpi=300)
# #
# # figure(figsize=(16,4))
# # for jj in range(len(dat[ii].dpvec)-2):
# # scatter(dcheck,dat[ii].depth[jj,:].T,c=(dat[ii].sigma0[jj,:].values-mean(dat[ii].sigma0[jj,:]).values),s=2,vmin=-0.1,vmax=0.1,cmap=cm.BrBG)#,vmin=-5,vmax=5,cmap=cm.RdBu_r);
# # plot(eddy_date[(loc_cycl[ii-5,:]>0)==True],-2000*ones(sum(loc_cycl[ii-5,:]>0)),'k.')
# # colorbar(label='$\sigma_0$ anomaly [kg m$^{-3}$]')
# # title('CF'+str(ii))
# # savefig('../figures/Eddies/hov/Dhov_anom_CF'+str(ii)+'.png',bbox_inches='tight',dpi=300)
# #
# # figure(figsize=(16,4))
# # for jj in range(len(dat[ii].dpvec)-2):
# # scatter(dcheck,dat[ii].depth[jj,:].T,c=(dat[ii].ptmp[jj,:].values),s=2,vmin=0,vmax=7,cmap=cm.RdYlBu_r);
# # plot(eddy_date[(loc_cycl[ii-5,:]>0)==True],-2000*ones(sum(loc_cycl[ii-5,:]>0)),'k.')
# # colorbar(label='Potential temperature [$^\circ$ C]')
# # title('CF'+str(ii))
# # for dd in range(len(dlist[:-1])):
# # xlim(dlist[dd],dlist[dd+1])
# # savefig('../figures/Eddies/hov/Thov_abs_CF'+str(ii)+'_d'+str(dd)+'.png',bbox_inches='tight',dpi=300)
#
# # figure(figsize=(16,4))
# # for jj in range(len(dat[ii].dpvec)-2):
# # scatter(dcheck,dat[ii].depth[jj,:].T,c=(dat[ii].sigma0[jj,:].values),s=2,cmap=cm.YlGnBu,vmin=27.3,vmax=28)#,vmin=-5,vmax=5,cmap=cm.RdBu_r);
# # plot(eddy_date[(loc_cycl[ii-5,:]>0)==True],-2000*ones(sum(loc_cycl[ii-5,:]>0)),'k.')
# # colorbar(label='$\sigma_0$ [kg m$^{-3}$]')
# # title('CF'+str(ii))
# # savefig('../figures/Eddies/hov/Dhov_abs_CF'+str(ii)+'.png',bbox_inches='tight',dpi=300)
#
#
#
# eddy_date[0]
#
#
#
#
#
# tvec=arange(0,len(dcheck)/24,1./24)
# ttest=dat[6].ptmp[-1,:]
# [t_fit,t_std,t_period]=fitsin(tvec,ttest,mean(ttest).values,60,std(ttest).values,365.25)
#
# ttest.plot()
# plot(dcheck,t_fit)
#
# plot(dcheck,ttest-t_fit)
# axhline(0)
#
# XXXXXXXXXXXXXXXXXXXXXXXXXX
#
#
# daily=pickle.load(open(datadir+'OSNAP2016recovery/pickles/xarray/CF_xarray_notid_1809lpfilt_noextrap_wMLPV.pickle','rb'))
#
#
#
# for ii in range(5,8):
# # figure(figsize=(14,3))
# # pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:])
# # plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# # ylabel('depth [m]')
# # ylim(2e3,500)
# # colorbar(label='potential temperature [$^\circ$C]')
# # title('CF'+str(ii+1))
# # savefig('../figures/Eddies/hov/Thov_abs_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
# #
# # figure(figsize=(14,3))
# # pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:]-daily.temperature[ii,250:,:].mean(dim='date'),vmin=-1,vmax=1,cmap=cm.RdYlBu_r)
# # plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# # ylabel('depth [m]')
# # ylim(2e3,500)
# # colorbar(label='potential temperature anomaly [$^\circ$C]')
# # title('CF'+str(ii+1))
# # savefig('../figures/Eddies/hov/Thov_anom_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
#
# figure(figsize=(14,3))
# pcolor(daily.date,daily.depth[250:],daily['potential density'][ii,250:,:],cmap=cm.YlGnBu)
# plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# ylabel('depth [m]')
# ylim(2e3,500)
# colorbar(label='$\sigma_0$ [kg m$^{-3}$]')
# title('CF'+str(ii+1))
# savefig('../figures/Eddies/hov/Dhov_abs_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
#
# # figure(figsize=(14,3))
# # pcolor(daily.date,daily.depth[250:],daily['potential density'][ii,250:,:]-daily['potential density'][ii,250:,:].mean(dim='date'),vmin=-0.1,vmax=0.1,cmap=cm.RdYlBu_r)
# # plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# # ylabel('depth [m]')
# # ylim(2e3,500)
# # colorbar(label='$\sigma_0$ anomaly [kg m$^{-3}$]')
# # title('CF'+str(ii+1))
# # savefig('../figures/Eddies/hov/Dhov_anom_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
#
#
# eddy_date
#
#
# XXXXXXXXXXXXXXXXXXXXXXXXXX
#
# ii=5
# dd=1
#
#
# pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:],zorder=1)
#
# help(contour)
# contour(daily['potential density'][ii,250:,:],colors='k')
#
# figure(figsize=(14,3))
# pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:],zorder=1)
# colorbar(label='potential temperature [$^\circ$C]')
# contour(daily.date,daily.depth[250:],daily['potential density'][ii,250:,:],colors='k',zorder=2)
# plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# ylabel('depth [m]')
# ylim(2e3,500)
# title('CF'+str(ii+1))
# for dd in range(len(dlist[:-1])):
# xlim(dlist[dd],dlist[dd+1])
# savefig('../figures/Eddies/hov/Thov_abs_dailygridded_CF'+str(ii)+'_d'+str(dd)+'.png',bbox_inches='tight',dpi=300)
#
# for ii in range(5,8):
# figure(figsize=(14,3))
# pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:],zorder=1)
# colorbar(label='potential temperature [$^\circ$C]')
# contour(daily.date,daily.depth[250:],daily['potential density'][ii,250:,:],colors='k',zorder=2)
# plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# ylabel('depth [m]')
# ylim(2e3,500)
# title('CF'+str(ii+1))
# for dd in range(len(dlist[:-1])):
# xlim(dlist[dd],dlist[dd+1])
# savefig('../figures/Eddies/hov/Thov_abs_dailygridded_CF'+str(ii)+'_d'+str(dd)+'.png',bbox_inches='tight',dpi=300)
# # savefig('../figures/Eddies/hov/Thov_abs_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
# #
# # figure(figsize=(14,3))
# # pcolor(daily.date,daily.depth[250:],daily.temperature[ii,250:,:]-daily.temperature[ii,250:,:].mean(dim='date'),vmin=-1,vmax=1,cmap=cm.RdYlBu_r)
# # plot(eddy_date[(loc_cycl[ii-4,:]>0)==True],600*ones(sum(loc_cycl[ii-4,:]>0)),'k.')
# # ylabel('depth [m]')
# # ylim(2e3,500)
# # colorbar(label='potential temperature anomaly [$^\circ$C]')
# # title('CF'+str(ii+1))
# # savefig('../figures/Eddies/hov/Thov_anom_dailygridded_CF'+str(ii+1)+'.png',bbox_inches='tight',dpi=300)
|
992,606 | 9220b34f43e6f03391d54a097c14814800439a1f | import json
def lambda_handler(event, context):
    """AWS Lambda entry point: wrap the raw invocation event under 'taskInput'.

    `context` is unused but is part of the required Lambda handler signature.
    """
    payload = {"taskInput": event}
    return payload
|
992,607 | 67f6c2f4073c2a4ffef62f1f778c4dfa9f8186a5 | import random
import pytest
from Treap import *
size = 300
def f(x):
    """Toy priority function (5*x mod 17) kept for the commented-out
    priority-insert variants in the tests below."""
    return x * 5 % 17
def test_insert():
    """Passes if the treap's size grows by one per insert and ends at `size`."""
    tree = Treap()
    for key in range(size):
        # Priority left to the Treap's own randomisation.
        # (variant with explicit priority: tree.insert(key, str(key), f(key)))
        tree.insert(key, str(key))
        assert tree.getSize() == key + 1
    assert tree.getSize() == size
def test_find():
    """Passes if every inserted key is findable and shifted keys are not."""
    tree = Treap()
    for key in range(size):
        # (variant with explicit priority: tree.insert(key, str(key), f(key)))
        tree.insert(key, str(key))
    tree.traverse("in")
    for key in reversed(range(size)):
        assert tree.find(key)
        assert not tree.find(key + size)
def test_delete():
    """Passes if deleted keys vanish and the size shrinks by one per delete."""
    tree = Treap()
    for key in range(size):
        # (variant with explicit priority: tree.insert(key, str(key), f(key)))
        tree.insert(key, str(key))
    # Walk a window that wraps around so every key gets deleted exactly once.
    for offset in range(size // 2, (size * 3) // 2):
        key = offset % size
        before = tree.getSize()
        tree.delete(key)
        assert not tree.find(key)
        assert tree.getSize() == before - 1
pytest.main(["-v"]) |
992,608 | 35ea8f265e276b808a92c80294e812116ea0dcd8 | # -*- coding:Latin-1 -*- # Permet d'utiliser les caractères français.
#Import des modules nécessaires
from xml.dom.minidom import parse
import urllib, string, arcpy, time
#Fonction pour ouvrir et lire le fichier XML.
def xmldescription(url):
    """Fetch an Environment Canada RSS feed and return the current temperature.

    Opens the XML feed at `url`, scans the 'summary' elements for the one
    containing "Température:", then extracts the numeric value from its HTML.
    Returns the temperature as text (comma decimal replaced by a dot), or
    "-9999" when no temperature is found.  (Python 2: urllib/string modules.)
    """
    xmlfilename = urllib.urlopen(url)  # Open the feed over the web.
    dom = parse(xmlfilename)  # Parse the XML document.
    Description = dom.getElementsByTagName('summary')  # All 'summary' elements.
    nbSummary = len(Description)
    TxtNodeValue = "nil"
    NoSummary = 1  # Index 0 is the feed header, so start at the second summary.
    while NoSummary < nbSummary:
        if string.count(Description[NoSummary].firstChild.nodeValue, u"Température:"):
            # BUG FIX: break immediately — the old loop kept incrementing and,
            # when the match was the last summary, overwrote the result with "nil".
            TxtNodeValue = Description[NoSummary].firstChild.nodeValue
            break
        NoSummary = NoSummary + 1
    # Extract the numeric temperature from the HTML summary.
    NombreTemp = "-9999"  # Sentinel used when no temperature is present.
    ListeB = string.split(TxtNodeValue, "<b>")  # Split the summary on '<b>' tags.
    for elements in ListeB:
        if string.count(elements, "Temp") > 0:
            TextTemp = string.split(elements, "</b>")[1]  # Text after the label.
            # Strip the unit and trailing markup, keeping only the number.
            NombreTemp = string.replace(TextTemp, "°C <br/>", "")
            # BUG FIX: stop here — the old for/else reset NombreTemp to "-9999"
            # whenever a LATER element did not contain "Temp", clobbering the value.
            break
    return string.strip(string.replace(NombreTemp, ",", "."))  # ',' -> '.' decimal.
N = 0
while N < 23:
fc = r"D:\GIT\Cartedetemperature_Python\BD_Meteo.gdb\SationsMeteoQuebec" # Classe d'entités où se trouve l'information.
fields = ('ville', 'url', 'Temperature') # Les champs à lire.
with arcpy.da.UpdateCursor(fc, fields) as cursor: # Initialisation du curseur.
for row in cursor:
print row[0] + ":" + xmldescription("http://meteo.gc.ca/rss/city/" + row[1] ) # Concaténation de la donnée du champ url et le texte pour faire le lien url. De plus envoi des infos dans la fonction xmldescription().
row[2] = float(xmldescription("http://meteo.gc.ca/rss/city/" + row[1]))
cursor.updateRow(row) # Mise à jour du champ de température de la classe d'entités.
if arcpy.CheckOutExtension("Spatial") =="CheckedOut": # Vérification si le module Spatial Analyst est activé.
arcpy.env.workspace = r"D:\GIT\Cartedetemperature_Python\BD_Meteo.gdb" # Définition de l'espace de travail pour les commandes suivantes.
arcpy.MakeFeatureLayer_management(fc, "PointsMeteo_lyr", '"Temperature" > -9999') # Enlève les -9999 via une requête de définition pour l'interpolation.
OutIMG = arcpy.sa.Idw("PointsMeteo_lyr", "Temperature","","","VARIABLE 8", "limits") # Interpolation IDW des valeurs de température.
arcpy.env.overwriteOutput = 1 # Écrase le fichier s'il existe
arcpy.Clip_management(OutIMG, "-8881010.42143521 5620161.08275039 -6356953.62302241 9003041.17894863", "IDWTemperature", "ProvinceQc","-3.402823e+038","ClippingGeometry","NO_MAINTAIN_EXTENT") # Clip selon la Province.
mxd = arcpy.mapping.MapDocument(r"D:\GIT\Cartedetemperature_Python\meteo.mxd") # Définition du mxd.
#arcpy.mapping.ExportToPDF(mxd, "c:\\temp\\Meteo.pdf") # Export en PDF
arcpy.mapping.ExportToJPEG(mxd, "C:\\Users\\mcouture\\Dropbox\\CarteMétéoPQ\\IMG" + str(N) + "_" + time.strftime('%H') + "h.jpg")
arcpy.Delete_management(OutIMG) # Efface l'image de l'interpolation.
arcpy.Delete_management("PointsMeteo_lyr") # Efface le fichier LYR
print N
time.sleep(3600)
|
992,609 | ccbe85ed7165a52b442c687edb5e85910b74c92b | """ num=30
nombre="jairo"
print(num,type(num))
print(nombre,type(nombre))
def mensaje(msg):
print(msg)
mensaje("Mi primer programa en Python")
mensaje("Mi segundo programa en Python") """
class Sintaxis:
    """Teaching class demonstrating Python syntax basics: class vs. instance
    attributes, tuples, lists and dictionaries."""
    instancia=0 # class attribute (shared by all instances): instance counter
    # __init__ is the constructor method executed when the class is instantiated;
    # its purpose is to create and initialize the instance attributes. `self` is
    # the object representing the instance being created.
    def __init__(self,dato="llamando al constructor2"):
        self.frase=dato # instance attribute
        Sintaxis.instancia = Sintaxis.instancia+1
    def usoVariables(self):
        """Demonstrate basic variables, tuples, lists and dictionaries."""
        edad, _peso = 21, 80
        nombres = 'Jairo Llongo'
        car = nombres[0]
        Tipo_sexo = 'M'
        Civil = True
        # tuples = () are immutable collections of data of any type
        usuario=()
        usuario=('dllongo', '1234', 'llongo@gmail.com')
        #print(usuario[2], nombres [7])
        #usuario [3]="Castillo"
        #usu = usuario [2]
        # # lists = [] are mutable collections
        materias = []
        materias = ['Estructura de datos', 'PHP', 'POO']
        aux=materias[1]
        materias[1]="Python"
        materias.append("Go")
        #print(materias,aux, materias[1] )
        # dictionary = {} is a collection of key:value pairs, JSON-like
        docente={}
        docente = {'nombre': 'Jairo', 'edad': 21, 'activo': True}
        edad = docente['edad']
        docente['edad']=22
        docente['carrera']='IS'
        #print(docente,edad,docente['edad'])
        # print(usuario,usuario[0],usuario[0:2],usuario[-1])
        # print(nombres,nombres[0],nombres[0:2],nombres[-1])
        # Slicing demo: tail, head, full copy and last-two-elements views.
        print(materias,materias[2:],materias[:1],materias[::],materias[-2:])
        # # presentation using str.format
        # print("""Mi nombre es {}, tengo {}
        # años""".format(nombres, edad))
# print("Sintaxis antes de instancia: {}".format(Sintaxis.instancia))
# Instantiate the Sintaxis class: creates the object ejer1 (an instance of it).
ejer1 = Sintaxis()
# print("Sintaxis de ejer1 es: {}".format(Sintaxis.instancia))
# ejer2 = Sintaxis("instancia2")
# print(ejer1.frase)
# print("Sintaxis de ejer2 es: {}".format(Sintaxis.instancia))
# print("Sintaxis nuevamente de ejer1 es: {}".format(Sintaxis.instancia))
# print(ejer2.frase)
ejer1.usoVariables()
|
992,610 | 880e0eacecc1640016a4f9442ee3f4c55eb739a0 | from random import choice
class RandomWalk():
    """A 2-D random walk of `num_point` points starting at the origin."""

    def __init__(self, num_point=5000) -> None:
        # Total number of points the finished walk should contain.
        self.num_point = num_point
        # Both coordinate lists start at the origin.
        self.x = [0]
        self.y = [0]
        # BUG FIX: the old __init__ called self.get_step() and discarded the
        # result — a pointless extra draw from the RNG, removed here.

    def fill_walk(self):
        """Append random steps until the walk holds `num_point` points."""
        while len(self.x) < self.num_point:
            x_step = self.get_step()
            # BUG FIX: y_step was computed twice (get_step() then an inline
            # duplicate that overwrote it); a single get_step() call suffices.
            y_step = self.get_step()
            # Discard null moves so every recorded point differs from the last.
            if x_step == 0 and y_step == 0:
                continue
            self.x.append(self.x[-1] + x_step)
            self.y.append(self.y[-1] + y_step)

    def get_step(self):
        """Return a signed step: direction (+/-1) times a distance in 0..4."""
        direction = choice([1, -1])
        distance = choice([0, 1, 2, 3, 4])
        return direction * distance
|
992,611 | 2a16f5f3be99082ed7a17eb29062341e936184a7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from scribus import *
# Page margins in points (left, right, top, bottom).
margins = (36, 36, 0, 0)
# Dictionary of logos' height/width aspect ratios. It is used to position the school logo
# There's no way to programatically adjust frame to image.
# The Python Scribus uses doesn't have any image utilities like PIL so I could not
# figure out a way to determine the image's aspect ratio programatically. :|
# There is a program I wrote called Logo_aspect_ratio.py that takes all the images files
# in a directory and generates a CSV file of their width and height. The program is located in
# the Women directory. After you run that program, you can run this one.
school_logos_dict = {}
with open("./School_Logos/filesizes_gif.csv") as f:
    for line in f:
        # CSV row: filename, width, height -> store height/width as the ratio.
        current_line_list = line.split(",")
        school_logos_dict[current_line_list[0]] = float(current_line_list[2]) / float (current_line_list[1])
# Same idea for the conference logos.
conf_logos_dict = {}
with open("./Conference_Logos/filesizes_png.csv") as f:
    for line in f:
        current_line_list = line.split(",")
        conf_logos_dict[current_line_list[0]] = float(current_line_list[2]) / float (current_line_list[1])
# Parse the player CSV into players_list; each entry is
# [name, photo filename, school, state, conference, division, stat, photo credit].
players_list = []
players_names_list = []
with open("club_900_assists_photo.csv") as f:
    next(f) # skip headers row
    for line in f:
        current_line_list = line.split(",")
        full_name = current_line_list[0].split()
        first_name = full_name[0]
        first_last_name = full_name[1]
        # Particles like "de"/"La" mean the surname spans two words; build the
        # photo filename from all three parts in that case.
        if (full_name[1] == "de" or full_name[1] == "La"):
            player_name = full_name[0] + " " + full_name[1] + " " + full_name[2]
            # Duplicate player names get a numeric suffix on the photo file.
            if (player_name in players_names_list):
                player_name_count = sum([1 for plyr in players_names_list if plyr == player_name])
                image_filename = "./" + "Club_900_assists/" + first_name + "_" + full_name[1] + "_" + full_name[2] + "_" + str(player_name_count + 1) + ".jpg"
            else:
                image_filename = "./" + "Club_900_assists/" + first_name + "_" + full_name[1] + "_" + full_name[2] + ".jpg"
        else:
            player_name = first_name + " " + first_last_name
            if (player_name in players_names_list):
                player_name_count = sum([1 for plyr in players_names_list if plyr == player_name])
                image_filename = "./" + "Club_900_assists/" + first_name + "_" + first_last_name + "_" + str(player_name_count + 1) + ".jpg"
            else:
                image_filename = "./" + "Club_900_assists/" + first_name + "_" + first_last_name + ".jpg"
        player_school = current_line_list[1]
        school_state = current_line_list[2]
        # Normalize the D.C. spelling used elsewhere in the layout.
        if current_line_list[2] == "Washington D.C.": school_state = "Washington, D.C."
        player_conf = current_line_list[3]
        school_division = current_line_list[4]
        player_stat_1 = current_line_list[13]
        player_photo = current_line_list[24]
        single_player_list = [player_name, image_filename, player_school, school_state, player_conf, school_division, player_stat_1, player_photo]
        players_list.append(single_player_list)
        players_names_list.append(player_name)
# Build the document: 8 players per page (2 rows x 4 columns).
# NOTE: Python 2 script — relies on integer '/' division and unicode().
if newDocument(PAPER_LETTER, margins, PORTRAIT, 1, UNIT_POINTS, NOFACINGPAGES, FIRSTPAGERIGHT, 1):
    # Register the custom CMYK colors used throughout the layout.
    defineColor("NJCAA Blue", 217, 168, 55, 94)
    defineColor("NJCAA Gray", 0, 0, 0, 40)
    defineColor("NJCAA Gray 2", 0, 0, 0, 153)
    defineColor("NJCAA Blue 2", 221, 168, 15, 30)
    defineColor("Darker Gray", 0, 0, 0, 64)
    num_players = len(players_list)
    # Ceiling of num_players / 8 (Python 2 integer division).
    if (num_players % 8) == 0:
        num_pages = (num_players / 8)
    else:
        num_pages = (num_players / 8) + 1
    player_count = 0
    for page in range(num_pages):
        # Page furniture: colored bands top/bottom and a gray content area.
        top_rect = createRect(0, 0, 612, 72)
        setFillColor("NJCAA Blue", top_rect); setLineColor("NJCAA Blue", top_rect)
        bottom_rect = createRect(0, 756, 612, 36)
        setFillColor("NJCAA Blue", bottom_rect); setLineColor("NJCAA Blue", bottom_rect)
        center_rect = createRect(0, 72, 612, 684)
        setFillColor("NJCAA Gray", center_rect); setLineColor("NJCAA Gray", center_rect)
        # Header: title plus subtitle, restyled separately via text selection.
        page_header = createText(36, 9, 540, 80)
        page_title = "Wholesale Distributors\n"
        page_subtitle = "Players with 900+ assists"
        insertText(page_title, -1, page_header)
        setFont("OLD SPORT 02 ATHLETIC NCV Regular", page_header); setFontSize(24, page_header)
        title_length = getTextLength(page_header)
        subtitle_length = len(page_subtitle)
        insertText(page_subtitle, -1, page_header)
        # Select only the subtitle span and give it the script font.
        selectText(title_length, subtitle_length, page_header)
        setFont("Playball Regular", page_header); setFontSize(26, page_header)
        setLineSpacing(30, page_header); setTextColor("White", page_header); setTextAlignment(ALIGN_CENTERED, page_header)
        # Season labels ("2019-2020") stacked vertically in both top corners.
        years1 = createText(0, 24.5, 36, 36); setText("2019" + "\n" + "-" + "\n" + "2020", years1)
        years2 = createText(576, 24.5, 36, 36); setText("2019" + "\n" + "-" + "\n" + "2020", years2)
        setTextColor("White", years1); setTextColor("White", years2)
        setFont("OLD SPORT 02 ATHLETIC NCV Regular", years1); setFontSize(11, years1); setTextAlignment(ALIGN_CENTERED, years1)
        setFont("OLD SPORT 02 ATHLETIC NCV Regular", years2); setFontSize(11, years2); setTextAlignment(ALIGN_CENTERED, years2)
        setLineSpacing(7, years1); setLineSpacing(7, years2)
        for row in range(2):
            for col in range(4):
                current_player = players_list[player_count]
                # Cell origin: 129pt-wide photos with 8pt gutters.
                photo_x = 36 + col * (129 + 8)
                # photo_y = 36 + 20 + row * (250 + 100)
                photo_y = 72 + 32 + row * (294 + 32)
                player_photo = createImage(photo_x, photo_y, 129, 215)
                loadImage(current_player[1], player_photo); setScaleImageToFrame(1, 1, player_photo)
                # Semi-transparent photo-credit strip along the photo's bottom edge.
                photo_credit = "Photo: " + current_player[7].replace("\n", "")
                photo_credit_length = len(photo_credit)
                # Rough width estimate so the banner hugs the credit text.
                photo_credit_width = 3.0 * photo_credit_length + 2.5
                photo_credit_banner = createRect(photo_x + 129.0 - photo_credit_width, photo_y + 215 - 8, photo_credit_width, 8)
                setFillColor("NJCAA Blue", photo_credit_banner); setLineColor("None", photo_credit_banner); setFillTransparency(0.70, photo_credit_banner)
                photo_credit_text = createText(photo_x + 129.0 - photo_credit_width, photo_y + 215 - 8 + 1.5, photo_credit_width, 10)
                setText(photo_credit, photo_credit_text)
                setTextColor("White", photo_credit_text); setFont("Asimov Print C", photo_credit_text); setFontSize(6, photo_credit_text)
                setTextAlignment(ALIGN_CENTERED, photo_credit_text)
                # Division badge: square frame for the listed divisions, wide otherwise.
                division_x = photo_x + 5
                if (current_player[5].replace("\n","") in ["NCAA DI", "NCAA DII", "NCAA DIII", "NJCAA DI", "NJCAA DII"]):
                    division_y = photo_y + 5
                    player_division = createImage(division_x, division_y, 25, 25)
                else:
                    division_y = photo_y + 10
                    player_division = createImage(division_x, division_y, 25, 12)
                loadImage("./Division_logos/" + current_player[5].replace(" ", "_").replace("\n","") + "_logo.png", player_division); setScaleImageToFrame(1, 1, player_division)
                # White banner under the photo carrying logo, name and school.
                banner_x = photo_x
                banner_y = photo_y + 215
                player_banner_height = 45
                player_banner = createRect(banner_x, banner_y, 129, player_banner_height)
                setFillColor("White", player_banner); setLineColor("None", player_banner)
                # academic_logo = createImage(banner_x + 2, banner_y + 2, 40, 40)
                # loadImage("./All_Academic/All_Academic_logo.png", academic_logo); setScaleImageToFrame(1, 1, academic_logo)
                logo_name = current_player[2].replace(" ", "_")
                # Fit the school logo in a 33x28 box while keeping its aspect ratio.
                if (school_logos_dict[logo_name] < 0.7):
                    logo_width = 33.0
                    logo_height = min(logo_width * school_logos_dict[logo_name], 28)
                else:
                    logo_height = 28.0
                    logo_width = min(logo_height / school_logos_dict[logo_name], 33)
                logo_ypos = (banner_y + (player_banner_height - logo_height) / 2.0)
                school_logo = createImage(banner_x + 0, logo_ypos, logo_width, logo_height)
                loadImage("./School_Logos/" + logo_name + ".gif", school_logo); setScaleImageToFrame(1, 1, school_logo)
                # Stat badge (rect + ellipse bottom) at the photo's top-right corner.
                stat_banner_width = 38
                stat_banner_height = 24
                ellipse_width = 38
                ellipse_height = 24
                stat_banner_ellipse = createEllipse(banner_x + 85, (photo_y + stat_banner_height - ellipse_height / 2), ellipse_width, ellipse_height)
                setFillColor("White", stat_banner_ellipse); setLineColor("None", stat_banner_ellipse)
                stat_banner = createRect(banner_x + 85, photo_y, 38, 24)
                setFillColor("White", stat_banner); setLineColor("None", stat_banner)
                stat_text = createText(banner_x + 85, photo_y + 7, 38, 24)
                insertText(current_player[6] + "\n" + "assists", -1, stat_text)
                setFont("News of the World Wide Italic", stat_text); setFontSize(12, stat_text)
                setLineSpacing(10, stat_text); setTextAlignment(ALIGN_CENTERED, stat_text); setTextColor("NJCAA Blue", stat_text)
                # Accented capitals render taller, so nudge the name frame up.
                vocales_acentos = ["Á", "É", "Í", "Ó", "Ú", "Ñ"]
                if any(x in unicode(current_player[0]).upper() for x in vocales_acentos): player_name_ypos = banner_y + 2
                else: player_name_ypos = banner_y + 4
                if (len(current_player[2]) > 24): player_name_ypos += 2
                else: player_name_ypos += 6
                # Name / school / state share one frame, restyled span by span.
                player_name = createText(banner_x + 28 + 5 + 1, player_name_ypos - 1, 95, 40)
                insertText(unicode(current_player[0]).upper() + "\n", -1, player_name)
                setFont("Asimov Print C", player_name); setFontSize(9, player_name)
                name_length = getTextLength(player_name)
                player_school = current_player[2]
                school_length = len(player_school) + 1
                insertText(unicode(player_school).upper() + "\n", -1, player_name)
                selectText(name_length, school_length, player_name)
                setFont("OLD SPORT 02 ATHLETIC NCV Regular", player_name)
                selectText(name_length, len(player_school), player_name); setFontSize(6.2, player_name)
                school_state = current_player[3]
                insertText(school_state, -1, player_name)
                selectText(name_length + school_length, len(school_state), player_name)
                setFont("Playball Regular", player_name)
                selectText(name_length + school_length, len(school_state), player_name); setFontSize(9, player_name)
                setTextColor("NJCAA Blue", player_name)
                setLineSpacing(9, player_name)
                setTextAlignment(ALIGN_CENTERED, player_name)
                # Gray strip below the banner with the conference logo and name.
                player_conf_background_height = 34.0
                player_conf_background = createRect(banner_x + 0.5, banner_y + player_banner_height, 129, player_conf_background_height)
                setFillColor("Darker Gray", player_conf_background); setLineColor("Darker Gray", player_conf_background)
                player_conf_img = current_player[4].replace(" ", "_")
                if (conf_logos_dict[player_conf_img] < 0.75): # C-USA and ASUN are very wide
                    player_conf_logo_w = 33.0
                    player_conf_logo_h = min(player_conf_logo_w * conf_logos_dict[player_conf_img], 32.0)
                else:
                    player_conf_logo_h = 32.0
                    player_conf_logo_w = min(player_conf_logo_h / conf_logos_dict[player_conf_img], 33.0)
                conf_logo = createImage(banner_x + 0, banner_y + player_banner_height + (player_conf_background_height - player_conf_logo_h) / 2.0, player_conf_logo_w, player_conf_logo_h)
                loadImage("./Conference_Logos/" + player_conf_img + ".png", conf_logo); setScaleImageToFrame(1, 1, conf_logo)
                # Longer conference names wrap, so reduce the vertical offset.
                offset = 14.0
                if len(current_player[4]) > 18 and len(current_player[4]) < 29: offset = 9.0
                if len(current_player[4]) > 28: offset = 3.5
                player_conf_frame = createText(banner_x + 28 + 5 + 1, banner_y + player_banner_height + offset, 95, 33)
                # player_conf_array = current_player[4].split(" ")
                # player_conf_array_length = len(player_conf_array)
                # if (player_conf_array_length % 2) == 0: split_point = player_conf_array_length / 2
                # else: split_point = (player_conf_array_length / 2) + 1
                # player_conf = " ".join(player_conf_array[0:split_point]) + "\n" + " ".join(player_conf_array[split_point:])
                player_conf = current_player[4]
                # player_conf_length = len(player_conf)
                insertText(player_conf, -1, player_conf_frame)
                setTextColor("NJCAA Blue", player_conf_frame)
                setFont("OLD SPORT 02 ATHLETIC NCV Regular", player_conf_frame); setFontSize(8, player_conf_frame)
                setLineSpacing(11, player_conf_frame); setTextAlignment(ALIGN_CENTERED, player_conf_frame)
                player_count += 1
                # Cascade out of all three loops once every player is placed.
                if player_count == num_players: break
            if player_count == num_players: break
        if player_count == num_players: break
        # if page == 0: break
        # right_rect = createRect(576, 36, 36, 720)
        # setFillColor("NJCAA Gray", right_rect); setLineColor("NJCAA Gray", right_rect)
        # left_rect = createRect(0, 36, 36, 720)
        # setFillColor("NJCAA Gray", left_rect); setLineColor("NJCAA Gray", left_rect)
        newPage(-1)
|
992,612 | 79db1c848904f3599495a43d7907c41bde530d03 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 27 21:11:27 2016
@author: ORCHISAMA
"""
#Problem - 33
#The fraction 49/98 is a curious fraction, as an inexperienced mathematician in attempting to simplify it may incorrectly believe that 49/98 = 4/8, which is correct, is obtained by cancelling the 9s.
#
#We shall consider fractions like, 30/50 = 3/5, to be trivial examples.
#
#There are exactly four non-trivial examples of this type of fraction, less than one in value, and containing two digits in the numerator and denominator.
#
#If the product of these four fractions is given in its lowest common terms, find the value of the denominator.
#to reduce fractions to their lowest value, divide numerator and denominator by the HCF
def hcf(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b:
        a, b = b, a % b
    return a
def checkCondition(num1, num2, den1, den2):
    """Look for a digit shared between numerator and denominator.

    Checks the four digit pairings in a fixed order; on the first pairing
    where the shared digits are equal and the leftover digits differ,
    returns (True, leftover_numerator_digit, leftover_denominator_digit).
    Otherwise returns (False,).
    """
    pairings = (
        (num1, den1, num2, den2),
        (num1, den2, num2, den1),
        (num2, den1, num1, den2),
        (num2, den2, num1, den1),
    )
    for shared_n, shared_d, rest_n, rest_d in pairings:
        if shared_n == shared_d and rest_n != rest_d:
            return (True, rest_n, rest_d)
    return (False, )
def checkFraction(num, den):
    """Return True if num/den is a digit-cancelling ("curious") fraction.

    A two-digit fraction is digit-cancelling when the numerator and the
    denominator share exactly one digit and deleting that digit leaves a
    fraction equal to the original (e.g. 49/98 -> 4/8).

    FIX: uses floor division (//) so digit extraction and reduction are
    correct under Python 3 as well; on Python 2 ints, // is identical to
    the original /.
    """
    numdig1 = num % 10   # ones digit of the numerator
    numdig2 = num // 10  # tens digit of the numerator
    dendig1 = den % 10   # ones digit of the denominator
    dendig2 = den // 10  # tens digit of the denominator
    res = checkCondition(numdig1, numdig2, dendig1, dendig2)
    if not res[0]:
        return False
    numdig = res[1]
    dendig = res[2]
    # Reduce both the full and the digit-cancelled fraction to lowest terms,
    # then compare them.  (checkCondition guarantees the leftover digits
    # differ, so their gcd is never zero.)
    gcd = hcf(den, num)
    den //= gcd
    num //= gcd
    gcd = hcf(dendig, numdig)
    dendig //= gcd
    numdig //= gcd
    return dendig == den and numdig == num
# Scan every two-digit fraction num/den < 1 for digit-cancelling examples.
strlist = []
for den in range(11,100):
    for num in range(10, den):
        # Skip the trivial cases (both numbers end in zero, e.g. 30/50).
        if num%10 == 0 and den%10 == 0:
            continue
        else:
            if checkFraction(num, den) is True:
                strlist.append(str(num)+'/'+str(den))
# Python 2 print statement: expected result is the four curious fractions.
print strlist
|
992,613 | d2f055c1fdfb6c500e1cd57ab6337d595242ea94 | # 2 Дан список:
# ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']
#
# Необходимо его обработать — обособить каждое целое число (вещественные не трогаем) кавычками
# (добавить кавычку до и кавычку после элемента списка, являющегося числом) и дополнить нулём до двух целочисленных
# разрядов:
# ['в', '"', '05', '"', 'часов', '"', '17', '"', 'минут', 'температура', 'воздуха', 'была', '"', '+05', '"',
# 'градусов']
#
# Сформировать из обработанного списка строку:
# в "05" часов "17" минут температура воздуха была "+05" градусов
#
# Подумать, какое условие записать, чтобы выявить числа среди элементов списка? Как модифицировать это условие
# для чисел со знаком?
# Примечание: если обособление чисел кавычками не будет получаться - можете вернуться к его реализации позже. Главное: дополнить числа до двух разрядов нулём!
# Quote every integer element (signed or not) and zero-pad its numeric part
# to two digits, e.g. '5' -> '"05"', '17' -> '"17"', '+5' -> '"+05"'.
some_list1 = ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']
some_list2 = []
for el in some_list1:
    # Split off an optional leading sign, then test the remainder for digits.
    sign = el[:1] if el[:1] in ('+', '-') else ''
    digits = el[len(sign):]
    if digits.isdigit():
        # BUG FIX: the old check (int(el) < 10) left multi-digit numbers such
        # as '17' unquoted; per the spec every integer is quoted and padded.
        some_list2.append('"' + sign + digits.zfill(2) + '"')
    else:
        some_list2.append(el)
print(' '.join(some_list2))
|
992,614 | a82c165f29c9324e2e33a0fcddbefcbd3b95bd3c | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Line(BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Has an effect only if `line.color` is set to a numerical array.
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
"""
Has an effect only if `line.color` is set to a numerical array
and `cmin`, `cmax` are set by the user. In this case, it
controls whether the range of colors in `colorscale` is mapped
to the range of values in the `color` array (`cauto: true`), or
the `cmin`/`cmax` values (`cauto: false`). Defaults to `false`
when `cmin`, `cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['cauto']
@cauto.setter
def cauto(self, val):
self['cauto'] = val
# cmax
# ----
@property
def cmax(self):
"""
Has an effect only if `line.color` is set to a numerical array.
Sets the upper bound of the color domain. Value should be
associated to the `line.color` array index, and if set,
`line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmax']
@cmax.setter
def cmax(self, val):
self['cmax'] = val
# cmin
# ----
@property
def cmin(self):
"""
Has an effect only if `line.color` is set to a numerical array.
Sets the lower bound of the color domain. Value should be
associated to the `line.color` array index, and if set,
`line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['cmin']
@cmin.setter
def cmin(self, val):
self['cmin'] = val
# color
# -----
@property
def color(self):
"""
Sets the line color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to `cmin` and
`cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A number that will be interpreted as a color
according to scatter3d.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale and only has an effect if `line.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`line.cmin` and `line.cmax`. Alternatively, `colorscale` may be
a palette name string of the following list: Greys, YlGnBu,
Greens, YlOrRd, Bluered, RdBu, Reds, Blues, Picnic, Rainbow,
Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis,
Cividis
The 'colorscale' property is a colorscale and may be
specified as:
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis']
Returns
-------
str
"""
return self['colorscale']
@colorscale.setter
def colorscale(self, val):
self['colorscale'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
Returns
-------
Any
"""
return self['dash']
@dash.setter
def dash(self, val):
self['dash'] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Has an effect only if `line.color` is set to a numerical array.
Reverses the color mapping if true (`cmin` will correspond to
the last color in the array and `cmax` will correspond to the
first color).
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['reversescale']
@reversescale.setter
def reversescale(self, val):
self['reversescale'] = val
# showscale
# ---------
@property
def showscale(self):
"""
Has an effect only if `line.color` is set to a numerical array.
Determines whether or not a colorbar is displayed.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showscale']
@showscale.setter
def showscale(self, val):
self['showscale'] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Path of the parent trace type this line object nests under.
        return 'scatter3d'
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text description of every property; this literal is runtime
        # data (consumed by generated docstrings/help), so it is kept verbatim.
        return """\
        autocolorscale
            Has an effect only if `line.color` is set to a
            numerical array. Determines whether the colorscale is a
            default palette (`autocolorscale: true`) or the palette
            determined by `line.colorscale`. In case `colorscale`
            is unspecified or `autocolorscale` is true, the default
            palette will be chosen according to whether numbers in
            the `color` array are all positive, all negative or
            mixed.
        cauto
            Has an effect only if `line.color` is set to a
            numerical array and `cmin`, `cmax` are set by the user.
            In this case, it controls whether the range of colors
            in `colorscale` is mapped to the range of values in the
            `color` array (`cauto: true`), or the `cmin`/`cmax`
            values (`cauto: false`). Defaults to `false` when
            `cmin`, `cmax` are set by the user.
        cmax
            Has an effect only if `line.color` is set to a
            numerical array. Sets the upper bound of the color
            domain. Value should be associated to the `line.color`
            array index, and if set, `line.cmin` must be set as
            well.
        cmin
            Has an effect only if `line.color` is set to a
            numerical array. Sets the lower bound of the color
            domain. Value should be associated to the `line.color`
            array index, and if set, `line.cmax` must be set as
            well.
        color
            Sets the line color. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `cmin` and `cmax` if set.
        colorscale
            Sets the colorscale and only has an effect if
            `line.color` is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use `line.cmin` and `line.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis
        colorsrc
            Sets the source reference on plot.ly for  color .
        dash
            Sets the dash style of the lines.
        reversescale
            Has an effect only if `line.color` is set to a
            numerical array. Reverses the color mapping if true
            (`cmin` will correspond to the last color in the array
            and `cmax` will correspond to the first color).
        showscale
            Has an effect only if `line.color` is set to a
            numerical array. Determines whether or not a colorbar
            is displayed.
        width
            Sets the line width (in px).
        """
    def __init__(
        self,
        arg=None,
        autocolorscale=None,
        cauto=None,
        cmax=None,
        cmin=None,
        color=None,
        colorscale=None,
        colorsrc=None,
        dash=None,
        reversescale=None,
        showscale=None,
        width=None,
        **kwargs
    ):
        """
        Construct a new Line object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scatter3d.Line
        autocolorscale
            Has an effect only if `line.color` is set to a
            numerical array. Determines whether the colorscale is a
            default palette (`autocolorscale: true`) or the palette
            determined by `line.colorscale`. In case `colorscale`
            is unspecified or `autocolorscale` is true, the default
            palette will be chosen according to whether numbers in
            the `color` array are all positive, all negative or
            mixed.
        cauto
            Has an effect only if `line.color` is set to a
            numerical array and `cmin`, `cmax` are set by the user.
            In this case, it controls whether the range of colors
            in `colorscale` is mapped to the range of values in the
            `color` array (`cauto: true`), or the `cmin`/`cmax`
            values (`cauto: false`). Defaults to `false` when
            `cmin`, `cmax` are set by the user.
        cmax
            Has an effect only if `line.color` is set to a
            numerical array. Sets the upper bound of the color
            domain. Value should be associated to the `line.color`
            array index, and if set, `line.cmin` must be set as
            well.
        cmin
            Has an effect only if `line.color` is set to a
            numerical array. Sets the lower bound of the color
            domain. Value should be associated to the `line.color`
            array index, and if set, `line.cmax` must be set as
            well.
        color
            Sets the line color. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `cmin` and `cmax` if set.
        colorscale
            Sets the colorscale and only has an effect if
            `line.color` is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use `line.cmin` and `line.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis
        colorsrc
            Sets the source reference on plot.ly for color .
        dash
            Sets the dash style of the lines.
        reversescale
            Has an effect only if `line.color` is set to a
            numerical array. Reverses the color mapping if true
            (`cmin` will correspond to the last color in the array
            and `cmax` will correspond to the first color).
        showscale
            Has an effect only if `line.color` is set to a
            numerical array. Determines whether or not a colorbar
            is displayed.
        width
            Sets the line width (in px).
        Returns
        -------
        Line
        """
        super(Line, self).__init__('line')
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so the pops below never mutate the caller's dict.
            arg = copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter3d.Line
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Line"""
            )
        # Import validators
        # -----------------
        from plotly.validators.scatter3d import (line as v_line)
        # Initialize validators
        # ---------------------
        self._validators['autocolorscale'] = v_line.AutocolorscaleValidator()
        self._validators['cauto'] = v_line.CautoValidator()
        self._validators['cmax'] = v_line.CmaxValidator()
        self._validators['cmin'] = v_line.CminValidator()
        self._validators['color'] = v_line.ColorValidator()
        self._validators['colorscale'] = v_line.ColorscaleValidator()
        self._validators['colorsrc'] = v_line.ColorsrcValidator()
        self._validators['dash'] = v_line.DashValidator()
        self._validators['reversescale'] = v_line.ReversescaleValidator()
        self._validators['showscale'] = v_line.ShowscaleValidator()
        self._validators['width'] = v_line.WidthValidator()
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over the same key
        # supplied inside ``arg``.
        v = arg.pop('autocolorscale', None)
        self.autocolorscale = autocolorscale if autocolorscale is not None else v
        v = arg.pop('cauto', None)
        self.cauto = cauto if cauto is not None else v
        v = arg.pop('cmax', None)
        self.cmax = cmax if cmax is not None else v
        v = arg.pop('cmin', None)
        self.cmin = cmin if cmin is not None else v
        v = arg.pop('color', None)
        self.color = color if color is not None else v
        v = arg.pop('colorscale', None)
        self.colorscale = colorscale if colorscale is not None else v
        v = arg.pop('colorsrc', None)
        self.colorsrc = colorsrc if colorsrc is not None else v
        v = arg.pop('dash', None)
        self.dash = dash if dash is not None else v
        v = arg.pop('reversescale', None)
        self.reversescale = reversescale if reversescale is not None else v
        v = arg.pop('showscale', None)
        self.showscale = showscale if showscale is not None else v
        v = arg.pop('width', None)
        self.width = width if width is not None else v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
|
992,615 | 46eb5ff253f82357f8eea285d761a8ef1688b1fc | # from django.shortcuts import render
# # Create your views here.
# from django.contrib.auth.models import User, Group
# from rest_framework import viewsets
# from quickstart.serializers import UserSerializer, GroupSerializer
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from quickstart.models import Quickstart
from quickstart.serializers import QuickstartSerializer
@csrf_exempt
def snippet_list(request):
    """
    List all Quickstart objects, or create a new one.

    GET  -> JSON array of all serialized Quickstart objects.
    POST -> create a Quickstart from the JSON request body; returns the
            serialized object with status 201, or validation errors
            with status 400.
    """
    if request.method == 'GET':
        quickstart = Quickstart.objects.all()
        # BUG FIX: the serializer was fed the undefined name ``snippets``
        # (NameError at runtime); serialize the queryset fetched above.
        serializer = QuickstartSerializer(quickstart, many=True)
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        data = JSONParser().parse(request)
        serializer = QuickstartSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data, status=201)
        return JsonResponse(serializer.errors, status=400)
@csrf_exempt
def snippet_detail(request, pk):
    """
    Retrieve, update or delete a code snippet.

    GET    -> serialized Quickstart as JSON.
    PUT    -> update from the JSON request body; 400 with errors if invalid.
    DELETE -> remove the object and return 204.
    Returns 404 when no Quickstart with *pk* exists.
    """
    try:
        snippet = Quickstart.objects.get(pk=pk)
    except Quickstart.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        serializer = QuickstartSerializer(snippet)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        data = JSONParser().parse(request)
        serializer = QuickstartSerializer(snippet, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=400)
    elif request.method == 'DELETE':
        # BUG FIX: the original called ``quickstart.delete()`` -- an
        # undefined name (NameError); delete the object fetched above.
        snippet.delete()
        return HttpResponse(status=204)
992,616 | 2be615648824cd6a97e7cfaaee3beb3ac524e8c5 | import logging
import boto3
import json
import azure.functions as func
from os import environ
from botocore.exceptions import ClientError
def main(req: func.HttpRequest) -> func.HttpResponse:
    '''
    Returns the result of the AWS SSM DescribeInstancePatches API call.

    Parameters:
        req (HttpRequest): Request Parameters
    Returns:
        func.HttpResponse
    '''
    # BUG FIX: the original logged ``func.HttpRequest`` (the class object
    # itself, e.g. "<class 'azure.functions...'>"); log the actual URL.
    logging.info(f'Resource Requested: {req.url}')
    # Get AWS ID and Key
    try:
        aws_access_key_id = environ['AWSAccessKeyID']
        aws_secret_access_key = environ['AWSSecretAccessKey']
        aws_region_name = environ['AWSRegionName']
    except KeyError as ke:
        logging.error(f'Invalid Settings. {ke.args} configuration is missing.')
        return func.HttpResponse(
            'Invalid Settings. AWS Access ID/Key configuration is missing.',
            status_code=500
        )
    # Get InstanceId, Filters, MaxResults and NextToken from the request parameters
    instanceid = req.params.get('InstanceId')
    filters = req.params.get('Filters')
    max_results = req.params.get('MaxResults')
    next_token = req.params.get('NextToken')
    if not (instanceid or filters or max_results or next_token):
        # Fall back to the JSON request body when nothing usable was in
        # the query string.
        try:
            req_body = req.get_json()
        except ValueError:
            pass
        else:
            instanceid = req_body.get('InstanceId')
            filters = req_body.get('Filters')
            max_results = req_body.get('MaxResults')
            next_token = req_body.get('NextToken')
    # Set parameter dictionary based on the request parameters.
    # NOTE(review): query-string values arrive as strings, but boto3
    # expects MaxResults as int and Filters as a list -- presumably
    # callers send these via the JSON body; confirm or coerce here.
    kwargs = {}
    if instanceid:
        kwargs['InstanceId'] = instanceid
    if filters:
        kwargs['Filters'] = filters
    if max_results:
        kwargs['MaxResults'] = max_results
    if next_token:
        kwargs['NextToken'] = next_token
    if instanceid:
        try:
            logging.info('Creating Boto3 SSM Client.')
            ssm_client = boto3.client(
                "ssm",
                region_name=aws_region_name,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key
            )
            try:
                logging.info('Calling function describe_instance_patches.')
                results = ssm_client.describe_instance_patches(**kwargs)
                logging.info('Call to function describe_instance_patches successful.')
                # Result returns InstalledTime as datetime.datetime object which is not
                # JSON serializable. Convert datetime.datetime object to string.
                if results and ('Patches' in results):
                    for patch in results['Patches']:
                        if 'InstalledTime' in patch:
                            patch['InstalledTime'] = patch['InstalledTime'].strftime("%Y-%m-%d %H:%M:%S")
                return func.HttpResponse(
                    json.dumps(results),
                    headers = {"Content-Type": "application/json"},
                    status_code = 200
                )
            # Specific SSM errors first, each mapped to an HTTP status.
            except ssm_client.exceptions.InternalServerError as ex:
                logging.error(f"Internal Server Exception: {str(ex)}")
                return func.HttpResponse("Internal Server Exception", status_code=500)
            except ssm_client.exceptions.InvalidInstanceId as ex:
                logging.error(f"Invalid InstanceId Exception: {str(ex)}")
                return func.HttpResponse("Invalid InstanceId Exception", status_code=400)
            except ssm_client.exceptions.InvalidNextToken as ex:
                logging.error(f"Invalid NextToken Exception: {str(ex)}")
                return func.HttpResponse("Invalid NextToken Exception", status_code=400)
            except ssm_client.exceptions.InvalidFilter as ex:
                logging.error(f"Invalid Filter Exception: {str(ex)}")
                return func.HttpResponse("Invalid Filter Exception", status_code=400)
        except ClientError as ex:
            logging.error(f"SSM Client Error: {str(ex)}")
            return func.HttpResponse("SSM Client Error", status_code=401)
        except Exception as ex:
            logging.error(f"Exception Occured: {str(ex)}")
            return func.HttpResponse("Internal Server Exception", status_code=500)
    else:
        return func.HttpResponse(
            "Pass InstanceId (required) and (optional) Filters, NextToken, MaxResults parameter(s) in the query string or request body.",
            status_code=400
        )
992,617 | fa4ea3a6a7b6bd8d420bd2f6df87eff85bc38154 | import re, copy, os, sys, traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import txt_mixin
#reload(txt_mixin)
from rwkmisc import rwkstr
import pylab_util as PU
from pyp_basics import line, section
from pytexutils import break_at_pipes, OptionsDictFromList
#from IPython.core.debugger import Pdb
import pdb
def CountCurlies(strin):
    """Return ``(n_open, n_close)``: the counts of '{' and '}' in *strin*."""
    wrapped = rwkstr(strin)
    n_open = len(wrapped.findall('{'))
    n_close = len(wrapped.findall('}'))
    return n_open, n_close
import var_to_latex as VL
#This is code from texpy that I don't completely remember the purpose
#of:
def BalanceCurlies(strin):
    """Truncate *strin* at the first '}' that balances the braces.

    Walks the string one '}' at a time; as soon as the accumulated prefix
    has equal numbers of '{' and '}', that prefix is returned.  If the
    braces never balance, the whole string comes back unchanged.
    """
    collected = ''
    remainder = strin
    while True:
        brace_at = remainder.find('}')
        if brace_at == -1:
            # No closing brace left: return everything gathered so far.
            return collected + remainder
        collected += remainder[:brace_at]
        remainder = remainder[brace_at+1:]
        n_open, n_close = CountCurlies(collected)
        if n_open == n_close:
            return collected
        # Still unbalanced: the '}' we consumed belongs in the output.
        collected += '}'
class env_popper(object):
    """This class will be used to grab environments delimited by {}
    out of lists.  These environments may span multiple lines of the
    input list.

    State shared by the methods: ``self.ind`` is the line index where the
    next search starts, ``self.matchline`` is the line where the last
    environment opened, ``self.endline`` is where it closed.
    """
    def __init__(self, listin, map_in=None, preface='^'):
        # listin: list of source lines; map_in: dict env-keyword -> handler
        # class (NOTE(review): map_in=None would crash on .keys() below --
        # subclasses are expected to supply a dict).
        self.list = txt_mixin.txt_list(listin)
        self.map = map_in
        self.keys = self.map.keys()
        self.keystr = '|'.join(self.keys)
        self.preface = preface
        # Matches e.g. 'fig{' at a position allowed by ``preface``.
        self.pat = self.preface+'('+self.keystr+'){'
        self.p = re.compile(self.pat)
        self.lines = copy.copy(listin)
        self.ind = 0
    def search_vect(self, listname='lines', start=0, pstr='p'):
        # Return the absolute index of the first line at/after ``start``
        # matching the compiled pattern named by ``pstr``, else None.
        p = getattr(self, pstr)
        myvect = getattr(self, listname)
        for n, line in enumerate(myvect[start:]):
            q = p.search(line)
            if q:
                return start+n
        return None
    def FindNextEnv(self, listname='lines', pstr='p'):
        """Find the next line matching self.p (the re.compile-ed
        version of self.pat), starting at line self.ind.
        The listname variable allows using this method on various
        lists within self, i.e. for texmaxima I need one list that
        doesn't get modified and one that does, so I have a
        self.rawlist and self.lines (or something like that)."""
        next_ind = self.search_vect(listname=listname, start=self.ind, \
                                    pstr=pstr)
        if next_ind is not None:
            self.matchline = next_ind
            self.ind = self.matchline+1
            return self.matchline
        else:
            return None
##         p = getattr(self, pstr)
##         myvect = getattr(self, listname)
##         for n, line in enumerate(myvect[self.ind:]):
##             q = p.search(line)
##             if q:
##                 self.matchline = self.ind+n
##                 self.ind = self.matchline+1#setup for next search, you
##                                           #may not want this +1 if
##                                           #the match gets removed and
##                                           #replaced with nothing.
##                 return self.matchline
##         return None#no match is left if we got this far
    def FindEndofEnv(self, matchline=None, listname='lines'):
        # Scan forward from ``matchline`` accumulating brace counts until
        # the closings catch up with the openings; sets self.endline.
        myvect = getattr(self, listname)
        if matchline is None:
            matchline = self.matchline
        n = -1
        match = False
        numleft = 0
        numright = 0
        while (not match) and (n < len(self.lines)):
            n += 1
            curline = rwkstr(myvect[matchline+n])
            numleft += len(curline.findall('{'))
            numright += len(curline.findall('}'))
            if numright >= numleft:
                match = True
        if match:
            self.endline = matchline+n
            return self.endline
        else:
            return None
    def PopEnv(self, startline=None, endline=None, clear=True, listname='lines'):
        # Slice the current environment out of the named list; when
        # ``clear`` is set the slice is also deleted in place.
        myvect = getattr(self, listname)
        if startline is None:
            startline = self.matchline
        if endline is None:
            if self.endline is None:
                # endline stays None so the slice runs to the end of the list.
                endline = self.endline
            else:
                endline = self.endline+1
        outlist = myvect[startline:endline]
##         if startline==endline:#make sure the {'s and }'s are balanced
##             outstr = BalanceCurlies(outlist[0])
##             outlist = [outstr]
        if clear:
            myvect[startline:endline] = []
        return outlist
    def PopNext(self, clear=True, listname='lines'):
        # Convenience wrapper: locate, delimit, and pop the next env.
        if self.FindNextEnv(listname=listname) is not None:#sets self.matchline
            self.FindEndofEnv(listname=listname)#sets self.endline
            outlist = self.PopEnv(clear=clear, listname=listname)
            if clear:
                # Resume at matchline since the popped lines are gone.
                self.ind = self.matchline#r                return outlist
            return outlist
        else:
            return None
    def _CleanChunk(self, chunk):
        """Extract the env keyword and the Python code from ``key{ ... }``.

        Returns ``(envkey, code)`` where ``code`` is the brace-balanced
        body of the environment.
        """
        mystr = '\n'.join(chunk)
        #find periods with only one space after them
        # NOTE(review): per the comment above, the intent seems to be
        # widening '. X' spacing, yet pattern and replacement look
        # identical here -- confirm the replacement should have two spaces.
        p = re.compile(r'\. ([A-Z])')
        mystr = p.sub(r'. \1',mystr)
        p2 = re.compile(self.pat+'(.*)}', re.DOTALL)
        q2 = p2.search(mystr)
        code = q2.group(2)
        code = BalanceCurlies(code)
        nl, nr = CountCurlies(code)
        assert nl==nr, "Number of left and right curly braces not equal:"+code
        envkey = q2.group(1)
        #codelist = code.split('\n')
        return envkey, code
class simple_popper(env_popper):
    """env_popper variant driven by a caller-supplied start regex instead
    of an env-keyword map; pops brace-delimited chunks non-destructively
    by default (``clear=False``)."""
    def __init__(self, listin, start_re):
        # start_re: regex marking the first line of an environment.
        self.list = txt_mixin.txt_list(listin)
        self.start_re = start_re
        self.p_start = re.compile(self.start_re)
        self.ind = 0
    def PopNext(self, clear=False):
        # Locate, delimit, and return the next chunk (see base class).
        if self.FindNextEnv() is not None:#sets self.matchline
            self.FindEndofEnv()#sets self.endline
            outlist = self.PopEnv(clear=clear)
            if clear:
                self.ind = self.matchline
            return outlist
        else:
            return None
    def PopEnv(self, startline=None, endline=None, clear=False):
        # Slice the current chunk out of self.list.
        if startline is None:
            startline = self.matchline
        if endline is None:
            if self.endline is not None:
                endline = self.endline+1
        outlist = self.list[startline:endline]
        if clear:
            # BUG FIX: the original assigned to the undefined name
            # ``myvect`` (NameError when clear=True); delete from self.list.
            self.list[startline:endline] = []
        return outlist
    def FindNextEnv(self):
        """Find the next line matching self.p_start (the re.compile-ed
        version of self.pat), starting at line self.ind."""
        next_ind = self.list.findnextre(self.p_start, ind=self.ind)
        if next_ind is not None:
            self.matchline = next_ind
            self.ind = self.matchline+1
            return self.matchline
        else:
            return None
    def FindEndofEnv(self, matchline=None):
        # Accumulate brace counts from matchline until they balance;
        # sets self.endline (None if braces never balance).
        if matchline is None:
            matchline = self.matchline
        n = -1
        match = False
        numleft = 0
        numright = 0
        while (not match) and (n < len(self.list)):
            n += 1
            curline = rwkstr(self.list[matchline+n])
            numleft += len(curline.findall('{'))
            numright += len(curline.findall('}'))
            if numright >= numleft:
                match = True
        if match:
            self.endline = matchline+n
            return self.endline
        else:
            return None
    def Execute(self):
        # Pop every chunk in the input, collecting them as a list of lists.
        keepgoing = True
        n = 0
        self.nested_list = []
        while keepgoing and (n < len(self.list)):
            chunk = self.PopNext()
            if chunk:
                self.nested_list.append(chunk)
            else:
                keepgoing = False
            n += 1
        return self.nested_list
class pyp_figure(object):
    """A ``fig{...}`` environment: pipe-separated options followed by the
    image path, e.g. ``fig{caption:My plot|width:0.5\\textwidth|figs/a.png}``.

    Exposes ``path``, ``caption``, ``options`` and (when given)
    ``height``/``width``/``label``/``placestr`` attributes.
    """
    def __init__(self, string_in, objlist, level=1):
        self.rawstring = string_in
        self.objlist = objlist
        self.clean_string = self.rawstring.replace('\n',' ')
        self.list = break_at_pipes(self.clean_string)
        # The final pipe-separated field is always the image path.
        self.path = self.list.pop()
        self.options = {'center':True}
        self.level = level
        mydict, loners = OptionsDictFromList(self.list)
        self.options.update(mydict)
        self.caption = ''
        self.height = None
        self.width = None
        self.label = None
        assert len(loners) < 2, 'self.list has more than 1 unlabeled option'
        if len(loners) == 1:
            # A single unlabeled option is treated as the caption.
            self.options['caption'] = loners[0]
            self.caption = loners[0]
        elif 'caption' in self.options:
            # MODERNIZED: ``dict.has_key`` (Python-2-only) replaced with
            # ``in`` throughout; behavior is identical.
            self.caption = self.options['caption']
        if not ('width' in self.options or 'height' in self.options):
            #self.options['height']='0.9\\textheight'
            self.options['width']='0.9\\textwidth'
        self.center = self.options['center']
        map_list = ['height','width','label','placestr']
        for key in map_list:
            if key in self.options:
                setattr(self, key, self.options[key])
# Method names that multicols forwards from its internal txt_list onto
# itself (see the final loop in multicols.__init__).
list_map = txt_mixin.default_map
class multicols(txt_mixin.txt_file_with_list):
    """A multi-column environment: columns are separated by lines made
    entirely of '-' or '=' characters.  Keeps a parallel pair of
    representations: ``self.objlist`` (line objects) and ``self.list``
    (plain strings), which must stay in lockstep."""
    def clean_start(self, pat='(?:two|three|four)cols{(.*)'):
        # Drop the opening 'twocols{'/'threecols{'/'fourcols{' marker,
        # re-inserting any trailing text on that line as a fresh line.
        # BUG FIX: the original default pattern '[two|three|four]cols{'
        # used a character class, matching any single char of
        # 't w o | h r e f u' before 'cols{'; the non-capturing group
        # expresses the intended alternation.
        obj0 = self.objlist[0]
        p = re.compile(pat)
        q = p.search(obj0.string)
        if q:
            self.objlist.pop(0)
            rest = q.group(1)
            if rest:
                line0 = line(rest)
                self.objlist.insert(0,line0)
    def clean_end(self, pat='(.*)}'):
        # Drop the closing '}', keeping any text that preceded it.
        end_obj = self.objlist[-1]
        p = re.compile(pat)
        q = p.match(end_obj.string)
        if q:
            self.objlist.pop()
            rest = q.group(1)
            if rest:
                lastline = line(rest)
                self.objlist.append(lastline)
    def clean_list(self):
        # Strip leading and trailing blank lines from the string list.
        while not self.list[0]:
            self.list.pop(0)
        while not self.list[-1]:
            self.list.pop()
    def __init__(self, string_in, objlist, widths=None, \
                 level=0, *args, **kwargs):
        txt_mixin.txt_file_with_list.__init__(self, pathin=None)
        self.level = level
        self.rawstring = string_in
        self.objlist = objlist
        self.clean_start()
        self.clean_end()
        mylist = self.rawstring.split('\n')
        self.list = txt_mixin.txt_list(mylist)
        self.clean_list()
        msg = 'self.objlist and self.list do not correspond.'
        assert len(self.list) == len(self.objlist), \
               msg +'\nlength mismatch'
        # Verify the two representations agree line-for-line.
        for obj, string in zip(self.objlist, self.list):
            if obj.string != string:
                # CHANGED: ``StandardError`` is Python-2-only (NameError on
                # Python 3); ValueError is a StandardError subclass on
                # Python 2, so existing handlers still catch it.
                raise ValueError(msg + '\n' + 'Problem items:' + \
                                 str(obj) +'!=' +string)
        self.widths = widths
        for item in list_map:
            cur_func = getattr(self.list, item)
            setattr(self, item, cur_func)#map functions from self.list
    def break_up_cols(self):
        # Split self.objlist at the '----'/'====' separator lines.
        self.inds = self.findallre('^[-=]+$')
        self.col_objs = []
        prev_ind = 0
        for ind in self.inds:
            cur_slice = self.objlist[prev_ind:ind]
            self.col_objs.append(cur_slice)
            prev_ind = ind+1
        cur_slice = self.objlist[prev_ind:]
        self.col_objs.append(cur_slice)
    def parse_cols(self):
        # Wrap each slice of line objects in a column instance.
        if not hasattr(self, 'col_objs'):
            self.break_up_cols()
        self.cols = []
        for cur_slice in self.col_objs:
            cur_col = column(cur_slice)
            self.cols.append(cur_col)
class twocols(multicols):
    """A multicols fixed at two columns, each defaulting to 0.45\\textwidth."""
    def __init__(self, string_in, objlist, widths=None, *args, **kwargs):
        # Two equal columns unless the caller supplies explicit widths.
        col_widths = widths if widths is not None else ['0.45\\textwidth'] * 2
        multicols.__init__(self, string_in, objlist, widths=col_widths)
        self.break_up_cols()
        self.parse_cols()
class column(object):
    """One column of a multi-column layout.

    Runs pyp_env_popper over the given line objects so that nested pyp
    environments inside the column are parsed into handler objects.
    """
    def __init__(self, objlist):
        self.objlist_in = objlist
        popper = pyp_env_popper(self.objlist_in)
        popper.Execute()
        self.env_popper = popper
        self.objlist = popper.objlist
class pyp_eqn(object):
    """A displayed-equation environment.

    ``string_in`` is either ``'envname| latex body'`` or just the latex
    body, in which case the LaTeX environment defaults to 'equation'.
    """
    def __init__(self, string_in, objlist, level=0):
        self.rawline = string_in
        # BUG FIX: the original tested ``string_in.find('|')`` for truth:
        # -1 (no pipe) is truthy, so the split/unpack raised ValueError
        # whenever no pipe was present, and index 0 (leading pipe) is
        # falsy, silently skipping the split.  Test membership instead.
        if '|' in string_in:
            env, rest = string_in.split('|', 1)
            self.env = env
            self.eqn = rest.lstrip()
        else:
            self.eqn = string_in
            self.env = 'equation'
        self.objlist = objlist
        self.level = level
class pyp_code(object):
    """A verbatim code environment: stores the raw text and its split lines."""
    def __init__(self, string_in, objlist, level=0):
        # The raw text is kept under two historical attribute names.
        self.code = string_in
        self.rawline = string_in
        self.objlist = objlist
        self.lines = string_in.split('\n')
        self.level = level
class pyp_link(object):
    """A hyperlink environment; the raw source line is taken from the
    first line object in *objlist*."""
    def __init__(self, string_in, objlist, level=0):
        self.link = string_in
        self.objlist = objlist
        self.rawline = objlist[0].rawline
        self.level = level
# Dispatch table: pyp environment keyword -> handler class; used by
# pyp_env_popper to wrap each popped ``key{...}`` chunk.
pyp_def_map = {'fig':pyp_figure, 'twocols':twocols, \
               'eqn':pyp_eqn,'code':pyp_code, 'link':pyp_link}
class pyp_env_popper(env_popper):
    """The trick with trying to use env_popper with pyp parsing is
    that the input lists are composed of raw strings, but line
    instances.  So, pyp_env_popper will create a separate list of the
    strings of the input list of lines and try and work with those two
    separate lists, one mainly for searching, the other for one we are
    really trying to work with.

    ``self.lines`` (strings) is searched; ``self.objlist`` (line objects)
    is the list actually edited -- the two must stay index-aligned.
    """
    def __init__(self, listin, map_in=None, preface='', def_level=1):
        # def_level: level passed to each handler class constructed in Execute.
        if map_in is None:
            map_in = pyp_def_map
        env_popper.__init__(self, listin, map_in, preface=preface)
        self.objlist = copy.copy(self.list)
        self.def_level = def_level
        # Parallel plain-string view used for regex searching.
        self.lines = [item.string for item in self.list]
    def PopEnv(self, startline=None, endline=None, clear=True, \
               listname='objlist', clearnames=['lines']):#'objlist']):
        # Pop from ``listname`` and, when clearing, delete the same slice
        # from every list named in ``clearnames`` to keep them aligned.
        # NOTE(review): ``clearnames=['lines']`` is a mutable default --
        # harmless here since it is never mutated, but worth cleaning up.
        myvect = getattr(self, listname)
        if startline is None:
            startline = self.matchline
        if endline is None:
            endline = self.endline+1
        outlist = myvect[startline:endline]
        if clear:
            myvect[startline:endline] = []
            for curname in clearnames:
                curvect = getattr(self, curname)
                curvect[startline:endline] = []
        return outlist
    def Chunk_from_Objlist(self, objlist):
        # Convert a chunk of line objects to their plain strings.
        chunk = [item.string for item in objlist]
        return chunk
    def PopNext(self, clear=True, list1name='lines', \
                list2name='objlist'):
        # Search in list1name (strings) but pop from list2name (objects).
        if self.FindNextEnv(listname=list1name) is not None:#sets self.matchline
            self.FindEndofEnv(listname=list1name)#sets self.endline
            outlist = self.PopEnv(clear=clear, listname=list2name, \
                                  clearnames=[list1name])
            if clear:
                self.ind = self.matchline#r                return outlist
            return outlist
        else:
            return None
    def Execute(self):
        # Replace every ``key{...}`` chunk in self.objlist with an instance
        # of the handler class from self.map; a placeholder string keeps
        # self.lines index-aligned with self.objlist.
        keepgoing = True
        n = 0
        while keepgoing and (n < len(self.list)):
            obj_chunk = self.PopNext()
            if obj_chunk:
                chunk = self.Chunk_from_Objlist(obj_chunk)
                envkey, code = self._CleanChunk(chunk)
                #print('envkey='+str(envkey))
                curclass = self.map[envkey]
                cur_object = curclass(code, obj_chunk, \
                                      level=self.def_level)
                self.objlist[self.matchline:self.matchline] = [cur_object]
                self.lines[self.matchline:self.matchline] = ['!!!space holder!!!']
            else:
                keepgoing = False
            n += 1
        return self.objlist
class python_report_env(object):
    """Base class for a block of Python source popped out of a report.

    Stores the source lines and their joined text; ``Execute`` runs the
    code in a caller-supplied namespace.
    """
    def __init__(self, listin):
        self.list = txt_mixin.txt_list(listin)
        self.code = '\n'.join(self.list)
    def Execute(self, namespace, **kwargs):
        """Execute self.code in *namespace* (mutated in place).

        On failure, print the numbered source and the traceback, then
        exit.  SECURITY NOTE: exec of report-supplied code is inherently
        unsafe -- only process trusted input.
        """
        self.namespace = namespace
        try:
            exec(self.code, namespace)
        except Exception:
            # FIX: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            for i,l in enumerate(self.code.split('\n')):
                print('%s: %s'%(i+1,l))
            traceback.print_exc(file=sys.stdout)
            # FIX: exit nonzero -- this is an error path (was sys.exit(0),
            # which reported success to the shell).
            sys.exit(1)
    def To_PYP(self, **kwargs):
        # Subclasses convert themselves to pyp markup lines.
        raise NotImplementedError
class py_figure(python_report_env):
    """A pyfig environment is a chunk of code that generates a figure.
    The figure should be ready to be saved by the end of the block,
    i.e. all formatting is done and it looks pretty.
    The caption and filename should be specified at the beginning of
    the block in a line that starts with a # and has a colon after the
    word caption or filename like so:
    #pyfig
    #caption:This is my caption.
    #filename:filename.png
    multi-line captions are o.k.: the caption is assumed to end on
    either the line with #filename: in it or the next non-commented
    line."""
    def Execute(self, namespace, figfolder='figs',\
                def_ext='.png', dpi=100, **kwargs):
        # Run the figure code, parse #caption/#filename/#label comments,
        # and save the resulting figure under ``figfolder``.
        if not os.path.exists(figfolder):
            os.mkdir(figfolder)
        python_report_env.Execute(self, namespace=namespace, **kwargs)
        keys = ['caption','filename','label']
        mypat = '^#('+'|'.join(keys)+')'
        comments = [item for item in self.list if item.find('#')==0]
        # NOTE(review): ``comments[0]`` raises IndexError when the block
        # has no comment lines at all -- confirm upstream guarantees one.
        if comments[0].find('#pyfig') == 0:
            comments.pop(0)
        com_list = txt_mixin.txt_list(comments)
        start_inds = com_list.findallre(mypat)
        # Each key's text runs until the next key line (None = to the end).
        end_inds = start_inds[1:]+[None]
        pat2 = '^#('+'|'.join(keys)+')'+':(.*)'
        p2 = re.compile(pat2)
        keysfound = []
        for si, ei in zip(start_inds, end_inds):
            chunk = ''.join(com_list[si:ei])
            q2 = p2.match(chunk)
            if q2:
                key = q2.group(1)
                body = q2.group(2)
                # Continuation lines keep their leading '#'; turn those
                # into spaces so multi-line captions read naturally.
                body = body.replace('#',' ')
                setattr(self, key, body)
                keysfound.append(key)
        assert 'filename' in keysfound, "#filename: was not found in " + \
               self.code +'\n'*2+ \
               'NOTE: it must be in the beginning comments.'
        fno, ext = os.path.splitext(self.filename)
        if not ext:
            ext = def_ext
        self.nameout = fno+ext
        self.pathout = os.path.join(figfolder, self.nameout)
        PU.mysave(self.pathout, dpi=dpi)
    def To_PYP(self, echo=False, **kwargs):
        # Emit pyp markup: optionally echo the code, then a fig{...}
        # line referencing the saved figure.  Requires Execute() first
        # (self.pathout is set there).
        outlist = []
        if echo:
            outlist.append('code{')
            for line in self.list:
                if line and line[0] != '#':
                    outlist.append(line)
            outlist.append('}')
        pyp_out_str = 'fig{'
        if hasattr(self, 'caption'):
            if self.caption:
                pyp_out_str += 'caption:'+self.caption+'|'
        if hasattr(self, 'label'):
            if self.label:
                pyp_out_str += 'label:'+self.label+'|'
        pyp_out_str += self.pathout+'}'
        outlist.append(pyp_out_str)
        return outlist
def find_lhs(line):
    """Find the left hand side (lhs) of an assignment statement,
    checking to make sure that the equals sign is not inside the
    arguement of a function call.

    Returns the text before the first '=' (trailing whitespace kept),
    or None when there is no '=' or the first '(' precedes it, as in
    ``myfunc(1, b=2)``.
    """
    eq_pos = line.find('=')
    if eq_pos == -1:
        # No '=' at all: not an assignment.
        return None
    paren_pos = line.find('(')
    if paren_pos == -1 or eq_pos < paren_pos:
        # Either no call syntax, or the '=' comes before the first '(':
        # e.g. ``out = myfunc(b=5)`` -> 'out '.
        return line[:eq_pos]
    # The '(' comes first: the '=' is a keyword argument, no lhs.
    return None
# Comment lines whose second character is one of these (e.g. '#!',
# '#----', '#====') are dropped entirely by py_body.To_PYP.
ignore_list = ['!','-','=']
class py_body(python_report_env):
    """A ``#pybody`` block: code is executed and, per line, echoed and/or
    summarized in the output markup."""
    def To_PYP(self, usetex=False, echo=False, **kwargs):
        # Convert this block to pyp markup lines.  Requires Execute() to
        # have run first: assignments are shown by eval-ing their lhs in
        # self.namespace.  SECURITY NOTE: eval of report-derived text --
        # only process trusted input.
        pyp_out = []
        self.lhslist = []
        if self.list[0].find('#pybody') == 0:
            self.list.pop(0)
        for line in self.list:
            if not line:
                pyp_out.append('')
            elif line[0] == '#':
                #lines like #! or #---- or #==== are caught here and
                #dropped - they will not be echoed.
                # NOTE(review): a lone '#' line would raise IndexError on
                # line[1] -- confirm inputs never contain a bare '#'.
                if line[1] not in ignore_list:
                    pyp_out.append(line[1:])
            else:
                lhs = find_lhs(line)
                if echo:
                    pyp_out.append('code{'+line+'}')
                # Skip lines like ``print ...`` that find_lhs mis-detects.
                if lhs and lhs.find('print')==-1:
                    myvar = eval(lhs, self.namespace)
                    if usetex:
                        outlines, env = VL.VariableToLatex(myvar, lhs,**kwargs)
                        if len(outlines) == 1:
                            eqnlines = ['eqn{'+env+'|'+outlines[0]+'}']
                        else:
                            eqnlines = ['eqn{'+env+'|']+outlines+['}']
                        pyp_out.extend(eqnlines)
                    else:
                        pyp_out.append('%s = %s' % (lhs, myvar))
                    self.lhslist.append(lhs)
        return pyp_out
class py_no(python_report_env):
    """A ``#pyno`` block: executed for its side effects, but contributes
    nothing to the generated document."""
    def To_PYP(self, **kwargs):
        # Deliberately empty output.
        return []
# Dispatch table: ``#py<key>`` marker -> handler class, used by
# python_report_popper ('body' is the fallback for unmarked comments).
py_def_map = {'fig':py_figure, 'body':py_body,'no':py_no}
class python_report_popper(env_popper):
    """This class exists to make it easier to create journal entries
    or other reports directly from commented python files.  The python
    file must include things like #pyno, #pybody and #pyfig to tell
    the popper how to chop up the file.  The chopping up will not
    include curly braces so that the end of one environment will be
    marked by the start of the next."""
    def __init__(self, listin, map_in=None, preface='^#py', show=False):
        # show=False drops top-level ``show(`` calls before parsing.
        if map_in is None:
            map_in = py_def_map
        if not show:
            listin = [item for item in listin if \
                      item.find('show(') != 0]
        env_popper.__init__(self, listin, map_in, preface)
        self.pat = '^#(?!\!)'#match any comment sign that isn't
                             #followed by a !.  If it doesn't match a
                             #known env, it will default to pybody
        self.p = re.compile(self.pat)
        self.pat2 = self.preface+'('+self.keystr+')'
        self.p2 = re.compile(self.pat2)
        # The very first chunk always starts at line 0.
        self.first = True
    def FindNextEnv(self):
        """Find the next line matching self.p (the re.compile-ed
        version of self.pat), starting at line self.ind."""
        if self.first:
            self.matchline = 0
            self.ind = self.matchline+1
            self.first = 0
            return self.matchline
        else:
            next_ind = self.list.findnextre(self.p, ind=self.ind)
            if next_ind is not None:
                self.matchline = next_ind
                self.ind = self.matchline+1
                return self.matchline
            else:
                return None
    def FindEndofEnv(self, matchline=None):
        #this needs to handle pyfig env's better now that pat just
        #looks for # without a !
        if matchline is None:
            matchline = self.matchline
        line0 = self.list[matchline]
        envkey = self._Get_Env_Key(line0)
        if envkey == 'fig':
            # A pyfig header may span several comment lines; skip them
            # before searching for the next environment marker.
            self.ind = self.list.find_next_non_comment(start_ind=self.ind)
        end_ind = self.list.findnextre(self.p, ind=self.ind)
        if end_ind:
            end_ind = end_ind-1
        self.endline = end_ind
        return end_ind
    def PopEnv(self, startline=None, endline=None, clear=False):
        # Slice the current chunk out of self.list (endline inclusive;
        # None means "to the end of the file").
        if startline is None:
            startline = self.matchline
        if endline is None:
            if self.endline is not None:
                endline = self.endline+1
        outlist = self.list[startline:endline]
        if clear:
            # BUG FIX: the original assigned to the undefined name
            # ``myvect`` (NameError when clear=True); delete from self.list.
            self.list[startline:endline] = []
        return outlist
    def PopNext(self, clear=False):
        # Locate, delimit, and return the next chunk.
        if self.FindNextEnv() is not None:#sets self.matchline
            self.FindEndofEnv()#sets self.endline
            outlist = self.PopEnv(clear=clear)
            if clear:
                self.ind = self.matchline
            return outlist
        else:
            return None
    def _Get_Env_Key(self, line0):
        """Return the env keyword from a ``#py<key>`` marker line,
        defaulting to 'body' for any other comment."""
        q2 = self.p2.match(line0)
        if q2:
            envkey = q2.group(1)
        else:
            envkey = 'body'
        return envkey
    def Execute(self):
        # Chop the whole file into handler objects, in order.
        keepgoing = True
        n = 0
        self.objlist = []
        while keepgoing and (n < len(self.list)):
            chunk = self.PopNext()
            if chunk:
                line0 = chunk[0]
                envkey = self._Get_Env_Key(line0)
                curclass = self.map[envkey]
                cur_object = curclass(chunk)
                self.objlist.append(cur_object)
            else:
                keepgoing = False
            n += 1
        return self.objlist
class reg_exp_popper(simple_popper, python_report_popper):
    """Popper whose chunks are delimited by a caller-supplied pair of
    regexes: ``start_re`` opens an environment and ``end_re`` closes it.
    (The docstring below is inherited wording from python_report_popper;
    this variant is regex-driven rather than #py-marker driven.)

    This class exists to make it easier to create journal entries
    or other reports directly from commented python files."""
    def __init__(self, listin, start_re, end_re=None):
        # NOTE(review): end_re=None would make re.compile(None) raise
        # TypeError -- callers are expected to always pass end_re.
        simple_popper.__init__(self, listin, start_re)
        self.end_re = end_re
        self.p_end = re.compile(self.end_re)
    def FindEndofEnv(self, matchline=None):
        #this needs to handle pyfig env's better now that pat just
        #looks for # without a !
        # Sets self.endline to the first line at/after self.ind matching
        # end_re (None if no match).
        if matchline is None:
            matchline = self.matchline
        end_ind = self.list.findnextre(self.p_end, ind=self.ind)
##         if end_ind:
##             end_ind = end_ind-1
        self.endline = end_ind
        return end_ind
def line_starts_with_non_white_space(linein):
    """Return True when *linein* is a non-empty string whose first
    character is neither a space nor a tab; None and '' give False."""
    if linein is None or linein == '':
        return False
    return linein[0] not in (' ', '\t')
class rst_popper(env_popper):
    """This is my quick and dirty attemp to convert a sage rst
    document to a file sage can load.  I will make some attempt to
    generalize it so that it could work with other rst documents.

    Pops ``.. py::`` / ``.. pyno::`` directives; the body of a directive
    is the following indented block.
    """
    def __init__(self, listin, preface='^'):#map_in=None
        self.list = txt_mixin.txt_list(listin)
        #self.map = map_in
        #self.keys = self.map.keys()
        #self.keystr = '|'.join(self.keys)
        self.preface = preface
        self.pat = self.preface + '\.\. (py|pyno)::'
        self.p = re.compile(self.pat)
        self.lines = copy.copy(listin)
        self.ind = 0
        # Optional ``:label:`` option line directly under the directive.
        self.pat2 = "^[ \t]+:label:"
        self.p2 = re.compile(self.pat2)
        # Captures the leading whitespace of the first code line, used
        # later to dedent the whole block.
        self.pat_code = '^([ \t]+)'#for finding white_space
        self.pcode = re.compile(self.pat_code)
    def FindEndofEnv(self, matchline=None, listname='lines'):
        # The directive body ends at the first line that starts in
        # column 0 (non-whitespace first char).
        # NOTE(review): the ``while i < N-1`` bound never inspects the
        # final line, so a one-line tail may be absorbed into the block
        # -- confirm whether that is intentional.
        myvect = getattr(self, listname)
        if matchline is None:
            matchline = self.matchline
        n = -1
        N = len(myvect)
        i = matchline + 1
        while i < N-1:
            curline = myvect[i]
            if line_starts_with_non_white_space(curline):
                #print('curline[0]=' + curline[0] + '.')
                self.endline = i-1
                return self.endline
            else:
                i += 1
        #if the code makes it to here, the file ends on a .. py:: or
        #.. pyno:: environment
        self.endline = None
        return self.endline
    def _CleanChunk(self, chunk):
        # Strip the directive line, an optional :label: line, and blank
        # padding, then dedent the body by the first line's indentation.
        first_line = chunk.pop(0)
        q = self.p.search(first_line)
        assert q is not None, "First line of chunk did not match pattern."
        line_two = chunk[0]#first line is already popped off
        q2 = self.p2.search(line_two)
        if q2 is not None:
            line_two = chunk.pop(0)#remove the label line
        while not chunk[0]:
            chunk.pop(0)#remove empty lines at the beginning
        while not chunk[-1]:
            chunk.pop()#remove empty lines at the end
        first_code_line = chunk[0]
        # NOTE(review): assumes the first code line is indented; an
        # unindented line makes ``qcode`` None and raises AttributeError.
        qcode = self.pcode.search(first_code_line)
        ws = qcode.group(0)
        self.pat_code2 = '^' + ws
        self.pcode2 = re.compile(self.pat_code2)
        lines_out = []
        for line in chunk:
            clean_line = self.pcode2.sub('',line)
            lines_out.append(clean_line)
        lines_out.append('')#one empty line per chunk
        return lines_out
    def Execute(self):
        # Pop every directive and concatenate the cleaned bodies.
        keepgoing = True
        n = 0
        self.list_out = []
        #Pdb().set_trace()
        while keepgoing and (n < len(self.list)):
            chunk = self.PopNext()
            if chunk:
                clean_chunk = self._CleanChunk(chunk)
                self.list_out.extend(clean_chunk)
            else:
                keepgoing = False
            n += 1
        return self.list_out
    def save(self, outpath):
        # Write the converted lines to ``outpath``.
        txt_mixin.dump(outpath, self.list_out)
if __name__ == '__main__':
    # Demo driver: convert one hard-coded .rst file to a .sage file next
    # to it.  NOTE(review): assumes ``os`` is imported earlier in this
    # file and that the hard-coded path exists on this machine.
    filepath = '/home/ryan/siue/Research/DT_TMM/cantilever_beam/two_masses_analysis.rst'
    import txt_mixin
    myfile = txt_mixin.txt_file_with_list(filepath)
    mylist = myfile.list
    mypopper = rst_popper(mylist)
    mypopper.Execute()
    # swap the .rst extension for .sage to build the output path
    pne, ext = os.path.splitext(filepath)
    outpath = pne + '.sage'
    mypopper.save(outpath)
|
# Average of three numbers.  The first assignment was garbled by an
# extraction artifact (a metadata row fused onto it) and is restored here.
var1 = 3
var2 = 6
var3 = 9
# arithmetic mean of the three values
var4 = ((var1 + var2 + var3) / 3)
print(var4)
# print a var
|
992,619 | 725b7f29066eb1e6c94348353c34ef22d2d09d9d | import json
import mlrun.errors
import mlrun.utils.singleton
from mlrun.api.schemas.marketplace import (
MarketplaceCatalog,
MarketplaceItem,
MarketplaceItemMetadata,
MarketplaceItemSpec,
MarketplaceSource,
ObjectStatus,
)
from mlrun.api.utils.singletons.k8s import get_k8s
from mlrun.config import config
from mlrun.datastore import store_manager
# Using a complex separator, as it's less likely someone will use it in a real secret name
secret_name_separator = "-__-"
class Marketplace(metaclass=mlrun.utils.singleton.Singleton):
    """Singleton service managing marketplace sources: their credentials
    (stored as k8s project secrets) and cached item catalogs."""
    def __init__(self):
        # k8s project used to namespace marketplace credential secrets
        self._internal_project_name = config.marketplace.k8s_secrets_project_name
        # source name -> cached MarketplaceCatalog
        self._catalogs = {}
    @staticmethod
    def _get_k8s():
        """Return the k8s helper, or None when not running inside a cluster."""
        k8s_helper = get_k8s()
        if not k8s_helper.is_running_inside_kubernetes_cluster():
            return None
        return k8s_helper
    @staticmethod
    def _generate_credentials_secret_key(source, key=""):
        """Build the secret name '<source>-__-<key>' for a credential entry."""
        return source + secret_name_separator + key
    def add_source(self, source: MarketplaceSource):
        """Store the source's credentials (if any) as project secrets."""
        source_name = source.metadata.name
        credentials = source.spec.credentials
        if credentials:
            self._store_source_credentials(source_name, credentials)
    def remove_source(self, source_name):
        """Drop the source's cached catalog and delete its credential secrets."""
        self._catalogs.pop(source_name, None)
        # outside k8s there are no stored secrets to clean up
        if not self._get_k8s():
            return
        source_credentials = self._get_source_credentials(source_name)
        if not source_credentials:
            return
        secrets_to_delete = [
            self._generate_credentials_secret_key(source_name, key)
            for key in source_credentials
        ]
        self._get_k8s().delete_project_secrets(
            self._internal_project_name, secrets_to_delete
        )
    def _store_source_credentials(self, source_name, credentials: dict):
        """Persist credentials as k8s project secrets; raises when no
        cluster is available (credentials cannot be stored securely)."""
        if not self._get_k8s():
            raise mlrun.errors.MLRunInvalidArgumentError(
                "MLRun is not configured with k8s, marketplace source credentials cannot be stored securely"
            )
        # prefix every key with '<source>-__-' so sources don't collide
        adjusted_credentials = {
            self._generate_credentials_secret_key(source_name, key): value
            for key, value in credentials.items()
        }
        self._get_k8s().store_project_secrets(
            self._internal_project_name, adjusted_credentials
        )
    def _get_source_credentials(self, source_name):
        """Return this source's credentials ({} outside a k8s cluster),
        with the '<source>-__-' prefix stripped from the keys."""
        if not self._get_k8s():
            return {}
        secret_prefix = self._generate_credentials_secret_key(source_name)
        secrets = self._get_k8s().get_project_secret_data(self._internal_project_name)
        source_secrets = {}
        for key, value in secrets.items():
            if key.startswith(secret_prefix):
                source_secrets[key[len(secret_prefix) :]] = value
        return source_secrets
    @staticmethod
    def _transform_catalog_dict_to_schema(source, catalog_dict):
        """Flatten a raw catalog dict (channel -> function -> version) into
        a MarketplaceCatalog of MarketplaceItem objects."""
        catalog_dict = catalog_dict.get("functions")
        if not catalog_dict:
            raise mlrun.errors.MLRunInternalServerError(
                "Invalid catalog file - no 'functions' section found."
            )
        catalog = MarketplaceCatalog(catalog=[])
        # Loop over channels, then per function extract versions.
        for channel_name in catalog_dict:
            channel_dict = catalog_dict[channel_name]
            for function_name in channel_dict:
                function_dict = channel_dict[function_name]
                for version_tag in function_dict:
                    version_dict = function_dict[version_tag]
                    function_details_dict = version_dict.copy()
                    # 'spec' is modeled separately from the item metadata
                    spec_dict = function_details_dict.pop("spec", None)
                    metadata = MarketplaceItemMetadata(
                        channel=channel_name, tag=version_tag, **function_details_dict
                    )
                    item_uri = source.get_full_uri(metadata.get_relative_path())
                    spec = MarketplaceItemSpec(item_uri=item_uri, **spec_dict)
                    item = MarketplaceItem(
                        metadata=metadata, spec=spec, status=ObjectStatus()
                    )
                    catalog.catalog.append(item)
        return catalog
    def get_source_catalog(
        self,
        source: MarketplaceSource,
        channel=None,
        version=None,
        tag=None,
        force_refresh=False,
    ) -> MarketplaceCatalog:
        """Return the source's catalog filtered by channel/version/tag
        (None matches everything), fetching and caching the full catalog
        on first use or when force_refresh is set."""
        source_name = source.metadata.name
        if not self._catalogs.get(source_name) or force_refresh:
            url = source.get_catalog_uri()
            credentials = self._get_source_credentials(source_name)
            catalog_data = mlrun.run.get_object(url=url, secrets=credentials)
            catalog_dict = json.loads(catalog_data)
            catalog = self._transform_catalog_dict_to_schema(source, catalog_dict)
            # cache the full (unfiltered) catalog for later queries
            self._catalogs[source_name] = catalog
        else:
            catalog = self._catalogs[source_name]
        result_catalog = MarketplaceCatalog(catalog=[])
        for item in catalog.catalog:
            if (
                (channel is None or item.metadata.channel == channel)
                and (tag is None or item.metadata.tag == tag)
                and (version is None or item.metadata.version == version)
            ):
                result_catalog.catalog.append(item)
        return result_catalog
    def get_item(
        self,
        source: MarketplaceSource,
        item_name,
        channel,
        version=None,
        tag=None,
        force_refresh=False,
    ) -> MarketplaceItem:
        """Return the single catalog item named item_name; raises
        MLRunNotFoundError when absent and MLRunInvalidArgumentError when
        the query matches more than one item."""
        catalog = self.get_source_catalog(source, channel, version, tag, force_refresh)
        items = [item for item in catalog.catalog if item.metadata.name == item_name]
        if not items:
            raise mlrun.errors.MLRunNotFoundError(
                f"Item not found. source={item_name}, channel={channel}, version={version}"
            )
        if len(items) > 1:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Query resulted in more than 1 catalog items. "
                + f"source={item_name}, channel={channel}, version={version}, tag={tag}"
            )
        return items[0]
    def get_item_object_using_source_credentials(self, source: MarketplaceSource, url):
        """Fetch an object from the source's storage using its credentials;
        a url ending in '/' yields a directory listing dict instead."""
        credentials = self._get_source_credentials(source.metadata.name)
        # refuse to fetch anything outside the source's own tree
        if not url.startswith(source.spec.path):
            raise mlrun.errors.MLRunInvalidArgumentError(
                "URL to retrieve must be located in the source filesystem tree"
            )
        if url.endswith("/"):
            stores = store_manager.set(credentials)
            obj = stores.object(url=url)
            listdir = obj.listdir()
            return {
                "listdir": listdir,
            }
        else:
            catalog_data = mlrun.run.get_object(url=url, secrets=credentials)
            return catalog_data
|
992,620 | 7af1b5cd60640dde69503f04394ef2e48afecc92 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 20 09:33:14 2017
@author: r.dewinter
"""
import numpy as np
from scipy.optimize import minimize
def fminsearchbnd(fun=None,x0=None,LB=None,UB=None,options=None,varargin=None):
    '''
    FMINSEARCHBND: FMINSEARCH, but with bound constraints by transformation
    usage: x=FMINSEARCHBND(fun,x0)
    usage: x=FMINSEARCHBND(fun,x0,LB)
    usage: x=FMINSEARCHBND(fun,x0,LB,UB)
    usage: x=FMINSEARCHBND(fun,x0,LB,UB,options)
    usage: [x,fval,exitflag,output]=FMINSEARCHBND(fun,x0,...)

    arguments:
      fun, x0, options - see the help for FMINSEARCH
      LB - lower bound vector or array, must be the same size as x0.
           Use -inf for unbounded variables; may be left as None.
      UB - upper bound vector or array, must be the same size as x0.
           Use +inf for unbounded variables; may be left as None.
      Variables may be fixed in value by setting the corresponding
      lower and upper bounds to exactly the same value.

    Notes:
      Variables constrained by both a lower and an upper bound use a sin
      transformation.  Those constrained by only a lower or an upper
      bound use a quadratic transformation, and unconstrained variables
      are left alone.  Fixed variables (LB == UB) are removed from the
      problem before the Nelder-Mead search.
      The bounds are inclusive inequalities: boundary values are
      admitted, and no function evaluation happens outside the bounds.
      If your problem has an EXCLUSIVE (strict) constraint, supply a
      slightly offset bound (e.g. for a log of a parameter bounded below
      by zero).

    Returns [x, fval, exitflag, output].
    '''
    n = len(x0)
    if LB is None:
        LB = np.full_like(np.empty(n), -np.inf)
    if UB is None:
        UB = np.full_like(np.empty(n), np.inf)
    if n!=len(LB) or n!=len(UB):
        raise ValueError('x0 is incompatible in size with either LB or UB')
    if options is None or not options:
        # defaults mirroring MATLAB's fminsearch; scipy's Nelder-Mead
        # ignores unknown options such as 'Display' with a warning
        options = dict()
        options['Display'] = True
        options['maxiter'] = 200*n
        options['fatol'] = 1e-4
    params = dict()
    params['args'] = varargin
    params['LB'] = LB
    params['UB'] = UB
    params['fun'] = fun
    params['n'] = n
    params['OutputFcn'] = []
    # 0 --> unconstrained variable
    # 1 --> lower bound only
    # 2 --> upper bound only
    # 3 --> dual finite bounds
    # 4 --> fixed variable
    boundClass = np.zeros(n)
    for i in range(n):
        k = np.isfinite(LB[i]) + 2*np.isfinite(UB[i])
        boundClass[i] = k
        if k==3 and LB[i] == UB[i]:
            boundClass[i] = 4
    params['BoundClass'] = boundClass
    # transform the starting values into the unconstrained space
    x0u = np.copy(x0)
    k = 0
    for i in range(n):
        bC = params['BoundClass'][i]
        if bC == 1:
            if x0[i] <= LB[i]:
                # infeasible starting value: clamp onto the lower bound
                x0u[k] = 0
            else:
                x0u[k] = np.sqrt(x0[i] - LB[i])
            k+=1
        if bC == 2:
            if x0[i]>=UB[i]:
                # infeasible starting value: clamp onto the upper bound
                x0u[k] = 0
            else:
                x0u[k] = np.sqrt(UB[i]-x0[i])
            k+=1
        if bC == 3:
            if x0[i]<=LB[i]:
                # infeasible start below the interval -> sin = -1 -> LB
                x0u[k] = -np.pi/2
            elif x0[i]>=UB[i]:
                # BUG FIX: an infeasible start at/above the upper bound
                # must map to +pi/2 (sin = +1 -> x = UB); the original
                # -np.pi/2 clamped it onto the LOWER bound instead
                # (see D'Errico's reference MATLAB fminsearchbnd).
                x0u[k] = np.pi/2
            else:
                x0u[k] = 2*(x0[i] - LB[i])/(UB[i] - LB[i]) -1
                # shift by 2*pi to avoid problems at zero; any multiple
                # of 2*pi is equivalent under the sin transform
                x0u[k] = 2*np.pi+np.arcsin(max(-1,min(1,x0u[k])))
            k+=1
        if bC == 0:
            x0u[k] = x0[i]
            k+=1
        #dont do anything if bC == 4 (fixed variables use no slot)
    if k<n:
        # some variables were fixed by their bounds: shrink the vector
        x0u = x0u[:k]
    if len(x0u)==0:
        # every variable was held fixed -- nothing left to optimize
        x = xtransform(x0u,params)
        fval = params['fun'](x)
        exitflag = False
        output = dict()
        output['iterations'] = 0
        output['funcount'] = 1
        output['algorithm'] = 'fminsearch'
        output['message'] = 'All variables were held fixed by the applied bounds'
        return [x, fval, exitflag, output]
    def outfun_wrapper(x, varargin, params):
        # forward the back-transformed (bounded) x to the user's callback
        xtrans = xtransform(x,params)
        stop = params['OutputFcn'](xtrans,varargin)
        return stop
    if 'OutputFcn' in options:
        params['OutputFcn'] = options['OutputFcn']
        options['OutputFcn'] = outfun_wrapper
    optimizeResult = minimize(intrafun,x0u,args=(params),method='Nelder-Mead',tol=np.inf, options=options)
    fval = optimizeResult['fun']
    exitflag = optimizeResult['success']
    xu = optimizeResult['x']
    output = dict()
    output['iterations'] = optimizeResult['nit']
    output['funcount'] = optimizeResult['nfev']
    output['algorithm'] = 'fminsearch'
    output['message'] = optimizeResult['message']
    # undo the variable transformations to report the solution in the
    # original, bounded space
    x = xtransform(xu,params)
    return [x, fval, exitflag, output]
def intrafun(x, params=None):
    """Objective wrapper: evaluate the user's function at the
    back-transformed (bounded) point."""
    xtrans = xtransform(x,params)
    fval = params['fun'](xtrans)
    return fval
def xtransform(x,params):
    """Map the unconstrained optimizer variables x back into the original
    bounded space according to params['BoundClass']:
      0 -> unconstrained (copied through)
      1 -> lower bound only: LB + x**2
      2 -> upper bound only: UB - x**2
      3 -> dual bounds: sin transform, clipped into [LB, UB]
      4 -> fixed variable: always LB (no optimizer slot used)
    """
    xtrans = np.zeros(params['n'])
    k = 0
    for i in range(params['n']):
        bC = params['BoundClass'][i]
        if bC == 1:
            # quadratic transform guarantees xtrans >= LB
            xtrans[i] = params['LB'][i] + x[k]**2
            k += 1
        if bC == 2:
            xtrans[i] = params['UB'][i] - x[k]**2
            k += 1
        if bC == 3:
            # sin transform keeps the value inside [LB, UB]; the explicit
            # clip guards against floating point spill-over
            xtrans[i] = (np.sin(x[k])+1)/2
            xtrans[i] = xtrans[i]*(params['UB'][i] - params['LB'][i]) + params['LB'][i]
            xtrans[i] = max(params['LB'][i], min(params['UB'][i],xtrans[i]))
            k+=1
        if bC == 4:
            xtrans[i] = params['LB'][i]
        if bC == 0:
            xtrans[i] = x[k]
            k+=1
    return xtrans
992,621 | 401cb8b86a180e76dc2b537b5766e4f0ce6abf36 | import json
from typing import List, Union
from grapple.bom.entity import Entity
from grapple.bom.node import Node
from grapple.bom.relation import Relation
Payload = List[Union[Entity, Node, Relation]]
class Condition(object):
    """Abstract base class for match conditions: concrete subclasses
    provide a textual signature and an is_valid predicate over one or
    two payload sequences."""
    @property
    def signature(self) -> str:
        """Human-readable identifier for this condition."""
        raise NotImplementedError('To be overridden in implementing classes')
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        """Report whether this condition holds for the given payload(s)."""
        raise NotImplementedError('To be overridden in implementing classes')
class IsNode(Condition):
    """Condition satisfied when the payload ends with a Node."""
    @property
    def signature(self) -> str:
        return '()'
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) fixes the declared return type: the original returned
        # the empty payload list itself instead of False
        return bool(payload) and isinstance(payload[-1], Node)
class HasLabel(Condition):
    """Condition satisfied when the payload's last element is a Node
    carrying the configured label."""
    def __init__(self, label: str):
        self._label = label
    @property
    def signature(self) -> str:
        return '(:%s)' % self._label
    @property
    def label(self) -> str:
        return self._label
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) so an empty payload yields False rather than leaking
        # the falsy list itself (the annotation promises a bool)
        return bool(payload) and type(payload[-1]) is Node and self._label in payload[-1].labels
class IsRelation(Condition):
    """Condition satisfied when the payload ends with a Relation."""
    @property
    def signature(self) -> str:
        return '[]'
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) fixes the declared return type for an empty payload
        return bool(payload) and isinstance(payload[-1], Relation)
class HasType(Condition):
    """Condition satisfied when the payload's last element is a Relation
    carrying the configured type."""
    def __init__(self, type: str):
        self._type = type
    @property
    def signature(self) -> str:
        return '[:%s]' % self._type
    @property
    def type(self) -> str:
        return self._type
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) so an empty payload yields False, as annotated
        return bool(payload) and type(payload[-1]) is Relation and self._type in payload[-1].types
class HasKey(Condition):
    """Condition satisfied when the payload's last element has a property
    with the configured key."""
    def __init__(self, key: str):
        self._key = key
    @property
    def signature(self) -> str:
        return '{%s}' % self._key
    @property
    def key(self) -> str:
        return self._key
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) normalises both the empty-payload case and whatever
        # truthy/falsy value has_property returns, matching the annotation
        return bool(payload) and bool(payload[-1].has_property(self._key))
class HasProperty(Condition):
    """Condition satisfied when the payload's last element carries the
    configured key with exactly the configured value."""
    def __init__(self, key: str, value: 'Value'):
        self._key = key
        self._value = value
    @property
    def signature(self) -> str:
        return '{%s: %s}' % (self._key, json.dumps(self._value))
    @property
    def key(self) -> str:
        return self._key
    @property
    def value(self) -> 'Value':
        return self._value
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) so an empty payload yields False, as annotated
        return bool(payload) and payload[-1].get_property(self._key) == self._value
class AreEqual(Condition):
    """Condition satisfied when both payloads are non-empty and end with
    equal elements."""
    @property
    def signature(self) -> str:
        return '=='
    def is_valid(self, payload: Payload, other: Payload = None) -> bool:
        # bool(...) so empty/None payloads map to False instead of leaking
        # the falsy argument itself (the annotation promises a bool)
        return bool(payload) and bool(other) and payload[-1] == other[-1]
def temp(rel: Relation, nod: Node) -> bool:
    # NOTE(review): unimplemented stub; it returns None rather than the
    # annotated bool -- confirm whether it is still needed.
    pass
|
992,622 | 324cac83d63ddcd69464a28e76d58474a6e48817 | #combines jsonlines from desired jsonl files into one, and takes a desired sample from the lines
import pathlib
import ntpath
import random
import os
import linecache
# Directory containing the .jsonl files to merge; replace as needed.
SOURCE_DIR = "/Users/sophiawang/Desktop/May18"

files = []
for path in pathlib.Path(SOURCE_DIR).iterdir():
    if path.is_file():
        print(path)
        if path.name.endswith(".jsonl"):
            # Bug fix: keep the full path (not just the basename) so the
            # later open() works regardless of the current directory.
            files.append(path)

# bigfile.txt accumulates every line from every .jsonl file found above
with open("bigfile.txt", "w") as merged:
    for jsonl_path in files:
        with open(jsonl_path) as src:
            merged.write(src.read())

def random_lines(filename, total_lines=25919, sample_size=50):
    """Return sample_size random lines from filename.

    linecache.getline is 1-indexed, so we sample from 1..total_lines;
    the original sampled from 0, silently producing '' for index 0.
    """
    idxs = random.sample(range(1, total_lines + 1), sample_size)
    return [linecache.getline(filename, i) for i in idxs]

# randomsamples.jsonl receives the random sample of merged lines
with open("randomsamples.jsonl", "w") as out:
    # Bug fix: the merged file is named bigfile.txt, not bigfiletest.txt.
    for line in random_lines("bigfile.txt"):
        out.write(line)

"""
def merge_JsonFiles(filename):
    result = list()
    for f1 in filename:
        with open(f1, 'r') as infile:
            result.extend(json.load(infile))

    with open('counseling3.json', 'w') as output_file:
        json.dump(result, output_file)

merge_JsonFiles(files)
"""
|
def quickSort(nums):
    """In-place iterative quicksort using explicit stacks of sub-array
    bounds instead of recursion.  Returns nums for single-element input
    (matching the original early exit), otherwise sorts in place."""
    if len(nums) == 1:
        return nums
    # stacks of pending sub-array start/end indices (inclusive)
    starts = [0]
    ends = [len(nums) - 1]
    # sorting is finished once no sub-array remains pending
    while starts and ends:
        lo = starts.pop()
        hi = ends.pop()
        # empty or single-element sub-array: nothing to do
        if lo > hi:
            continue
        left, right = lo, hi
        # partition by rotation: larger elements migrate toward the right
        # end until left meets right, which is then in its final position
        while left < right:
            if nums[left] > nums[right]:
                nums[left], nums[right - 1], nums[right] = (
                    nums[right - 1], nums[right], nums[left])
                right -= 1
            else:
                left += 1
        # schedule both halves around the settled index
        starts.append(lo)
        ends.append(left - 1)
        starts.append(left + 1)
        ends.append(hi)
if __name__ =='__main__':
    # Read whitespace-separated integers one line at a time from stdin,
    # sort each line in place and echo the sorted numbers, until EOF.
    while True:
        try:
            arr = [int(x) for x in input().split(' ')]
            quickSort(arr)
            print(' '.join([str(x) for x in arr]))
        except EOFError:
            break
992,624 | 16d99fd4feedb6012af86c4ece968802272903e5 | #!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for manipulating profile data.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from soc.modules.seeder.logic.seeder import logic as seeder_logic
# TODO: Should this go in it's own module?
class GSoCProfileHelper(object):
  """Helper class to aid in manipulating profile data.
  """
  def __init__(self, program, dev_test):
    """Initializes the GSocProfileHelper.
    Args:
      program: a GSoCProgram
      dev_test: if set, always creates users as developers
    """
    self.program = program
    self.user = None
    self.profile = None
    self.dev_test = dev_test
  def createUser(self):
    """Creates a user entity for the current user.
    Returns the cached entity when one was already created.
    """
    if self.user:
      return self.user
    from soc.models.user import User
    from soc.modules.seeder.logic.providers.user import CurrentUserProvider
    properties = {'account': CurrentUserProvider(),
                  'status': 'valid', 'is_developer': self.dev_test}
    self.user = seeder_logic.seed(User, properties=properties)
    return self.user
  def createDeveloper(self):
    """Creates a user entity for the current user that is a developer.
    """
    self.createUser()
    self.user.is_developer = True
    self.user.put()
  def createOtherUser(self, email):
    """Creates a user entity for the specified email.
    Note: this replaces self.user with the newly seeded entity.
    """
    from soc.models.user import User
    from soc.modules.seeder.logic.providers.user import FixedUserProvider
    properties = {'account': FixedUserProvider(value=email), 'status': 'valid'}
    self.user = seeder_logic.seed(User, properties=properties)
    return self.user
  def createProfile(self):
    """Creates a profile for the current user, creating the user first
    when needed; a no-op when a profile already exists.
    """
    if self.profile:
      return
    from soc.modules.gsoc.models.profile import GSoCProfile
    user = self.createUser()
    properties = {'link_id': user.link_id, 'student_info': None, 'user': user,
                  'parent': user, 'scope': self.program, 'status': 'active'}
    self.profile = seeder_logic.seed(GSoCProfile, properties)
  def createStudent(self):
    """Sets the current user to be a student for the current program.
    """
    self.createProfile()
    from soc.modules.gsoc.models.profile import GSoCStudentInfo
    properties = {'key_name': self.profile.key().name(), 'parent': self.profile}
    self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)
    self.profile.put()
  def createStudentWithProposal(self):
    """Sets the current user to be a student with a proposal for the current program.
    """
    self.createStudent()
    from soc.modules.gsoc.models.proposal import GSoCProposal
    properties = {'link_id': self.profile.link_id, 'scope': self.profile,
                  'parent': self.profile, 'status': 'new'}
    seeder_logic.seed(GSoCProposal, properties)
  def createStudentWithProject(self):
    """Sets the current user to be a student with a project for the current program.
    """
    self.createStudentWithProposal()
    from soc.modules.gsoc.models.student_project import StudentProject
    properties = {'link_id': self.profile.link_id, 'scope': self.profile,
                  'student': self.profile, 'parent': self.profile}
    seeder_logic.seed(StudentProject, properties)
  def createHost(self):
    """Sets the current user to be a host for the current program.
    """
    self.createUser()
    self.user.host_for = [self.program.scope.key()]
    self.user.put()
  def createOrgAdmin(self, org):
    """Creates an org admin profile for the current user.
    """
    self.createProfile()
    self.profile.org_admin_for = [org.key()]
    self.profile.put()
  def createMentor(self, org):
    """Creates a mentor profile for the current user.
    """
    self.createProfile()
    self.profile.mentor_for = [org.key()]
    self.profile.put()
  def createMentorWithProject(self, org):
    """Creates a mentor profile with a project for the current user.
    """
    self.createMentor(org)
    from soc.modules.gsoc.models.student_project import StudentProject
    properties = {'mentor': self.profile}
    seeder_logic.seed(StudentProject, properties)
|
992,625 | f8f0269deb1b6d2024bef5ed45e2ecf0cc1ea32e | import os
import json
import csv
import sys
# 读取每个问句对应的实体链接表信息
def read_qid2entity(init_dir_name):
    '''
    Purpose: extract, from the link files produced by stage one, the
             entity-linking data of every question id (qid).
    Input:   name of the candidate folder produced by stage one
    Output:  dict keyed by question id; each value is a list of linked
             entities, each formatted as "<category> <value> <name>"
    '''
    qid2entity = {}
    for root, dirs, files in os.walk(init_dir_name):
        # print(root, dirs, files)
        for dir_name in dirs: # process the folder of each question group
            file_names = os.listdir(root + dir_name)
            for file_name in file_names:
                if('_links' in file_name):
                    f = open(init_dir_name + dir_name + '/' + file_name, 'r', encoding = 'utf-8')
                    # the first four characters of the file name are the qid
                    qid = file_name[0:4]
                    if(qid not in qid2entity):
                        qid2entity[qid] = []
                    for line in f:
                        line_json = json.loads(line.strip())
                        # each link record is expected to have 9 fields
                        if(len(line_json) != 9):
                            print('长度不等于9')
                            import pdb; pdb.set_trace()
                        category = line_json[1][1]
                        if(line_json[6][0] != 'value'):
                            print('value 所在位置不一致')
                            import pdb; pdb.set_trace()
                        value = line_json[6][1]
                        if(line_json[7][0] != 'name'):
                            print('name 所在位置不一致')
                            import pdb; pdb.set_trace()
                        name = line_json[7][1]
                        qid2entity[qid].append((category + ' ' + value + ' ' + name))
    # import pdb; pdb.set_trace()
    return qid2entity
def WebQ():
    """Compute the best-achievable (oracle) F1 of the query-graph
    generation stage on WebQ questions with qid < 3778 and print its
    average; the best schema line per question is written to
    max_match_result.txt."""
    result = {}
    init_dir_name = '../../runnings/candgen_WebQ/20201202_entity_time_type_ordinal/data/'
    # init_dir_name = '/home/jiayonghui/github/bert_rank/runnings/candgen_WebQ/20201202_entity_time_type_ordinal/data/'
    question2path = {}
    num = 0
    for root, dirs, files in os.walk(init_dir_name):
        # print(root, dirs, files)
        for dir_name in dirs:
            file_names = os.listdir(init_dir_name + dir_name)
            for file_name in file_names:
                # import pdb; pdb.set_trace()
                if('_schema' in file_name):
                    f = open(init_dir_name + dir_name + '/' + file_name, 'r', encoding = 'utf-8')
                    # k tracks the best f1 seen for this question
                    k = 0.0
                    temp = ''
                    mid = []
                    for line in f:
                        line_json = json.loads(line.strip())
                        # import pdb; pdb.set_trace()
                        if(line_json['f1'] > k):
                            k = line_json['f1']
                            temp = line.strip()
                            for item in line_json['raw_paths']:
                                mid.append(item[3])
                    # import pdb; pdb.set_trace()
                    que_id = (file_name[0:4])
                    # if(int(que_id) < 5810):
                    #     result[que_id + '\n' + temp] = k
                    if(int(que_id) < 3778):
                        result[que_id + '\n' + temp] = k
                    # if(int(que_id) >= 3778):
                    #     result[que_id + '\n' + temp] = k
                    question2path[que_id] = mid
    # print(result)
    print('len(result):', len(result))
    sum_f1 = 0.0
    f = open('./max_match_result.txt', 'w', encoding = 'utf-8')
    result_list = sorted(result.items(), key = lambda x:x[0])
    for item in result_list:
        # count questions whose best candidate is (nearly) wrong
        if(item[1] <= 0.1):
            num += 1
        sum_f1 += item[1]
        f.write(item[0] + '\n')
        f.flush()
    print(sum_f1)
    print(sum_f1 / len(result))
    print('训练集中没有正确答案的问句个数:', num)
    # import pdb; pdb.set_trace()
def CompQ():
    """Same oracle-F1 analysis for CompQ: find each question's best F1
    over its candidate schemas (keeping all tied best lines) and report
    the average; qid and best line are written to
    max_match_result_joe.txt."""
    result = {}
    init_dir_name = '/data2/yhjia/bert_rank/Generate_QueryGraph/Luo/runnings/candgen_CompQ/20201130_entity_time_type_ordinal/data/'
    for root, dirs, files in os.walk(init_dir_name):
        # print(root, dirs, files)
        for dir_name in dirs:
            file_names = os.listdir(init_dir_name + dir_name)
            for file_name in file_names:
                # import pdb; pdb.set_trace()
                if('_schema' in file_name):
                    f = open(init_dir_name + dir_name + '/' + file_name, 'r', encoding = 'utf-8')
                    lines = f.readlines()
                    # first pass: best f1 for this question
                    k = 0.0
                    temp = []
                    for line in lines:
                        line_json = json.loads(line.strip())
                        # import pdb; pdb.set_trace()
                        if(line_json['f1'] > k):
                            k = line_json['f1']
                    # second pass: collect every line tied at the best f1
                    if k > 0:
                        for line in lines:
                            line_json = json.loads(line.strip())
                            # import pdb; pdb.set_trace()
                            if(line_json['f1'] == k):
                                temp.append(line.strip())
                    # import pdb; pdb.set_trace()
                    que_id = (file_name[0:4])
                    # if(int(que_id) < 1300):
                    #     result[que_id + '\n' + '\n'.join(temp)] = k
                    if(int(que_id) < 3000):
                        result[que_id + '\n' + '\n'.join(temp)] = k
                    # if(int(que_id) < 3000 and int(que_id) >= 1300):
                    #     result[que_id + '\n' + '\n'.join(temp)] = k
    # print(result)
    print('len(result):', len(result))
    sum_f1 = 0.0
    f = open('./runnings/candgen_CompQ/max_match_result_joe.txt', 'w', encoding = 'utf-8')
    # f = open('./runnings/candgen_CompQ/max_match')
    result_list = sorted(result.items(), key = lambda x:x[0])
    for item in result_list:
        sum_f1 += item[1]
        # if(item[1] == 0.0):
        #     print(item)
        #     import pdb; pdb.set_trace()
        f.write('\n'.join(item[0].split('\n')[0:2]) + '\n')
        f.flush()
    print(sum_f1)
    print(sum_f1 / len(result))
    # import pdb; pdb.set_trace()
def load_compq():
    """Load the CompQ corpus: returns a list of 'question\\n<answers>'
    strings, train split followed by test split."""
    compq_path = './qa-corpus/MulCQA'
    qa = []
    for Tvt in ('train', 'test'):
        fp = '%s/compQ.%s.release' % (compq_path, Tvt)
        print(fp)
        br = open(fp, 'r', encoding='utf-8')
        lines = br.readlines()
        for i, line in enumerate(lines):
            # NOTE(review): str_i is computed but never used
            str_i = str(i).zfill(4)
            q, a_list_str = line.strip().split('\t')
            qa.append(q + '\n' + a_list_str)
    return qa
def load_webq():
    """Load WebQuestions train+test; each returned element is
    'utterance\\n<tab-joined answers>' parsed from the raw targetValue
    s-expression."""
    webq_path = './qa-corpus/web-question'
    qa_list = []
    for Tvt in ('train', 'test'):
        file_name = '%s/data/webquestions.examples.%s.json' % (webq_path, Tvt)
        fp = open(file_name, 'r', encoding='utf-8')
        webq_data = json.load(fp)
        # import pdb; pdb.set_trace()
        for raw_info in webq_data:
            qa = {}
            target_value = []
            ans_line = raw_info['targetValue']
            ans_line = ans_line[7: -2] # remove '(list (' and '))'
            for ans_item in ans_line.split(') ('):
                ans_item = ans_item[12:] # remove 'description '
                # strip surrounding quotes from quoted answers
                if ans_item.startswith('"') and ans_item.endswith('"'):
                    ans_item = ans_item[1: -1]
                target_value.append(ans_item)
            qa['utterance'] = raw_info['utterance']
            qa['targetValue'] = target_value
            qa_list.append(qa['utterance'] + '\n' + '\t'.join(qa['targetValue']))
    # import pdb; pdb.set_trace()
    # each intermediate qa dict looks like: {'utterance': 'what is the name of justin bieber brother?', 'targetValue': ['Jazmyn Bieber', 'Jaxon Bieber']}
    return qa_list
# 获取在候选查询图中找不到正确答案的问句
def CompQ_no_answer():
    """Find CompQ questions whose candidate query graphs contain no
    correct answer (best F1 <= 0) and write them with their gold answers
    to match_no_result_joe_2100.txt."""
    qa = load_compq()
    result = {}
    # init_dir_name = './runnings/candgen_CompQ/20200712_yh/data/'
    init_dir_name = '/data/yhjia/runnings/candgen_CompQ/200418_joe/data/'
    for root, dirs, files in os.walk(init_dir_name):
        print(root, dirs, files)
        for dir_name in dirs:
            file_names = os.listdir(init_dir_name + dir_name)
            for file_name in file_names:
                # import pdb; pdb.set_trace()
                if('_schema' in file_name):
                    f = open(init_dir_name + dir_name + '/' + file_name, 'r', encoding = 'utf-8')
                    lines = f.readlines()
                    # k tracks the best f1 over all candidates
                    k = 0.0
                    temp = []
                    for line in lines:
                        line_json = json.loads(line.strip())
                        # import pdb; pdb.set_trace()
                        if(line_json['f1'] > k):
                            k = line_json['f1']
                    # if(int(file_name[0:4]) == 619):
                    #     import pdb; pdb.set_trace()
                    if k <= 0:
                        que_id = (file_name[0:4])
                        # if(int(que_id) >= 1300):
                        #     break
                        result[que_id] = qa[int(que_id)]
    # import pdb; pdb.set_trace()
    result_list = sorted(result.items(), key=lambda x: x[0])
    f = open('./runnings/candgen_CompQ/match_no_result_joe_2100.txt', 'w', encoding = 'utf-8')
    for item in result_list:
        f.write(item[0] + '\n' + item[1] + '\n')
    print('no answer 个数:', len(result_list))
    f.flush()
# 获取在候选查询图中找不到正确答案的问句
def WebQ_no_answer():
    """Find WebQ questions with no (or nearly no, best F1 <= 0.1) correct
    answer among their candidates and write them, together with their
    linked entities and gold answers, to a report file."""
    qa = load_webq()
    result = {}
    # init_dir_name = '/data2/yhjia/kbqa_sp/runnings/candgen_WebQ/20200712_yh/data/'
    init_dir_name = '/data/yhjia/Question2Cands/runnings/candgen_WebQ/20201102_STAGG_add_answer_type_with_datatype/data/'
    qid2entity = read_qid2entity(init_dir_name)
    for root, dirs, files in os.walk(init_dir_name):
        print(root, dirs, files)
        for dir_name in dirs:
            file_names = os.listdir(init_dir_name + dir_name)
            for file_name in file_names:
                # import pdb; pdb.set_trace()
                if('_schema' in file_name):
                    f = open(init_dir_name + dir_name + '/' + file_name, 'r', encoding = 'utf-8')
                    lines = f.readlines()
                    # k tracks the best f1 over all candidates
                    k = 0.0
                    temp = []
                    for line in lines:
                        line_json = json.loads(line.strip())
                        # import pdb; pdb.set_trace()
                        if(line_json['f1'] > k):
                            k = line_json['f1']
                    if k <= 0.1:
                        que_id = (file_name[0:4])
                        # if(int(que_id) >= 3778):
                        #     break
                        result[que_id] = (qa[int(que_id)], qid2entity[que_id])
    # import pdb; pdb.set_trace()
    result_list = sorted(result.items(), key=lambda x: x[0])
    f = open('./runnings/candgen_WebQ/match_no_result_stagg_add_answer_type_with_datatype.txt', 'w', encoding = 'utf-8')
    for item in result_list:
        f.write(item[0] + '\n' + ' ## '.join(item[1][1]) + '\n' + item[1][0] + '\n')
        f.flush()
    print('no answer 个数:', len(result_list))
if __name__ == "__main__":
    #************** average oracle F1 of query-graph generation on WebQ *******************
    WebQ()
    #************** average oracle F1 of query-graph generation on CompQ *******************
    # CompQ()
    #*********************************************************************
    # CompQ_no_answer()
    # WebQ_no_answer()
992,626 | 00ab38b476d8d6234d9e7eeb8405b07d8adb8272 |
import sys
import os
import math
# CSV file to map
#
def csv_to_map(csv_file, key_index):
    """Parse a comma-separated file into a dict of row-dicts.

    The first line supplies the column names; every later row is stored
    under the (stripped) value of its key_index-th column, mapped to a
    {column_name: cell_value} dict.  Note: naive split(',') -- quoted
    fields containing commas are not supported.
    """
    rows = {}
    with open(csv_file, 'r') as fid:
        header = None
        for line_no, line in enumerate(fid):
            fields = [cell.strip() for cell in line.split(',')]
            if line_no == 0:
                header = fields
            else:
                key = fields[key_index]
                rows[key] = {}
                for pos, cell in enumerate(fields):
                    rows[key][header[pos]] = cell
    return rows
# Join the substrate and pad tables on their shared key (column 0) and
# emit one merged CSV row per pad with derived wire length / bond angle.
# Usage: script.py <substrate.csv> <pads.csv>
sub_csv = csv_to_map(sys.argv[1], 0)
pad_csv = csv_to_map(sys.argv[2], 0)
# Header
print("Die#,Subs#,Net Name,Pad Name,Side,Layer,Length,Bond Angle,Die X,Die Y,Subs X,Subs Y,Width,Height")
for k,pad_v in pad_csv.items():
    sub_v = sub_csv[k]
    sub_x = float(sub_v['Subs X'])
    sub_y = float(sub_v['Subs Y'])
    die_x = float(pad_v['Die X'])
    die_y = float(pad_v['Die Y'])
    # die# and subs#
    row_str = '%s,%s' % (k,k)
    # Net Name
    row_str += ',%s' % sub_v['Net Name']
    # Pad Name
    row_str += ',%s' % pad_v['Pad Name']
    # Side
    row_str += ',%s' % pad_v['Side']
    # Layer
    row_str += ',%s' % pad_v['Layer']
    # Wire Length: Euclidean distance between die and substrate points
    row_str += ',%f' % math.sqrt((sub_x-die_x)**2 + (sub_y-die_y)**2)
    # Bond Angle: degrees from horizontal, folded into the first quadrant
    row_str += ',%f' % math.degrees(math.atan2(math.fabs(sub_y-die_y),math.fabs(sub_x-die_x)))
    # Die X
    row_str += ',%f' % die_x
    # Die Y
    row_str += ',%f' % die_y
    # Sub X
    row_str += ',%f' % sub_x
    # Sub Y
    row_str += ',%f' % sub_y
    # Width
    row_str += ',%s' % pad_v['Width']
    # Height
    row_str += ',%s' % pad_v['Height']
    print(row_str)
992,627 | cd706204eb8aa1ddad9857c8926c573639a2e1a3 | from .netcdf import NetCDFMonitor
from .plot import PlotFunctionMonitor
from .basic import ConstantPrognostic, ConstantDiagnostic, RelaxationPrognostic

# __all__ must list the public *names* as strings.  The previous version put
# the imported objects themselves in the tuple, which makes
# ``from <package> import *`` raise TypeError in Python 3
# (attribute names must be strings).
__all__ = (
    'PlotFunctionMonitor',
    'NetCDFMonitor',
    'ConstantPrognostic', 'ConstantDiagnostic', 'RelaxationPrognostic')
|
992,628 | 3f1d0ba28e2747bc44899ee772c90787bb599f9d |
# Interactive palindrome checker: reads integers below 1,000,000 and reports
# whether the decimal digits read the same forwards and backwards.
while True:
    num = int(input('Digite um numero: '))
    val = str(num)
    # The error message promises "menores que 1000000" (strictly less than),
    # so 1000000 itself must be rejected — the old test used <= by mistake.
    if num < 1000000:
        # `val` is already str(num); no need to convert again.
        if val == val[::-1]:
            print('O numero {} é PALINDROMO'.format(num))
        else:
            print('O numero {} não é PALINDROMO'.format(num))
    else:
        print('Digite numero menores que 1000000')
    # input() already returns str; wrapping it in str() was redundant.
    cont = input("Deseja continuar (S/N)? ").upper()
    if cont != 'S':
        break
|
992,629 | 086ac644dafc38478fa1e3d8178fbecd6090c752 | import argparse
from eval_loss import load_names
from rank_algos import significance, significance_cs01, preprocess_df_granular, preprocess_df, base_name, set_base_name
import numpy as np
MTR_LABEL = 'iwr'
def wins_losses(df, xname, yname, args=None):
    """Count statistically significant wins and losses of *xname* vs *yname*.

    For each dataset the mean raw loss of both algorithms is compared; a
    dataset counts as a win (loss) for *xname* when its mean is lower
    (higher) and the significance test's p-value is below ``args.alpha``.
    Returns ``(n_wins, n_losses)``.
    """
    x_by_ds = df.loc[df.algo == xname].groupby('ds').rawloss.mean()
    y_by_ds = df.loc[df.algo == yname].groupby('ds').rawloss.mean()
    sizes = df.loc[df.algo == xname].groupby('ds').sz.max()
    # Hoeffding-style test vs the default significance test.
    test = significance_cs01 if args.use_hoeffding else significance
    pvals = test(x_by_ds, y_by_ds, sizes)
    significant = pvals < args.alpha
    return (np.sum((x_by_ds < y_by_ds) & significant),
            np.sum((x_by_ds > y_by_ds) & significant))
def print_table(df, alg_names, labels=None, args=None):
    """Print a LaTeX win/loss matrix comparing every pair of algorithms.

    Cell (i, j) holds either "wins / losses" of algorithm i over algorithm j,
    or the win-loss difference when ``args.diff`` is set.
    """
    count = len(alg_names)
    labels = alg_names if labels is None else labels
    cells = [['-'] * count for _ in range(count)]
    # Only the lower triangle is computed; the upper triangle mirrors it.
    for row in range(count):
        for col in range(row):
            wins, losses = wins_losses(df, alg_names[row], alg_names[col], args=args)
            if args.diff:
                cells[row][col] = str(wins - losses)
                cells[col][row] = str(losses - wins)
            else:
                cells[row][col] = '{} / {}'.format(wins, losses)
                cells[col][row] = '{} / {}'.format(losses, wins)
    print(r'\begin{tabular}{ | l |', 'c | ' * count, '}')
    print(r'\hline')
    print(r'$\downarrow$ vs $\rightarrow$ &', ' & '.join(labels), r'\\ \hline')
    for row in range(count):
        print(labels[row], '&', ' & '.join(cells[row]), r'\\ \hline')
    print(r'\end{tabular}')
def print_table_rect(df, alg_names_row, alg_names_col, labels_row=None, labels_col=None, args=None):
    """Print a rectangular LaTeX win/loss table of row algos vs column algos."""
    n_rows, n_cols = len(alg_names_row), len(alg_names_col)
    labels_row = alg_names_row if labels_row is None else labels_row
    labels_col = alg_names_col if labels_col is None else labels_col
    cells = [['-'] * n_cols for _ in range(n_rows)]
    for row in range(n_rows):
        for col in range(n_cols):
            wins, losses = wins_losses(df, alg_names_row[row], alg_names_col[col], args=args)
            if args.diff:
                cells[row][col] = str(wins - losses)
            else:
                cells[row][col] = '{} / {}'.format(wins, losses)
    print(r'\begin{tabular}{ | l |', 'c | ' * n_cols, '}')
    print(r'\hline')
    print(r'$\downarrow$ vs $\rightarrow$ &', ' & '.join(labels_col), r'\\ \hline')
    for row in range(n_rows):
        print(labels_row[row], '&', ' & '.join(cells[row]), r'\\ \hline')
    print(r'\end{tabular}')
def print_enc_table(df, df_big, algs, labels=None, args=None):
    """Print a LaTeX table comparing the -1/0 vs 0/1 loss encodings per algorithm.

    Each entry of *algs* is a format string containing an ``{enc}``
    placeholder; for every algorithm the -1/0 variant is compared against the
    0/1 variant.  Row 1 uses all datasets (*df*), row 2 only the large ones
    (*df_big*).  Cells show the win-loss difference when ``args.diff`` is
    set, otherwise "wins / losses".
    """
    n = len(algs)
    if labels is None:
        # Bug fix: this previously read `labels = alg_names`, an undefined
        # name, so calling without explicit labels raised NameError.
        labels = algs
    table = [['-' for _ in range(n)] for _ in range(2)]
    for row, data in enumerate((df, df_big)):
        for i in range(n):
            wins, losses = wins_losses(data, algs[i].format(enc='neg10'),
                                       algs[i].format(enc='01'), args=args)
            if args.diff:
                table[row][i] = str(wins - losses)
            else:
                table[row][i] = '{} / {}'.format(wins, losses)
    print(r'\begin{tabular}{ | c |', 'c | ' * n, '}')
    print(r'\hline')
    print(r'datasets &', ' & '.join(labels), r'\\ \hline')
    print('all', '&', ' & '.join(table[0]), r'\\ \hline')
    print(r'$\geq$ 10000', '&', ' & '.join(table[1]), r'\\ \hline')
    print(r'\end{tabular}')
def print_loss_table(df, algs, labels=None, args=None, stddev=False):
    """Print a one-row LaTeX table of mean raw losses per algorithm.

    With ``stddev=True`` each algorithm must have exactly 10 runs and the
    cell shows "mean ± std"; otherwise exactly one run is expected and only
    the mean is shown.
    """
    n = len(algs)
    if labels is None:
        # Bug fix: this previously read `labels = alg_names`, an undefined
        # name, so calling without explicit labels raised NameError.
        labels = algs
    table = ['-' for _ in range(n)]
    for i in range(n):
        losses = df[df.algo == algs[i]].rawloss
        if stddev:
            assert losses.shape[0] == 10
            table[i] = '{:.3f} $\\pm$ {:.4f}'.format(losses.mean(), losses.std())
        else:
            assert losses.shape[0] == 1
            table[i] = '{:.3f}'.format(losses.mean())
    print(r'\begin{tabular}{ |', 'c | ' * n, '}')
    print(r'\hline')
    print(' & '.join(labels), r'\\ \hline')
    print(' & '.join(table), r'\\ \hline')
    print(r'\end{tabular}')
def print_loss_table_allnames(df, algs, labels=None, args=None):
    """Print a LaTeX loss table with one row per loss-encoding variant.

    Rows correspond to the encodings 0/1, 0/1+baseline, -1/0, -1/0+baseline;
    the algo entries in *df* must be named ``<alg>:<encoding>`` and hold
    exactly one run each.
    """
    n = len(algs)
    if labels is None:
        # Bug fix: this previously read `labels = alg_names`, an undefined
        # name, so calling without explicit labels raised NameError.
        labels = algs
    table = [['-' for _ in range(n)] for _ in range(4)]
    names = ['01', '01b', 'neg10', 'neg10b']
    col_labels = ['0/1', '0/1+b', '-1/0', '-1/0+b']
    for i in range(4):
        for j in range(n):
            losses = df[df.algo == algs[j] + ':' + names[i]].rawloss
            assert losses.shape[0] == 1
            table[i][j] = '{:.3f}'.format(losses.mean())
    print(r'\begin{tabular}{ | c |', 'c | ' * n, '}')
    print(r'\hline')
    print(r' &', ' & '.join(labels), r'\\ \hline')
    for i in range(4):
        print(col_labels[i], '&', ' & '.join(table[i]), r'\\ \hline')
    print(r'\end{tabular}')
if __name__ == '__main__':
    # CLI driver: loads per-dataset results, applies filters, then prints the
    # LaTeX comparison table(s) requested by the flags below.
    parser = argparse.ArgumentParser(description='barplots')
    parser.add_argument('--all', action='store_true', default=False)
    parser.add_argument('--granular_opt', action='store_true', default=False)
    parser.add_argument('--granular', action='store_true', default=False)
    parser.add_argument('--granular_neg10', action='store_true', default=False)
    parser.add_argument('--granular_01', action='store_true', default=False)
    parser.add_argument('--granular_neg10_bopt', action='store_true', default=False)
    parser.add_argument('--granular_name', action='store_true', default=False)
    parser.add_argument('--bag_vs_greedy', action='store_true', default=False)
    parser.add_argument('--opt', action='store_true', default=False)
    parser.add_argument('--opt_neg10', action='store_true', default=False)
    parser.add_argument('--opt_01', action='store_true', default=False)
    parser.add_argument('--opt_name', action='store_true', default=False)
    parser.add_argument('--opt_algo', action='store_true', default=False)
    parser.add_argument('--comp_enc', action='store_true', default=False)
    parser.add_argument('--sep_cb_type', action='store_true', default=False)
    parser.add_argument('--sep_name', action='store_true', default=False)
    parser.add_argument('--sep_enc', action='store_true', default=False)
    parser.add_argument('--sep_b', action='store_true', default=False)
    parser.add_argument('--short', action='store_true', default=False,
                        help='only show main methods, for main paper')
    parser.add_argument('--skip_ips', action='store_true', default=False)
    parser.add_argument('--algo', default=None)
    parser.add_argument('--name', default=None)
    parser.add_argument('--enc', default=None)
    parser.add_argument('--b', default=None)
    parser.add_argument('--cb_type', default=None)
    parser.add_argument('--granular_ds', default=None)
    parser.add_argument('--granular_ds_name', default=None)
    parser.add_argument('--avg_std_name', action='store_true', default=False)
    parser.add_argument('--alpha', type=float, default=0.05)
    parser.add_argument('--use_cs', action='store_true', default=False)
    parser.add_argument('--use_hoeffding', action='store_true', default=False)
    parser.add_argument('--min_size', type=int, default=None)
    parser.add_argument('--max_size', type=int, default=None)
    parser.add_argument('--min_actions', type=int, default=None)
    parser.add_argument('--max_actions', type=int, default=None)
    parser.add_argument('--min_features', type=int, default=None)
    parser.add_argument('--max_features', type=int, default=None)
    parser.add_argument('--min_refloss', type=float, default=None)
    parser.add_argument('--max_refloss', type=float, default=None)
    parser.add_argument('--diff', action='store_true', default=True)
    parser.add_argument('--nodiff', dest='diff', action='store_false')
    parser.add_argument('--noval', action='store_true', default=False)
    parser.add_argument('--uci', action='store_true', default=False)
    parser.add_argument('--base_name', default='allrandfix')
    args = parser.parse_args()
    set_base_name(args.base_name)
    print((base_name()))
    # Result-file names: one per loss encoding, except the rcv1 avg/std case
    # which only has the 0/1 encoding.
    if args.avg_std_name and args.base_name.startswith('rcv1'):
        names = ['{}01'.format(base_name())]
    else:
        names = ['{}{}'.format(base_name(), name) for name in ['01', '01b', 'neg10', 'neg10b']]
    df = load_names(names, use_cs=args.use_cs)
    # filters
    if args.min_actions is not None:
        df = df[df.na >= args.min_actions]
    if args.max_actions is not None:
        df = df[df.na <= args.max_actions]
    if args.min_features is not None:
        df = df[df.nf >= args.min_features]
    if args.max_features is not None:
        df = df[df.nf <= args.max_features]
    if args.min_size is not None:
        df = df[df.sz >= args.min_size]
    if args.max_size is not None:
        df = df[df.sz <= args.max_size]
    if args.min_refloss is not None:
        df = df[df.refloss >= args.min_refloss]
    if args.max_refloss is not None:
        df = df[df.refloss <= args.max_refloss]
    if args.noval:
        # Drop the held-out validation datasets.
        val_dss = np.load('ds_val_list.npy')
        df = df.loc[df.ds.map(lambda s: s not in val_dss)]
    if args.uci:
        # Restrict to the hand-picked UCI dataset ids.
        uci_dss = ['6', '28', '30', '32', '54', '181', '182', '1590']
        df = df.loc[df.ds.map(lambda s: s in uci_dss)]
    print('num datasets:', len(df.ds.unique()))
    # Per-family best fixed hyperparameter settings, used by the *_name and
    # *_bopt table modes below (only defined for the matching --name/flag).
    if (args.granular_ds_name or args.granular_name or args.avg_std_name) and args.name == 'neg10':
        # best fixed algos, selected on 200 datasets, -1/0 with no baseline
        g_best = 'epsilon:0:mtr'
        r_best = 'regcb:c0:0.001:mtr'
        ro_best = 'regcbopt:c0:0.001:mtr'
        cnu_best = 'cover:4:psi:0.1:nounif:dr'
        cu_best = 'cover:4:psi:0.1:ips'
        bg_best = 'bag:4:greedify:mtr'
        b_best = 'bag:4:mtr'
        eg_best = 'epsilon:0.02:mtr'
        a_best = 'epsilon:0.02:nounifa:c0:1e-06:mtr'
    elif (args.granular_ds_name or args.granular_name or args.avg_std_name) and args.name == '01':
        # best fixed algos, selected on 200 datasets, 0/1 with no baseline
        g_best = 'epsilon:0:mtr'
        r_best = 'regcb:c0:0.001:mtr'
        ro_best = 'regcbopt:c0:0.001:mtr'
        cnu_best = 'cover:4:psi:0.01:nounif:dr'
        cu_best = 'cover:4:psi:0.1:dr'
        bg_best = 'bag:8:greedify:mtr'
        b_best = 'bag:16:mtr'
        eg_best = 'epsilon:0.02:mtr'
        a_best = 'epsilon:0.02:nounifa:c0:1e-06:mtr'
    elif args.granular_neg10_bopt:
        # best fixed algos, selected on 200 datasets, -1/0 with optimized baseline
        g_best = 'epsilon:0:mtr'
        r_best = 'regcb:c0:0.001:mtr'
        ro_best = 'regcbopt:c0:0.001:mtr'
        cnu_best = 'cover:16:psi:0.1:nounif:dr'
        cu_best = 'cover:4:psi:0.1:ips'
        bg_best = 'bag:4:greedify:mtr'
        b_best = 'bag:4:mtr'
        eg_best = 'epsilon:0.02:mtr'
        a_best = 'epsilon:0.02:nounifa:c0:1e-06:mtr'
    # Default cover psi used by every table mode below.
    psi = '0.1'
    if args.granular_opt or args.all:
        df_all = preprocess_df_granular(df, all_algos=True)
        # optimized name
        print('optimized over encoding/baseline')
        algs = ['epsilon:0:mtr', 'epsilon:0:dr', 'cover:16:psi:{}:nounif:dr'.format(psi),
                'bag:16:mtr', 'bag:16:greedify:mtr', 'epsilon:0.02:mtr', 'cover:16:psi:{}:dr'.format(psi), 'epsilon:1:nounifa:c0:1e-06:dr']
        labels = ['G-{}'.format(MTR_LABEL), 'G-dr', 'C-nu',
                  'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels, args=args)
    if args.granular or args.all:
        print()
        print('best fixed encoding/baseline')
        df_all = preprocess_df_granular(df, all_algos=True, sep_name=True)
        if args.short:
            algs = ['epsilon:0:mtr:neg10', 'regcbopt:c0:0.001:mtr:neg10',
                    'cover:4:psi:0.1:nounif:dr:neg10',
                    'bag:4:greedify:mtr:neg10b', 'epsilon:0.02:mtr:neg10']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['epsilon:0:mtr:neg10',
                    'regcb:c0:0.001:mtr:neg10', 'regcbopt:c0:0.001:mtr:neg10b',
                    'cover:16:psi:0.1:nounif:dr:neg10',
                    'bag:4:mtr:neg10', 'bag:4:greedify:mtr:neg10b', 'epsilon:0.02:mtr:neg10',
                    'cover:8:psi:0.1:ips:neg10', 'epsilon:0.02:nounifa:c0:1e-06:mtr:neg10']
            labels = ['G', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        print_table(df_all, algs, labels, args=args)
    if args.granular_neg10:
        print()
        print('fixed -1/0, fixed baseline choice (01 for active)')
        df_all = preprocess_df_granular(df, all_algos=True, sep_name=True)
        algs = ['epsilon:0:mtr:neg10b', 'epsilon:0:dr:neg10b', 'cover:16:psi:{}:nounif:dr:neg10'.format(psi),
                'bag:16:mtr:neg10b', 'bag:16:greedify:mtr:neg10b', 'epsilon:0.02:mtr:neg10',
                'cover:16:psi:{}:dr:neg10'.format(psi), 'epsilon:1:nounifa:c0:1e-06:dr:01']
        labels = ['G-{}'.format(MTR_LABEL), 'G-dr', 'C-nu',
                  'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels, args=args)
    if args.granular_01:
        print()
        print('fixed 0/1, fixed baseline choice')
        df_all = preprocess_df_granular(df, all_algos=True, sep_name=True)
        algs = ['epsilon:0:mtr:01b', 'epsilon:0:dr:01b', 'cover:16:psi:{}:nounif:dr:01'.format(psi),
                'bag:16:mtr:01b', 'bag:16:greedify:mtr:01b', 'epsilon:0.02:mtr:01',
                'cover:16:psi:{}:dr:01'.format(psi), 'epsilon:1:nounifa:c0:1e-06:dr:01']
        labels = ['G-{}'.format(MTR_LABEL), 'G-dr', 'C-nu',
                  'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels, args=args)
    if args.granular_neg10_bopt or args.all:
        print()
        print('fixed -1/0, baseline optimized')
        df_all = preprocess_df_granular(df, all_algos=True, sep_enc=True)
        if args.short:
            algs = [g_best, ro_best, cnu_best, bg_best, eg_best]
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = [g_best, r_best, ro_best, cnu_best,
                    b_best, bg_best, eg_best, cu_best, a_best]
            labels = ['G', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        for i in range(len(algs)):
            algs[i] += ':neg10'
        print_table(df_all, algs, labels, args=args)
    if args.granular_name or args.all:
        print()
        print('fixed name', args.name)
        assert args.name is not None, 'must specify --name'
        name = base_name() + args.name
        df_all = df.loc[df.name == name]
        df_all = preprocess_df_granular(df_all, all_algos=True)
        if args.short:
            algs = [g_best, ro_best, cnu_best, bg_best, eg_best]
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = [g_best, r_best, ro_best, cnu_best,
                    b_best, bg_best, eg_best, cu_best, a_best]
            labels = ['G', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        print_table(df_all, algs, labels, args=args)
    if args.granular_ds:
        print()
        print('fixed ds', args.granular_ds)
        df_all = df.loc[df.ds == args.granular_ds]
        df_all = preprocess_df_granular(df_all, all_algos=True, sep_name=True)
        if args.short:
            algs = ['epsilon:0:mtr', 'regcbopt:c0:0.001:mtr',
                    'cover:16:psi:{}:nounif:dr'.format(psi),
                    'bag:16:greedify:mtr', 'epsilon:0.02:mtr']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['epsilon:0:mtr', 'epsilon:0:dr',
                    'regcb:c0:0.001:mtr', 'regcbopt:c0:0.001:mtr',
                    'cover:16:psi:{}:nounif:dr'.format(psi),
                    'bag:16:mtr', 'bag:16:greedify:mtr', 'epsilon:0.02:mtr',
                    # 'cover:1:psi:{}:mtr'.format(psi),
                    'cover:16:psi:{}:dr'.format(psi), 'epsilon:0.02:nounifa:c0:1e-06:mtr']
            labels = ['G-{}'.format(MTR_LABEL), 'G-dr', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        print_loss_table_allnames(df_all, algs, labels, args=args)
    if args.granular_ds_name:
        print()
        print('fixed ds', args.granular_ds, 'name', args.name)
        assert args.name is not None, 'must specify --name'
        name = base_name() + args.name
        df_all = df.loc[df.name == name]
        df_all = df_all.loc[df_all.ds == args.granular_ds_name]
        df_all = preprocess_df_granular(df_all, all_algos=True)
        if args.short:
            algs = [g_best, ro_best, cnu_best, bg_best, eg_best]
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = [g_best, r_best, ro_best, cnu_best,
                    b_best, bg_best, eg_best, cu_best, a_best]
            labels = ['G', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        print_loss_table(df_all, algs, labels, args=args)
    if args.avg_std_name:
        print()
        print('mean +- std, fixed name', args.name)
        assert args.name is not None, 'must specify --name'
        name = base_name() + args.name
        df_all = df.loc[df.name == name]
        df_all = preprocess_df_granular(df_all, all_algos=True)
        if args.short:
            algs = [g_best, ro_best, cnu_best, bg_best, eg_best]
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = [g_best, r_best, ro_best, cnu_best,
                    b_best, bg_best, eg_best, cu_best, a_best]
            labels = ['G', 'R', 'RO',
                      'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A'] # 'e-d'
        print_loss_table(df_all, algs, labels, args=args, stddev=True)
    if args.bag_vs_greedy or args.all:
        print()
        print('bag/bag-g vs greedy')
        df_all = preprocess_df_granular(df, all_algos=True, sep_name=True)
        algs_row = ['epsilon:0:mtr:neg10b', 'epsilon:0:dr:neg10b']
        labels_row = ['G-{}'.format(MTR_LABEL), 'G-dr']
        bag_algs = ['bag:{}:mtr:01b', 'bag:{}:greedify:mtr:01b',
                    'bag:{}:mtr:neg10b', 'bag:{}:greedify:mtr:neg10b']
        bag_labels = ['{}', '{}-g']
        print('0/1 + b')
        algs_col = [x.format(s) for s in ['4', '8', '16'] for x in bag_algs[:2]]
        labels_col = [x.format(s) for s in ['4', '8', '16'] for x in bag_labels]
        print_table_rect(df_all, algs_row, algs_col, labels_row, labels_col, args=args)
        print_table(df_all, algs_col[:2], labels_col[:2], args=args)
        print_table(df_all, algs_col[2:4], labels_col[2:4], args=args)
        print_table(df_all, algs_col[4:], labels_col[4:], args=args)
        print('-1/0 + b')
        algs_col = [x.format(s) for s in ['4', '8', '16'] for x in bag_algs[2:]]
        print_table_rect(df_all, algs_row, algs_col, labels_row, labels_col, args=args)
        print_table(df_all, algs_col[:2], labels_col[:2], args=args)
        print_table(df_all, algs_col[2:4], labels_col[2:4], args=args)
        print_table(df_all, algs_col[4:], labels_col[4:], args=args)
    if args.opt or args.all:
        print()
        print('optimize hyperparams, encoding/baseline')
        df_all = preprocess_df(df)
        if args.short:
            algs = ['greedy', 'regcbopt', 'cover_nounif', 'bag_greedy', 'e_greedy']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['greedy', 'regcb', 'regcbopt', 'cover_nounif',
                    'bag', 'bag_greedy', 'e_greedy',
                    'cover', 'e_greedy_active']
            labels = ['G', 'R', 'RO', 'C-nu',
                      'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels=labels, args=args)
    if args.opt_neg10 or args.all:
        print()
        print('optimize hyperparams, encoding/baseline')
        # df_all = df.loc[df.cb_type != 'ips']
        df_all = preprocess_df(df, sep_enc=True, sep_b=(args.b is not None))
        # df_all = df_all.loc[(df_all.algo != 'greedy:neg10') | (df_all.cb_type != 'ips')]
        if args.short:
            algs = ['greedy', 'regcbopt', 'cover_nounif', 'bag_greedy', 'e_greedy']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['greedy', 'regcb', 'regcbopt', 'cover_nounif',
                    'bag', 'bag_greedy', 'e_greedy',
                    'cover', 'e_greedy_active:01']
            labels = ['G', 'R', 'RO', 'C-nu',
                      'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        for i in range(len(algs)):
            algs[i] += ':neg10'
            if args.b is not None:
                algs[i] += ':' + args.b
        print_table(df_all, algs, labels=labels, args=args)
    if args.opt_01 or args.all:
        print()
        print('optimize hyperparams, encoding/baseline')
        # df_all = df.loc[df.cb_type != 'ips']
        df_all = preprocess_df(df, sep_enc=True, sep_b=(args.b is not None))
        # df_all = df_all.loc[(df_all.algo != 'greedy:neg10') | (df_all.cb_type != 'ips')]
        if args.short:
            algs = ['greedy', 'regcbopt', 'cover_nounif', 'bag_greedy', 'e_greedy']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['greedy', 'regcb', 'regcbopt', 'cover_nounif',
                    'bag', 'bag_greedy', 'e_greedy',
                    'cover', 'e_greedy_active:01']
            labels = ['G', 'R', 'RO', 'C-nu',
                      'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        for i in range(len(algs)):
            algs[i] += ':01'
            if args.b is not None:
                algs[i] += ':' + args.b
        print_table(df_all, algs, labels=labels, args=args)
    if args.opt_name or args.all:
        print()
        print('optimize hyperparams', 'fixed name', args.name)
        assert args.name is not None, 'must specify --name'
        name = base_name() + args.name
        df_all = df.loc[df.name == name]
        # df_all = df_all.loc[df_all.cb_type != 'ips']
        df_all = preprocess_df(df_all)
        # df_all = df_all.loc[(df.algo != 'greedy') | (df.cb_type != 'ips')]
        if args.short:
            algs = ['greedy', 'regcbopt', 'cover_nounif', 'bag_greedy', 'e_greedy']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['greedy', 'regcb', 'regcbopt', 'cover_nounif',
                    'bag', 'bag_greedy', 'e_greedy',
                    'cover', 'e_greedy_active:01']
            labels = ['G', 'R', 'RO', 'C-nu',
                      'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels=labels, args=args)
    if False: # args.opt_01 or args.all:
        # Disabled legacy variant of the 0/1 comparison table.
        print()
        print('optimize hyperparams, encoding/baseline')
        df_all = preprocess_df(df, sep_enc=True)
        algs = ['greedy', 'cover_nounif',
                'bag', 'bag_greedy', 'e_greedy',
                'cover', 'e_greedy_active']
        for i in range(len(algs)):
            algs[i] += ':01'
        labels = ['G', 'C-nu',
                  'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_table(df_all, algs, labels=labels, args=args)
    if args.opt_algo or args.all:
        print()
        print('optimize hyperparams, fixed encoding/baseline')
        df_all = preprocess_df(df, sep_name=args.sep_name or args.name,
                               sep_b=args.sep_b or args.b,
                               sep_enc=args.sep_enc or args.enc,
                               sep_reduction=args.sep_cb_type or args.cb_type)
        algo = args.algo
        algs = [algo]
        labels = None
        # Expand the single algo into one column per requested dimension
        # (cb_type, name, encoding, baseline) in that order.
        if args.sep_cb_type or args.cb_type:
            cb_types = [args.cb_type] if args.cb_type else ['ips', 'dr', 'mtr']
            if args.skip_ips:
                cb_types.remove('ips')
            algs = [a + ':' + red for a in algs for red in cb_types]
            if args.sep_cb_type:
                labels = [s.replace('mtr', MTR_LABEL) for s in cb_types]
        if args.sep_name or args.name:
            names = [args.name] if args.name else ['01', '01b', 'neg10', 'neg10b']
            algs = [a + ':' + name for a in algs for name in names]
            if args.sep_name:
                labels = [s.replace('neg', '-') for s in names]
        if args.sep_enc or args.enc:
            encs = [args.enc] if args.enc else ['01', 'neg10']
            algs = [a + ':' + enc for a in algs for enc in encs]
            if args.sep_enc:
                labels = [s.replace('neg', '-') for s in encs]
        if args.sep_b or args.b:
            bs = [args.b] if args.b else ['b', 'nb']
            algs = [a + ':' + b for a in algs for b in bs]
            if args.sep_b:
                labels = bs
        print_table(df_all, algs, labels=labels, args=args)
    if args.comp_enc:
        print()
        print('compare encodings, no baseline')
        assert args.min_size is None
        df_big = load_names(names, min_actions=args.min_actions, min_size=10000, use_cs=args.use_cs)
        df_all = preprocess_df(df, sep_b=True, sep_enc=True, sep_reduction=True)
        df_all_big = preprocess_df(df_big, sep_b=True, sep_enc=True, sep_reduction=True)
        if args.short:
            algs = ['greedy:mtr:{enc}:nb', 'regcbopt:mtr:{enc}:nb',
                    'cover_nounif:dr:{enc}:nb', 'bag_greedy:mtr:{enc}:nb',
                    'e_greedy:mtr:{enc}:nb']
            labels = ['G', 'RO', 'C-nu', 'B-g', r'$\epsilon$G']
        else:
            algs = ['greedy:mtr:{enc}:nb', 'greedy:dr:{enc}:nb', 'regcb:mtr:{enc}:nb', 'regcbopt:mtr:{enc}:nb',
                    'cover_nounif:dr:{enc}:nb', 'bag:mtr:{enc}:nb', 'bag_greedy:mtr:{enc}:nb',
                    'e_greedy:mtr:{enc}:nb', 'cover:dr:{enc}:nb', 'e_greedy_active:mtr:{enc}:nb']
            labels = ['G-iwr', 'G-dr', 'R', 'RO', 'C-nu', 'B', 'B-g', r'$\epsilon$G', 'C-u', 'A']
        print_enc_table(df_all, df_all_big, algs, labels, args=args)
|
992,630 | 9eb5b0a6d577c8a83334acc292eed12d11c2fe38 | from openpyxl import Workbook
import time
import pandas as pd
import operator
from alphaseekerclass import *
# Workbook that collects the screening results; saved to SAVEBOOK at the end.
book = Workbook()
sheet = book.active
SAVEBOOK = "idk.xlsx"
# "1" selects interactive single-ticker mode; anything else scans a list.
SPECIFICSTOCK = input("do you want to look at specific stocks (1) or a huge list of stocks (2): ")
if(SPECIFICSTOCK != "1"):
    # Debug mode swaps the full NASDAQ list for a tiny hard-coded one.
    debug = input("do you want to use debug mode (recommended for testing the program unless you want to wait hours) Y/N: ")
def chooseStock(row):
    """Prompt for one ticker, run its prediction, and write it to the sheet.

    Returns ``(done, next_row)``: *done* is True once the user enters "1";
    *next_row* is ``row - 1`` when the prediction failed, so the caller's
    subsequent ``row += 1`` leaves the sheet row unchanged.
    """
    symbol = input("enter a stock press (1) if completed ")
    if(symbol == "1"):
        # Sentinel "1" ends the interactive loop.
        return True, row
    else:
        fullInfo = Stock(symbol)
        chooseStockSingle, y_predict, currentReturn = fullInfo.get_predicted()
        # get_predicted() signals failure by returning a bool in the first
        # slot (see the same check in parserFunction).
        if(isinstance(chooseStockSingle,bool) == False):
            excelWrite(symbol, float(chooseStockSingle),float(y_predict), float(currentReturn), row)
            return False, row
        else:
            return False, row - 1
def notddos(overHeat):
    """Crude request throttle to avoid hammering the data provider.

    Below 25 consecutive requests the counter is simply incremented; at 25
    the function sleeps until the next 3-minute wall-clock boundary and
    resets the counter to 0.  Returns the updated counter.
    """
    if overHeat < 25:
        return overHeat + 1
    print("overheat check")
    # -time.time() % 180 is the number of seconds until the next multiple
    # of 180 s of wall-clock time.
    time.sleep(-time.time() % 180)
    return 0
def parserFunction(row,stockList):
    """Run predictions for every ticker in *stockList*, writing results to the sheet.

    *row* is the first sheet row to write; it advances only when a ticker
    produces a usable prediction.  Failed tickers (a bool in the first slot
    of get_predicted()'s result) are skipped.  The commented-out code below
    is an earlier retry/throttle scaffold kept for reference.
    """
    errorCount = 0
    overHeat = 0
    howmany = len(stockList)
    for symbol in stockList:
        print("-----------------on %s, %d to go------------------" %(symbol,howmany))
        fullInfo = Stock(symbol)
        #overHeat = notddos(overHeat)
        #try:
        chooseStockSingle, y_predict, currentReturn = fullInfo.get_predicted()
        errorCount = 0
        if(isinstance(chooseStockSingle,bool) == False):
            excelWrite(symbol, float(chooseStockSingle),float(y_predict), float(currentReturn), row)
            row += 1
        else:
            pass
        # except:
        #     print("--------------ERROR 404--------------------")
        #     errorCount += 1
        #     if(errorCount > 5):
        #         time.sleep(-time.time()%75)
        #         try:
        #             chooseStockSingle, y_predict, currentReturn = fullInfo.get_predicted()
        #             errorCount = 0
        #             if(isinstance(chooseStockSingle,bool) == False):
        #                 excelWrite(symbol, float(chooseStockSingle),float(y_predict), float(currentReturn), row)
        #                 row += 1
        #                 print(symbol, "good")
        #             else:
        #                 pass
        #         except:
        #             print(symbol, "pass")
        #             pass
        howmany = howmany - 1
def excelToTicker():
    """Load ticker symbols from the NASDAQ screener workbook.

    "^" is replaced with "-" in symbols (share-class separator format),
    and boolean cells are filtered out.
    """
    workbook = pd.read_excel('nasdaq_screener_1619061711441.xlsx')
    #stockListDirty = workbook["Symbol"].values
    stockList = [x.replace("^","-") for x in workbook["Symbol"].values if isinstance(x,bool) == False]
    return stockList
def excelWrite(symbol, getPred,y_predict,currentReturn, row):
    """Write one result row into the module-level worksheet.

    Columns: 1 = symbol, 2 = prediction, 3 = y_predict, 4 = current return.
    """
    values = (symbol, getPred, y_predict, currentReturn)
    for column, value in enumerate(values, start=1):
        sheet.cell(row, column).value = value
def writeExcelAxis():
    """Write the header titles into row 1 of the module-level worksheet."""
    headers = ((2, "Percent return"), (1, "Symbol"), (3, "y_predict"), (4, "Current Price"))
    for column, title in headers:
        sheet.cell(1, column).value = title
def orderString(n):
    """Return *n* as a string with its English ordinal suffix (1st, 2nd, 11th...)."""
    # 4th-20th (and 104th-120th, ...) always take "th", overriding the
    # last-digit rule that gives 1st/2nd/3rd.
    if 4 <= n % 100 <= 20:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return str(n) + suffix
def analyzeExcel():
    """Load the saved results workbook and print the top entries by percent return.

    Prints up to 21 (symbol, gain) pairs, ranked best-first, using
    orderString() for the rank label.
    """
    results = pd.read_excel(SAVEBOOK)
    gains = dict(zip(results["Symbol"].values, results["Percent return"].values))
    ranked = sorted(gains.items(), key=lambda item: item[1], reverse=True)
    for position, entry in enumerate(ranked, start=1):
        print("the %s gain is %s" % (orderString(position), entry))
        if position > 20:
            break
def main():
    """Drive the screener: write headers, collect predictions, save, summarize."""
    writeExcelAxis()
    row = 2
    if(SPECIFICSTOCK == "1"):
        # Interactive mode: prompt for tickers until the user enters "1".
        correct = False
        while not correct:
            correct, row = chooseStock(row)
            row += 1
    else:
        # Batch mode: full NASDAQ list, or a tiny fixed list in debug mode.
        if(debug == "N"):
            stockList = excelToTicker()
        else:
            stockList = ["GME","ILMN","AAPL","RBLX"]
        parserFunction(2,stockList)
    book.save(SAVEBOOK)
    analyzeExcel()
main() |
992,631 | 696af1dc104b0d6346b038127cf4e2e893c8358b | import cv2
import numpy as np
import time
from sklearn.cluster import KMeans
def give_shape(cap, arena, w_pos, r):
    """Grab a frame, crop to arena cell *w_pos*, and classify the colored shape in it.

    Shape codes: 1 = red circle, 2 = red square, 3 = yellow circle,
    4 = yellow square, 0 = nothing detected.  Yellow is checked second, so a
    yellow detection overrides a red one (original behavior).

    cap    -- cv2.VideoCapture to read a frame from
    arena  -- unused here (kept for interface compatibility)
    w_pos  -- linear cell index into the assumed 5x5 arena grid
    r      -- arena ROI as (x, y, w, h), as returned by cv2.selectROI
    """
    ret, frame = cap.read()
    cv2.imwrite("new_a.jpg", frame)
    # Crop to the arena ROI.
    frame = frame[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
    shape = frame.shape
    print(shape)
    # Linear index -> 5x5 grid coordinates.
    y = int(w_pos / 5)
    x = w_pos % 5
    print(y, x)
    nr = [(shape[0]/5) * x, (shape[1]/5) * y, (shape[0]/5) * (x+1), (shape[1]/5) * (y+1)]
    print(nr)
    frame = frame[int(nr[1]):int(nr[3]), int(nr[0]):int(nr[2])]
    # Color-quantize the cell with k-means so the inRange thresholds are stable.
    img_size = frame.shape
    X = frame.reshape(img_size[0] * img_size[1], img_size[2])
    km = KMeans(n_clusters=12)
    km.fit(X)
    X_compressed = km.cluster_centers_[km.labels_]
    X_compressed = np.clip(X_compressed.astype('uint8'), 0, 255)
    new_img = X_compressed.reshape(img_size[0], img_size[1], img_size[2])
    red_range = np.load("Red_Range.npy")
    yellow_range = np.load("Yellow_Range.npy")
    # Bug fix: `num` used to stay unbound (NameError) when no contour passed
    # the area filter for either color; 0 now explicitly means "no shape".
    num = 0
    num = _detect_shape(new_img, red_range, 1, 2, num)
    num = _detect_shape(new_img, yellow_range, 3, 4, num)
    print(num)
    return num


def _detect_shape(new_img, color_range, circle_code, square_code, num):
    """Threshold *new_img* with *color_range* and classify the largest-ish blob.

    Returns *circle_code* or *square_code* for the last sufficiently large
    contour found (extent < 0.8 means circle), or *num* unchanged when no
    contour qualifies.  Shows the eroded mask for debugging and waits for a
    keypress, exactly as the original inline code did.
    """
    maskBGR = cv2.inRange(new_img, color_range[0], color_range[1])
    kernel = np.ones((5, 5), np.uint8)
    maskBGR = cv2.erode(maskBGR, kernel, iterations=1)
    cv2.imshow("kernel", maskBGR)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    contours, hierarchy = cv2.findContours(maskBGR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 100:
            x, y, w, h = cv2.boundingRect(cnt)
            extent = float(area) / (w * h)
            # Circles fill their bounding box less completely than squares.
            num = circle_code if extent < 0.8 else square_code
    return num
|
992,632 | f77c45e86086453ca41b31e0efdd6d7ac238c2fb | # -*- coding: utf-8 -*-
# django-read-only-admin
# tests/templatetags/test_read_only_admin_tags.py
from typing import List
from django.test import TestCase
from django.http import HttpRequest
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.template import Context, RequestContext
from read_only_admin.conf import settings
from read_only_admin.templatetags.read_only_admin_tags import (
unescape,
readonly_submit_row,
)
__all__: List[str] = [
"UnescapeTemplatetagTest",
"ReadonlySubmitRowTemplatetagTest",
]
User = get_user_model()
class UnescapeTemplatetagTest(TestCase):
    """Unescape templatetag tests.

    NOTE(review): the "escaped" fixtures below must contain HTML entities
    (&lt;, &gt;, &amp;, &quot;, &#x27;) — an earlier copy of this file had the
    entities themselves unescaped, which made every test vacuous and left an
    unterminated string literal in the double-quote test.  Entities follow
    ``django.utils.html.escape`` output; confirm against the installed
    Django version (older releases emitted &#39; for the single quote).
    """

    def test_unescape(self) -> None:
        """Test templatetag."""
        escaped: str = """&lt;script type=&quot;text/javascript&quot;&gt;alert(&#x27;PWND &amp; HACKD!!1&#x27;)&lt;/script&gt;"""  # noqa: E501
        unescaped: str = """<script type="text/javascript">alert('PWND & HACKD!!1')</script>"""  # noqa: E501
        self.assertEqual(first=unescape(value=escaped), second=unescaped)

    def test_unescape__single_quote(self) -> None:
        """Test templatetag for single quote char."""
        escaped: str = "&#x27;"
        unescaped: str = "'"
        self.assertEqual(first=unescape(value=escaped), second=unescaped)

    def test_unescape__double_quote(self) -> None:
        """Test templatetag for double quote char."""
        escaped: str = "&quot;"
        unescaped: str = '"'
        self.assertEqual(first=unescape(value=escaped), second=unescaped)

    def test_unescape__less_than(self) -> None:
        """Test templatetag for less than char."""
        escaped: str = "&lt;"
        unescaped: str = "<"
        self.assertEqual(first=unescape(value=escaped), second=unescaped)

    def test_unescape__great_than(self) -> None:
        """Test templatetag for great than char."""
        escaped: str = "&gt;"
        unescaped: str = ">"
        self.assertEqual(first=unescape(value=escaped), second=unescaped)

    def test_unescape__ampersand(self) -> None:
        """Test templatetag for ampersand char."""
        escaped: str = "&amp;"
        unescaped: str = "&"
        self.assertEqual(first=unescape(value=escaped), second=unescaped)
class ReadonlySubmitRowTemplatetagTest(TestCase):
"""Read only submit row templatetag tests."""
@classmethod
def setUpTestData(cls) -> None:
    """Set up non-modified objects used by all test methods.

    Creates a single staff user and grants it every permission in the
    database, including the read-only permissions this package installs.
    """
    user = User.objects.create(
        username="test",
        email="test@example.com",
        password=User.objects.make_random_password(),
        is_staff=True,
    )
    # Grant all permissions so the read-only ones are present by default.
    user.user_permissions.add(*list(Permission.objects.all()))
    user.save()
def test_readonly_submit_row__return_context(self) -> None:
    """Test templatetag return context.

    Only checks the return type: readonly_submit_row must hand back a
    template Context built from the admin submit-row context.
    """
    user = User.objects.first()
    request: HttpRequest = HttpRequest()
    request.user = user  # type: ignore
    # Minimal admin change-form submit-row context.
    context: RequestContext = RequestContext(
        request=request,
        dict_={
            "user": user,
            "add": True,
            "change": True,
            "is_popup": False,
            "save_as": True,
            "has_add_permission": True,
            "has_change_permission": True,
            "has_view_permission": True,
            "has_editable_inline_admin_formsets": False,
            "has_delete_permission": True,
            "opts": "auth.user",
            "request": request,
        },
    )
    result: Context = readonly_submit_row(context=context)
    self.assertIsInstance(obj=result, cls=Context)
def test_readonly_submit_row(self) -> None:
    """Test templatetag.

    The fixture user is staff (not superuser) and holds the read-only
    permissions, so every submit-row button must be hidden.
    """
    user = User.objects.first()
    request: HttpRequest = HttpRequest()
    request.user = user  # type: ignore
    context: RequestContext = RequestContext(
        request=request,
        dict_={
            "user": user,
            "add": True,
            "change": True,
            "is_popup": False,
            "save_as": True,
            "has_add_permission": True,
            "has_change_permission": True,
            "has_view_permission": True,
            "has_editable_inline_admin_formsets": False,
            "has_delete_permission": True,
            "opts": "auth.user",
            "request": request,
        },
    )
    result: Context = readonly_submit_row(context=context)
    self.assertFalse(expr=result["show_delete_link"])
    self.assertFalse(expr=result["show_save_and_add_another"])
    self.assertFalse(expr=result["show_save_and_continue"])
    self.assertFalse(expr=result["show_save"])
def test_readonly_submit_row__for_superuser(self) -> None:
"""Test templatetag for superuser."""
user = User.objects.first()
user.is_superuser = True # type: ignore
user.save(update_fields=["is_superuser"]) # type: ignore
request: HttpRequest = HttpRequest()
request.user = user # type: ignore
context: RequestContext = RequestContext(
request=request,
dict_={
"user": user,
"add": True,
"change": True,
"is_popup": False,
"save_as": True,
"has_add_permission": True,
"has_change_permission": True,
"has_view_permission": True,
"has_editable_inline_admin_formsets": False,
"has_delete_permission": True,
"opts": "auth.user",
"request": request,
},
)
result: Context = readonly_submit_row(context=context)
self.assertTrue(expr=result["show_delete_link"])
self.assertTrue(expr=result["show_save_and_add_another"])
self.assertTrue(expr=result["show_save_and_continue"])
self.assertTrue(expr=result["show_save"])
def test_readonly_submit_row__without__read_only_permissions(self) -> None:
"""Test templatetag without read only permissions."""
Permission.objects.filter(
codename__startswith=settings.READ_ONLY_ADMIN_PERMISSION_PREFIX
).delete()
user = User.objects.first()
request: HttpRequest = HttpRequest()
request.user = user # type: ignore
context: RequestContext = RequestContext(
request=request,
dict_={
"user": user,
"add": True,
"change": True,
"is_popup": False,
"save_as": True,
"has_add_permission": True,
"has_change_permission": True,
"has_view_permission": True,
"has_editable_inline_admin_formsets": False,
"has_delete_permission": True,
"opts": "auth.user",
"request": request,
},
)
result: Context = readonly_submit_row(context=context)
self.assertTrue(expr=result["show_delete_link"])
self.assertTrue(expr=result["show_save_and_add_another"])
self.assertTrue(expr=result["show_save_and_continue"])
self.assertTrue(expr=result["show_save"])
def test_readonly_submit_row__without__read_only_permissions__for_superuser(
self,
) -> None:
"""Test templatetag without read only permissions for superuser."""
user = User.objects.first()
user.is_superuser = True # type: ignore
user.save(update_fields=["is_superuser"]) # type: ignore
request: HttpRequest = HttpRequest()
request.user = user # type: ignore
context: RequestContext = RequestContext(
request=request,
dict_={
"user": user,
"add": True,
"change": True,
"is_popup": False,
"save_as": True,
"has_add_permission": True,
"has_change_permission": True,
"has_view_permission": True,
"has_editable_inline_admin_formsets": False,
"has_delete_permission": True,
"opts": "auth.user",
"request": request,
},
)
result: Context = readonly_submit_row(context=context)
self.assertTrue(expr=result["show_delete_link"])
self.assertTrue(expr=result["show_save_and_add_another"])
self.assertTrue(expr=result["show_save_and_continue"])
self.assertTrue(expr=result["show_save"])
|
992,633 | a0ddbe98f3818573f1f17734873e5ce25cc7d4aa | """
Script illustrating the following points in graph-tool
1- Graph generator
2- Graph view
3- Intervative drawing of the graph
"""
import matplotlib.pyplot as plt
from graph_tool.all import *
import numpy as np;
from gi.repository import Gtk,Gdk,GdkPixbuf,GObject;
plt.switch_backend('GTK3Cairo')
def coordinate_in_lattice(v, n, m):
    """
    Compute the coordinate of vertex v in a lattice of dimension (n, m).

    Vertices are assumed to be numbered row-major with n entries per row.

    :param v: number of the vertex (non-negative int)
    :param n: rows size
    :param m: columns size (unused; kept for interface compatibility)
    :return: tuple (x, y) with the coordinate
    """
    # divmod(v, n) == (v // n, v % n) == (y, x)
    y, x = divmod(v, n)
    return (x, y)
def update_state():
    """GTK idle callback: remove one random edge from the global graph.

    Returns True to stay scheduled; returns False (which deregisters the
    idle handler) once the lattice has split into >= 10 connected
    components.
    """
    global g
    global win
    # NOTE(review): `comp` is declared global but never used in this
    # function (the local result is named `compo`). TODO confirm.
    global comp
    "choose a random edge"
    edges=list(g.edges())
    compo,hist=label_components(g)
    # Stop once the graph has fragmented into 10 or more components.
    if(len(np.unique(compo.a))>=10):
        return False
    edge=edges[np.random.randint(0,g.num_edges())];
    #removing the edge
    g.remove_edge(edge)
    # Force the interactive window to redraw with the edge removed.
    win.graph.regenerate_surface();
    win.graph.queue_draw();
    # Brief pause so the removal is visible frame by frame.
    plt.pause(0.5)
    return True
if __name__ == '__main__':
    #generating the graph
    # Lattice dimensions and drawing spacing. `g` and `win` are module
    # globals deliberately: update_state() (the idle callback) reads them.
    n,m=20,20;
    dx,dy=0.5,0.5
    print("graph generation")
    g=lattice([n,m])
    # Vertex positions laid out on a regular grid.
    pos=g.new_vertex_property("vector<double>")
    for v in g.vertices():
        x,y=coordinate_in_lattice(int(v),n,m)
        pos[v]=np.array([dx*x,dy*y])
    compo,hist=label_components(g)
    win=GraphWindow(g,pos=pos,geometry=(500,500))
    # Schedule update_state() to run whenever the GTK main loop is idle.
    cid=GObject.idle_add(update_state)
    win.connect("delete_event",Gtk.main_quit)
    win.show_all()
    Gtk.main()
|
def num(a, b):
    """Return the sum of a and b."""
    return a + b
# Exercise the function at import time and print the result.
ret = num(1,2)
print(ret)
|
992,635 | bf1b2e202b1ab4f0ed7f5540eb5b5443c4b6454e | import numpy as np
from scipy.spatial.distance import cdist
def probability(J, nc, pos, vel):
amins = np.argsort(cdist(pos, pos) + 1e3 * np.eye(len(pos)), axis=1)[:,:nc]
esum = np.exp(J / 2 * np.sum(np.dot(vel, np.swapaxes(vel[amins], 1,2)), axis=1))
return esum / np.sum(esum)
def entropy(J, nc, pos, vel):
    """Shannon entropy of the neighbour-alignment probability distribution."""
    p = probability(J, nc, pos, vel)
    return -np.sum(p * np.log(p))
def fisher(J, nc, pos, vel, h=0.0025):
    """Fisher-information-style quantity: squared sensitivity of the
    probabilities to the coupling J, via a central finite difference of
    step h.

    The original also computed probability(J, ...) into an unused local;
    that dead (and expensive) computation has been removed.
    """
    dprob = (probability(J + h, nc, pos, vel)
             - probability(J - h, nc, pos, vel)) / (2.0 * h)
    return np.sum(np.square(dprob))
992,636 | 67ff3be7bb3af26dacde649fbeeb1dd4482d0c3b | import unittest
from fileconversions.helpers import mimetype
class TestMimetypes(unittest.TestCase):
    """Extension -> MIME type tests for fileconversions.helpers.mimetype.

    Fixes over the original: the last three methods were named ``text_*``
    instead of ``test_*`` so unittest never collected or ran them, and the
    deprecated ``assertEquals`` alias is replaced by ``assertEqual``.
    """

    def test_pdf_mimetype(self):
        self.assertEqual(mimetype('hello.pdf'), 'application/pdf')

    def test_jpeg_mimetype(self):
        # Both spellings of the extension map to the same type.
        self.assertEqual(mimetype('hello.jpeg'), 'image/jpeg')
        self.assertEqual(mimetype('hello.jpg'), 'image/jpeg')

    def test_png_mimetype(self):
        self.assertEqual(mimetype('hello.png'), 'image/png')

    def test_gif_mimetype(self):
        self.assertEqual(mimetype('hello.gif'), 'image/gif')

    def test_tiff_mimetype(self):
        self.assertEqual(mimetype('hello.tiff'), 'image/tiff')

    def test_text_mimetype(self):
        self.assertEqual(mimetype('hello.txt'), 'text/plain')

    def test_docx_mimetype(self):
        self.assertEqual(
            mimetype('hello.docx'),
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        )

    def test_doc_mimetype(self):
        self.assertEqual(mimetype('hello.doc'), 'application/msword')

    def test_pptx_mimetype(self):
        self.assertEqual(
            mimetype('hello.pptx'),
            'application/vnd.openxmlformats-officedocument.presentationml.presentation'
        )

    def test_ppt_mimetype(self):
        # Renamed from text_ppt_mimetype (typo meant it never ran).
        self.assertEqual(mimetype('hello.ppt'), 'application/vnd.ms-powerpoint')

    def test_odt_mimetype(self):
        # Renamed from text_odt_mimetype (typo meant it never ran).
        self.assertEqual(
            mimetype('hello.odt'),
            'application/vnd.oasis.opendocument.text'
        )

    def test_rtf_mimetype(self):
        # Renamed from text_rtf_mimetype (typo meant it never ran).
        self.assertEqual(mimetype('hello.rtf'), 'application/rtf')
|
992,637 | 0d137592eb75516f59b2bd0cd908b5ced3dc20b1 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 17 16:49:43 2017
@author: xat
"""
from util import read_file
class RuleDetector:
    """Rule-based bigram feature extractor for POS-tagged sentences.

    A sentence is a list of (word, tag, stem) triples. Adjacent pairs whose
    tags match the transition table below become features, reported both in
    original-word and stemmed form.
    """

    # Tag transition table: a pair (curr, next) is a feature bigram when
    # next's tag is in the set for curr's tag. Replaces the original
    # repeated if/elif ladder; the accepted pairs are identical.
    _VALID_NEXT = {
        'N': {'A', 'V'},
        'A': {'A', 'N', 'V'},
        'D': {'A', 'V'},
        'E': {'E'},
    }

    def __init__(self, stop_words='../data/stop_words.txt'):
        """
        :param stop_words: path to a UTF-8 stop-word list, one word per line
        """
        self.stop_words = self.read_stop_words(stop_words)

    def get_line_feature(self, line):
        """Return (origin_bigrams, stem_bigrams) for one tagged sentence.

        :param line: list of (word, tag, stem) triples
        :return: two parallel lists of (left, right) word pairs
        """
        if len(line) < 2:
            return [], []
        origin_line = []
        stem_line = []
        # Slide over adjacent pairs and keep those allowed by the table.
        for curr_item, next_item in zip(line, line[1:]):
            if next_item[1] in self._VALID_NEXT.get(curr_item[1], ()):
                origin_line.append((curr_item[0], next_item[0]))
                stem_line.append((curr_item[2], next_item[2]))
        return origin_line, stem_line

    def read_stop_words(self, file_name):
        """Load the stop-word file (one word per line) into a set."""
        with open(file_name, encoding='utf-8') as f:
            return {line.strip() for line in f}

    def filter_stop_words(self, line):
        """Drop triples whose surface word is a stop word."""
        return [item for item in line if item[0] not in self.stop_words]

    def get_features(self, tagged_sents):
        """Extract per-sentence features for a list of tagged sentences."""
        origin_result = []
        stem_result = []
        for line in tagged_sents:
            new_line = self.filter_stop_words(line)
            origin_line, stem_line = self.get_line_feature(new_line)
            origin_result.append(origin_line)
            stem_result.append(stem_line)
        return origin_result, stem_result

    def write_features(self, data, file_name):
        """Write one line per sentence: space-separated 'left-right' pairs
        (with a trailing space, as in the original format); empty sentences
        produce empty lines."""
        with open(file_name, 'wt', encoding='utf-8') as f:
            for line in data:
                if line:
                    s = ''.join(item[0] + '-' + item[1] + ' ' for item in line)
                    f.write(s + '\n')
                else:
                    f.write('\n')

    def write_word_and_features(self, sents, feature_sents, is_origin, file_name):
        """Write each sentence's words (original or stemmed, per is_origin)
        followed by ' | ' and its feature pairs when any exist."""
        # Index 0 selects the surface word, index 2 the stem.
        index = 0 if is_origin else 2
        with open(file_name, 'wt', encoding='utf-8') as f:
            for i in range(len(sents)):
                if sents[i]:
                    text = ' '.join([item[index] for item in sents[i]])
                    if feature_sents[i]:
                        text += ' | ' + ' '.join([item[0] + '-' + item[1] for item in feature_sents[i]])
                    f.write(text + '\n')
if __name__ == '__main__':
    # Load POS-tagged, stemmed sentences for both polarities.
    neg_data = read_file('../data/negative/final_neg_stem.txt')
    # NOTE(review): the positive file is read from the 'negative' folder --
    # presumably it should be '../data/positive/final_pos_stem.txt'; confirm.
    pos_data = read_file('../data/negative/final_pos_stem.txt')
    rd = RuleDetector()
    # Negative corpus: bare feature files plus word+feature dumps.
    origin_result, stem_result = rd.get_features(neg_data)
    rd.write_features(origin_result, '../data/rule_neg_origin.txt')
    rd.write_features(stem_result, '../data/rule_neg_stem.txt')
    rd.write_word_and_features(neg_data, origin_result, True, '../data/rule_word_origin_neg.txt')
    rd.write_word_and_features(neg_data, stem_result, False, '../data/rule_word_stem_neg.txt')
    # Positive corpus: same four outputs.
    origin_result, stem_result = rd.get_features(pos_data)
    rd.write_features(origin_result, '../data/rule_pos_origin.txt')
    rd.write_features(stem_result, '../data/rule_pos_stem.txt')
    rd.write_word_and_features(pos_data, origin_result, True, '../data/rule_word_origin_pos.txt')
    rd.write_word_and_features(pos_data, stem_result, False, '../data/rule_word_stem_pos.txt')
#
#
#
|
# Demonstrate Python's tuple-unpacking swap: show two values, exchange
# them without a temporary variable, and show them again.
num1 = 12
num2 = 9
layout = "num1 = {}\n num2 = {}"
print(layout.format(num1, num2))
num1, num2 = num2, num1
print(layout.format(num1, num2))
|
def trouble_sort(L):
    """In-place 'trouble sort': bubble passes comparing elements two apart.

    Mutates L and returns it. The even- and odd-indexed subsequences each
    end up sorted, but the full list may not be.
    """
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(L) - 2):
            if L[i] > L[i + 2]:
                L[i], L[i + 2] = L[i + 2], L[i]
                swapped = True
    return L
# Driver in the Code-Jam-style format ("Trouble Sort"): for each case,
# report "OK" if trouble_sort fully sorts the list, otherwise the first
# index where it differs from a true sort.
T=int(input())
for i in range(T):
    # NOTE(review): the declared length is read but never validated/used.
    list_length=int(input())
    my_inp=list(map(int,input().split(' ')))
    # trouble_sort mutates my_inp in place, so sorted_trouble aliases it;
    # sorted(my_inp) still yields the correct reference ordering because
    # the multiset of elements is unchanged.
    sorted_trouble=trouble_sort(my_inp)
    sorted_list=sorted(my_inp)
    flag=False
    index=0
    j=0
    # Find the first position where the two orderings disagree.
    while ( (not flag) and j<len(my_inp)):
        if (sorted_trouble[j]!=sorted_list[j]):
            index=j
            flag=True
        j+=1
    if (flag==False):
        print("Case #{}: OK".format(i+1))
    else:
        print("Case #{}: {}".format(i+1,index))
|
992,640 | b5e9bb8c6b91b149ecd4fc702e22bb62bf986d27 | from boolean import AND, FALSE, NOT, OR, TRUE
# Tests for Church-encoded booleans: TRUE/FALSE are two-argument selectors
# and NOT/AND/OR operate on them (see the `boolean` module).
def test_TRUE():
    """TRUE selects its first argument."""
    assert TRUE("lefty")("righty") == "lefty"

def test_FALSE():
    """FALSE selects its second argument."""
    assert FALSE("lefty")("righty") == "righty"

def test_NOT_inverts_TRUE():
    assert NOT(TRUE) == FALSE

def test_NOT_inverts_FALSE():
    assert NOT(FALSE) == TRUE

# AND truth table.
def test_AND_TT():
    assert AND(TRUE)(TRUE) == TRUE

def test_AND_FF():
    assert AND(FALSE)(FALSE) == FALSE

def test_AND_TF():
    assert AND(TRUE)(FALSE) == FALSE

def test_AND_FT():
    assert AND(FALSE)(TRUE) == FALSE

# OR truth table.
def test_OR_TT():
    assert OR(TRUE)(TRUE) == TRUE

def test_OR_FF():
    assert OR(FALSE)(FALSE) == FALSE

def test_OR_TF():
    assert OR(TRUE)(FALSE) == TRUE

def test_OR_FT():
    assert OR(FALSE)(TRUE) == TRUE
|
992,641 | e0f607504d214a9a39e3d367e79008fd6bf6452a | import torch
from torch import nn
class VSC(nn.Module):
    """Variational Sparse Coding autoencoder with spike-and-slab latents.

    Each latent unit has a Gaussian 'slab' (mu, logvar) gated by a 'spike'
    probability gamma; the gate is relaxed with a sigmoid of sharpness c.
    The 4608 flatten size equals 32*12*12 (see decode's view), which
    implies 3x96x96 inputs downsampled by three 2x max-pools -- consistent
    with the '# 96x96x3' note below; confirm against the data pipeline.
    """

    def __init__(self, latent_dim, c):
        """
        :param latent_dim: number of latent units
        :param c: sharpness of the sigmoid relaxing the binary spike
        """
        super(VSC, self).__init__()
        self.latent_dim = latent_dim
        self.c = c
        # Initial channels 3 > 128 > 64 > 32
        # Initial filters 3 > 3 > 3
        # First change 3 > 32 > 64 > 128
        # Filters 3 > 3 > 5
        # Second change 3 > 32 > 64 > 128 > 256
        # Filters 3 > 3 > 5 > 5
        # Encoder: three conv+ReLU+pool stages, each halving H and W.
        # self.encoder_conv1 = self.getConvolutionLayer(3, 128)
        self.encoder_conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # self.encoder_conv2 = self.getConvolutionLayer(128, 64)
        self.encoder_conv2 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # self.encoder_conv3 = self.getConvolutionLayer(64, 32)
        self.encoder_conv3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # self.encoder_conv4 = nn.Sequential(
        #     nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=1, padding=2),
        #     nn.ReLU(),
        #     nn.MaxPool2d(kernel_size=2)
        # )
        self.flatten = nn.Flatten()
        # Three heads over the 4608-dim feature: mean, log-variance, spike.
        self.encoder_fc1 = nn.Linear(4608, self.latent_dim)
        self.encoder_fc2 = nn.Linear(4608, self.latent_dim)
        self.encoder_fc3 = nn.Linear(4608, self.latent_dim)
        self.encoder_sigmoid = nn.Sigmoid()
        self.reparam_sigmoid = nn.Sigmoid()
        # Decoder: mirror of the encoder using nearest-neighbour upsampling.
        self.decoder_fc1 = nn.Sequential(
            nn.Linear(self.latent_dim, 4608),
            nn.ReLU()
        )
        # Reshape to 32x12x12
        self.decoder_upsampler1 = nn.Upsample(scale_factor=(2, 2), mode='nearest')
        self.decoder_deconv1 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=(2, 2), mode='nearest')
        )
        # 48x48x64
        self.decoder_deconv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=(2, 2), mode='nearest')
        )
        # self.decoder_deconv3 = nn.Sequential(
        #     nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),
        #     nn.ReLU(),
        #     nn.Upsample(scale_factor=(2, 2), mode='nearest')
        # )
        self.decoder_conv1 = nn.Conv2d(in_channels=128, out_channels=3, kernel_size=3, stride=1, padding=1)
        # 96x96x3

    def encode(self, x):
        """Return (mu, sigma, gamma) for input images x.

        Note: `sigma` is the raw fc2 output; downstream (reparameterize,
        loss_function) treats it as a log-variance.
        """
        x = self.encoder_conv1(x)
        x = self.encoder_conv2(x)
        x = self.encoder_conv3(x)
        x = self.flatten(x)
        mu = self.encoder_fc1(x)
        sigma = self.encoder_fc2(x)
        gamma = self.encoder_fc3(x)
        # Spike probabilities are squashed into (0, 1).
        gamma = self.encoder_sigmoid(gamma)
        return mu, sigma, gamma

    def reparameterize(self, mu, logvar, gamma):
        """Sample z via the spike-and-slab reparameterization trick."""
        std = torch.exp(0.5 * logvar)
        # Keeps shape, samples from normal dist with mean 0 and variance 1
        eps = torch.randn_like(std)
        # Uniform dist
        eta = torch.rand_like(std)
        # Smooth relaxation of the Bernoulli(gamma) gate; larger c -> harder.
        slab = self.reparam_sigmoid(self.c * (eta - 1 + gamma))
        return slab * (mu + eps * std)

    def decode(self, z):
        """Map a latent sample back to image space."""
        z = self.decoder_fc1(z)
        # Un-flatten to 32 channels of 12x12 before upsampling back up.
        z = self.decoder_upsampler1(z.view(-1, 32, 12, 12))
        z = self.decoder_deconv1(z)
        z = self.decoder_deconv2(z)
        recon = self.decoder_conv1(z)
        return recon

    def forward(self, x):
        """Return (reconstruction, mu, logvar, gamma)."""
        mu, logvar, gamma = self.encode(x)
        z = self.reparameterize(mu, logvar, gamma)
        return self.decode(z), mu, logvar, gamma

    def update_c(self, c):
        """Anneal the spike-relaxation sharpness during training."""
        self.c = c
# Gamma = Spike
def loss_function(recon_x, x, mu, logvar, gamma, alpha=0.5, beta=1):
    """Spike-and-slab VAE loss.

    :param recon_x: reconstructed batch, same shape as x (N, C, H, W)
    :param x: input batch
    :param mu, logvar: slab Gaussian parameters, shape (N, latent_dim)
    :param gamma: spike (selection) probabilities in (0, 1)
    :param alpha: prior spike probability (sparsity target)
    :param beta: weight on the KL term
    :return: (loss, mse, kld, -slab, -spike)
    """
    alpha = torch.tensor(alpha)
    # Clamp away from {0, 1} so the logs below stay finite.
    gamma = torch.clamp(gamma, 1e-6, 1 - 1e-6)
    # Reconstruction: per-sample sum of squared errors, averaged over batch.
    mse = torch.mean(torch.sum((x - recon_x).pow(2), dim=(1, 2, 3)))
    # Slab (Gaussian) KL contribution, weighted by the spike probability.
    # (The original applied torch.sum to this scalar a second time -- a
    # no-op that has been removed.)
    slab = torch.sum((0.5 * gamma) * (1 + logvar - mu.pow(2) - logvar.exp()))
    # Spike (Bernoulli) KL against the prior sparsity alpha.
    spike_a = (1 - gamma) * (torch.log(1 - alpha) - torch.log(1 - gamma))
    spike_b = gamma * (torch.log(alpha) - torch.log(gamma))
    spike = torch.sum(spike_a + spike_b)
    kld = -1 * (spike + slab)
    loss = mse + kld * beta
    return loss, mse, kld, -slab, -spike
|
992,642 | 618682c6b430781eb6637499fd0a80ff5cc71e63 | #coding=utf-8
# Use lambda expressions to create anonymous functions that pick the
# larger and the smaller of two input values (the exercise requires lambda).
#********** Begin *********#
MAXIMUM = lambda a, b: b if b >= a else a
MINIMUM = lambda a, b: b if b <= a else a
#********** End **********#
# Read two positive integers from stdin.
a = int(input())
b = int(input())
# Print the larger and the smaller value (output labels are in Chinese,
# matching the exercise's expected output).
print('较大的值是:%d' % MAXIMUM(a,b))
print('较小的值是:%d' % MINIMUM(a,b))
|
992,643 | 038f6c2c759e0be9b5a4bb3f90097e598a363e0c | # This file is the log for the in-situ sensors placed for iScape project
# Smart Citizen Kit was placed in UCD
# The file is split into three standardized json files for O3, NO2 and O3
# Date format is transformed to ISO-8601 format
# Value of pollutant is extracted depending on recommendation from iScape Forum
import csv
import json
import pandas as pd
def display():
    """Split the in-situ sensor CSV into CO.json, NO2.json and O3.json.

    Reads ``5262_PROCESSED.csv`` from the working directory. Only columns
    whose header contains "OVL_0-30-50" but neither "FILTER" nor "GB" are
    exported (per the iScape forum recommendation). Timestamps like
    "YYYY-MM-DD HH:MM:SS+00:00" are rewritten to ISO-8601 "...T...Z".

    Fixes over the original: all file handles are closed via ``with``
    (the six handles previously leaked) and unused locals were removed.
    """
    file_path = "5262_PROCESSED.csv"

    # Header row only: pandas parses the column titles for us.
    column_title = pd.read_csv(file_path, nrows=1).columns.tolist()
    column_count = len(column_title)

    co_file_data = []
    no2_file_data = []
    o3_file_data = []
    time = ""
    with open(file_path, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader, None)  # skip the header row
        for row in reader:
            for data_count in range(0, column_count):
                title = str(column_title[data_count])
                if data_count == 0:
                    # "YYYY-MM-DD HH:MM:SS+00:00" -> "YYYY-MM-DDTHH:MM:SSZ"
                    time = row[0].replace(" ", 'T').replace("+00:00", 'Z')
                elif ("OVL_0-30-50" in title
                        and "FILTER" not in title
                        and "GB" not in title):
                    value = row[data_count]
                    # Route by pollutant name embedded in the column title.
                    if "CO" in title:
                        co_file_data.append(
                            {"value": value, "uom": "ppm", "time": {"instant": time}})
                    elif "NO2" in title:
                        no2_file_data.append(
                            {"value": value, "uom": "ppb", "time": {"instant": time}})
                    elif "O3" in title:
                        o3_file_data.append(
                            {"value": value, "uom": "ppb", "time": {"instant": time}})

    outputs = (
        ("CO.json", {"CO": co_file_data}),
        ("NO2.json", {"NO2": no2_file_data}),
        ("O3.json", {"O3": o3_file_data}),
    )
    for out_path, payload in outputs:
        with open(out_path, "w") as out_file:
            out_file.write(json.dumps(payload, sort_keys=True, indent=4,
                                      ensure_ascii=False))
    return
if __name__ == '__main__':
    # Run the CSV -> JSON conversion when executed as a script.
    display()
992,644 | 2db9980567a6952ddbc5da57b31b2a8d5466ce2a | # Generated by Django 3.0.4 on 2020-04-29 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ATMDetails.branch_name as a
    CharField with max_length=100. Do not hand-edit beyond comments."""

    dependencies = [
        ('ATMStatus', '0009_auto_20200429_1024'),
    ]

    operations = [
        migrations.AlterField(
            model_name='atmdetails',
            name='branch_name',
            field=models.CharField(max_length=100),
        ),
    ]
|
992,645 | c54dde9731332200c9767e6ce8e4eaf02714bacf | from django.db import models
from django.contrib.gis.db import models as gismodels
class Country(gismodels.Model):
    """
    Model to represent countries.
    """
    # Presumably an ISO 3166-1 alpha-2 code, given max_length=2 -- confirm.
    isocode = gismodels.CharField(max_length=2)
    name = gismodels.CharField(max_length=255)
    # Country borders in WGS84 (EPSG:4326).
    geometry = gismodels.MultiPolygonField(srid=4326)
    # NOTE(review): GeoManager and __unicode__ are Django 1.x / Python 2
    # idioms; this codebase predates Django 2.0.
    objects = gismodels.GeoManager()

    def __unicode__(self):
        return '%s' % (self.name)
class Animal(models.Model):
    """
    Model to represent animals.
    """
    name = models.CharField(max_length=255)
    # Uploaded files land under MEDIA_ROOT/'animals.images'.
    image = models.ImageField(upload_to='animals.images')

    def __unicode__(self):
        return '%s' % (self.name)

    def image_url(self):
        """HTML <img> snippet for the admin list display (80px wide)."""
        return u'<img src="%s" alt="%s" width="80"></img>' % (self.image.url,
                                                              self.name)
    # allow_tags is the pre-Django-2.0 way to render unescaped admin HTML.
    image_url.allow_tags = True

    class Meta:
        ordering = ['name']
class Sighting(gismodels.Model):
    """
    Model to represent sightings.
    """
    # 1-3 star quality rating for the sighting.
    RATE_CHOICES = (
        (1, '*'),
        (2, '**'),
        (3, '***'),
    )
    # NOTE(review): positional ForeignKey without on_delete is only valid
    # pre-Django 2.0, consistent with GeoManager/__unicode__ below.
    animal = gismodels.ForeignKey(Animal)
    date = gismodels.DateTimeField()
    description = gismodels.TextField()
    rate = gismodels.IntegerField(choices=RATE_CHOICES)
    # Sighting location in WGS84 (EPSG:4326).
    geometry = gismodels.PointField(srid=4326)
    objects = gismodels.GeoManager()

    def __unicode__(self):
        return '%s' % (self.date)

    # recipe 2
    @property
    def date_formatted(self):
        """Date as MM/DD/YYYY for serialization layers."""
        return self.date.strftime('%m/%d/%Y')

    @property
    def animal_name(self):
        return self.animal.name

    @property
    def animal_image_url(self):
        return self.animal.image_url()

    @property
    def country_name(self):
        # Spatial lookup: the first country polygon containing the point.
        # NOTE(review): raises IndexError when no country contains the
        # point (e.g. sightings at sea) -- confirm whether that can occur.
        country = Country.objects.filter(geometry__contains=self.geometry)[0]
        return country.name

    class Meta:
        ordering = ['date']
|
992,646 | cfe7bcf6f40a60f48df1b5a359439227ae9121a2 | import os
import wave
from multiprocessing import Process
import pyaudio
class Audio:
    """Extracts a video's audio track to WAV (via ffmpeg) and plays it
    from a daemonized background process.

    Fixes over the original: the EOF check compared ``readframes`` output
    (bytes on Python 3) against the str ``""``, which never matched and
    left the loop writing forever; the PyAudio stream/handles are now
    released; ffmpeg paths are quoted against spaces.
    """

    def __init__(self, video_path):
        """Prepare (and if needed, extract) the WAV next to the video."""
        audio_path = os.path.splitext(video_path)[0] + ".wav"
        if not os.path.exists(audio_path):
            # Quote paths so spaces don't split the shell command.
            # NOTE(review): a subprocess.run list would avoid the shell
            # entirely if video_path can ever be untrusted.
            os.system('ffmpeg -i "%s" -b:a 128k "%s"' % (video_path, audio_path))
        self.audio_thread = Process(target=self.playAudioThread, args=(audio_path,))
        # Daemon: playback dies with the parent instead of blocking exit.
        self.audio_thread.daemon = True

    def playAudioThread(self, audio_path):
        """Stream the WAV file to the default output device until EOF."""
        chunk = 1024
        wf = wave.open(audio_path, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        try:
            while True:
                audio_data = wf.readframes(chunk)
                # readframes returns b'' at EOF (bytes, not str).
                if not audio_data:
                    break
                stream.write(audio_data)
        finally:
            stream.stop_stream()
            stream.close()
            p.terminate()
            wf.close()

    def start(self):
        """Begin playback in the background process."""
        self.audio_thread.start()
|
992,647 | beaecd09d4643958f4abb8635f50f051b06890ff | # -*- coding: utf-8 -*-
"""\
Overset simulation interface
-----------------------------
"""
import numpy as np
from mpi4py import MPI
from .. import tioga
from .par_printer import ParallelPrinter
from .par_timer import ParTimer
class OversetSimulation:
    """Representation of an overset simulation.

    Fix over the original: ``dtype=np.int`` (an alias removed in
    NumPy 1.24) is replaced by the equivalent builtin ``int`` in the
    MPI reduction buffers; behavior is unchanged on older NumPy.
    """

    def __init__(self, comm):
        """
        Args:
            comm: MPI communicator instance
        """
        #: World communicator instance
        self.comm = comm
        #: Parallel printer utility
        self.printer = ParallelPrinter(comm)
        #: TIOGA overset connectivity instance
        self.tioga = tioga.get_instance()
        self.tioga.set_communicator(comm)
        #: List of solvers active in this overset simulation
        self.solvers = []
        #: Flag indicating whether an AMR solver is active in this simulation
        self.has_amr = False
        #: Flag indicating whether an unstructured solver is active in this simulation
        self.has_unstructured = False
        #: Interval for overset updates during timestepping
        self.overset_update_interval = 100000000
        #: Last timestep run during this simulation
        self.last_timestep = 0
        #: Flag indicating whether initialization tasks have been performed
        self.initialized = False
        #: Parallel timer instance
        self.timer = ParTimer(comm)

    def register_solver(self, solver):
        """Register a solver"""
        self.solvers.append(solver)

    def _check_solver_types(self):
        """Determine unstructured and structured solver types"""
        # Reduce with MAX so a solver type present on any rank counts.
        # dtype=int: np.int was removed in NumPy 1.24 (same C long dtype).
        flag = np.empty((2,), dtype=int)
        gflag = np.empty((2,), dtype=int)
        flag[0] = 1 if any(ss.is_amr for ss in self.solvers) else 0
        flag[1] = 1 if any(ss.is_unstructured for ss in self.solvers) else 0
        self.comm.Allreduce(flag, gflag, MPI.MAX)
        self.has_amr = (gflag[0] == 1)
        self.has_unstructured = (gflag[1] == 1)

    def _determine_overset_interval(self):
        """Determine if we should update connectivity during time integration"""
        # Global MIN: the most frequently-updating solver wins.
        flag = np.empty((1,), dtype=int)
        gflag = np.empty((1,), dtype=int)
        flag[0] = min(ss.overset_update_interval for ss in self.solvers)
        self.comm.Allreduce(flag, gflag, MPI.MIN)
        self.overset_update_interval = gflag[0]
        self.printer.echo("Overset update interval = ", self.overset_update_interval)

    def _do_connectivity(self, tstep):
        """Return True if connectivity must be updated at a given timestep"""
        return ((tstep > 0) and (tstep % self.overset_update_interval) == 0)

    def initialize(self):
        """Initialize all solvers"""
        self._check_solver_types()
        if not self.has_unstructured:
            raise RuntimeError("OversetSimulation requires at least one unstructured solver")

        with self.timer("Init"):
            for ss in self.solvers:
                ss.init_prolog(multi_solver_mode=True)
            self._determine_overset_interval()
            self.perform_overset_connectivity()

            for ss in self.solvers:
                ss.init_epilog()
                ss.prepare_solver_prolog()

            self.exchange_solution()
            for ss in self.solvers:
                ss.prepare_solver_epilog()

        self.comm.Barrier()
        self.initialized = True

    def perform_overset_connectivity(self):
        """Determine field, fringe, hole information"""
        for ss in self.solvers:
            ss.pre_overset_conn_work()
        tg = self.tioga
        if self.has_amr:
            tg.preprocess_amr_data()
        tg.profile()
        tg.perform_connectivity()
        if self.has_amr:
            tg.perform_connectivity_amr()
        for ss in self.solvers:
            ss.post_overset_conn_work()

    def exchange_solution(self):
        """Exchange solution between solvers"""
        for ss in self.solvers:
            ss.register_solution()
        if self.has_amr:
            self.tioga.data_update_amr()
        else:
            # Only the AMR exchange path is implemented so far.
            raise NotImplementedError("Invalid overset exchange")
        for ss in self.solvers:
            ss.update_solution()

    def run_timesteps(self, nsteps=1):
        """Run prescribed number of timesteps"""
        if not self.initialized:
            raise RuntimeError("OversetSimulation has not been initialized")
        wclabels = "Pre Conn Solve Post".split()
        tstart = self.last_timestep + 1
        tend = self.last_timestep + 1 + nsteps
        self.printer.echo("Running %d timesteps starting from %d"%(nsteps, tstart))
        for nt in range(tstart, tend):
            with self.timer("Pre", incremental=True):
                for ss in self.solvers:
                    ss.pre_advance_stage1()

            with self.timer("Conn", incremental=True):
                if self._do_connectivity(nt):
                    self.perform_overset_connectivity()

            with self.timer("Pre", incremental=False):
                for ss in self.solvers:
                    ss.pre_advance_stage2()

            with self.timer("Conn"):
                self.exchange_solution()

            with self.timer("Solve"):
                for ss in self.solvers:
                    ss.advance_timestep()

            with self.timer("Post"):
                for ss in self.solvers:
                    ss.post_advance()

            self.comm.Barrier()
            # Per-step wall-clock summary across the labeled phases.
            wctime = self.timer.get_timings(wclabels)
            wctime_str = ' '.join("%s: %.4f"%(k, v) for k, v in wctime.items())
            self.printer.echo("WCTime:", "%5d"%nt, wctime_str, "Total:",
                              "%.4f"%sum(wctime.values()))
        self.last_timestep = tend

    def summarize_timings(self):
        """Summarize timers"""
        tt = self.timer.timers
        labels = "Init Pre Conn Solve Post".split()
        sep = "-"*80
        hdr = "%-20s %5s %12s %12s %12s %12s"%(
            "Timer", "Calls", "Tot.", "Avg.", "Min.", "Max.")
        self.printer.echo("\n" + sep + "\n" + hdr + "\n")
        for kk in labels:
            # tvals layout: [calls, ?, min, max, ..., total] -- indices
            # follow ParTimer's convention.
            tvals = tt[kk]
            self.printer.echo("%-20s %5d %12.4f %12.4f %12.4f %12.4f"%(
                kk, tvals[0], tvals[-1], (tvals[-1]/tvals[0]), tvals[2], tvals[3]))
        self.printer.echo(sep + "\n")
|
992,648 | d24d035c2138bebce25d21888729e761a2f497b1 | from flask import Flask, request
import json
from FlowrouteMessagingLib.Controllers.APIController import *
from FlowrouteMessagingLib.Models import *
controller = APIController(username="AccessKey", password="SecretKey")
app = Flask(__name__)
app.debug = True
global EXAMPLE_APPOINTMENT
global ORIGINATING_NUMBER
EXAMPLE_APPOINTMENT = {
'name': 'John Smith',
'date': 'March 3rd, 2016',
'location': '1221 2nd Ave STE 300',
'contactNumber': '19515557918',
'status': 'unconfirmed',
}
ORIGINATING_NUMBER = '18445555780'
@app.route('/initiatereminder', methods=['GET', 'POST'])
def initiatereminder():
    """
    Sends the appropriate message to the appointment's 'contactNumber' given
    the state of the appointment.
    """
    # State machine: 'unconfirmed' triggers the SMS and moves to
    # 'pending_confirmation'; every other state just reports itself.
    if EXAMPLE_APPOINTMENT['status'] == 'unconfirmed':
        message_content = ("Hello {}, you have an appointment on {} at {}. "
                           "Please reply 'YES' or 'NO' to indicate if you "
                           "are able to make it to this appointment.").format(
            EXAMPLE_APPOINTMENT['name'],
            EXAMPLE_APPOINTMENT['date'],
            EXAMPLE_APPOINTMENT['location'])
        dest = str(EXAMPLE_APPOINTMENT['contactNumber'])
        msg = Message(
            to=dest,
            from_=ORIGINATING_NUMBER,
            content=message_content)
        # Send via the Flowroute messaging API.
        response = controller.create_message(msg)
        EXAMPLE_APPOINTMENT['status'] = 'pending_confirmation'
        return str(response)
    elif EXAMPLE_APPOINTMENT['status'] == 'pending_confirmation':
        return 'The appointment is pending confirmation'
    elif EXAMPLE_APPOINTMENT['status'] == 'confirmed':
        return 'The appointment has been confirmed'
    elif EXAMPLE_APPOINTMENT['status'] == 'cancelled':
        return 'The appointment has been cancelled'
@app.route('/handleresponse', methods=['GET', 'POST'])
def handleresponse():
    """
    A callback for processing the user's responding text message. Sends
    a confirmation message, or prompts the user for valid input.
    """
    # NOTE(review): `print response` is Python 2 syntax -- this module
    # targets Python 2 throughout.
    # Accept only replies coming from the appointment's own number;
    # match YES/NO case-insensitively anywhere in the body.
    if str(request.json['from']) == EXAMPLE_APPOINTMENT['contactNumber'] \
            and 'YES' in str(request.json['body']).upper():
        msg = Message(
            to=request.json['from'],
            from_=ORIGINATING_NUMBER,
            content='Your appointment has been confirmed')
        response = controller.create_message(msg)
        print response
        EXAMPLE_APPOINTMENT['status'] = 'confirmed'
        return "Appointment status: " + EXAMPLE_APPOINTMENT['status']
    elif str(request.json['from']) == EXAMPLE_APPOINTMENT['contactNumber'] \
            and 'NO' in str(request.json['body']).upper():
        msg = Message(
            to=request.json['from'],
            from_=ORIGINATING_NUMBER,
            content=("Your appointment has been cancelled. Please call {} to"
                     "reschedule").format(ORIGINATING_NUMBER))
        response = controller.create_message(msg)
        print response
        EXAMPLE_APPOINTMENT['status'] = 'cancelled'
        return "Appointment status: " + EXAMPLE_APPOINTMENT['status']
    else:
        # Unrecognized reply (or wrong sender): re-prompt, keep the status.
        msg = Message(
            to=request.json['from'],
            from_=ORIGINATING_NUMBER,
            content='Please respond with either "Yes" or "No"')
        response = controller.create_message(msg)
        print response
        return "Appointment status: " + EXAMPLE_APPOINTMENT['status']
@app.route('/')
def index():
    """Simple liveness endpoint."""
    return "Hello, I am a web server!"
if __name__ == '__main__':
    # Listen on all interfaces, port 11111 (debug mode set at module level).
    app.run(
        host="0.0.0.0",
        port=int("11111")
    )
|
992,649 | 4a1f1e1bf9db9fe3974d38b771b5848c3859ef05 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import GPS
import inspect
import os
import imp
import sys
from pygps import *
import pygps.tree
from pygps.tree import *
import pygps.notebook
from pygps.notebook import *
from pygps.project import *
import traceback
import platform
import workflows
from workflows.promises import Promise, timeout, known_tasks
system_is_cygwin = ('uname' in os.__dict__ and
os.uname()[0].find('CYGWIN') != -1)
system_is_windows = system_is_cygwin or platform.system().find("Windows") != -1
system_is_osx = 'uname' in os.__dict__ and os.uname()[0].find('Darwin') != -1
# This should likely be True in general, but on some setups generating events
# results in a storage_error.
can_generate_events = True # not system_is_osx
nightly_testsuite = os.getenv('GPS_TEST_CONTEXT') == 'nightly'
def compare_to_file(editor, filepath):
    """Assert that the editor's content matches the given file, ignoring
    leading/trailing whitespace.

    @type editor: GPS.EditorBuffer
    @type filepath: str
    """
    with open(filepath) as reference:
        expected = reference.read().strip()
    gps_assert(editor.get_chars().strip(), expected)
def requires_not_windows(reason=""):
    """Mark the test as NOT_RUN when executing on Windows."""
    if not system_is_windows:
        return
    gps_not_run('disabled on Windows %s' % reason)
def test_pygtk():
    """Test whether pygtk support was built in GPS. This is needed if
    you want to use pywidget(), but not if you only want to use gtk
    functions like idle"""
    # Mark the test NOT_RUN (rather than failing) when the feature is absent.
    if 'pywidget' not in GPS.GUI.__dict__:
        gps_not_run('PyGTK support not compiled in')
from gi.repository import Gtk, GObject, GLib
def sort_by_value(hash):
    """Return the dict's (key, value) pairs sorted by value, then key."""
    ordered = sorted((v, k) for (k, v) in hash.items())
    return [(k, v) for (v, k) in ordered]
def sort_by_key(hash):
    """Return the dict's (key, value) pairs sorted by key."""
    return sorted(hash.items())
def remove_extension(str):
    """Strip the trailing extension from a filename/path, if any.

    The dot must come after the last path separator, so dotted directory
    names are preserved. (The parameter name shadows the builtin ``str``;
    kept unchanged for interface compatibility.)
    """
    last_dot = str.rfind('.')
    # Bug fix: the original searched for '\\\\' (two literal backslashes),
    # which never matches a normal single-backslash Windows separator.
    last_window_sep = str.rfind('\\')
    last_unix_sep = str.rfind('/')
    if last_dot > last_window_sep and last_dot > last_unix_sep:
        return str[0:last_dot]
    else:
        return str
def get_editor_from_title(title):
    """Return the open EditorBuffer whose view title matches, else None."""
    matches = (b for b in GPS.EditorBuffer.list()
               if b.current_view().title() == title)
    return next(matches, None)
def before_exit(hook):
    """ Print error messages to stderr and exit GPS """
    global before_exit_has_run
    # Run-once guard: the forced GPS.exit below would re-trigger this hook.
    if before_exit_has_run == 0:
        before_exit_has_run = 1
        # We can force now, since any handling for not force has already been
        # handled
        GPS.exit(force=1, status=exit_status)
    return True

# Module state backing the run-once guard above; register the hook.
before_exit_has_run = 0
GPS.Hook('before_exit_action_hook').add(before_exit)
def gps_not_run(msg=''):
    """Set the exit status to NOT_RUN"""
    # msg is accepted for symmetry with the other exit helpers but unused.
    global exit_status
    exit_status = NOT_RUN
    GPS.exit(force=1, status=NOT_RUN)
def gps_fatal_error(msg):
    """Unconditional error"""
    global exit_status
    exit_status = FAILURE
    GPS.Logger('TESTSUITE').log(msg)
    GPS.exit(force=1)
    # Only reached if GPS.exit returns (e.g. the exit is deferred);
    # the raise then aborts the calling script immediately.
    raise Exception("Fatal Error: %s" % msg)
def log_debug(var):
    """Display a variable. Convenient for debugging test scripts"""
    # One-element tuple protects % formatting when var is itself a tuple.
    GPS.Logger('testsuite').log("%s" % (var, ))
def simple_error(message):
    """Mark the test run as failed and log the message."""
    global exit_status
    exit_status = FAILURE
    GPS.Logger('TESTSUITE').log(message)
    # Pop a dialog outside nightly runs so an interactive user notices.
    if not nightly_testsuite:
        GPS.MDI.dialog(message)
# Background task names that are expected during tests and safe to
# interrupt at exit time (consumed by safe_exit below).
Known_Commands = ["load C/C++ xref info", "load entity db", "load C/C++ xref",
                  "Semantic tree update", "load constructs",
                  "Recompute Xref info"]
def safe_exit(expected_commands=[], delay=0, force=1):
"""Close the background tasks which are known to be running, and attempt
to exit.
expected commands contains a list of commands which are known to be
running and which can be safely interrupted.
If force is true, ignore any unsaved files.
Wait at least delay milliseconds before closing.
"""
global Known_Commands
# This is the list of commands that are expected when running tests, and
# which can be interrupted safely.
expected_commands = expected_commands + Known_Commands
commands_found = 0
unexpected_commands = []
# Look through all running commands, and interrupt them.
# Emit an error for every command which is not expected to run.
for L in GPS.Task.list():
if L.block_exit():
name = L.name()
L.interrupt()
commands_found = commands_found + 1
if name not in expected_commands:
unexpected_commands = unexpected_commands + [name]
if unexpected_commands != []:
# If we have encountered unexpected commands, emit an error.
simple_error('Commands still running at end of test: ' +
str(unexpected_commands))
# exit GPS after a timeout, so that the Tasks view has time to remove
# the interrupted commands from the list.
GPS.Timeout(max(delay, 100) + 300 * commands_found,
lambda timeout: GPS.exit(force, status=exit_status))
@workflows.run_as_workflow
def wait_for_mdi_child(name, step=500, n=10):
    """
    Wait for the MDI child designated by :param str name: to be added
    to the MDI, waiting for the time specified in :param int step: n
    times.

    Gives up silently after n polls (i.e. step * n milliseconds total).
    """
    k = 0
    while GPS.MDI.get(name) is None and k < n:
        yield timeout(step)
        k += 1
@workflows.run_as_workflow
def wait_until_not_busy(debugger, t=100):
    """
    Wait until the given GPS.Debugger is not busy, polling every t ms.
    """
    while debugger.is_busy():
        yield timeout(t)
def wait_for_entities(cb, *args, **kwargs):
    """Execute cb when all entities have finished loading.
    This function is not blocking: cb runs later from a GPS timeout."""
    def on_timeout(timeout):
        # An empty command list means entity loading has completed.
        if GPS.Command.list() == []:
            timeout.remove()
            cb(*args, **kwargs)
    GPS.Timeout(200, on_timeout)
def wait_for_tasks(cb, *args, **kwargs):
    """Execute cb when all tasks have completed (non-blocking)."""
    def internal_on_idle():
        cb(*args, **kwargs)
    def internal_wait_until_no_tasks(timeout):
        if GPS.Task.list() == []:
            timeout.remove()
            # Tasks can update locations view, so wait until locations view
            # has completed its operations also.
            process_all_events()
            GLib.idle_add(internal_on_idle)
    GPS.Timeout(400, internal_wait_until_no_tasks)
def wait_for_idle(cb, *args, **kwargs):
    """Run cb(*args, **kwargs) from a GLib idle callback.

    Pending GTK events are flushed first, then cb is scheduled for the
    next idle cycle.  Non-blocking.
    """
    def internal_on_idle():
        cb(*args, **kwargs)
    process_all_events()
    # Removed: an unused `windows = Gtk.Window.list_toplevels()` local
    # that the original computed and never read.
    GLib.idle_add(internal_on_idle)
def record_time(t):
    """ Record the time t in the time.out file.
    t should be a float representing the number of seconds we want to
    record.
    """
    # Context manager guarantees the handle is closed even if write fails.
    with open('time.out', 'w') as f:
        f.write(str(t))
def recompute_xref():
    """ Force an Xref recomputation immediately. """
    # Imported lazily: cross_references is a GPS plug-in module.
    import cross_references
    cross_references.r.recompute_xref()
############
# Editors #
############
def get_all_tags(buffer, name=''):
    """return a string listing all highlighting tags used in buffer. Each
    line starts with name, then the name of the tag and the starting line
    and column, then the ending line and column.

    :param buffer: a GPS.EditorBuffer to scan.
    :param name: optional per-line prefix (a space is appended if set).
    """
    if name:
        name = name + ' '
    result = ''
    loc = buffer.beginning_of_buffer()
    while loc < buffer.end_of_buffer():
        over = loc.get_overlays()
        if over != []:
            # Only the first overlay at each location is reported;
            # loc2 is the last character still covered by it.
            loc2 = loc.forward_overlay(over[0]) - 1
            result = result + name + over[0].name() \
                + ' %s:%s %s:%s\n' % (loc.line(), loc.column(),
                                      loc2.line(), loc2.column())
            loc = loc2 + 1
        else:
            # Jump to the next overlay change to skip unhighlighted text.
            loc = loc.forward_overlay()
    return result
def open_and_raise(filename, line, col):
    """Open an editor, if needed, raise it, and move the cursor to the
    specified (line, column).
    """
    buffer = GPS.EditorBuffer.get(GPS.File(filename))
    GPS.MDI.get_by_child(buffer.current_view()).raise_window()
    buffer.current_view().goto(buffer.at(line, col))
def get_completion():
    """
    Return the content, as a list of strings, of the completion window. Waits
    until it stops computing
    :rtype: Promise[Iterator[str]]
    """
    p = Promise()
    def timeout_handler(t):
        try:
            pop_tree = get_widget_by_name("completion-view")
            comps = [row[0] for row in dump_tree_model(pop_tree.get_model())]
            # The view appends a 'Computing...' placeholder row while the
            # completion engine is still working; wait until it is gone.
            if comps[-1] != 'Computing...':
                t.remove()
                p.resolve(comps)
        except Exception as e:
            # Deliberate best-effort: the widget may not exist yet; keep
            # polling on the next timeout tick.
            pass
    GPS.Timeout(100, timeout_handler)
    return p
def send_keys(*input_seq):
    """
    Workflow
    Given an input sequence composed of strings and character codes, send them
    to the application, waiting a small amount of time between each keystroke,
    to simulate human keyboard input. Returns nothing
    """
    for chunk in input_seq:
        if isinstance(chunk, int):
            # An int is taken as a raw key code (e.g. GDK constants).
            send_key_event(chunk)
            yield timeout(10)
        elif isinstance(chunk, str):
            for c in chunk:
                if c == "\n":
                    send_key_event(GDK_RETURN)
                else:
                    send_key_event(ord(c))
                yield timeout(10)
######################################
# The following functions are only available if PyGTK is available
######################################
try:
from gi.repository import Gtk, GObject, GLib
    def enqueue(fun, timeout=200):
        """ Register fun to be executed once, after timeout milliseconds.
        This function is useful for programming tests that require GPS to
        process events in a sequence.

        NOTE(review): in this graphical variant `timeout` is ignored --
        GLib.idle_add schedules `fun` on the next idle pass, not after a
        delay.  Confirm whether GLib.timeout_add was intended."""
        GLib.idle_add(fun)
    def get_current_focus():
        """Return the widget that has the current keyboard focus.

        A pending GTK grab wins over the focused toplevel; returns None
        when no toplevel has focus."""
        grab = Gtk.grab_get_current()
        if grab:
            return grab
        for win in Gtk.Window.list_toplevels():
            if win.get_property('has-toplevel-focus'):
                return win.get_focus()
        return None
    # #####################
    # # Shortcuts editor ##
    # #####################
    def select_action_in_shortcuts_editor(action, key):
        """Select the line corresponding to action in the key shortcuts editor.
        Check that the keybinding is the one we are expecting.

        :param action: action name, matched case-insensitively.
        :param key: expected shortcut string (e.g. 'ctrl-s').
        :return: the editor widget, whether or not the action was found.
        """
        editor = get_widget_by_name('Key shortcuts')
        gps_not_null(editor, 'Key shortcuts editor not open')
        # Flatten the tree so every action appears as a top-level row.
        toggle_local_config(editor, 'Show categories', False)
        process_all_events()
        tree = get_widget_by_name('Key shortcuts tree', [editor])
        for m in tree.get_model():
            if m[0].lower() == action.lower():
                current = m[1].decode('utf-8')
                gps_assert(current, key, 'Shortcut for ' + action +
                           ' is "%s", expecting "%s"' % (current, key))
                tree.get_selection().select_path(m.path)
                return editor
        # Reaching here means the action row was never found.
        gps_assert(False, True, action + ' not found in key shortcuts editor')
        return editor
###############################
# Startup scripts and themes ##
###############################
def load_xml_startup_script(name):
"""Load an XML startup script. Name must include the .xml extension"""
for dir in ("%sshare/gps/support/core/" % GPS.get_system_dir(),
"%sshare/gps/support/ui/" % GPS.get_system_dir(),
"%sshare/gps/library/" % GPS.get_system_dir(),
"%sshare/gps/plug-ins/" % GPS.get_system_dir()):
try:
f = file("%s%s" % (dir, name)).read()
break
except:
f = None
GPS.parse_xml(f)
process_all_events()
    def load_python_startup_script(name):
        """Load a python startup script, and initializes it immediately so
        that its menus are visible.

        Returns the already-loaded module when it is in sys.modules;
        otherwise loads it via imp and calls its on_gps_started hook if
        the module defines one.
        """
        try:
            return sys.modules[name]
        except KeyError:
            pass
        (fp, pathname, description) = imp.find_module(name)
        try:
            module = imp.load_module(name, fp, pathname, description)
            # Special to GPS: if the module has a on_gps_started function,
            # execute it
            module.on_gps_started('gps_started')
        except AttributeError:
            # Module has no on_gps_started hook: that is fine.
            pass
        finally:
            if fp:
                fp.close()
        return module
    class PyConsole(GPS.Console):
        """GPS console that mirrors everything written to it into the
        UNEXPECTED_EXCEPTION trace, so python errors end up in the logs."""
        def write(self, text):
            GPS.Console.write(self, text)
            GPS.Logger('UNEXPECTED_EXCEPTION').log(text)
    # Redirect the standard error from the Messages window to an instance of
    # the PyConsole class based on the Messages window. Each python error
    # will therefore be displayed both in the Messages window and in the traces
    # (under the UNEXPECTED_EXCEPTION debug handle).
    # Disabled on Windows for now so that we can concentrate on the other
    # issues ???
    if os.name != 'nt':
        sys.stderr = PyConsole('Messages')
##############
# Notebooks ##
##############
    def switch_notebook_page(notebook, label):
        """Switch notebook to the page with the given label; fatal error
        (test abort) if the page does not exist.

        :return: the page index that was selected.
        """
        result = pygps.notebook.switch_notebook_page(notebook, label)
        if result == -1:
            gps_fatal_error("Notebook doesn't contain " + label + ' page')
        return result
    ######################
    # Open From Project ##
    ######################
    def open_from_project(on_open, *args, **kwargs):
        """Focus in the global search box to search files from project.
        Then call on_open and pass it the search field:
        on_open (completionList, entry, tree, *args, **kwargs)

        The callback runs from a one-shot GPS timeout, once the search
        widgets have been created.
        """
        GPS.execute_action("Global Search in context: file names")
        def on_timeout(timeout):
            timeout.remove()
            field = get_widget_by_name("global-search")
            gps_not_null(field, "Global search field not found")
            field = get_widgets_by_type(Gtk.Entry, field)[0]
            gps_not_null(field, "Global search contains no GtkEntry")
            popup = get_widget_by_name("completion-list")
            gps_not_null(popup, "Global search's completion list not found")
            tree = get_widgets_by_type(Gtk.TreeView, popup)[0]
            on_open(*(popup, field, tree) + args, **kwargs)
        GPS.Timeout(200, on_timeout)
############
# Dialogs ##
############
    def open_key_shortcuts(on_open, *args, **kwargs):
        """Open the keyshortcuts editor, and call
        on_open (dialog, *args, **kwargs)

        NOTE(review): args and kwargs are forwarded positionally to
        open_menu -- verify against open_menu's signature (defined
        elsewhere in this module)."""
        open_menu('/Edit/Key Shortcuts...', on_open, [], args, kwargs)
    def open_file_switches(on_open, *args, **kwargs):
        """Open the file-specific switches editor, and call
        on_open (mdichild, tree, *args, **kwargs)"""
        def on_timeout(timeout):
            timeout.remove()
            mdi = GPS.MDI.get('Project Switches')
            tree = get_widgets_by_type(Gtk.TreeView,
                                       mdi.get_child().pywidget())[0]
            on_open(*(mdi, tree) + args, **kwargs)
        # One second leaves time for the switches dialog to be built.
        GPS.Timeout(1000, on_timeout)
        GPS.Menu.get(
            '/Project/Edit File Switches...').action.execute_if_possible()
    def open_breakpoint_editor(on_open, *args, **kwargs):
        """Open the breakpoint editor dialog and call
        on_open (MDIWindow, *args, **kwargs)"""
        def __internal():
            m = GPS.MDI.get('Breakpoints')
            if not m:
                return True  # Wait again
            on_open(*(m, ) + args, **kwargs)
            return False
        # GLib re-invokes the callback every 200ms while it returns True.
        GLib.timeout_add(200, __internal)
        GPS.Menu.get('/Debug/Data/Breakpoints').action.execute_if_possible()
############
# Wizards ##
############
    def wizard_current_page(wizard):
        """Return the widget currently visible in the wizard, or None if
        no page is mapped."""
        contents = get_widget_by_name('wizard contents', wizard)
        for w in contents.get_children():
            # Only one page is mapped (displayed) at a time.
            if w.get_mapped():
                return w
        return None
    ############################
    # TextView and TextBuffer ##
    ############################
    def iter_from_location(loc):
        """Creates a Gtk.TextIter from an EditorLocation.

        Works by dropping a temporary named mark at loc and resolving it
        through the underlying Gtk.TextBuffer."""
        view = text_view_from_location(loc)
        b = view.get_buffer()
        mark_name = "iter_from_loc_temp_mark"
        _ = loc.create_mark(mark_name)
        mark = b.get_mark(mark_name)
        return b.get_iter_at_mark(mark)
    def compare_editor_contextual(loc, expected, indexes=None, msg='',
                                  when_done=None):
        """Check the contextual menu in an editor at a specific location.
        indexes could be set to range(0,2) to only check part of the
        menu. when_done is done when the contextual menu has been computed
        (since this is done asynchronously)

        :param loc: EditorLocation at which to right-click.
        :param expected: the expected dump of the (possibly filtered) menu.
        """
        def on_contextual(windows):
            menu = dump_contextual(windows)
            if indexes:
                # Keep only the requested entries of the menu dump.
                menu = [menu[r] for r in indexes]
            gps_assert(expected, menu, msg)
            close_contextual(windows)
            if when_done:
                when_done()
        def wait_for_editor():
            # Snapshot toplevels BEFORE the click so the new menu window
            # can be identified by difference.
            windows = Gtk.Window.list_toplevels()
            GLib.idle_add(on_contextual, windows)
            click_in_text(loc, button=3)
        GLib.idle_add(wait_for_editor)  # Make sure editor is displayed
    ###########
    # Canvas ##
    ###########
    def click_in_canvas(canvas, xoffset=0, yoffset=0, button=1,
                        events=single_click_events):
        """Simulate a mouse click inside canvas, offset from its origin
        in screen coordinates."""
        origin = canvas.get_window().get_origin()
        click_in_widget(
            canvas.get_window(), x=origin[0] + xoffset, y=origin[1] +
            yoffset, button=button, events=events)
#####################
# Dialogs ##
#####################
# Dialogs are open asynchronously, so if you want to detect whether a
# dialog has been opened, you must use code similar to:
# def on_gps_started (h):
# before_dialog (on_dialog, args)
# ... action that opens the dialog
#
# def on_dialog (dialog, args):
# ...
    def get_new_toplevels(old_toplevels):
        """
        Compare the current list of toplevel windows with the one stored
        in old_toplevels, and returns list of new windows.
        This can be used to get a handle on a window that was just opened by
        an action:
            old = Gtk.Window.list_toplevels()
            ...
            dialog = get_new_toplevels(old)

        Only mapped (actually displayed) windows are reported.
        """
        return [w for w in Gtk.Window.list_toplevels()
                if w not in old_toplevels and w.get_mapped()]
def before_dialog(callback, args=[], kwargs=dict()):
"""Return the current context, needed to compute later on what dialogs
were opened in between.
Callback's first argument is the first window opened
Use wait_for_dialog() instead
"""
def on_dialog(windows):
new = [w for w in Gtk.Window.list_toplevels() if w not in
windows and w.get_mapped()]
if new:
params = [new[0]]
else:
params = [None]
callback(*params + args, **kwargs)
windows = Gtk.Window.list_toplevels()
GLib.idle_add(on_dialog, windows)
    def wait_for_dialog(func):
        """
        Execute func() and wait until a new dialog appears on screen.
        Returns that dialog.
            dialog = yield wait_for_dialog(button.click)

        Workflow generator: polls the toplevel list on every idle until a
        new mapped window shows up, then yields it.
        """
        windows = Gtk.Window.list_toplevels()
        func()
        while True:
            yield wait_idle()
            new = [w for w in Gtk.Window.list_toplevels() if w not in
                   windows and w.get_mapped()]
            if new:
                yield new[0]
                break
    #####################
    # Contextual menus ##
    #####################
    def get_contextual(old_windows, is_fatal=True):
        """Return the contextual menu that was displayed. old_windows is the
        list of windows before you opened the contextual menu.

        :param is_fatal: when True, a missing menu aborts the test;
            otherwise None is returned.
        """
        c = [w for w in Gtk.Window.list_toplevels() if w not in
             old_windows and w.get_mapped()]
        if not c:
            if is_fatal:
                gps_fatal_error('No contextual menu created')
            return None
        return c[0]
    def activate_contextual(old_windows, label, accel_prefix="<gps>/"):
        """Activate a contextual menu. Old_Windows should be the list of
        toplevel windows that existed before the contextual menu was
        displayed:
             windows = Gtk.Window.list_toplevels ()
             ...
             activate_contextual (windows, "FOO")
        This is a low-level function, consider using select_editor_contextual
        when dealing with editors.

        :return: True when the menu entry was found and activated.
        """
        contextual = get_contextual(old_windows)
        contextual = MenuTree(contextual, accel_prefix=accel_prefix)
        # Menu labels in the tree are fully qualified with the prefix.
        goal = '%s%s' % (accel_prefix, label)
        for (menu, menu_label, accel, level) in contextual:
            if menu_label == goal:
                menu.activate()
                return True
        gps_fatal_error("Couldn't find contextual menu %s" % label)
        return False
def dump_contextual(old_windows):
"""Dump the contextual menu (see dump_menu). old_windows is the
list of toplevel windows that existed before the contextual menu
is displayed
:param old_windows: a list of Gtk.Window, which is used
to find a new window and use it as the contextual menu
"""
try:
contextual = get_contextual(old_windows, is_fatal=False)
return dump_menu('', topwidget=contextual)
except:
return None
def close_contextual(old_windows):
"""Close the contextual menu opened since old_windows was computed"""
try:
contextual = get_contextual(old_windows, is_fatal=False)
contextual.destroy()
except:
pass
    def select_widget_contextual(widget, menuName, onselected, *args,
                                 **kwargs):
        """Display the contexual menu on any widget, activate entry
        menuName, then call onselected(*args, **kwargs)."""
        process_all_events()
        # Snapshot toplevels before the right-click so the menu window
        # can be found by difference.
        windows = Gtk.Window.list_toplevels()
        click_in_widget(widget.get_window(), button=3)
        def internal_onselected(windows):
            process_all_events()
            onselected(*args, **kwargs)
        GLib.idle_add(internal_onselected, windows)
        activate_contextual(windows, menuName)
    def select_editor_contextual(menuName, onselected=None, *args, **kwargs):
        """Select the selection of a contextual menu in the current editor.
        When the menu item has been selected, the menu is closed and
        onselected is called with the extra arguments passed to this
        function.
        """
        process_all_events()
        windows = Gtk.Window.list_toplevels()
        # Right-click at the current cursor position of the focused editor.
        click_in_text(GPS.EditorBuffer.get().current_view().cursor(), button=3)
        def internal_onselected(windows):
            close_contextual(windows)
            process_all_events()
            if onselected:
                onselected(*args, **kwargs)
        GLib.idle_add(internal_onselected, windows)
        activate_contextual(windows, menuName)
    def toggle_local_config(view, text, value=None):
        """
        Open the local config menu for the view, and selects the menu with
        the "text" label (either set it active, inactive, or just toggle,
        depending on value)

        :param view: any widget inside the MDI child whose local-config
            button should be clicked.
        :param value: None to toggle, True/False to force a state.
        """
        def onidle(windows):
            menu = get_contextual(windows)
            for m in WidgetTree(menu):
                if isinstance(m, Gtk.Menu):
                    for w in MenuTree(m):
                        # w is (widget, qualified_label, accel, level).
                        if w[1] == '<gps>/%s' % text:
                            if value is None:
                                w[0].emit("toggled")
                            elif value:
                                w[0].set_active(True)
                            else:
                                w[0].set_active(False)
                            process_all_events()
                            return
            GPS.Logger('TESTSUITE').log('Local config not found "%s"' % text)
        windows = Gtk.Window.list_toplevels()
        # Walk up to the enclosing MDI child, which hosts the
        # local-config button.
        p = view
        while p.get_parent() and p.__class__.__name__ != 'AdaMDIChild':
            p = p.get_parent()
        b = get_widget_by_name('local-config', [p])
        button = b.get_child()
        assert isinstance(button, Gtk.Button)
        # ??? Sending an event doesn't seem to work because there is a grab
        # pending. The error might be because we generate our events with a
        # 0 timestamp, which might be "older" than the grab timestamp.
        click_in_widget(button.get_window(), button=1,
                        events=[Gdk.EventType.BUTTON_PRESS])
        GLib.idle_add(onidle, windows)
    def select_locations_contextual(menuName, onselected, *args, **kwargs):
        """Select the selection of a contextual menu in the locations window.
        When the menu item has been selected, the menu is closed and
        onselected is called with the extra arguments passed to this
        function
        """
        def internal_onidle(windows):
            tree = pygps.get_widgets_by_type(
                Gtk.TreeView,
                GPS.MDI.get('Locations').pywidget())[0]
            model = tree.get_model()
            # The Locations tree may be in multi-selection mode; either
            # way we right-click on the (first) selected row.
            if tree.get_selection().get_mode() == Gtk.SelectionMode.MULTIPLE:
                m, selected = tree.get_selection().get_selected_rows()
                path = selected[0]
            else:
                path = model.get_path(tree.get_selection().get_selected()[1])
            process_all_events()
            click_in_tree(tree, path, button=3)
            def internal_onselected(windows):
                close_contextual(windows)
                process_all_events()
                onselected(*args, **kwargs)
            GLib.idle_add(internal_onselected, windows)
            activate_contextual(windows, menuName)
        process_all_events()
        windows = Gtk.Window.list_toplevels()
        GLib.idle_add(internal_onidle, windows)
    def select_coverage_contextual(menuName, onselected, *args, **kwargs):
        """Select the selection of a contextual menu in the Code Coverage
        window.
        When the menu item has been selected, the menu is closed and
        onselected is called with the extra arguments passed to this
        function
        """
        def internal_onidle(windows):
            tree = get_widget_by_name('Coverage')
            model = tree.get_model()
            path = model.get_path(tree.get_selection().get_selected()[1])
            process_all_events()
            click_in_tree(tree, path, button=3)
            def internal_onselected(windows):
                close_contextual(windows)
                process_all_events()
                onselected(*args, **kwargs)
            GLib.idle_add(internal_onselected, windows)
            activate_contextual(windows, menuName)
        process_all_events()
        windows = Gtk.Window.list_toplevels()
        GLib.idle_add(internal_onidle, windows)
    def dump_locations_tree():
        """Dump column 7 of the Locations view model, or record an error
        and exit when the view is not open."""
        mdi = GPS.MDI.get('Locations')
        if mdi is None:
            simple_error('Locations window is not opened')
            safe_exit()
        else:
            tree = pygps.get_widgets_by_type(Gtk.TreeView, mdi.pywidget())[0]
            return Tree(tree).dump_model(column=7)
    def load_messages_from_file(name, onload, *args, **kwargs):
        """Loads contents of the Messages window and parse it to fill Locations
        view.

        onload(*args, **kwargs) is invoked once all resulting tasks have
        completed.
        """
        def internal_onloaded():
            onload(*args, **kwargs)
        def internal_onfileopendialog(dialog):
            # Fill the file-open dialog's entry and confirm.
            entry = pygps.get_widgets_by_type(Gtk.Entry, dialog)[0]
            entry.set_text(name)
            get_stock_button(dialog, Gtk.STOCK_OK).clicked()
            wait_for_tasks(internal_onloaded)
        before_dialog(internal_onfileopendialog)
        GPS.execute_action('Messages load from file')
    class Test_Queue:
        """A list of tests to perform. One test is executed when the previous
        has finished (after setting an explicit flag). The goal is that
        tests that need idle_callbacks can still be performed sequentially.
        Example of use:
            q = Test_Queue ()
            def my_test (p1, p2):
               ...
               q.test_finished ()
            q.add (my_test, param1, param2)
            q.add (my_test, param3, param4)
        The queue will automatically start executing in the "gps_started"
        hook callback, unless you pass auto_execute to False in the
        constructor. This means you do not have to connect to that hook
        yourself
        """
        def __init__(self, auto_exec=True):
            """If auto_exec is True, execute the loop automatically when the
            gps_started callback is called. Otherwise no automatic execution,
            you'll need to call execute() explicitly"""
            # Pending tests: list of (callback, args, kwargs) triples.
            self.list = []
            if auto_exec:
                GPS.Hook('gps_started').add(self.execute)
        def add(self, callback, *args, **kwargs):
            """Add a new test to be executed when the previous ones have
            finished.
            """
            self.list.append((callback, args, kwargs))
        def test_finished(self):
            """Should be called by tests after they have finished executing"""
            # We'll start the next test in an idle, so that the current one is
            # properly terminated, and we do not execute in its context
            GLib.idle_add(self._do_test)
        def _do_test(self):
            """Execute the next test in the list"""
            process_all_events()
            if self.list:
                (callback, args, kwargs) = self.list.pop(0)
                callback(*args, **kwargs)
            else:
                # Queue drained: interrupt leftover tasks and exit GPS.
                safe_exit(force=1)
        def execute(self, *args):
            """Execute all tests on the list, and then exit GPS.
            This function returns when the first test has finished executing
            or is waiting in an idle loop"""
            # We accept any number of args because this can either be called
            # explicitely by the user, or as part of the gps_started hook
            self._do_test()
except:
# No graphical mode
    def enqueue(fun, timeout=200):
        """ Register fun to be executed once, after timeout milliseconds.
        This function is useful for programming tests that require GPS to
        process events in a sequence.

        Non-graphical fallback: uses a GPS.Timeout (which DOES honor the
        timeout argument) since GLib is unavailable here."""
        def on_timeout(timeout):
            # One-shot: remove the timeout before running the callback.
            timeout.remove()
            fun()
        GPS.Timeout(timeout, on_timeout)
################################
# Below are just examples for now
# Open a menu from PyGtk (instead of using a GPS action):
# GPS.Menu.get ("/File/New").pywidget().activate()
# Getting the second column of the first grand-child of the root of a
# TreeModel
# print model["0:0:0"][1]
# or print model[(0,0,0)][1]
# Last line of a treeModel:
# model[-1].path
# Print the second column for all top-level nodes of a TreeModel
# for row in model:
# print row[1]
# Same as above, get result as list
# values = [row[1] for row in model]
# Delete a row from a tree model
# del model[0]
# Getting the tree view widget:
# mdi_widget=GPS.MDI.get("Project").pywidget().get_child() \
# .get_children()[1]
# scrolled = mdi_widget.get_children()[0].get_children()[0]
# tree = scrolled.get_child()
# model = tree.get_model()
# This can also be done by getting the widget by its name:
# box = get_widget_by_name ("Project")
# Make visible for tests that only to "from testsuite import *"
from driver import *
from dialogs import *
from asserts import *
from tree import *
from menu import *
from editor import *
from vcs import *
|
992,650 | 1b02eeccc248dad062284e53c641d494af09bdd7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gw_backend_redis.example
~~~~~~~~~~~~~~~~~~~~~~~~
Stati example to use Redis pub/sub transport
:copyright: (c) 2014 by GottWall team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
:github: http://github.com/GottWall/gottwall-backend-redis
"""
# GottWall storage backend (in-memory; not persistent).
STORAGE = 'gottwall.storages.MemoryStorage'
# Redis pub/sub transport backend configuration.
BACKENDS = {
    'gw_backend_redis.backend.RedisBackend': {
        'HOST': "127.0.0.1",
        'PORT': 6379,
        'PASSWORD': None,
        'DB': 2,
        "CHANNEL": "gottwall"
    }}
TEMPLATE_DEBUG = True
# NOTE(review): these Redis settings duplicate the values embedded in
# BACKENDS above -- confirm which set the storage layer actually reads.
STORAGE_SETTINGS = dict(
    HOST = 'localhost',
    PORT = 6379,
    PASSWORD = None,
    DB = 2
)
REDIS = {"CHANNEL": "gottwall"}
# Accounts allowed to access the dashboard.
USERS = ["alexandr.s.rus@gmail.com"]
SECRET_KEY = "dwefwefwefwecwef"
# http://pulic_key:secret_key@host.com
PROJECTS = {"test_project": "my_public_key",
            "another_project": "public_key2"}
cookie_secret="fkewrlhfwhrfweiurbweuybfrweoubfrowebfioubweoiufbwbeofbowebfbwup2XdTP1o/Vo="
# NOTE(review): TEMPLATE_DEBUG is assigned twice in this file (same value);
# the duplicate is harmless but one occurrence could be dropped.
TEMPLATE_DEBUG = True
DATABASE = {
    "ENGINE": "postgresql+psycopg2",
    "HOST": "localhost",
    "PORT": 5432,
    "USER": "postgres",
    "PASSWORD": "none",
    "NAME": "gottwall"
}
PREFIX = ""
|
992,651 | 540a6b98e4b3617fab0362f360e086fd49295a34 | import ROOT
from ROOT import TCanvas, TH1F, TLegend
from NNDefs import build_and_train_class_nn
from LayersDefs import get_signal_and_background_frames, predict_nn_on_all_frame, calculate_derived_et_columns, roc_curve, \
background_eff_at_target_signal_eff
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
#random.seed(7)
#np.random.seed(7)
# Load signal/background event frames (project-specific helper).
signal_frame, background_frame = get_signal_and_background_frames()
full_background_frame = background_frame.sample(n=len(background_frame))
# Sample from background frame so there are the same number of signal and background events
background_frame = background_frame.sample(n=len(signal_frame))
# Create new columns combining base columns
calculate_derived_et_columns(signal_frame, background_frame)
calculate_derived_et_columns(signal_frame, background_frame, layer_weights=[1, 1], column_names=['L0Et', 'L1Et'],
                             output_column_name='L0+L1Et')
calculate_derived_et_columns(signal_frame, background_frame, layer_weights=[1, 1], column_names=['L2Et', 'L3Et'],
                             output_column_name='L2+L3Et')
# Calculate 3 Et with minimum weights
calculate_derived_et_columns(signal_frame, background_frame, layer_weights=[1, 1.3, 8.4], column_names=['L0+L1Et', 'L2+L3Et', 'HadEt'],
                             output_column_name='3EtWeighted')
# Calculate 5 Et with minimum weights
# NOTE(review): '5EtWeighted' uses the same three input columns as
# '3EtWeighted', only with different weights -- confirm the name and the
# intended column set.
calculate_derived_et_columns(signal_frame, background_frame, layer_weights=[1, .3, 3.6], column_names=['L0+L1Et', 'L2+L3Et', 'HadEt'],
                             output_column_name='5EtWeighted')
# Combine signal and background
all_frame = pd.concat([signal_frame, background_frame])
#predicted_signal_frame, predicted_background_frame, _ = predict_nn_on_all_frame(all_frame, ['L0Et', 'L1Et', 'L2Et', 'L3Et', 'HadEt'], ['IsSignal'])
twohidden_predicted_signal_frame, twohidden_predicted_background_frame, twohidden_model = predict_nn_on_all_frame(all_frame, ['L0Et', 'L1Et', 'L2Et', 'L3Et', 'HadEt'], ['IsSignal'], epochs=30, hidden_layers=2)
# Create ROC curves by cutting on total Et and also cutting on trained network classifier value
gr0 = roc_curve(background_frame[['TotalEt']], signal_frame[['TotalEt']], 300)
gr1 = roc_curve(twohidden_predicted_background_frame, twohidden_predicted_signal_frame, 300)
gr2 = roc_curve(background_frame[['3EtWeighted']], signal_frame[['3EtWeighted']], 1000)
gr3 = roc_curve(background_frame[['5EtWeighted']], signal_frame[['5EtWeighted']], 1000)
# Draw all four curves on one canvas and save as PDF.
c1 = TCanvas("c1", "Graph Draw Options", 200, 10, 600, 400)
gr0.Draw()
gr0.SetTitle('Training Scenario ROC Curves')
gr0.GetXaxis().SetTitle('Background Efficiency')
gr0.GetYaxis().SetTitle('Signal Efficiency')
gr0.SetMaximum(1)
gr0.SetMinimum(0.8)
gr1.Draw('same')
gr1.SetLineColor(4)
gr2.Draw('same')
gr2.SetLineColor(8)
gr3.Draw('same')
gr3.SetLineColor(2)
leg = TLegend(0.45, 0.1, 0.9, 0.3)
leg.SetHeader('Layer Configuration')
leg.AddEntry(gr0, 'No training')
leg.AddEntry(gr1, 'Network Trained - Two Hidden Layers')
leg.AddEntry(gr2, 'Manually Trained to 90% - L0+L1, L2+L3, Had Layers')
leg.AddEntry(gr3, 'Manaully Trained to 95% - L0+L1, L2+L3, Had Layers')
leg.SetTextSize(0.02)
leg.Draw()
c1.Print('LayerFrame/SelectROCCurves.pdf')
992,652 | cb29e03adffebdaa11bd8c4ecaf1bdecd65b98c9 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import address.models
class Migration(migrations.Migration):
    """Second migration for the ossi app: relaxes/adjusts field definitions
    on the Variety model (nullability, max lengths, FK targets).

    Auto-generated by Django; do not hand-edit field definitions.
    """
    dependencies = [
        ('ossi', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='variety',
            name='active',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeder',
            field=models.ForeignKey(to='ossi.Breeder', null=True),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeder_address',
            field=address.models.AddressField(to='address.Address', null=True),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeder_affiliation',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeder_email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeder_name',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeding_crosses',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeding_differ',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeding_generations',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeding_goals',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='breeding_processes',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='crop_common_name',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='variety',
            name='crop_latin_name',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='variety',
            name='description',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='image',
            field=models.FileField(null=True, upload_to=b''),
        ),
        migrations.AlterField(
            model_name='variety',
            name='name',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_characteristics',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_parents',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_population',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_selection_stabilization',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_single_parent',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='origin_two_or_more',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='sold_commercially',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='stability',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_IP',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_IP_details',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_permission',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_permission_details',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_signature',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_sole_breeder',
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='submission_sole_breeder_details',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='variety',
            name='where_sold_commercially',
            field=models.TextField(),
        ),
    ]
|
992,653 | 4436bf10636d4d86cf21a20a72639f0524855acf | # -*- coding: utf-8 -*-
import pandas as pd
# from shapely.geometry import Point, shape
from flask import Flask
from flask import render_template
from flask import request
# from flask import Blueprint
import json
app = Flask(__name__)
data_path = './data/'
@app.route("/")
def index():
    """Serve the main visualization page."""
    return render_template("index.html")
# @app.route("/article",view_func=Main.as_view('page'))
# def index():
#     return render_template("index_article_topic.html")
@app.route('/cool_form', methods=['GET'])
def cool_form():
    """Serve the article-topic visualization page."""
    return render_template("index_article_topic.html")
@app.route("/data/data_tree.json", methods=['GET', 'POST'])
def get_articles():
    """Serve the full article tree as a JSON string."""
    path = 'data/data_tree.json'
    # `json.load(open(path))` leaks the file handle; a context manager
    # closes it deterministically.
    with open(path) as f:
        data = json.load(f)
    return json.dumps(data)
@app.route("/data/data_tree/<string:aid>", methods=['GET', 'POST'])
def get_articles_byID(aid):
    """Serve the subtree for article id `aid` as a JSON string.

    Raises KeyError (-> HTTP 500) when the id is unknown.
    """
    path = 'data/data_tree.json'
    # Close the file deterministically instead of leaking the handle.
    with open(path) as f:
        data = json.load(f)
    data = data[aid]
    return json.dumps(data)
@app.route("/data/gnm_articles/<string:aid>", methods=['GET', 'POST'])
def get_gnm_articles_byID(aid):
path = 'data/gnm_articles.csv'
df = pd.read_csv(path)
df = df.loc[df['article_id']== int(aid)]
return df.to_json(orient='records')
@app.route("/data/clean_gnm_comments_compact/<string:aid>", methods=['GET', 'POST'])
def get_gnm_articles_compact_byID(aid):
    """Return the compact comment rows for the article with id *aid*."""
    frame = pd.read_csv('data/clean_gnm_comments_compact.csv')
    matching = frame[frame['article_id'] == int(aid)]
    return matching.to_json(orient='records')
@app.route("/data_article_topic")
def get_data():
    """Return the topic-visualization table as JSON records."""
    # Note: 'visulization' is the actual file name on disk.
    csv_path = data_path + 'topic_visulization_FINAL.csv'
    return pd.read_csv(csv_path).to_json(orient='records')
@app.route('/comment_form', methods=['GET'])
def comment_form():
    """Render the surrounding-topics exploration page."""
    template_name = "index_surounding_topics.html"
    return render_template(template_name)
@app.route("/data/data.json", methods=['GET', 'POST'])
def get_surrounding_topics_count():
    """Return the surrounding-topic counts file as a JSON string.

    Fix: close the file handle the original leaked via
    ``json.load(open(path))``.
    """
    path = 'data/data.json'
    with open(path) as f:
        data = json.load(f)
    return json.dumps(data)
@app.route("/data/topic_<int:topic_id>.json", methods=['GET', 'POST'])
def get_topic_data(topic_id):
    """Serve ``data/topic_<topic_id>.json`` for topic ids 0-19.

    Replaces twenty copy-pasted handlers (get_data0 .. get_data19) with a
    single parameterized route; the URLs served are unchanged.  Also fixes
    the file-handle leak from ``json.load(open(path))``.
    """
    from flask import abort  # local import keeps this edit self-contained
    if not 0 <= topic_id <= 19:
        # Ids outside the range previously had no route at all -> 404.
        abort(404)
    path = 'data/topic_{}.json'.format(topic_id)
    with open(path) as f:
        data = json.load(f)
    return json.dumps(data)
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger; combined with
    # host='0.0.0.0' this is unsafe outside local development -- confirm
    # before deploying.
    app.run(host='0.0.0.0',port=5001,debug=True)
|
992,654 | 06a760304941b35f7cd50c00035db40ace20fac3 | # -*- coding: utf-8 -*-
import tensorflow as tf
# Demo of tf.concat vs tf.stack on two (2,3) tensors (TF1 graph-mode API).
with tf.Graph().as_default():
    a = tf.constant([[1,2,3], [3,4,5]])     # shape (2,3)
    b = tf.constant([[7,8,9], [10,11,12]])  # shape (2,3)
    # concat along axis 0 keeps the rank and appends rows: shape (4,3)
    a_concat_b = tf.concat([a, b], axis=0)
    print("a concat b shape: %s" % a_concat_b.shape)
    # stack inserts a NEW axis at position 2: shape (2,3,2).
    # (The original comment claimed (2,2,3), which is wrong for axis=2.)
    a_stack_b = tf.stack([a, b], axis=2)
    print("a stack b shape: %s" % a_stack_b.shape)
    # Fix: the original created a Session and never closed it; the context
    # manager releases its resources.  Expected output pairs elements of
    # a and b along the last axis:
    #### [[[ 1  7]
    ####   [ 2  8]
    ####   [ 3  9]]
    ####  [[ 3 10]
    ####   [ 4 11]
    ####   [ 5 12]]]
    with tf.Session() as sess:
        print(sess.run(a_stack_b))
|
992,655 | bb3957192abd77c25e77bb73de73ca3582fdd76c | from webapp.core import db
from flask.ext.sqlalchemy import SQLAlchemy
class User(db.Model):
    """A user account identified by their Google+ profile id.

    Declarative SQLAlchemy model mapped to the ``users`` table.
    """
    __tablename__ = "users"
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the user.
    fullname = db.Column(db.String(255))
    # External Google+ identifier; unique per account.
    google_plus_id = db.Column(db.String(255), unique=True)
    def __init__(self, fullname, google_plus_id):
        self.fullname = fullname
        self.google_plus_id = google_plus_id
    @property
    def serialize(self):
        """Return a JSON-serializable dict of this user (camelCase keys)."""
        return {
            "id": self.id,
            "fullName": self.fullname,
            "googlePlusId": self.google_plus_id
        }
|
992,656 | a0f4de310f16ee476efccffb3eb9cc64ef046ea0 | import logging
import os
import pkg_resources
import pytest
import yaml
from ambianic import __version__, config, load_config
from ambianic.webapp.fastapi_app import app, set_data_dir
from fastapi.testclient import TestClient
log = logging.getLogger(__name__)
def reset_config():
    # Reload the ambianic config singleton so each session starts clean.
    config.reload()
# session scoped test setup
# ref: https://docs.pytest.org/en/6.2.x/fixture.html#autouse-fixtures-fixtures-you-don-t-have-to-request
@pytest.fixture(autouse=True, scope="session")
def setup_session(tmp_path_factory):
    """Session-wide setup: reset config and point the app at a temp data dir."""
    reset_config()
    tmp_dir = tmp_path_factory.mktemp("data")
    # set_data_dir expects a plain string rather than a Path object
    set_data_dir(data_dir=tmp_dir.as_posix())
@pytest.fixture
def client():
    """Provide a FastAPI test client bound to the ambianic app."""
    return TestClient(app)
def test_hello(client):
    """The root endpoint greets with the product name."""
    response = client.get("/")
    assert "Ambianic Edge!" in response.json()
def test_health_check(client):
    """The health-check endpoint reports that the service is running."""
    response = client.get("/healthcheck")
    assert "is running" in response.json()
def test_status(client):
    """/api/status reports OK together with the package version."""
    payload = client.get("/api/status").json()
    assert payload["status"] == "OK"
    assert payload["version"] == __version__
def test_get_timeline(client):
    """/api/timeline responds with a success status."""
    payload = client.get("/api/timeline").json()
    assert payload["status"] == "success"
def test_initialize_premium_notification(client):
    """GET /api/auth/premium-notification stores the user id and endpoint.

    Verifies that both values are written into the packaged premium.yaml
    credentials file and that the handler confirms via a JSON payload.
    """
    testId = "auth0|123456789abed"
    endpoint = "https://localhost:5050"
    request = client.get(
        "/api/auth/premium-notification?userId={}&notification_endpoint={}".format(
            testId, endpoint
        )
    )
    response = request.json()
    assert isinstance(response, dict)
    # The handler persists the credentials inside the installed
    # ambianic.webapp package.
    configDir = pkg_resources.resource_filename("ambianic.webapp", "premium.yaml")
    with open(configDir) as file:
        file_data = yaml.safe_load(file)
        config_provider = file_data["credentials"]["USER_AUTH0_ID"]
        email_endpoint = file_data["credentials"]["NOTIFICATION_ENDPOINT"]
    assert isinstance(config_provider, str)
    assert config_provider == testId
    assert isinstance(email_endpoint, str)
    assert email_endpoint == endpoint
    assert os.path.isfile(configDir)
    assert isinstance(response["status"], str)
    assert isinstance(response["message"], str)
    assert response["status"] == "OK"
    assert response["message"] == "AUTH0_ID SAVED"
def test_get_config(client):
    """Loading a known config file is reflected by /api/config."""
    here = os.path.dirname(os.path.abspath(__file__))
    load_config(os.path.join(here, "test-config.yaml"), True)
    data = client.get("/api/config").json()
    # dynaconf converts all root level json keys to uppercase
    log.debug(f"config: {data}")
    assert data["PIPELINES"] is not None
    assert data["AI_MODELS"] is not None
    assert data["SOURCES"] is not None
def test_save_source(client):
    """PUT /api/config/source creates a source and persists later edits."""
    source = {"id": "test1", "uri": "test", "type": "video", "live": True}
    saved = client.put("/api/config/source", json=source).json()
    log.debug(f"JSON response: {saved}")
    assert saved
    assert saved["id"] == "test1"
    assert saved["uri"] == "test"
    assert saved["type"] == "video"
    assert saved["live"] is True
    # Edits to the same id must be stored correctly.
    source.update(uri="test1.2", type="image", live=False)
    saved = client.put("/api/config/source", json=source).json()
    assert saved
    assert saved["id"] == "test1"
    assert saved["uri"] == "test1.2"
    assert saved["type"] == "image"
    assert saved["live"] is False
def test_delete_source(client):
    """DELETE removes an existing source; a second delete yields 404."""
    source = {"id": "test1", "uri": "test", "type": "video", "live": True}
    assert client.put("/api/config/source", json=source).json()["id"] == "test1"
    assert client.delete("/api/config/source/test1").json()["status"] == "success"
    # Deleting the same source again must fail.
    second = client.delete("/api/config/source/test1")
    assert second.status_code == 404
    assert second.json() == {"detail": "source id not found"}
def test_ping(client):
    """/api/ping answers pong."""
    assert client.get("/api/ping").json() == "pong"
|
992,657 | c44a4a7c9015c6e5c543f01cfc06944932fbe3d6 | def main():
S = list(input())
acgt = ['A','C', 'G', 'T']
ans = 0
for i in range(0,len(S)):
if S[i] not in acgt:
continue
l = 1
for j in range(i+1, len(S)):
if S[j] not in acgt:
break
l += 1
ans = max(ans,l)
print(ans)
if __name__ == '__main__':
    # Script entry point (competitive-programming style).
    main()
|
992,658 | 51abea03d54dc5df9777e2da576e2f45e82a0c7e | import StringIO
from lxml import isoschematron
from lxml import etree
def main():
    """Validate two sample XML files against an ISO Schematron schema.

    Fix: the original opened 'rijksDemoPass.xml' and 'rijksDemoFail.xml'
    and never closed the file objects; ``with`` blocks now bound their
    lifetime.  Printed output is unchanged.
    """
    # Example adapted from http://lxml.de/validation.html#id2
    f = 'rijksSchema.xml'
    # Parse and compile the schema once.
    sct_doc = etree.parse(f)
    schematron = isoschematron.Schematron(sct_doc, store_report = True)
    # Parse the documents under test, closing the handles deterministically.
    with open('rijksDemoPass.xml') as passes:
        docPass = etree.parse(passes)
    with open('rijksDemoFail.xml') as fails:
        docFail = etree.parse(fails)
    # Validate against schema
    validationResult = schematron.validate(docPass)
    validationResultFail = schematron.validate(docFail)
    # Validation report of the most recent validate() call.
    report = schematron.validation_report
    print("Did the 'Pass' file pass?: " + str(validationResult))
    print("Did the 'Fail' file pass?: " + str(validationResultFail))
    # print(type(report))
    # print(report)

main()
|
992,659 | 688008eac3fac96c327b61ad07c8c4623263d018 | from django.test import TestCase
from django.apps import apps
from django.contrib.auth import get_user_model
from .models import Entry
from . import views
from django.urls import resolve
from django.http import HttpRequest
# Create your tests here.
class EntryModelTest(TestCase):
    """Unit tests for the Entry model."""

    def test_string_representation(self):
        """str(entry) equals the entry's title."""
        sample = Entry(title="My entry title")
        self.assertEqual(str(sample), sample.title)

    def test_verbose_name_plural(self):
        """Entry pluralizes as 'entries'."""
        self.assertEqual(str(Entry._meta.verbose_name_plural), "entries")
class ProjectTests(TestCase):
    """Smoke tests for project-level wiring."""

    def test_homepage(self):
        """The homepage should be reachable."""
        self.assertEqual(self.client.get('/').status_code, 200)
class HomePageTests(TestCase):
    """Test whether our blog entries show up on the homepage"""
    def setUp(self):
        # An author is required because entries reference a user.
        self.user = get_user_model().objects.create(username='some_user')
    def test_one_entry(self):
        Entry.objects.create(title='1-title', body='1-body', author=self.user)
        response = self.client.get('/')
        # NOTE(review): the content assertions are disabled; re-enable once
        # the homepage template actually lists entries.
        #self.assertContains(response, '1-title')
        #self.assertContains(response, '1-body')
    def test_two_entries(self):
        Entry.objects.create(title='1-title', body='1-body', author=self.user)
        Entry.objects.create(title='2-title', body='2-body', author=self.user)
        response = self.client.get('/')
        # NOTE(review): same as above -- disabled assertions kept for when
        # the template renders entry titles/bodies.
        #self.assertContains(response, '1-title')
        #self.assertContains(response, '1-body')
        #self.assertContains(response, '2-title')
class HomePageTest(TestCase):
    """Checks that '/' resolves to the expected view and renders valid HTML."""

    def test_root_url_resolves_to_home_page_view(self):
        found = resolve('/')
        # Fix: compare against the view function itself.  The original
        # compared against views.detail() -- the *result* of calling the
        # view with no arguments -- which raises before the assertion runs.
        self.assertEqual(found.func, views.detail)

    def test_home_page_returns_correct_html(self):
        request = HttpRequest()
        # NOTE(review): `home_page` is neither defined nor imported in this
        # file, so this line raises NameError as written -- confirm the
        # intended view (likely one in `views`) and reference it explicitly.
        response = home_page(request)
        self.assertTrue(response.content.startswith(b'<html>'))
        self.assertIn(b'<title>To-Do lists</title>', response.content)
        self.assertTrue(response.content.endswith(b'</html>'))
992,660 | d4babb38cf8c7ac53d47fd267488df59dbef7ce7 | import geojson
from django.db.models import Count
from actstream.models import Action
from django_filters import FilterSet, DateFilter, MultipleChoiceFilter
from rest_framework import filters, generics, permissions, viewsets
from rest_framework.response import Response
from .serializers import ActionSerializer
class ActionFilter(FilterSet):
    """Filters for Action records: a timestamp range and a closed verb set."""
    # NOTE(review): `name=` / `lookup_type=` are pre-1.0 django-filter
    # keyword names (later renamed to field_name= / lookup_expr=) -- keep
    # in sync with the pinned django-filter version.
    min_timestamp = DateFilter(name='timestamp', lookup_type='gte')
    max_timestamp = DateFilter(name='timestamp', lookup_type='lte')
    # Restrict ?verb= to the activity verbs listed here.
    verb = MultipleChoiceFilter(choices=(
        ('added garden', 'added garden'),
        ('added garden group', 'added garden group'),
        ('downloaded garden group spreadsheet', 'downloaded garden group spreadsheet'),
        ('downloaded garden report', 'downloaded garden report'),
        ('downloaded garden spreadsheet', 'downloaded garden spreadsheet'),
        ('joined Farming Concrete', 'joined Farming Concrete'),
        ('recorded', 'recorded'),
    ))
    class Meta:
        model = Action
        fields = ['timestamp', 'verb',]
class ActionsViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only, admin-only API over the activity stream, newest first."""
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = ActionFilter
    permission_classes = (permissions.IsAdminUser,)
    queryset = Action.objects.all().order_by('-timestamp')
    serializer_class = ActionSerializer
class ActionsSummaryView(generics.ListAPIView):
    """Admin-only summary endpoint: action counts grouped by month/year."""
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = ActionFilter
    permission_classes = (permissions.IsAdminUser,)
    queryset = Action.objects.all()
    def get(self, request, *args, **kwargs):
        """Return ``{'counts': [...]}`` with one row per (year, month)."""
        queryset = self.filter_queryset(self.get_queryset())
        # Get count of actions by month.
        # NOTE(review): EXTRACT(... FROM timestamp) via .extra() assumes a
        # backend that supports EXTRACT (e.g. PostgreSQL) -- confirm
        # against the deployed database.
        counts = queryset.extra(select={
            'month': 'EXTRACT(month FROM timestamp)',
            'year': 'EXTRACT(year from timestamp)',
        }) \
            .values('month', 'year') \
            .order_by('year', 'month') \
            .annotate(count=Count('timestamp'))
        return Response({
            'counts': counts,
        })
class ActionsGeojsonView(generics.ListAPIView):
    """Admin-only endpoint returning actions that have a place as GeoJSON."""
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = ActionFilter
    permission_classes = (permissions.IsAdminUser,)
    queryset = Action.objects.all()
    # NOTE(review): this class-level mutable dict is shared across
    # instances and is never read or written in this file -- confirm it is
    # still needed.
    coordinate_cache = {}
    def get(self, request, *args, **kwargs):
        """Return a GeoJSON FeatureCollection of the filtered actions."""
        queryset = self.filter_queryset(self.get_queryset())
        features = self.get_features(queryset)
        return Response(geojson.FeatureCollection(features))
    def get_coordinates(self, action):
        # [x, y] -- presumably [longitude, latitude]; confirm against the
        # place field's spatial reference.
        return [action.place.x, action.place.y]
    def get_features(self, queryset):
        """Yield one Point feature per action whose place is set."""
        for action in queryset.filter(place__isnull=False):
            yield geojson.Feature(
                id=action.pk,
                geometry=geojson.Point(coordinates=self.get_coordinates(action))
            )
|
992,661 | 7c7b5ae8ebcba6660e059dd666f9be797e8efe23 | """
RuuviTagReactive and Reactive Extensions Subject examples
"""
from reactivex import operators as ops
from ruuvitag_sensor.ruuvi_rx import RuuviTagReactive
# Known tag MAC -> human-readable location.
tags = {"F4:A5:74:89:16:57": "sauna", "CC:2C:6A:1E:59:3D": "bedroom", "BB:2C:6A:1E:59:3D": "livingroom"}
interval_in_s = 10.0

# Listen only to the configured MACs.  Each notification pushed through the
# subject is a (mac, data) pair: x[0] is the MAC, x[1] a dict of readings.
ruuvi_rx = RuuviTagReactive(list(tags.keys()))

# Print all notifications
ruuvi_rx.get_subject().subscribe(print)

# Get updates only for F4:A5:74:89:16:57
ruuvi_rx.get_subject().pipe(ops.filter(lambda x: x[0] == "F4:A5:74:89:16:57")).subscribe(lambda x: print(x[1]))

# Print only the last update every 10 seconds for F4:A5:74:89:16:57
ruuvi_rx.get_subject().pipe(ops.filter(lambda x: x[0] == "F4:A5:74:89:16:57"), ops.sample(interval_in_s)).subscribe(
    lambda data: print(data)
)  # pylint: disable=unnecessary-lambda

# Print only the last update every 10 seconds for every found sensor
ruuvi_rx.get_subject().pipe(ops.group_by(lambda x: x[0])).subscribe(
    lambda x: x.pipe(ops.sample(interval_in_s)).subscribe(print)
)

# Print all updates from the last 10 seconds for F4:A5:74:89:16:57
ruuvi_rx.get_subject().pipe(
    ops.filter(lambda x: x[0] == "F4:A5:74:89:16:57"), ops.buffer_with_time(interval_in_s)
).subscribe(
    lambda data: print(data)
)  # pylint: disable=unnecessary-lambda

# Execute subscribe only once for F4:A5:74:89:16:57
# when temperature goes over 80 degrees
ruuvi_rx.get_subject().pipe(
    ops.filter(lambda x: x[0] == "F4:A5:74:89:16:57"), ops.filter(lambda x: x[1]["temperature"] > 80), ops.take(1)
).subscribe(lambda x: print(f'Sauna is ready! Temperature: {x[1]["temperature"]}'))

# Execute every time the pressure value changes for F4:A5:74:89:16:57
ruuvi_rx.get_subject().pipe(
    ops.filter(lambda x: x[0] == "F4:A5:74:89:16:57"), ops.distinct_until_changed(lambda x: x[1]["pressure"])
).subscribe(lambda x: print(f'Pressure changed: {x[1]["pressure"]}'))
|
992,662 | bfb2ab068231396fa8d118fb3e328fc679c5a6a4 | if not os.path.exists('w51_progressive_test_small.ms'):
os.system('cp -r w51_test_small.ms w51_progressive_test_small.ms')
# split(vis='w51_spw3_continuum_flagged.ms',
# outputvis='w51_progressive_test_small.ms',
# field='31,32,33,39,40,24,25',
# spw='',
# datacolumn='data',
# )
assert os.path.exists('w51_progressive_test_small.ms')
clearcal(vis='w51_progressive_test_small.ms')
os.system('rm -rf progressive_test_mfs_dirty.*')
flagmanager(vis='w51_progressive_test_small.ms', versionname='flagdata_1', mode='restore')
clean(vis='w51_progressive_test_small.ms', imagename="progressive_test_mfs_dirty", field="", spw='',
mode='mfs', outframe='LSRK', interpolation='linear', imagermode='mosaic',
interactive=False, niter=0, imsize=[512,512],
cell='0.06arcsec', phasecenter='J2000 19h23m43.905 +14d30m28.08',
weighting='briggs', usescratch=True, pbcor=False, robust=-2.0)
exportfits('progressive_test_mfs_dirty.image', 'progressive_test_mfs_dirty.image.fits', dropdeg=True, overwrite=True)
# Round 1: shallow clean, derive phase-only gains, apply, and split.
os.system('rm -rf progressive_test_mfs.*')
clean(vis='w51_progressive_test_small.ms', imagename="progressive_test_mfs", field="", spw='',
      mode='mfs', outframe='LSRK', interpolation='linear', imagermode='mosaic',
      interactive=False, niter=1000, threshold='50.0mJy', imsize=[512,512],
      cell='0.06arcsec', phasecenter='J2000 19h23m43.905 +14d30m28.08',
      weighting='briggs', usescratch=True, pbcor=False, robust=-2.0)
exportfits('progressive_test_mfs.image', 'progressive_test_mfs.image.fits', dropdeg=True, overwrite=True)
# Phase-only (calmode='p') gain solutions on 30-second intervals.
gaincal(vis='w51_progressive_test_small.ms', caltable="phase.cal", field="", solint="30s",
        calmode="p", refant="", gaintype="G")
#plotcal(caltable="phase.cal", xaxis="time", yaxis="phase", subplot=331,
#        iteration="antenna", plotrange=[0,0,-30,30], markersize=5,
#        fontsize=10.0,)
# Save/restore brackets applycal so the flag state is left untouched;
# applymode='calonly' applies gains without flagging.
flagmanager(vis='w51_progressive_test_small.ms', mode='save', versionname='backup')
applycal(vis="w51_progressive_test_small.ms", field="", gaintable=["phase.cal"],
         interp="linear", uvrange='400~2000', applymode='calonly')
flagmanager(vis='w51_progressive_test_small.ms', mode='restore', versionname='backup')
# Materialize the corrected data as the input MS for round 2.
os.system('rm -rf w51_progressive_test_small_selfcal.ms')
os.system('rm -rf w51_progressive_test_small_selfcal.ms.flagversions')
split(vis="w51_progressive_test_small.ms", outputvis="w51_progressive_test_small_selfcal.ms",
      datacolumn="corrected")
# Round 2: image the round-1 result, solve again, apply, and split.
os.system('rm -rf progressive_test_selfcal_mfs.*')
clean(vis='w51_progressive_test_small_selfcal.ms', imagename="progressive_test_selfcal_mfs",
      field="", spw='', mode='mfs', outframe='LSRK',
      interpolation='linear', imagermode='mosaic', interactive=False,
      niter=1000, threshold='50.0mJy', imsize=[512,512], cell='0.06arcsec',
      phasecenter='J2000 19h23m43.905 +14d30m28.08', weighting='briggs',
      usescratch=True, pbcor=False, robust=-2.0)
exportfits('progressive_test_selfcal_mfs.image', 'progressive_test_selfcal_mfs.image.fits', dropdeg=True, overwrite=True)
os.system("rm -rf phase_2.cal")
gaincal(vis="w51_progressive_test_small_selfcal.ms", caltable="phase_2.cal", field="",
        solint="30s", calmode="p", refant="", gaintype="G")
#plotcal(caltable="phase_2.cal", xaxis="time", yaxis="phase", subplot=331,
#        iteration="antenna", plotrange=[0,0,-30,30], markersize=5,
#        fontsize=10.0,)
# Note the wider uvrange cut (200~2000) than round 1's 400~2000.
flagmanager(vis='w51_progressive_test_small_selfcal.ms', mode='save', versionname='backup')
applycal(vis="w51_progressive_test_small_selfcal.ms", field="", gaintable=["phase_2.cal"],
         interp="linear", uvrange='200~2000', applymode='calonly')
flagmanager(vis='w51_progressive_test_small_selfcal.ms', mode='restore', versionname='backup')
# Materialize the corrected data as the input MS for round 3.
os.system('rm -rf w51_progressive_test_small_selfcal_2.ms')
os.system('rm -rf w51_progressive_test_small_selfcal_2.ms.flagversions')
split(vis="w51_progressive_test_small_selfcal.ms", outputvis="w51_progressive_test_small_selfcal_2.ms",
      datacolumn="corrected")
# Round 3: image the round-2 result, solve, apply (no uvrange cut), split.
os.system('rm -rf progressive_test_selfcal_2_mfs.*')
clean(vis='w51_progressive_test_small_selfcal_2.ms', imagename="progressive_test_selfcal_2_mfs",
      field="", spw='', mode='mfs', outframe='LSRK',
      interpolation='linear', imagermode='mosaic', interactive=False,
      niter=1000, threshold='50.0mJy', imsize=[512,512], cell='0.06arcsec',
      phasecenter='J2000 19h23m43.905 +14d30m28.08', weighting='briggs',
      usescratch=True, pbcor=False, robust=-2.0)
exportfits('progressive_test_selfcal_2_mfs.image', 'progressive_test_selfcal_2_mfs.image.fits', dropdeg=True, overwrite=True)
os.system("rm -rf phase_3.cal")
gaincal(vis="w51_progressive_test_small_selfcal_2.ms", caltable="phase_3.cal", field="",
        solint="30s", calmode="p", refant="", gaintype="G")
#plotcal(caltable="phase_3.cal", xaxis="time", yaxis="phase", subplot=331,
#        iteration="antenna", plotrange=[0,0,-30,30], markersize=5,
#        fontsize=10.0,)
flagmanager(vis='w51_progressive_test_small_selfcal_2.ms', mode='save',
            versionname='backup')
applycal(vis="w51_progressive_test_small_selfcal_2.ms", field="",
         gaintable=["phase_3.cal"],
         interp="linear", applymode='calonly')
# Fix: the original restored flags on 'w51_progressive_test_small_selfcal.ms'
# here, while the matching 'save' above was taken on the *_selfcal_2 MS --
# the save/restore pair must target the same measurement set, as in the
# previous two rounds.
flagmanager(vis='w51_progressive_test_small_selfcal_2.ms', mode='restore',
            versionname='backup')
os.system('rm -rf w51_progressive_test_small_selfcal_3.ms')
os.system('rm -rf w51_progressive_test_small_selfcal_3.ms.flagversions')
split(vis="w51_progressive_test_small_selfcal_2.ms",
      outputvis="w51_progressive_test_small_selfcal_3.ms",
      datacolumn="corrected")
# Final imaging of round 3 and a noise/peak comparison across all rounds.
os.system('rm -rf progressive_test_selfcal_3_mfs.*')
clean(vis='w51_progressive_test_small_selfcal_3.ms',
      imagename="progressive_test_selfcal_3_mfs",
      field="", spw='', mode='mfs', outframe='LSRK',
      interpolation='linear', imagermode='mosaic', interactive=False,
      niter=1000, threshold='50.0mJy', imsize=[512,512], cell='0.06arcsec',
      phasecenter='J2000 19h23m43.905 +14d30m28.08', weighting='briggs',
      usescratch=True, pbcor=False, robust=-2.0)
exportfits('progressive_test_selfcal_3_mfs.image',
           'progressive_test_selfcal_3_mfs.image.fits', dropdeg=True,
           overwrite=True)
from astropy.io import fits
# sigma is the std-dev of the [:200,:200] corner (presumably a source-free
# region -- confirm); peak is the global maximum of each image.
print("Stats (mfs):")
print("dirty: peak={1:0.5f} sigma={0:0.5f}".format(fits.getdata('progressive_test_mfs_dirty.image.fits')[:200,:200].std(), fits.getdata('progressive_test_mfs_dirty.image.fits').max()))
print("clean: peak={1:0.5f} sigma={0:0.5f}".format(fits.getdata('progressive_test_mfs.image.fits')[:200,:200].std(), fits.getdata('progressive_test_mfs.image.fits').max()))
print("selfcal: peak={1:0.5f} sigma={0:0.5f}".format(fits.getdata('progressive_test_selfcal_mfs.image.fits')[:200,:200].std(), fits.getdata('progressive_test_selfcal_mfs.image.fits').max()))
print("selfcal2: peak={1:0.5f} sigma={0:0.5f}".format(fits.getdata('progressive_test_selfcal_2_mfs.image.fits')[:200,:200].std(), fits.getdata('progressive_test_selfcal_2_mfs.image.fits').max()))
print("selfcal3: peak={1:0.5f} sigma={0:0.5f}".format(fits.getdata('progressive_test_selfcal_3_mfs.image.fits')[:200,:200].std(), fits.getdata('progressive_test_selfcal_3_mfs.image.fits').max()))
|
992,663 | 27be0670e2ea2e315302593ec88576de388de4f8 | # Usage:
# docker run --rm -ti \
# -v /path-to/model:/sly_task_data/model
# [model docker image name]
# python -- /workdir/src/rest_inference.py
from inference import ObjectDetectionSingleImageApplier
import os
from supervisely_lib.worker_api.rpc_servicer import InactiveRPCServicer
from supervisely_lib.nn.inference.rest_server import ModelRest, RestInferenceServer
from supervisely_lib.nn.inference.rest_constants import REST_INFERENCE_PORT
if __name__ == '__main__':
    # Port comes from the REST_INFERENCE_PORT env var ('' when unset).
    port = os.getenv(REST_INFERENCE_PORT, '')
    # Wrap the single-image detector in the REST model server.
    # NOTE(review): InactiveRPCServicer presumably disables the usual RPC
    # channel in favor of plain REST -- confirm in supervisely_lib docs.
    model_deploy = ModelRest(model_applier_cls=ObjectDetectionSingleImageApplier, rpc_servicer_cls=InactiveRPCServicer)
    server = RestInferenceServer(model=model_deploy.serv_instance, name=__name__, port=port)
    server.run()
|
992,664 | 15d4c3a91688a5559cb8990d06744bbaccad774b | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-10 17:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Plan.public to a non-null BooleanField with Yes/No choices.

    Auto-generated by Django; applied migrations should not be edited by
    hand.
    """
    dependencies = [
        ('grid', '0029_auto_20160710_1232'),
    ]
    operations = [
        migrations.AlterField(
            model_name='plan',
            name='public',
            field=models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False),
        ),
    ]
|
992,665 | aff86fcb60345e2d700f773656fdaca1c5dbb747 | from django.contrib import admin
from django.urls import path, include
from . import views
# URL routes for this app; all handlers live in ./views.py.  Routes taking
# <int:g_pk> operate on a single record identified by its primary key.
urlpatterns = [
    path('', views.index),
    path('new', views.new),
    path('create', views.create),
    path('detail/<int:g_pk>', views.detail),
    path('delete/<int:g_pk>', views.delete),
    path('fix/<int:g_pk>', views.fix),
    path('edit/<int:g_pk>', views.edit),
    path('delete_all', views.delete_all),
]
|
992,666 | d1a36ccd9c109ce563318c14c0a80f8b46059970 | from django import forms
from .models import Artikel
class ArtikelForm(forms.ModelForm):
    """ModelForm for Artikel exposing title, body, author and photo fields."""
    class Meta:
        model = Artikel
        fields = ('judul', 'isi', 'penulis', 'foto',)
        # Placeholder texts are user-facing (Indonesian) and intentionally
        # left untranslated.
        widgets = {
            'judul': forms.TextInput(
                attrs={
                    'placeholder': 'Judul jangan kosong!'
                }
            ),
            'penulis': forms.TextInput(
                attrs={
                    'placeholder': 'Plis Otong jangan ikut campur'
                }
            )
        }
        # Display label override for the body field.
        labels = {
            'isi': 'konten'
        }
|
992,667 | c1e0b7f056b40b58a7570e91e42f2f0e476c83de | from django.shortcuts import render
import requests
import json
from datetime import datetime
import time
import random
import sqlite3
from monitor.models import Products
from discord_webhook import DiscordWebhook, DiscordEmbed
def send_webhook(webhook, product_title, price, image_url, desc, handle):
    """Post a product embed to the given Discord webhook.

    Builds an embed linking to the product page and executes the webhook.
    On a non-200 response it strips embeds, retries once, and exits.
    """
    embed = DiscordEmbed(title=product_title, url="https://funkoeurope.com/products/"+ handle, description=desc, color=242424)
    embed.set_author(
        name="Funko",
        url="https://funkoeurope.com/",
        icon_url="https://cdn.shopify.com/s/files/1/0433/1952/5529/files/Funko_Logo_White_140x@2x.png?v=1602310645",
    )
    #embed.set_footer(text="Embed Footer Text")
    # set thumbnail
    embed.set_thumbnail(url=image_url)
    embed.set_timestamp()
    ## Set `inline=False` for the embed field to occupy the whole line
    embed.add_embed_field(name="Status", value="Available", inline=False)
    embed.add_embed_field(name="Price", value=price, inline=False)
    webhook.add_embed(embed)
    response = webhook.execute()
    # NOTE(review): response[0] assumes execute() returns a list (multiple
    # webhook URLs) -- confirm against the discord_webhook version in use.
    if response[0].status_code != 200:
        embed = webhook.get_embeds()
        # NOTE(review): removing by ascending index shifts the remaining
        # embeds, so every other embed is skipped; removing index 0
        # len(embed) times (or iterating in reverse) would clear them all.
        for i in range(len(embed)):
            webhook.remove_embed(i)
        response = webhook.execute()
        # NOTE(review): exit() terminates the whole process, including the
        # Django worker that called this -- confirm this is intended.
        exit()
class PRODUCTDATABASE:
    """Thin sqlite3 persistence layer for scraped product variants.

    Fixes over the original:
    * every query now uses DB-API parameter binding instead of
      interpolating values scraped from the remote API into the SQL text
      (string-formatted SQL is an injection risk and breaks on quotes);
    * ``update_record`` now commits its change (previously the UPDATE was
      never committed and was lost on process exit).
    Method names and signatures are unchanged.
    """

    # Shared class-level connection/cursor, as in the original design.
    con = sqlite3.connect('pop.db')
    cur = con.cursor()

    def create_table(self, url):
        """Create the products table if needed; seed it when empty."""
        try:
            self.cur.execute('''create table if not exists products
               (product_id text, title text, handle text, variant_id text, variant_title text, available numeric, price REAL, image_url text)''')
            self.cur.execute("SELECT * FROM products")
            result = self.cur.fetchone()
            if result == None:
                # Empty table: pull the full catalogue once.
                self.set_up_db(url)
        except Exception as e:
            print(e)

    def set_up_db(self, url):
        """Walk the paginated products.json feed and store every variant.

        NOTE(review): rows are written through the Django ``Products``
        model here (not this class's sqlite connection), and only the last
        image of each product is kept -- both preserved from the original.
        """
        print("Setting up database")
        x = 0
        while True:
            products_url = url + str(x)
            x += 1
            try:
                response = requests.get(products_url)
                product_data = response.json()["products"]
                if not len(product_data):
                    break
                for product in product_data:
                    for variant in product["variants"]:
                        image_url = None
                        for image in product["images"]:
                            image_url = image["src"]
                        new_product = Products(
                            product_id = product["id"],
                            title = product["title"].replace("'",""),
                            handle = product["handle"],
                            variant_id = variant["id"],
                            variant_title = variant["title"],
                            available = variant["available"],
                            price = variant["price"],
                            image_url = image_url
                        )
                        new_product.save()
            except Exception as e:
                print(e)

    def check_record_exists(self, variant_id):
        """Return True when a row with this variant_id is present."""
        try:
            self.cur.execute("SELECT * FROM products WHERE variant_id = ?",
                             (variant_id,))
            return bool(self.cur.fetchall())
        except Exception as e:
            print(e)
            return False

    def check_record_availability(self, variant_id):
        """Return the stored `available` flag, or None when missing."""
        try:
            self.cur.execute("SELECT * FROM products WHERE variant_id = ?",
                             (variant_id,))
            row = self.cur.fetchone()
            # row is None for a missing variant -> TypeError -> None,
            # matching the original behavior.
            return row[5]
        except Exception as e:
            print(e)
            return None

    def update_record(self, variant_id, available):
        """Set the `available` flag for a variant and commit."""
        try:
            self.cur.execute("UPDATE products SET available = ? WHERE variant_id = ?",
                             (available, variant_id))
            # Fix: the original never committed this write.
            self.con.commit()
        except Exception as e:
            print(e)

    def insert_record(self, product_id, title, handle, variant_id, variant_title, available, price, image_url):
        """Insert one variant row and commit."""
        try:
            self.cur.execute(
                "INSERT INTO products VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                (product_id, title, handle, variant_id, variant_title,
                 available, price, image_url))
            print('Product added successfully to database ')
            self.con.commit()
        except Exception as e:
            print(e)
def run_monitor(webhook, watch_list):
    """Poll funkoeurope.com forever and push Discord alerts.

    Sends a webhook for every new product, and for availability changes of
    products whose handle appears in *watch_list*.  This function never
    returns; it cycles over the paginated products.json feed.
    """
    url = "https://funkoeurope.com/products.json?page="
    db = PRODUCTDATABASE()
    # Seed the Django-side product table on first run.
    if Products.objects.first() == None:
        db.set_up_db(url)
    # watch_list = ["6-exodia-the-forbidden-one-yu-gi-oh", "concept-series-snowtrooper-star-wars", "stefon-diggs-nfl-bills"]
    # with open("watch_list.txt", "r") as f:
    #     for line in f:
    #         line = line.replace("\n", "")
    #         watch_list.append(line)
    payload = {}   # NOTE(review): unused -- confirm before removing
    headers = {}   # NOTE(review): unused -- confirm before removing
    client = requests.Session()
    x = 1
    while True:
        products_url = url + str(x)
        try:
            response = client.get(products_url)
            product_data = response.json()
            product_data = product_data["products"]
            x += 1
            if len(product_data):
                for product in product_data:
                    datetimeObj = datetime.now()  # NOTE(review): unused
                    for variant in product["variants"]:
                        product_id = product["id"]
                        title = product["title"].replace("'","")
                        handle = product["handle"]
                        variant_id = variant["id"]
                        variant_title = variant["title"]
                        available = variant["available"]
                        price = variant["price"]
                        # Only the last image of the product survives.
                        for image in product["images"]:
                            image_url = image["src"]
                        # Insert the product if it is not stored yet.
                        # NOTE(review): this first filter() call discards
                        # its result; the query runs twice.
                        Products.objects.filter(product_id = product_id).first()
                        if Products.objects.filter(product_id = product_id).first() == None :
                            new_product = Products(
                                product_id = product_id,
                                title = title,
                                handle = handle,
                                variant_id = variant_id,
                                variant_title = variant_title,
                                available = available,
                                price = price,
                                image_url = image_url
                            )
                            new_product.save()
                            send_webhook(webhook, title, price, image_url, "New Product", handle)
                            # Random delay to avoid hammering Discord.
                            time.sleep(random.randint(0,30))
                        else:
                            # Known product: alert watchers on availability flips.
                            print('availability check')
                            if available != Products.objects.filter(variant_id = variant_id).first().available:
                                p = Products.objects.filter(variant_id = variant_id).first()
                                p.available = available
                                p.save()
                                # NOTE(review): this loop variable shadows the
                                # outer `handle` (the product handle).
                                for handle in watch_list:
                                    if handle in product["handle"]:
                                        if variant["available"]:
                                            send_webhook(webhook, title, price, image_url, "Product Is Available", handle)
                                            time.sleep(random.randint(0,30))
            else:
                # Past the last page: wrap around.  NOTE(review): x is reset
                # to 0 and then incremented after the next fetch, so the
                # cycle restarts from page 0 -- confirm this is intended.
                x = 0
        except Exception as e:
            print(e)
            print('Delaying next request')
            time.sleep(random.randint(0,20))
def index(request):
    """Monitor landing page; a POST starts the monitor for the submitted webhook.

    NOTE(review): run_monitor loops forever, so this request never returns a
    response while the monitor runs — confirm it should be a background worker.
    """
    if request.method == "POST":
        webhook = DiscordWebhook(url=request.POST["webhook_url"], username="Funky Monitor")
        watch_list = request.POST["watch_list"]
        if watch_list:
            # Bug fix: stripping "\n"/"\r" before splitting merged adjacent
            # entries ("a\nb" -> "ab"). split() with no argument already
            # splits on any whitespace, including \r\n.
            watch_list = watch_list.split()
            run_monitor(webhook, watch_list)
    # Bug fix: always return a response; the original returned None for a
    # POST whose watch list was empty.
    return render(
        request,
        "monitor/index.html")
992,668 | 0064659304226287d1c91217df90c094487accfd | from prefix_sums import *
import unittest
class PrefixSums(unittest.TestCase):
@unittest.skip
def test_prefix_sums(self):
A = [3, 4, 5, 6]
print(A[1:2])
P = prefix_sums(A)
self.assertTrue(P[1], 3)
self.assertTrue(P[2], 7)
self.assertTrue(P[4],18)
@unittest.skip
def test_count_total(self):
A = [3, 4, 5, 6]
P = prefix_sums(A)
print(P)
sum = count_total(P,1,2)
self.assertEqual(sum, 9)
sum = count_total(P,1,3)
self.assertEqual(sum, 15)
sum = count_total(P,0,2)
self.assertEqual(sum, 12)
@unittest.skip
def test_count_div(self):
self.assertEqual(count_div(6,11,2), 3)
self.assertEqual(count_div(0,20000,2), 10000)
self.assertEqual(count_div(12,12,2), 1)
# self.assertEqual(count_div(0,2_000_000_000,2), 1_000_000_000)
@unittest.skip
def test_compute_dna(self):
P = [2,5, 0]
Q = [4,5,6]
dna = "CAGCCTA"
result = compute_dna(dna,P,Q)
expected = [2,4, 1 ]
# self.assertListEqual(result, expected)
def test_compute_dna_effecient(self):
P = [2,5, 0]
Q = [4,5,6]
dna = "CAGCCTA"
result = compute_dna_effecient(dna,P,Q)
expected = [2,4, 1 ]
# self.assertListEqual(result, expected)
if __name__ == "__main__":
unittest.main()
|
992,669 | 4fc7a68a5d408de136fc64ea84a1ff9e1675b63a | import numpy as np
import cv2
import tensorflow as tf
import os
# Enable GPU memory growth only when a GPU is actually present; indexing
# gpus[0] unconditionally raises IndexError on CPU-only machines.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)
filename = 'video.avi'
frames_per_second = 10.0
res = '720p'
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# Video Encoding, might require additional installs
# Types of Codes: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
'avi': cv2.VideoWriter_fourcc(*'XVID'),
#'mp4': cv2.VideoWriter_fourcc(*'H264'),
'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(cap, width, height):
    """Apply *width*/*height* to the capture device (props 3 and 4 are
    OpenCV's frame-width/frame-height property ids)."""
    for prop_id, value in ((3, width), (4, height)):
        cap.set(prop_id, value)
# grab resolution dimensions and set video capture to it.
def get_dims(cap, res='1080p'):
    """Resolve *res* to (width, height), apply it to *cap*, and return it.

    Unknown resolution labels fall back to 480p.
    """
    width, height = STD_DIMENSIONS.get(res, STD_DIMENSIONS["480p"])
    # Change the current capture device to the resulting resolution.
    change_res(cap, width, height)
    return width, height
def get_video_type(filename):
    """Map *filename*'s extension to a fourcc from VIDEO_TYPE (default: avi)."""
    _, ext = os.path.splitext(filename)
    return VIDEO_TYPE.get(ext, VIDEO_TYPE['avi'])
# Loading model
IMG_HEIGHT, IMG_WIDTH = 224, 224
model_path = "checkpoints/mobilenet_aug_lite/Epoch_275_model.hp5"
model = tf.keras.models.load_model(model_path)
# Labels (index order must match the model's output classes)
labels = ['cans', 'oranges', 'plastic']
# Get frame source: default webcam.
cap = cv2.VideoCapture(0)
# NOTE(review): the writer uses a literal 25 fps although frames_per_second
# is defined as 10.0 above -- confirm which rate is intended.
out = cv2.VideoWriter(filename, get_video_type(filename), 25, get_dims(cap, res))
while(True):
    # Capture frame-by-frame
    # NOTE(review): `ret` is never checked; a failed read leaves `frame`
    # as None and cv2.resize below will raise.
    ret, frame = cap.read()
    # Resize and reshape frame into a 1-image batch for the model.
    img = cv2.resize(frame, (IMG_HEIGHT, IMG_WIDTH), interpolation=cv2.INTER_AREA)
    img = np.expand_dims(img, axis=0)
    # Predict
    pred = model.predict(img)
    # Get the label class with the highest score.
    label = labels[np.argmax(pred)]
    # Score, rounded for display.
    score = str(round(np.max(pred),2))
    # Label to print
    label2print = label+": "+score
    # Display the resulting frame with the prediction overlaid.
    cv2.putText(frame,label2print, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,(0, 255, 255),2,cv2.LINE_4)
    cv2.imshow('', frame)
    out.write(frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
out.release()
cv2.destroyAllWindows() |
992,670 | 8335a84e6ccbe78829c075ab7cdec5aef23f42e0 | def anagramMappings(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
pos = {}
for i in range(len(B)):
if B[i] in pos:
pos[B[i]].append(i)
else:
pos[B[i]] = [i]
P = []
for i in range(len(A)):
idx = pos[A[i]].pop()
P.append(idx)
return P |
992,671 | c0a8d16d830cefe888575e82d5f10e62cf8ac278 | import pytest
from test_case.web_test.pages.baidu_page import BaiduPage
from test_case.web_test.pages.login_page import LoginPage
from test_case.web_test.pages.menu_page import MenuPage
from test_case.web_test.pages.add_goods_page import AddGoodsPage
from test_case.web_test.pages.goods_list_page import GoodsListPage
from datetime import datetime
import os
@pytest.fixture
def selenium(selenium):
    """Wrap pytest-selenium's driver with a 10s implicit wait."""
    selenium.implicitly_wait(10)
    # selenium.maximize_window()
    return selenium
@pytest.fixture
def chrome_options(chrome_options):
    """Chrome launch options: start maximized; headless toggle kept disabled."""
    # Bug fix: the valid Chrome switch is '--start-maximized';
    # '--start-maximize' is not a recognized switch and is ignored.
    chrome_options.add_argument('--start-maximized')
    # Run without a visible browser window:
    # chrome_options.add_argument('--headless')
    return chrome_options
@pytest.fixture
def baidu_page(selenium):
    """Open baidu.com and return its page object."""
    selenium.get('http://www.baidu.com')
    page = BaiduPage(selenium)
    return page
@pytest.fixture
def login_page(selenium):
    """Open the ECShop admin login page and return its page object."""
    # 1. Navigate to the page
    selenium.get('http://39.104.14.232/ecshop/wwwroot/admin/privilege.php?act=login')
    # 2. Build and return the page object
    return LoginPage(selenium)
@pytest.fixture
def menu_page(selenium,login_page):
    """Log in as admin and return the admin menu page object."""
    # 1. Log in to reach the menu
    login_page.login('admin','123456')
    # 2. Build and return the page object
    return MenuPage(selenium)
@pytest.fixture
def add_goods_page(selenium, menu_page):
    """Open the 'add goods' page via the admin menu and return its page object.

    Bug fix: the fixture used menu_page/selenium without declaring them as
    fixture parameters, so both names were undefined at run time (NameError).
    """
    # 1. Navigate to the page
    menu_page.click_menu('商品管理','添加商品')
    # 2. Return the page object to the test
    return AddGoodsPage(selenium)
@pytest.fixture
def goods_list_page(selenium, menu_page):
    """Open the goods-list page via the admin menu and return its page object.

    Bug fix: the fixture used menu_page/selenium without declaring them as
    fixture parameters, so both names were undefined at run time (NameError).
    """
    # 1. Navigate to the page
    menu_page.click_menu('商品管理', '商品列表')
    # 2. Return the page object
    return GoodsListPage(selenium)
def pytest_configure(config):
    """Redirect pytest-html's report path to reports/WEB_report_<timestamp>.html."""
    if config.getoption("htmlpath"):
        # %S (seconds), not %s: lowercase %s is a non-portable epoch directive.
        now = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Bug fix: the option attribute is 'htmlpath'; the original assigned
        # to the misspelled 'htmlpaty', so the custom path was never applied.
        config.option.htmlpath = os.path.join(config.rootdir, 'reports', f'WEB_report_{now}.html')
992,672 | fe7970308c05d2937e94481c6f348f09591e72d6 | import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
import albumentations as A
from albumentations import *
from warnings import filterwarnings
filterwarnings("ignore")
from config import *
################# Augmentation ###############
# # Plain Training Augmentation
# Transforms_Train = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE),
# A.Normalize()
# ])
# Training Augmentation
# Train-time augmentation: geometric + photometric ops, applied
# stochastically, ending with a fixed resize + ImageNet-style normalize.
Transforms_Train = A.Compose([
    A.RandomResizedCrop(IMG_SIZE, IMG_SIZE, scale=(0.8, 1.2), p=1),
    A.HorizontalFlip(p=0.5),
    # Brightness + Contrast
    A.RandomBrightnessContrast(brightness_limit=(-0.2,0.2), contrast_limit=(-0.2, 0.2), p=0.5),
    # Blurring + Distortion (each OneOf picks a single op 25% of the time)
    A.OneOf([
        A.GaussNoise(var_limit=[5.0, 30.0]), A.MotionBlur(blur_limit=5),
        A.MedianBlur(blur_limit=5), A.GaussianBlur(blur_limit=5)], p=0.25),
    A.OneOf([
        A.OpticalDistortion(distort_limit=1.0), A.GridDistortion(num_steps=5, distort_limit=1.),
        A.ElasticTransform(alpha=3)], p=0.25),
    # Some final Shift+Saturation
    A.CLAHE(clip_limit=(1,4), p=0.25),
    A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=15, val_shift_limit=10, p=0.25),
    A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=45, p=0.25),
    # Resize
    A.Resize(IMG_SIZE, IMG_SIZE),
    # Cut holes in images (Cutout regularization, 10% of side length each)
    A.Cutout(max_h_size=int(IMG_SIZE * 0.10), max_w_size=int(IMG_SIZE * 0.10), num_holes=3, p=0.35),
    A.Normalize(),
])
# Validation Augmentation: deterministic resize + normalize only.
Transforms_Valid = A.Compose([
    A.Resize(IMG_SIZE, IMG_SIZE),
    A.Normalize()
])
################# Augmentation ###############
class Train_Dataset(Dataset):
    """Training/validation dataset: one RGB image per dataframe row plus its
    11 target labels (TARGET_COLS from config)."""

    def __init__(self, df, mode, transform=None):
        self.df = df.reset_index(drop=True)
        self.mode = mode
        self.transform = transform
        self.labels = df[TARGET_COLS].values  # 11 cols to predict

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        record = self.df.loc[index]
        # BGR file read -> RGB.
        image = cv2.cvtColor(cv2.imread(record.file_path), cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            image = self.transform(image=image)['image']
        # HWC float32 -> CHW layout expected by torch models.
        image = image.astype(np.float32).transpose(2, 0, 1)
        image_tensor = torch.tensor(image).float()
        if self.mode == 'test':
            return image_tensor
        return image_tensor, torch.tensor(self.labels[index]).float()
class Test_Dataset(Dataset):
    """Test-time dataset: like Train_Dataset, but crops all-black borders
    from each image before the transform."""

    def __init__(self, df, mode, transform=None):
        self.df = df.reset_index(drop=True)
        self.mode = mode
        self.transform = transform
        self.labels = df[TARGET_COLS].values  # 11 cols to predict

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        row = self.df.loc[index]
        img = cv2.imread(row.file_path)
        # Bug fix: the black-border crop was computed into `image` and then
        # discarded (the code kept using `img`). Collapse the channel axis so
        # np.ix_ receives 1-D boolean masks, and only apply the crop when it
        # would keep at least one pixel.
        mask = (img > 0).any(axis=-1)
        if mask.any():
            img = img[np.ix_(mask.any(1), mask.any(0))]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            res = self.transform(image=img)
            img = res['image']
        img = img.astype(np.float32)
        img = img.transpose(2, 0, 1)
        label = torch.tensor(self.labels[index]).float()
        if self.mode == 'test':
            return torch.tensor(img).float()
        else:
            return torch.tensor(img).float(), label
|
992,673 | 6c539cec4f3af78ba241c6aa08002b8cc62a375f | class BaseMetric(object):
    def __init__(self, metric_names, eval_intermediate=True, eval_validation=True):
        """Store the metric names and the flags controlling when to evaluate."""
        self._names = tuple(metric_names)
        self._eval_intermediate = eval_intermediate
        self._eval_validation = eval_validation
    def eval_intermediate(self):
        """Whether this metric should be evaluated on intermediate outputs."""
        return self._eval_intermediate
    def eval_validation(self):
        """Whether this metric should be evaluated during validation."""
        return self._eval_validation
    def names(self):
        """Tuple of metric names, in declaration order."""
        return self._names
    def add(self, predictions, ground_truth):
        """Accumulate one batch of predictions; subclasses must override."""
        raise NotImplementedError
    def value(self):
        """Return the current metric value(s); subclasses must override."""
        raise NotImplementedError
|
992,674 | 88cb8d5cc7e628092493d2e9a98ccca3d9a3269f | from django.conf.urls import url
from appusrs import views
from django.urls import path,include
#TEMPLATE URLS
# Namespace used when reversing these routes, e.g. {% url 'appusrs:register' %}.
app_name='appusrs'
# Route /register/ and /user_login/ to the corresponding appusrs views.
urlpatterns=[
    path('register/',views.register,name='register'),
    path('user_login/',views.user_login,name='user_login')
] |
992,675 | a8737bf2e307789f00793654ac4fb1bd3c6645f3 | """
[ dictionary 형 ]
1- 키와 값으로 구성 ( 자바의 map 와 유사 )
2- 저장된 자료의 순서는 의미 없음
3- 중괄호 {} 사용
4- 변경가능
` 사전.keys() : key만 추출 (임의의 순서)
` 사전.values() : value만 추출 (임의의 순서)
` 사전.items() : key와 value를 튜플로 추출 (임의의 순서)
"""
print('--------- 1. 딕셔너리 요소 --------------- ')
# Duplicate key 1: the later value ('하나') silently overwrites 'one'.
dt = {1:'one', 2:'two', '3':'three', 1:'하나', 3:'셋'}
print(dt)
# Print the element whose key is the integer 1
print(dt[1])
# The integer key 3 *does* exist here (3:'셋'), distinct from the string '3'.
# print(dt[3])
# Print the element whose key is the string '3'
print(dt['3'])
# Keys must be hashable: numbers, strings and tuples are fine.
# Lists are mutable (their contents can change), so they cannot be keys.
dt2 = {1:'one', 2:'two', (3, 4):'turple'}
print(dt2[3,4])
print('--------- 2. 딕셔너리 추가 및 수정 --------------- ')
# Adding a new entry, then updating it in place
dt['korea'] = 'seoul'
print(dt)
dt['korea'] = '서울'
print(dt)
# Adding several entries at once
dt.update({5:'kim', 6:'hong', 7:'kang'})
print(dt)
print('--------- 3. Key로 Value값 찾기 --------------- ')
print(dt.get(9, '없음'))  # get() returns the default instead of raising for a missing key
# Inspect keys, values and (key, value) pairs separately
print(dt.keys())
print(dt.values())
print(dt.items()) |
992,676 | f9e447d5f9d05b86bf03c0ab3cd2083049ece24f | from models.user import User
from .interfaces.user import UserRepoInterface
from .repo import Repo
class UserRepo(Repo, UserRepoInterface):
    """Repository for User entities with email/token lookups."""

    def __init__(self, session):
        super().__init__(session)
        self._entity_type = User

    def get_by_email(self, email):
        """Return the single user with *email*; raises if none or many match."""
        return self._session.query(User).filter(User.email == email).one()

    def get_by_token(self, token):
        """Return the single user with *token*; raises if none or many match."""
        return self._session.query(User).filter(User.token == token).one()

    def is_owner_of_blog(self, user, blog):
        """True when *blog* belongs to *user*."""
        return user.id == blog.user_id
|
992,677 | e965c33cc7d1efd61f228187feb6871c807925f9 | T = int(raw_input())
# Python 2 Code Jam solver: fit a line a*k+b to the points aa[k] minimising
# the maximum absolute deviation. Every slope defined by a pair of input
# points is tried; for each slope a binary search finds the smallest
# feasible deviation m.
for caseID in range(1,T+1):
    n = int(raw_input())
    aa = map(int,raw_input().split())
    answer = 1e9
    for i in range(n):
        for j in range(i+1,n):
            # Candidate slope through points i and j.
            a = float(aa[j]-aa[i]) / float(j-i)
            l = 0
            r = 1e9
            # Binary search on m: does an intercept b exist with
            # |a*k+b - aa[k]| <= m for all k?
            for iter in range(70):
                m = (l + r) / 2.0
                # T[i] - aa[i] <= m for i=0..n-1
                bmin, bmax = -1e9, 1e9
                for k in range(n):
                    # abs(a*k+b - aa[k]) <= m
                    # aa[k]-m <= a*k+b <= aa[k]+m
                    # b >= aa[k]-m-a*k
                    # b <= aa[k]+m-a*k
                    bmin = max(bmin, aa[k]-m-a*k)
                    bmax = min(bmax, aa[k]+m-a*k)
                if bmin <= bmax:
                    r = m  # feasible: tighten the upper bound
                else:
                    l = m  # infeasible: raise the lower bound
            answer = min(answer, l)
    print "Case #%d: %.9f" % (caseID, answer)
|
992,678 | 0963fbc5447bd033f393a6b79a32386cb3ec028d | import time
from winsound import Beep
# Console countdown: read a number of minutes, tick once per second, then
# beep when time is up (winsound.Beep is Windows-only).
fg = int(input("Please enter time: "))
while fg:
    fgt = 60
    fg -= 1
    # NOTE(review): this extra sleep makes each "minute" last 61 seconds and
    # the display jumps straight to fg-1 -- confirm it is intentional.
    time.sleep(1)
    while fgt:
        fgt -= 1
        # \r keeps the countdown on a single console line.
        print(f"{fg}:{fgt} \r", end="")
        time.sleep(1)
Beep(1000, 200)
print("End")
|
992,679 | 92fdae9d36aba7682ba65ff32dbe3b83758af65c | from __future__ import absolute_import, unicode_literals
import argparse
import importlib
import logging
import logging.config
import os
import sys
import traceback
import signal
import attr
from pysoa.client import Client
from pysoa.common.constants import (
ERROR_CODE_RESPONSE_TOO_LARGE,
ERROR_CODE_SERVER_ERROR,
ERROR_CODE_UNKNOWN,
)
from pysoa.common.transport.exceptions import (
MessageReceiveError,
MessageReceiveTimeout,
MessageTooLarge,
)
from pysoa.common.types import (
ActionResponse,
Error,
JobResponse,
UnicodeKeysDict,
)
from pysoa.server.internal.types import RequestSwitchSet
from pysoa.server.errors import (
ActionError,
JobError,
)
from pysoa.server.logging import (
PySOALogContextFilter,
RecursivelyCensoredDictWrapper,
)
from pysoa.server.types import EnrichedActionRequest
from pysoa.server.schemas import JobRequestSchema
from pysoa.server.settings import PolymorphicServerSettings
import pysoa.version
class Server(object):
"""
Base class from which all SOA Service Servers inherit.
Required Attributes for Subclasses:
service_name: a string name of the service.
action_class_map: a dictionary mapping action name strings
to Action subclasses.
"""
settings_class = PolymorphicServerSettings
use_django = False
service_name = None
action_class_map = {}
def __init__(self, settings):
# Check subclassing setup
if not self.service_name:
raise AttributeError('Server subclass must set service_name')
# Store settings and extract transport
self.settings = settings
self.metrics = self.settings['metrics']['object'](**self.settings['metrics'].get('kwargs', {}))
self.transport = self.settings['transport']['object'](
self.service_name,
self.metrics,
**self.settings['transport'].get('kwargs', {})
)
# Set initial state
self.shutting_down = False
# Instantiate middleware
self.middleware = [
m['object'](**m.get('kwargs', {}))
for m in self.settings['middleware']
]
# Set up logger
self.logger = logging.getLogger('pysoa.server')
self.job_logger = logging.getLogger('pysoa.server.job')
# Set these as the integer equivalents of the level names
self.request_log_success_level = logging.getLevelName(self.settings['request_log_success_level'])
self.request_log_error_level = logging.getLevelName(self.settings['request_log_error_level'])
self._default_status_action_class = None
self._idle_timer = None
def handle_next_request(self):
if not self._idle_timer:
# This method may be called multiple times before receiving a request, so we only create and start a timer
# if it's the first call or if the idle timer was stopped on the last call.
self._idle_timer = self.metrics.timer('server.idle_time')
self._idle_timer.start()
# Get the next JobRequest
try:
request_id, meta, job_request = self.transport.receive_request_message()
except MessageReceiveTimeout:
# no new message, nothing to do
self.perform_idle_actions()
return
# We are no longer idle, so stop the timer and reset for the next idle period
self._idle_timer.stop()
self._idle_timer = None
PySOALogContextFilter.set_logging_request_context(request_id=request_id, **job_request['context'])
request_for_logging = RecursivelyCensoredDictWrapper(job_request)
self.job_logger.log(self.request_log_success_level, 'Job request: %s', request_for_logging)
try:
self.perform_pre_request_actions()
# Process and run the Job
job_response = self.process_job(job_request)
# Prepare the JobResponse for sending by converting it to a message dict
try:
response_message = attr.asdict(job_response, dict_factory=UnicodeKeysDict)
except Exception as e:
self.metrics.counter('server.error.response_conversion_failure').increment()
job_response = self.handle_error(e, variables={'job_response': job_response})
response_message = attr.asdict(job_response, dict_factory=UnicodeKeysDict)
response_for_logging = RecursivelyCensoredDictWrapper(response_message)
# Send the response message
try:
self.transport.send_response_message(request_id, meta, response_message)
except MessageTooLarge:
self.metrics.counter('server.error.response_too_large').increment()
self.logger.error(
'Could not send a response because it was too large',
exc_info=True,
extra={'data': {'request': request_for_logging, 'response': response_for_logging}},
)
job_response = JobResponse(errors=[
Error(
code=ERROR_CODE_RESPONSE_TOO_LARGE,
message='Could not send the response because it was too large',
),
])
self.transport.send_response_message(
request_id,
meta,
attr.asdict(job_response, dict_factory=UnicodeKeysDict),
)
finally:
if job_response.errors or any(a.errors for a in job_response.actions):
if (
self.request_log_error_level > self.request_log_success_level and
self.job_logger.getEffectiveLevel() > self.request_log_success_level
):
# When we originally logged the request, it may have been hidden because the effective logging
# level threshold was greater than the level at which we logged the request. So re-log the
# request at the error level, if set higher.
self.job_logger.log(self.request_log_error_level, 'Job request: %s', request_for_logging)
self.job_logger.log(self.request_log_error_level, 'Job response: %s', response_for_logging)
else:
self.job_logger.log(self.request_log_success_level, 'Job response: %s', response_for_logging)
finally:
PySOALogContextFilter.clear_logging_request_context()
self.perform_post_request_actions()
    def make_client(self, context):
        """
        Gets a client router to pass down to middleware or Actions that will
        propagate the passed `context`.
        """
        # A fresh Client per call, built from this service's client_routing
        # settings, so downstream service calls carry the request's context.
        return Client(self.settings['client_routing'], context=context)
@staticmethod
def make_middleware_stack(middleware, base):
"""
Given a list of in-order middleware callables `middleware`
and a base function `base`, chains them together so each middleware is
fed the function below, and returns the top level ready to call.
"""
for ware in reversed(middleware):
base = ware(base)
return base
def process_job(self, job_request):
"""
Validate, execute, and run Job-level middleware for JobRequests.
Args:
job_request: a JobRequest dictionary.
Returns:
A JobResponse instance.
"""
try:
# Validate JobRequest message
validation_errors = [
Error(
code=error.code,
message=error.message,
field=error.pointer,
)
for error in (JobRequestSchema.errors(job_request) or [])
]
if validation_errors:
raise JobError(errors=validation_errors)
# Add a client router in case a middleware wishes to use it
job_request['client'] = self.make_client(job_request['context'])
# Build set of middleware + job handler, then run job
wrapper = self.make_middleware_stack(
[m.job for m in self.middleware],
self.execute_job,
)
job_response = wrapper(job_request)
except JobError as e:
self.metrics.counter('server.error.job_error').increment()
job_response = JobResponse(
errors=e.errors,
)
except Exception as e:
# Send an error response if no middleware caught this.
# Formatting the error might itself error, so try to catch that
self.metrics.counter('server.error.unhandled_error').increment()
return self.handle_error(e)
return job_response
def handle_error(self, error, variables=None):
"""
Makes a last-ditch error response
"""
# Get the error and traceback if we can
try:
error_str, traceback_str = str(error), traceback.format_exc()
except Exception:
self.metrics.counter('server.error.error_formatting_failure').increment()
error_str, traceback_str = 'Error formatting error', traceback.format_exc()
# Log what happened
self.logger.exception(error)
# Make a bare bones job response
error_dict = {
'code': ERROR_CODE_SERVER_ERROR,
'message': 'Internal server error: %s' % error_str,
'traceback': traceback_str,
}
if variables is not None:
try:
error_dict['variables'] = {key: repr(value) for key, value in variables.items()}
except Exception:
self.metrics.counter('server.error.variable_formatting_failure').increment()
error_dict['variables'] = 'Error formatting variables'
return JobResponse(errors=[error_dict])
def execute_job(self, job_request):
"""
Processes and runs the ActionRequests on the Job.
"""
# Run the Job's Actions
job_response = JobResponse()
job_switches = RequestSwitchSet(job_request['context']['switches'])
for i, raw_action_request in enumerate(job_request['actions']):
action_request = EnrichedActionRequest(
action=raw_action_request['action'],
body=raw_action_request.get('body', None),
switches=job_switches,
context=job_request['context'],
control=job_request['control'],
client=job_request['client'],
)
action_in_class_map = action_request.action in self.action_class_map
if action_in_class_map or action_request.action in ('status', 'introspect'):
# Get action to run
if action_in_class_map:
action = self.action_class_map[action_request.action](self.settings)
elif action_request.action == 'introspect':
from pysoa.server.action.introspection import IntrospectionAction
action = IntrospectionAction(server=self)
else:
if not self._default_status_action_class:
from pysoa.server.action.status import make_default_status_action_class
self._default_status_action_class = make_default_status_action_class(self.__class__)
action = self._default_status_action_class(self.settings)
# Wrap it in middleware
wrapper = self.make_middleware_stack(
[m.action for m in self.middleware],
action,
)
# Execute the middleware stack
try:
action_response = wrapper(action_request)
except ActionError as e:
# Error: an error was thrown while running the Action (or Action middleware)
action_response = ActionResponse(
action=action_request.action,
errors=e.errors,
)
else:
# Error: Action not found.
action_response = ActionResponse(
action=action_request.action,
errors=[Error(
code=ERROR_CODE_UNKNOWN,
message='The action "{}" was not found on this server.'.format(action_request.action),
field='action',
)],
)
job_response.actions.append(action_response)
if (
action_response.errors and
not job_request['control'].get('continue_on_error', False)
):
# Quit running Actions if an error occurred and continue_on_error is False
break
return job_response
    def handle_shutdown_signal(self, *_):
        """SIGINT/SIGTERM handler: the first signal requests a graceful
        shutdown; a second one forces immediate exit."""
        if self.shutting_down:
            self.logger.warning('Received double interrupt, forcing shutdown')
            sys.exit(1)
        else:
            self.logger.warning('Received interrupt, initiating shutdown')
            self.shutting_down = True
    def harakiri(self, *_):
        """SIGALRM handler: fires when no request was handled within the
        configured harakiri timeout. The first firing starts a graceful
        shutdown and re-arms the alarm with the grace period; a second
        firing (grace expired) force-exits."""
        if self.shutting_down:
            self.logger.warning('Graceful shutdown failed after {}s. Exiting now!'.format(
                self.settings['harakiri']['shutdown_grace']
            ))
            sys.exit(1)
        else:
            self.logger.warning('No activity during {}s, triggering harakiri with grace {}s'.format(
                self.settings['harakiri']['timeout'],
                self.settings['harakiri']['shutdown_grace'],
            ))
            self.shutting_down = True
            # Re-arm so a stuck shutdown still exits after the grace period.
            signal.alarm(self.settings['harakiri']['shutdown_grace'])
def setup(self):
"""
Runs just before the server starts, if you need to do one-time loads or cache warming. Call super().setup() if
you override.
"""
pass
def _close_old_django_connections(self):
if self.use_django:
from django.conf import settings
if not getattr(settings, 'DATABASES'):
# No database connections are configured, so we have nothing to do
return
from django.db import transaction
try:
if transaction.get_autocommit():
from django.db import close_old_connections
self.logger.debug('Cleaning Django connections')
close_old_connections()
except BaseException as e:
# `get_autocommit` fails under PyTest without `pytest.mark.django_db`, so ignore that specific error.
try:
from _pytest.outcomes import Failed
if not isinstance(e, Failed):
raise e
except ImportError:
raise e
def perform_pre_request_actions(self):
"""
Runs just before the server accepts a new request. Call super().perform_pre_request_actions() if you override.
Be sure your purpose for overriding isn't better met with middleware.
"""
if self.use_django:
from django.conf import settings
if getattr(settings, 'DATABASES'):
from django.db import reset_queries
self.logger.debug('Resetting Django query log')
reset_queries()
self._close_old_django_connections()
def perform_post_request_actions(self):
"""
Runs just after the server processes a request. Call super().perform_post_request_actions() if you override. Be
sure your purpose for overriding isn't better met with middleware.
"""
self._close_old_django_connections()
def perform_idle_actions(self):
"""
Runs periodically when the server is idle, if it has been too long since it last received a request. Call
super().perform_idle_actions() if you override.
"""
self._close_old_django_connections()
def run(self):
"""
Start the SOA Server run loop.
"""
self.logger.info(
'Service "{service}" server starting up, pysoa version {pysoa}, listening on transport {transport}.'.format(
service=self.service_name,
pysoa=pysoa.version.__version__,
transport=self.transport,
)
)
self.setup()
self.metrics.commit()
signal.signal(signal.SIGINT, self.handle_shutdown_signal)
signal.signal(signal.SIGTERM, self.handle_shutdown_signal)
signal.signal(signal.SIGALRM, self.harakiri)
try:
while not self.shutting_down:
# reset harakiri timeout
signal.alarm(self.settings['harakiri']['timeout'])
# Get, process, and execute the next JobRequest
self.handle_next_request()
self.metrics.commit()
except MessageReceiveError:
self.logger.exception('Error receiving message from transport; shutting down')
except Exception:
self.metrics.counter('server.error.unknown').increment()
self.logger.exception('Unhandled server error; shutting down')
finally:
self.metrics.commit()
self.logger.info('Server shutting down')
@classmethod
def main(cls):
"""
Command-line entry point for running a PySOA service Server.
"""
parser = argparse.ArgumentParser(
description='Server for the {} SOA service'.format(cls.service_name),
)
parser.add_argument(
'-d', '--daemon',
action='store_true',
help='run the server process as a daemon',
)
if not cls.use_django:
# If Django mode is turned on, we use the Django settings framework
# to get our settings, so the caller needs to set DJANGO_SETTINGS_MODULE.
parser.add_argument(
'-s', '--settings',
help='The settings file to use',
required=True,
)
cmd_options, _ = parser.parse_known_args(sys.argv[1:])
# Load settings from the given file (or use Django and grab from its settings)
if cls.use_django:
# noinspection PyUnresolvedReferences
from django.conf import settings as django_settings
try:
settings = cls.settings_class(django_settings.SOA_SERVER_SETTINGS)
except AttributeError:
raise ValueError('Cannot find SOA_SERVER_SETTINGS in the Django settings')
else:
try:
settings_module = importlib.import_module(cmd_options.settings)
except ImportError as e:
raise ValueError('Cannot import settings module %s: %s' % (cmd_options.settings, e))
try:
settings_dict = getattr(settings_module, 'SOA_SERVER_SETTINGS')
except AttributeError:
try:
settings_dict = getattr(settings_module, 'settings')
except AttributeError:
raise ValueError(
"Cannot find 'SOA_SERVER_SETTINGS' or 'settings' variable in settings module {}.".format(
cmd_options.settings,
)
)
settings = cls.settings_class(settings_dict)
PySOALogContextFilter.set_service_name(cls.service_name)
# Set up logging
logging.config.dictConfig(settings['logging'])
# Optionally daemonize
if cmd_options.daemon:
pid = os.fork()
if pid > 0:
print('PID={}'.format(pid))
sys.exit()
# Set up server and signal handling
server = cls(settings)
# Start server event loop
server.run()
|
992,680 | a9ffd434de0dc8b95cc50a71423ed7b6ed0b9960 | #
# Helper functions for boxes.
#
# Categories of helper functions:
# 1. Overlap functions.
# 2. Spatial-aware object embedding functions.
# 3. Misc.
#
import numpy as np
from scipy.stats import entropy
from sklearn.metrics import average_precision_score
#
# Helper functions category 1: Overlap functions.
#
#
# 1.1
# Compute the intersection between two boxes.
#
def boxintersect(a,b):
    """Area of the intersection of boxes a and b ([x1, y1, x2, y2]); 0 if disjoint."""
    overlap_w = min(a[2], b[2]) - max(a[0], b[0])
    overlap_h = min(a[3], b[3]) - max(a[1], b[1])
    if overlap_w <= 0 or overlap_h <= 0:
        return 0
    return overlap_w * overlap_h
#
# 1.2
# Overlap between two boxes.
#
def box_overlap(a, b):
    """Intersection-over-union of two boxes [x1, y1, x2, y2]; 0. if disjoint."""
    # Disjoint on any axis (De Morgan of the original overlap test).
    if a[2] <= b[0] or b[2] <= a[0] or a[3] <= b[1] or b[3] <= a[1]:
        return 0.
    inter = float((min(a[2], b[2]) - max(a[0], b[0])) *
                  (min(a[3], b[3]) - max(a[1], b[1])))
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union
#
# 1.3
# Overlap between box and list of other boxes.
#
def liou(a, b):
    """IoU of box *a* against every row of the (n, 4) array *b*.

    Returns an (n,) float array. Uses range() instead of the Python-2-only
    xrange so the helper also runs under Python 3 (identical behavior on 2).
    """
    iou = np.zeros(b.shape[0])
    for i in range(b.shape[0]):
        iou[i] = box_overlap(a, b[i])
    return iou
#
# 1.4
# Compute the intersection-over-union score for two proposals from the
# same video.
# Optional parameter: ss (stride for first tube, in case of sparse annotation
# for second tube.
#
def tube_iou(p1, p2, ss=1):
    """Mean per-frame IoU between two tubes of [frame, x1, y1, x2, y2] rows.

    NOTE(review): still Python 2 (xrange) and uses the deprecated np.float
    alias; p2 rows are filtered on column 2 >= 0 while tube_trim filters on
    column 1 -- confirm which column marks a missing annotation.
    """
    # Frame indices; keep only p2 rows that carry a real annotation.
    p2idxs = np.where(p2[:,2] >= 0)[0]
    # Subsample p1 by the stride ss (sparse annotation support).
    p1 = p1[::ss,:]
    p1f, p2f = p1[:,0].astype(int), p2[:,0].astype(int)
    p2f = p2f[p2idxs]
    p2 = p2[p2idxs,:]
    # Determine union of frame span.
    tmin = min(np.min(p1f), np.min(p2f))
    tmax = max(np.max(p1f), np.max(p2f))
    # Initialize the overlap scores across frame span (frames where only one
    # tube exists contribute 0 to the mean).
    span = np.arange(tmin, tmax+1, ss)
    overlaps = np.zeros(len(span), dtype=np.float)
    # Go through the frame span.
    for d in xrange(len(span)):
        i = span[d]
        p1i, p2i = np.where(p1f == i)[0], np.where(p2f == i)[0]
        # Compute the overlap if frame in both proposals.
        if len(p1i) == 1 and len(p2i) == 1:
            a,b = p1[p1i[0],1:], p2[p2i[0],1:]
            # Normalize so (x1, y1) is the min corner and (x2, y2) the max.
            a = [min(a[0],a[2]), min(a[1],a[3]), max(a[0],a[2]), max(a[1],a[3])]
            b = [min(b[0],b[2]), min(b[1],b[3]), max(b[0],b[2]), max(b[1],b[3])]
            # Only compute overlap if there is any
            if a[2] > b[0] and b[2] > a[0] and a[3] > b[1] and b[3] > a[1]:
                intersection = (min(a[2],b[2]) - max(a[0],b[0]))
                intersection *= (min(a[3],b[3]) - max(a[1],b[1]))
                intersection = float(intersection)
                area1 = ((a[2]-a[0]) * (a[3]-a[1]))
                area2 = ((b[2]-b[0]) * (b[3]-b[1]))
                overlaps[d] = intersection / (area1 + area2 - intersection)
    # Return the mean overlap over the frame span
    return np.mean(overlaps)
#
# Helper functions category 2: Embedding functions.
#
#
# 2.1
# Minimal edge distance between two boxes.
#
def box_dist(a, b):
    """Minimal corner-to-corner distance between boxes a and b (0 if they overlap).

    Uses range() instead of the Python-2-only xrange so the helper also runs
    under Python 3 (identical behavior on 2).
    """
    if boxintersect(a,b) > 0:
        return 0
    # The four corners of each box.
    ae = np.array([[a[0],a[1]], [a[2],a[1]], [a[0],a[3]], [a[2],a[3]]])
    be = np.array([[b[0],b[1]], [b[2],b[1]], [b[0],b[3]], [b[2],b[3]]])
    mind = np.min(np.linalg.norm(ae-be[0], axis=1))
    for i in range(1, be.shape[0]):
        nd = np.min(np.linalg.norm(ae-be[i], axis=1))
        mind = min(mind, nd)
    return mind
#
# 2.2
# Tile distributions with 9 tiles. a=person, b=object.
#
def tiledist(a, b):
    """Distribution of object box *b* over the 9 tiles induced by person box *a*.

    The plane is split by a's edges into a 3x3 grid (above / level-with /
    below x left / on / right). Each entry is the area of b inside that
    tile, normalised by b's total area; order is row-major from top-left,
    so index 4 is "on" the person box itself.
    """
    e = 1e6  # effectively-infinite outer bound
    xs = (0, a[0], a[2], e)
    ys = (0, a[1], a[3], e)
    tiles = [boxintersect([xs[c], ys[r], xs[c+1], ys[r+1]], b)
             for r in range(3) for c in range(3)]
    d = np.array(tiles, dtype=float)
    return d / float((b[2] - b[0]) * (b[3] - b[1]))
#
# 2.3
# Find pairs of high scoring and high overlapping boxes for Viterbi.
#
def viterbi_scores(b1, s1, b2, s2, iouw):
    """Pairwise transition scores for Viterbi tube linking.

    scores[i, j] = s1[i] + s2[j] + iouw * IoU(b1[i], b2[j]), computed one
    row at a time via liou. Uses range() instead of the Python-2-only
    xrange so the helper also runs under Python 3.
    """
    scores = np.zeros((s1.shape[0], s2.shape[0]))
    for i in range(s1.shape[0]):
        scores[i] = s1[i] + s2 + iouw * liou(b1[i], b2)
    return scores
#
# Helper functions category 3: Misc functions.
#
#
# 3.1
# Jensen-Shannon Divergence.
#
def jensen_shannon_divergence(p, q):
    """Jensen-Shannon divergence (base 2) between distributions p and q:
    the average KL divergence of each distribution to their mixture."""
    mixture = 0.5 * (p + q)
    left = 0.5 * entropy(p, mixture, 2)
    right = 0.5 * entropy(q, mixture, 2)
    return left + right
#
# 3.2
# Remove elements from a tube that correspond to non-annotations.
# Used for experiments on Hollywood2Tubes, where the lack of action is
# annotated with -1 values for the coordindates in the frame.
#
def tube_trim(tube):
    """Drop tube rows whose column 1 (x1) is negative, i.e. frames marked as
    non-annotated with -1 coordinates (Hollywood2Tubes convention)."""
    return tube[tube[:, 1] >= 0, :]
#
# 3.3
# Interpolate a tube (done e.g. for UCF-101 due to its many videos).
#
def tube_interpolate(tube, scores, stride, nr_frames):
    """Linearly interpolate a strided tube and its scores to per-frame density.

    tube: (k, >=5) array, column 0 is the frame index, columns 1-4 the box.
    scores: (k,) per-detection scores aligned with the tube rows.
    stride: temporal stride between consecutive tube rows.
    nr_frames: number of frames to interpolate to.
    Returns (ntube, nscores) with nr_frames rows/entries.
    """
    if tube.shape[0] == nr_frames:
        # Already dense. Bug fix: return a (tube, scores) tuple so the return
        # shape is consistent with the interpolated path below.
        return tube, scores
    ntube = np.zeros((nr_frames, tube.shape[1]), dtype=tube.dtype)
    nscores = np.zeros(nr_frames)
    for i in range(nr_frames):
        # Neighbouring tube rows and the interpolation weight between them.
        # Floor division keeps integer semantics under both Python 2 and 3.
        i1, i2 = i // stride, i // stride + 1
        w = (i % stride) / float(stride)
        ntube[i, 0] = i
        if i2 < tube.shape[0]:
            for c in range(1, 5):
                ntube[i, c] = (1 - w) * tube[i1, c] + w * tube[i2, c]
            # Bug fix: blend with scores[i2]; the original read nscores[i2],
            # which is still zero at this point.
            nscores[i] = (1 - w) * scores[i1] + w * scores[i2]
        else:
            # Past the last annotated row: hold the final box and score.
            ntube[i, 1:5] = tube[i1, 1:5]
            nscores[i] = scores[i1]
    return ntube, nscores
|
992,681 | 1d75b72a1e6fb6cb6d30ed64f3d0e0b6ba18e4ba | # -*- coding: utf-8 -*-
from lai.config import DATABASE
UPDATE_RESPONSE = 1
COMMIT_RESPONSE = 2
class DatabaseException(Exception):
    """Raised for database factory/backend errors."""
    pass


class Database(object):
    """Factory: instantiating Database() returns a concrete backend object
    (DBSqlite or DBMongo) selected by `engine`, defaulting to the
    package-level DATABASE configuration."""

    def __new__(cls, engine=None, config=None):
        # Fall back to the package configuration when not given explicitly.
        if engine is None:
            engine = DATABASE['ENGINE']
        if config is None:
            config = DATABASE
        if engine == 'sqlite':
            from lai.db import DBSqlite
            return DBSqlite(config)
        elif engine == 'mongo':
            from lai.db import DBMongo
            return DBMongo(config)
        else:
            # Bug fix: raise the module's own exception type (previously a
            # bare Exception, leaving DatabaseException defined but unused).
            raise DatabaseException('Invalid engine ' + engine)
|
992,682 | fc7f44d4107caddf0b8a3e5e6733bb6e7fd21561 | import random
import util
from Queue import Queue
class DecisionTree:
    """ID3-style decision tree supporting discrete and continuous attributes,
    with optional reduced-error pruning on a held-out validation set.

    NOTE(review): this file uses Python 2 idioms (`print` statements,
    `iteritems`, integer `/` division) and will not run unmodified on Python 3.
    """
    def __init__(self, debugging=False):
        # Root of the fitted tree: a TreeNode, or a bare class label for a
        # single-class training set.
        self.root = None
        # When True, prune() reports the accuracy improvement.
        self.debugging = debugging

    def fit(self, examples, attributes, fitness_metric='information_gain',
            pruning=False):
        """Build the tree from `examples` (rows whose last element is the
        class label) over the given Attribute objects.

        With pruning=True, one third of the shuffled examples is held out as
        a validation set for reduced-error pruning.
        """
        self.attributes = attributes
        if fitness_metric == 'information_gain':
            self.fitness = util.info_gain
        else:
            raise ValueError('invalid fitness metric')
        if pruning:
            random.shuffle(examples)
            # Python 2 integer division: first third validates, rest trains.
            validation_set = examples[:len(examples)/3]
            training_set = examples[len(examples)/3:]
            self.root = self.build_tree(training_set)
            self.prune(validation_set)
        else:
            self.root = self.build_tree(examples)

    def predict(self, example):
        """Return the predicted class label for one example (None when a
        discrete value was never seen during training)."""
        return self.descend(example, self.root)

    def build_tree(self, examples, target_index=-1):
        """Recursively build a subtree; returns a TreeNode, or a bare class
        label when the examples are pure (or indistinguishable)."""
        # All examples share one class: emit a leaf label.
        if len(set([example[-1] for example in examples])) == 1:
            return examples[0][-1]
        # Class frequencies at this node, kept for pruning's majority vote.
        target_counts = {}
        for example in examples:
            target = example[target_index]
            if target in target_counts:
                target_counts[target] += 1
            else:
                target_counts[target] = 1
        # Score every attribute; fitness returns (score, threshold).
        fitness_dict = {}
        for attribute in self.attributes:
            fitness_dict[attribute] = self.fitness(examples, attribute)
        best_attr = max(fitness_dict.keys(), key=lambda k: fitness_dict[k][0])
        worst_attr = min(fitness_dict.keys(), key=lambda k: fitness_dict[k][0])
        # All attributes tie: break the tie randomly.
        if fitness_dict[best_attr][0] == fitness_dict[worst_attr][0]:
            best_attr = random.choice(self.attributes)
        node = TreeNode(best_attr, threshold=fitness_dict[best_attr][1])
        node.target_counts = target_counts
        # Partition the examples by the chosen attribute.
        subsets = {}
        if best_attr.type_ == 'discrete':
            for example in examples:
                key = example[best_attr.index]
                if key in subsets:
                    subsets[key].append(example)
                else:
                    subsets[key] = [example]
        else:
            # Continuous attribute: binary split at the threshold.
            subsets[' >= '] = []
            subsets[' < '] = []
            for example in examples:
                if example[best_attr.index] >= node.threshold:
                    subsets[' >= '].append(example)
                else:
                    subsets[' < '].append(example)
        # examples with identical attributes but different classes
        if len(subsets) == 1:
            return examples[0][-1]
        for value, subset in subsets.iteritems():
            node.children[value] = self.build_tree(subset)
        return node

    def descend(self, example, node):
        """Walk the tree from `node` to a leaf for this example."""
        # Leaves are stored as bare class labels, not TreeNodes.
        if not isinstance(node, TreeNode):
            return node
        value = example[node.attribute.index]
        if node.attribute.type_ == 'continuous':
            if value >= node.threshold:
                return self.descend(example, node.children[' >= '])
            else:
                return self.descend(example, node.children[' < '])
        # discrete attribute
        if value not in node.children:
            return None
        return self.descend(example, node.children[value])

    def prune(self, examples, target_index=-1):
        """Reduced-error pruning: bottom-up, replace each child subtree by its
        majority class and keep the replacement only if validation accuracy
        strictly improves."""
        init_accuracy = self.accuracy(examples, target_index)
        max_accuracy = init_accuracy
        # Breadth-first collection so popping `nodes` visits deepest first.
        q = Queue()
        q.put(self.root)
        nodes = [self.root]
        while not q.empty():
            node = q.get()
            for child in node.children.values():
                if isinstance(child, TreeNode):
                    q.put(child)
                    nodes.append(child)
        while len(nodes) > 0:
            node = nodes.pop()
            for value in node.children:
                child = node.children[value]
                if not isinstance(child, TreeNode):
                    continue
                majority_target = max(
                    child.target_counts.keys(),
                    key=lambda x: child.target_counts[x]
                )
                # Tentatively collapse the subtree to its majority label.
                node.children[value] = majority_target
                current_accuracy = self.accuracy(examples, target_index)
                if current_accuracy > max_accuracy:
                    max_accuracy = current_accuracy
                else:
                    # No improvement: restore the subtree.
                    node.children[value] = child
        if self.debugging:
            print 'Accuracy improved from {} to {} after pruning.'.format(
                init_accuracy,
                max_accuracy)

    def accuracy(self, examples, target_index=-1):
        """Fraction of examples whose prediction matches the true label."""
        count = 0
        for example in examples:
            if example[target_index] == self.predict(example):
                count += 1
        return 1.0 * count / len(examples)

    def display_tree(self):
        """Pretty-print the fitted tree to stdout."""
        self.display_tree_dfs(self.root, 0)

    def display_tree_dfs(self, node, level):
        """Depth-first printing helper; indentation encodes tree depth."""
        if not isinstance(node, TreeNode):
            return
        for value, child in node.children.iteritems():
            # Leaves print their label; internal nodes print class counts.
            target_info = child
            if isinstance(child, TreeNode):
                target_info = child.target_counts
            if node.attribute.type_ == 'continuous':
                print '{}{}{}{} [{}]'.format(
                    ' ' * level,
                    node.attribute.name,
                    value,
                    node.threshold,
                    target_info)
            else:
                print '{}{} = {} [{}]'.format(
                    ' ' * level,
                    node.attribute.name,
                    value,
                    target_info)
            self.display_tree_dfs(child, level + 1)
class TreeNode:
    """One internal split node of a DecisionTree."""
    def __init__(self, attribute, threshold=None):
        # Class-label counts of training examples that reached this node.
        self.target_counts = {}
        # Subtrees (or leaf labels) keyed by attribute value, or by the
        # ' >= ' / ' < ' markers for continuous splits.
        self.children = {}
        # Split threshold; None for discrete attributes.
        self.threshold = threshold
        self.attribute = attribute
class Attribute:
    """Dataset attribute descriptor: display name, 'discrete' or 'continuous'
    type, and the column index of the attribute within an example row."""
    def __init__(self, name, type_, index):
        self.index = index
        self.type_ = type_
        self.name = name
|
992,683 | 2b21269689985d7aa74b36859ff76baa7b50bda6 | # Spy-Pi code version 2
# takes an image every minute, and saves 1 week (7 days) of data
# it also makes the most current image available
# adapted from the book
# Make: JUMPSTARTING Raspberry Pi Vision
# Sandy Antunes and James West
# Maker Media, Inc
import os
import time
import shutil
# choose a delay time in seconds by modifying the next line
delay = 30
filename = "spycam"
stem = ".jpg"
# also, this is the file the webserver expects
directory = "./"
# Keep one week's worth of images, then wrap and overwrite the oldest.
max_images = 7 * 24 * 3600 // delay
# this is the command to run.
mycommand = "raspistill -h 640 -w 480 --nopreview -o"
# Bug fix: `directory` and `icount` were never defined, so the loop crashed
# with a NameError on its first iteration.
icount = 0
# this is the actual 'do stuff' part. It runs forever
while True:
    myfile = directory + filename + "_" + str(icount) + stem
    runme = mycommand + " " + myfile
    os.system(runme)
    # Make the most recent capture available under the fixed name the
    # webserver expects (this is why shutil is imported).
    shutil.copy(myfile, directory + filename + stem)
    icount = (icount + 1) % max_images
    time.sleep(delay)
|
992,684 | 6c196205a86c0edf2354075792be34f699d9b570 | from django.db import models
from django.contrib.auth.models import User
class Patient(models.Model):
    """A patient record: identity, demographics, blood group, current disease
    and contact details."""
    name = models.CharField(max_length=300,null=False)
    # Choice lists referenced by the fields below.
    gender_choices = [('M', 'Male'), ('F', 'Female')]
    blood_group_choices = [('A+','A+'),('A-','A-'),('B+','B+')
    ,('B-','B-'),('O+','O+'),('O-','O-'),('AB+','AB+'),('AB-','AB-')]
    # Date of birth, entered as YYYY-MM-DD.
    dob = models.DateField(
        max_length=10,
        help_text="format : YYYY-MM-DD",
        null=False)
    gender = models.CharField(
        choices=gender_choices,
        max_length=1,
        default=None,
        null=False)
    blood_group = models.CharField(
        choices = blood_group_choices,
        max_length = 3,
        default = None,
        null = False
    )
    disease = models.CharField(
        max_length = 200,
        null = False
    )
    contact_no = models.CharField(
        max_length = 12,
        default = None
    )
    address = models.TextField(
        default = None
    )
    def __str__(self):
        return self.name
class Donor(models.Model):
    """A blood donor, optionally linked to a login account and a BloodBank,
    with uploaded medical reports."""
    # One donor profile per Django auth user; deleting the user removes it.
    user = models.OneToOneField(
        User,
        default=None,
        null=True,
        on_delete=models.CASCADE)
    name = models.CharField(max_length=300,null=True)
    # Choice lists referenced by the fields below.
    gender_choices = [('M', 'Male'), ('F', 'Female')]
    blood_group_choices = [('A+','A+'),('A-','A-'),('B+','B+')
    ,('B-','B-'),('O+','O+'),('O-','O-'),('AB+','AB+'),('AB-','AB-')]
    # Date of birth, entered as YYYY-MM-DD.
    dob = models.DateField(
        max_length=10,
        help_text="format : YYYY-MM-DD",
        null=True)
    gender = models.CharField(
        choices=gender_choices,
        max_length=1,
        default=None,
        null=True)
    blood_group = models.CharField(
        choices = blood_group_choices,
        max_length = 3,
        default = None,
        null = True
    )
    contact_no = models.CharField(
        max_length = 12,
        default = None,
        null = True
    )
    # The bank this donor donates to; removed together with the bank.
    blood_bank = models.ForeignKey('BloodBank',on_delete = models.CASCADE,null = True)
    reports = models.FileField(
        help_text = 'upload reports in PDF format',
        null = True
    )
    address = models.TextField(
        default = None,
        null = True
    )
    def __str__(self):
        return self.name
class BloodBank(models.Model):
    """A blood bank location that donors (see Donor.blood_bank) belong to."""
    name = models.CharField(max_length=300,null=False)
    contact_no = models.CharField(
        max_length = 12,
        default = None
    )
    city = models.CharField(
        null =True,
        default = None,
        max_length = 200
    )
    address = models.TextField(
        default = None
    )
    def __str__(self):
        return self.name
    # def get_donor(self):
    #     return "\n".join([p.name for p in self.donors.all()])
|
992,685 | 41058e8c13227ca2f38e1c86efadeab687025b2a | #!/usr/bin/python3
def is_sorted(arr, N):
    """Return True when the first N elements of arr are in non-decreasing order."""
    return all(arr[k] <= arr[k + 1] for k in range(N - 1))
def sort_arr(arr, D, N):
    """Sort arr in place using only swaps of pairs (i, j) with
    arr[i] - arr[j] == D; return the number of swaps, or -1 when stuck.

    At each position the swap partner with the smallest value is chosen.
    """
    swaps = 0
    for i in range(N):
        if is_sorted(arr, N):
            return swaps
        best = None
        for j in range(i + 1, N):
            # Candidate partner: exactly D smaller; keep the minimal value.
            if arr[i] - arr[j] == D and (best is None or arr[j] < arr[best]):
                best = j
        if best is None:
            return -1
        arr[i], arr[best] = arr[best], arr[i]
        # print(arr)
        swaps += 1
    return swaps
# Read the number of test cases; exit quietly on missing/invalid input.
# NOTE(review): the bare `except` also swallows KeyboardInterrupt.
try :
    T = int(input())
except :
    quit()
# Each test case: N (array length) and D (required difference for a swap),
# then the array; print the swap count (or -1 when unsortable).
for test_case in range(T) :
    N, D = map(int, input().split())
    arr = list(map(int, input().split()))
    swaps = sort_arr(arr, D, N)
    print(swaps)
992,686 | 497c5980dce27ffe702598db7da323f424b50828 | import random
import matplotlib.pyplot as plt
import numpy as np
import numpy.random
import math
def SavePlot(X, Y, w_t, w, count):
    """Scatter the labeled points (label -1 red, +1 blue) and draw the target
    separator w_t (green) and the current hypothesis w (red), saving the
    figure as ./Plots/Plot_<count>.png.

    NOTE(review): assumes the ./Plots directory already exists.
    """
    filename = './Plots/Plot_' + str(count) + '.png'
    Red = X[Y[:]==-1, :]
    # print (Red)
    # print ()
    Blue = X[Y[:]==1, :]
    # print (Blue)
    plt.axis((0,1,0,1))
    # Columns 1 and 2 are the coordinates (column 0 is the bias term).
    plt.scatter(Red[:,1], Red[:,2], c='r')
    plt.scatter(Blue[:,1], Blue[:,2], c='b')
    # Line w0 + w1*x + w2*y = 0 evaluated at x = 0 and x = 1.
    plt.plot([0,1],[(w_t[0] + 0*w_t[1])/(-w_t[2]), (w_t[0] + 1*w_t[1])/(-w_t[2])], c='g')
    plt.plot([0,1],[(w[0] + 0*w[1])/(-w[2]), (w[0] + 1*w[1])/(-w[2])], c='r')
    plt.savefig(filename)
    plt.close()
def Generate_Linearly_Separable_Data(w, n):
    """Generate n random points in [0,1]^2 (with a leading bias 1) labeled
    -1 when w.x > 0 and +1 otherwise; returns (X, Y) and echoes each pair."""
    rows = []
    labels = []
    for _ in range(n):
        x = np.array([1, random.random(), random.random()])
        rows.append(x)
        labels.append(-1 if np.dot(w, x) > 0 else 1)
    X = np.array(rows)
    Y = np.array(labels, dtype=float)
    for i, x in enumerate(X):
        print (x, Y[i])
    return X, Y
def Perceptron(X, Y, w, gamma):
    """One perceptron step: move w toward the first misclassified sample.

    Returns (updated w, number of currently misclassified samples).
    """
    # Signed margins: negative entries are misclassified.
    margins = np.multiply(np.matmul(X, w), Y)
    wrong = [idx for idx, m in enumerate(margins) if m < 0]
    if wrong:
        first = wrong[0]
        w = w + gamma * Y[first] * X[first]
    return w, len(wrong)
def Main():
    """Generate linearly separable data from a fixed target separator, run
    the perceptron until no sample is misclassified, and save a plot of the
    decision boundary every 10 iterations (plus a final one)."""
    # Target function
    w_t = np.array([1,-1,-1])
    n = 100
    gamma = 0.01  # learning rate
    X, Y = Generate_Linearly_Separable_Data(w_t,n)
    # Initial Guess
    w = np.array([-0.9,0,1])
    i=0
    b = 1  # misclassification count; seeded non-zero to enter the loop
    while b>0:
        w, b = Perceptron(X, Y, w, gamma)
        if i%10==0:
            SavePlot(X, Y, w_t, w, i)
        # Report iteration, errors remaining, and w normalized by its bias.
        print (i, b, w/w[0])
        i += 1
    SavePlot(X, Y, w_t, w, i)
Main()
|
992,687 | f9adf4b6c9566336603902cced353b889c944dbb | import numpy as np
from mmdet3d.apis import inference_detector, init_detector
from kitti_util import *
class Detector:
    """Wraps an mmdetection3d model plus KITTI calibration to produce 2-D
    (image-plane) and 3-D box predictions from a LiDAR point-cloud file."""
    def __init__(self, checkpoint, config, calib_file, from_video=True):
        # Build the detector from its config/weights and load the
        # LiDAR-to-camera calibration.
        self.model = init_detector(config, checkpoint)
        self.calib = Calibration(calib_file, from_video=from_video)
    def run(self, data_bin, threshold=0.3):
        """Run inference on one .bin point cloud, keeping boxes with
        score >= threshold.

        Returns (pred_2d, pred_3d): per-object [x1, y1, x2, y2] image boxes
        and the corresponding 3-D corner arrays.
        """
        result, data = inference_detector(self.model, data_bin)
        obj_ind = result[0]['scores_3d'] >= threshold
        pred_3d = result[0]['boxes_3d'].corners[obj_ind, ...]
        pred_2d = []
        for obj in pred_3d:
            # Project the 3-D corners into the image and take the
            # axis-aligned bounding rectangle of the projections.
            obj_2d = self.calib.project_velo_to_image(obj)
            pred_2d.append([np.min(obj_2d, axis=0)[0], np.min(obj_2d, axis=0)[1],
                            np.max(obj_2d, axis=0)[0], np.max(obj_2d, axis=0)[1]])
        return pred_2d, [xx.squeeze() for xx in np.split(pred_3d, pred_3d.shape[0], axis=0)]
if __name__ == "__main__":
    # Smoke test: run the SECOND (KITTI car) detector on a single LiDAR frame
    # with a 0.5 score threshold. Paths are machine-specific.
    detector = Detector(checkpoint="/home/yzy/PycharmProjects/AutoDrive/mmdetection3d/checkpoints/second/epoch_40.pth",
                        config="/home/yzy/PycharmProjects/AutoDrive/mmdetection3d/configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py",
                        calib_file="/home/yzy/Downloads/2011_09_26/2011_09_26_drive_0023_sync")
    pred_2d, pred_3d = detector.run("/home/yzy/Downloads/2011_09_26/2011_09_26_drive_0023_sync/velodyne_points/data/0000000000.bin", 0.5)
    print(len(pred_2d), pred_3d)
992,688 | 197f28cfceb3959e2f45c7a5662bf1b2527862e6 | import time
import wiringpi as wp
from constants import *
from utils import angle_to_time, cm_to_time
gpio = wp.GPIO(wp.GPIO.WPI_MODE_PINS)
class Car:
    """Raspberry Pi robot car driven via wiringPi software PWM.

    Provides basic locomotion, ultrasonic distance measurement, line tracing
    and IR obstacle detection. Pin numbers (MOTOR_*, *_TRACER, *_IR,
    trig_pin/echo_pin) and the speed/logic constants come from constants.py
    (imported with *), so their exact values are not visible here.
    """
    def __init__(self):
        self.setup()
    def setup(self):
        """Initialise wiringPi, set pin modes, and create one software-PWM
        channel per motor spanning [MIN_SPEED, MAX_SPEED]."""
        wp.wiringPiSetup()
        for pin in OUTPUTS:
            wp.pinMode(pin, 1)
        for pin in INPUTS:
            wp.pinMode(pin, gpio.INPUT)
        wp.softPwmCreate(MOTOR_1, MIN_SPEED, MAX_SPEED)
        wp.softPwmCreate(MOTOR_2, MIN_SPEED, MAX_SPEED)
        wp.softPwmCreate(MOTOR_3, MIN_SPEED, MAX_SPEED)
        wp.softPwmCreate(MOTOR_4, MIN_SPEED, MAX_SPEED)
    def forward(self, speed=100):
        """Drive forward at `speed` percent of MAX_SPEED.

        NOTE(review): speed=0 would raise ZeroDivisionError (100 / speed).
        """
        wp.softPwmWrite(MOTOR_1, int(MAX_SPEED / (100 / speed)))
        wp.softPwmWrite(MOTOR_2, MIN_SPEED)
        wp.softPwmWrite(MOTOR_3, int(MAX_SPEED / (100 / speed)))
        wp.softPwmWrite(MOTOR_4, MIN_SPEED)
    def stop(self):
        """Cut power to all motors."""
        wp.softPwmWrite(MOTOR_1, MIN_SPEED)
        wp.softPwmWrite(MOTOR_2, MIN_SPEED)
        wp.softPwmWrite(MOTOR_3, MIN_SPEED)
        wp.softPwmWrite(MOTOR_4, MIN_SPEED)
    def right(self):
        """Spin right in place (opposite sides driven in opposite senses)."""
        wp.softPwmWrite(MOTOR_1, MAX_SPEED)
        wp.softPwmWrite(MOTOR_2, MIN_SPEED)
        wp.softPwmWrite(MOTOR_3, MIN_SPEED)
        wp.softPwmWrite(MOTOR_4, MAX_SPEED)
    def left(self):
        """Spin left in place."""
        wp.softPwmWrite(MOTOR_1, MIN_SPEED)
        wp.softPwmWrite(MOTOR_2, MAX_SPEED)
        wp.softPwmWrite(MOTOR_3, MAX_SPEED)
        wp.softPwmWrite(MOTOR_4, MIN_SPEED)
    def backward(self, speed=100):
        """Drive backward at `speed` percent of MAX_SPEED."""
        wp.softPwmWrite(MOTOR_1, MIN_SPEED)
        wp.softPwmWrite(MOTOR_2, int(MAX_SPEED / (100 / speed)))
        wp.softPwmWrite(MOTOR_3, MIN_SPEED)
        wp.softPwmWrite(MOTOR_4, int(MAX_SPEED / (100 / speed)))
    def smooth_left(self):
        """Curve left: one side at half power, the other at full."""
        wp.softPwmWrite(MOTOR_1, int(MAX_SPEED/2))
        wp.softPwmWrite(MOTOR_2, MIN_SPEED)
        wp.softPwmWrite(MOTOR_3, int(MAX_SPEED))
        wp.softPwmWrite(MOTOR_4, MIN_SPEED)
    def smooth_right(self):
        """Curve right: one side at full power, the other at one eighth."""
        wp.softPwmWrite(MOTOR_1, MAX_SPEED)
        wp.softPwmWrite(MOTOR_2, MIN_SPEED)
        wp.softPwmWrite(MOTOR_3, int(MAX_SPEED / 8))
        wp.softPwmWrite(MOTOR_4, MIN_SPEED)
    def get_distance(self):
        """Measure distance with the ultrasonic sensor and return it in cm
        (rounded). Pulses trig_pin and times the echo_pin high period;
        34300 cm/s is the speed of sound, halved for the round trip."""
        start_time, end_time = 0, 0
        wp.digitalWrite(trig_pin, gpio.HIGH)
        time.sleep(0.00001)
        wp.digitalWrite(trig_pin, gpio.LOW)
        # Busy-wait for the echo pulse edges.
        while wp.digitalRead(echo_pin) == 0:
            start_time = time.time()
        while wp.digitalRead(echo_pin) == 1:
            end_time = time.time()
        distance = (end_time - start_time) * 34300 / 2
        return round(distance)
    def get_trace(self):
        """Read both line tracers and return the steering command
        (RIGHT/LEFT/FORWARD/STOP) that keeps the car on the black line."""
        left_tracer = int(wp.digitalRead(LEFT_TRACER))
        right_tracer = int(wp.digitalRead(RIGHT_TRACER))
        if left_tracer == NOT_BLACK and right_tracer == BLACK:
            return RIGHT
        elif right_tracer == NOT_BLACK and left_tracer == BLACK:
            return LEFT
        elif right_tracer == BLACK and left_tracer == BLACK:
            return FORWARD
        elif right_tracer == NOT_BLACK and left_tracer == NOT_BLACK:
            return STOP
    def get_obstacle(self):
        """Read both IR sensors and return the direction command that avoids
        the detected obstacle (or STOP when both sides are blocked)."""
        left_ir = int(wp.digitalRead(LEFT_IR))
        right_ir = int(wp.digitalRead(RIGHT_IR))
        if left_ir == OBSTACLE and right_ir == NOT_OBSTACLE:
            return LEFT
        elif left_ir == NOT_OBSTACLE and right_ir == OBSTACLE:
            return RIGHT
        elif left_ir == NOT_OBSTACLE and right_ir == NOT_OBSTACLE:
            return FORWARD
        elif left_ir == OBSTACLE and right_ir == OBSTACLE:
            return STOP
    def right_angle_turn(self, angle):
        """Spin right for the time corresponding to `angle` degrees, then stop."""
        self.right()
        time.sleep(angle_to_time(angle))
        self.stop()
    def left_angle_turn(self, angle):
        """Spin left for the time corresponding to `angle` degrees, then stop."""
        self.left()
        time.sleep(angle_to_time(angle))
        self.stop()
    def metered_forward(self, cm):
        """Drive forward roughly `cm` centimetres at half speed, then stop."""
        self.forward(50)
        time.sleep(cm_to_time(cm))
        self.stop()
    def metered_backward(self, cm):
        """Drive backward roughly `cm` centimetres at half speed, then stop."""
        self.backward(50)
        time.sleep(cm_to_time(cm))
        self.stop()
|
992,689 | b007ab218561947c65d17e3ccec9841a1e65d71a | from django.conf.urls import url
from . import views
app_name = 'notepad'
# URL routes for the notepad app. Ordering matters: the final catch-all
# r'^(?P<page_name>.+)$' must stay last or it would shadow the specific
# notepad/... routes above it.
urlpatterns = [
    url(r'^notepad/random$', views.random, name='random'),
    url(r'^notepad/topage$', views.topage, name='topage'),
    url(r'^notepad/add/(?P<page_name>.+)$', views.add, name='add'),
    url(r'^notepad/hideform/(?P<page_name>.+)$', views.hideform, name='hideform'),
    url(r'^notepad/hide/(?P<page_name>.+)$', views.hide, name='hide'),
    url(r'^notepad/editform/(?P<page_name>.+)$', views.editform, name='editform'),
    url(r'^notepad/edit/(?P<page_name>.+)$', views.edit, name='edit'),
    url(r'^notepad/moveform/(?P<page_name>.+)$', views.moveform, name='moveform'),
    url(r'^notepad/move/(?P<page_name>.+)$', views.move, name='move'),
    url(r'^notepad/monitor$', views.monitor, name='monitor'),
    url(r'^(?P<page_name>.+)$', views.view, name='view'),
]
|
992,690 | 2bd2f4ce7f5eaa99c71b82d5ab56bca2891b8778 | """Test snippets to try out stuff from the book "Gaussian Processes for Machine
Learning" by Rasmussen & Williams (a.k.a. R&W in the docstrings below)."""
import sys
from math import pi, log
import numpy as np
from numpy import dot, identity, transpose
import scipy
import scipy.stats as stats
from scipy.linalg import cholesky
from scipy.stats import norm
from scipy.spatial.distance import sqeuclidean
from numpy.linalg import inv
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
def axes_maker(rows, cols):
    """Return a closure that yields the next subplot axes of a single
    rows x cols figure on each call, left-to-right, top-to-bottom."""
    fig = plt.figure()
    counter = [1]  # boxed so the nested function can rebind it

    def next_axes(**kwargs):
        position = counter[0]
        counter[0] = position + 1
        return fig.add_subplot(rows, cols, position, **kwargs)

    return next_axes
def squared_exp_cov(x_p, x_q):
    """Squared-exponential covariance between outputs f(x_p) and f(x_q):
    exp(-||x_p - x_q||^2 / 2), as per Eq. 2.16 of R&W.

    NOTE: unlike the book's sq_dist helper, sqeuclidean handles a single
    pair of vectors, not all pairwise distances between matrix columns.
    """
    sq_dist = sqeuclidean(x_p, x_q)
    return np.exp(-sq_dist / 2.0)
def my_multivariate_sample(mean, cov, cholesky_epsilon=1e-8):
    """Draw one sample from N(mean, cov) via Cholesky factorization, as per
    Sec. A.2 of R&W. A jitter of cholesky_epsilon * I is added to cov for
    numerical stability."""
    n = mean.size
    jitter = np.identity(n) * cholesky_epsilon
    factor = cholesky(cov + jitter, lower=True)
    independent = norm.rvs(0, 1, size=(n, 1))
    return mean + dot(factor, independent)
def multivariate_sample(mean, cov, cholesky_epsilon=1e-8):
    '''Use scipy to create a multivariate normal sample; fall back on the
    homemade Cholesky sampler for scipy < 0.14.0 (which lacks
    scipy.stats.multivariate_normal).

    NOTE(review): the string version comparison is fragile (e.g. '0.9' >
    '0.14.0'), and the fallback ignores the cholesky_epsilon argument,
    always passing the default 1e-8.
    '''
    if scipy.__version__ < '0.14.0':
        print 'You have an old Scipy (version < 0.14.0). Falling back on '\
              'homemade algorithm to sample from multivariate normal.'
        return my_multivariate_sample(mean, cov, cholesky_epsilon=1e-8)
    else:
        rv = scipy.stats.multivariate_normal(mean, cov)
        return rv.rvs()
def make_cov_array(X_p, X_q, cov_fun):
    """Build the covariance matrix K with K[i, j] = cov_fun(X_p[:, i], X_q[:, j]).

    X_p and X_q are D x n matrices: D is the input dimension and each column
    is one input (e.g. training) case.
    """
    n_p, n_q = X_p.shape[1], X_q.shape[1]
    K = np.zeros((n_p, n_q))
    for row in range(n_p):
        left = X_p[:, row]
        for col in range(n_q):
            K[row, col] = cov_fun(left, X_q[:, col])
    return K
def make_se_cov_array(X_p, X_q):
    """Covariance matrix between the columns of X_p and X_q using the
    squared-exponential kernel."""
    return make_cov_array(X_p, X_q, cov_fun=squared_exp_cov)
def unconditioned_sample(x_star=None,
                         cov_mtx_calculator=make_se_cov_array):
    """Create an unconditioned GP sample for one-dimensional test inputs x_star.

    Bug fix: the old default np.arange(-5, 5, 0.2) was a mutable default that
    got reshaped in place, silently mutating both the shared default array
    and any caller-supplied 1-D array. The reshape is now non-mutating.
    """
    if x_star is None:
        x_star = np.arange(-5, 5, 0.2)
    if len(x_star.shape) == 1:
        # Row vector of inputs; reshape() returns a new view, leaving the
        # caller's array untouched.
        x_star = x_star.reshape(1, x_star.shape[0])
    K = cov_mtx_calculator(x_star, x_star)
    return (x_star, multivariate_sample(np.zeros(x_star.shape).T, K))
def conditioned_mean_cov_old(X, y,
                             x_star=np.arange(-5, 5, 0.2),
                             noise_var=0,
                             cov_mtx_calculator=make_se_cov_array):
    '''Estimate mean and covariance for test inputs x_star, conditioned on
    the observations in X. X should be a D x n matrix and y a column(!)
    vector of length n. x_star is a column vector of test inputs.

    Superseded by conditioned_mean_cov, which uses the numerically stabler
    Cholesky-based Algorithm 2.1 instead of an explicit matrix inverse.

    NOTE(review): the in-place shape assignment mutates the mutable default
    x_star array (and any caller-supplied 1-D array).
    '''
    if len(x_star.shape) == 1:
        x_star.shape = (1, x_star.shape[0])
    # Covariance blocks between training inputs X and test inputs x_star.
    K_x_xs = cov_mtx_calculator(X, x_star)
    K_x_x = cov_mtx_calculator(X, X)
    K_xs_x = cov_mtx_calculator(x_star, X)
    K_xs_xs = cov_mtx_calculator(x_star, x_star)
    # Invert the (noise-regularized) training covariance directly.
    K_x_x_inv = inv(K_x_x + noise_var * np.identity(K_x_x.shape[0]))
    mean = dot(dot(K_xs_x, K_x_x_inv), y)
    cov = K_xs_xs - dot(dot(K_xs_x, K_x_x_inv), K_x_xs)
    return mean, cov
def conditioned_mean_cov(X, y, x_star, noise_var=0,
                         cov_mtx_calculator=make_se_cov_array):
    '''Posterior mean and covariance at test inputs x_star, conditioned on
    the observations in X (a D x n matrix) with targets y (a column vector
    of length n). Based on Algorithm 2.1 of R&W.
    '''
    # Covariance blocks: train/train, train/test and test/test.
    K = cov_mtx_calculator(X, X)
    K_star = cov_mtx_calculator(X, x_star)
    K_star_star = cov_mtx_calculator(x_star, x_star)
    # Cholesky factor of the noise-regularized training covariance.
    factor = cholesky(K + noise_var * identity(K.shape[0]), lower=True)
    # alpha = (K + noise I)^-1 y via two triangular solves.
    alpha = np.linalg.solve(factor.T, np.linalg.solve(factor, y))
    posterior_mean = dot(K_star.T, alpha)
    v = np.linalg.solve(factor, K_star)
    posterior_cov = K_star_star - dot(v.T, v)
    return posterior_mean, posterior_cov
def conditioned_sample(X, y,
                       x_star=np.arange(-5, 5, 0.2),
                       noise_var=0,
                       cov_mtx_calculator=make_se_cov_array):
    '''Draw one GP sample conditioned on the observations. X is a D x n
    matrix and y a column(!) vector of length n; x_star holds the test
    inputs. Returns (x_star, sample, posterior covariance).
    '''
    posterior_mean, posterior_cov = conditioned_mean_cov(
        X, y, x_star, noise_var, cov_mtx_calculator)
    sample = multivariate_sample(posterior_mean, posterior_cov)
    return (x_star, sample, posterior_cov)
def gaussian_process_mean_pred(X, y, noise_var, x_star,
                               cov_mtx_calculator=make_se_cov_array):
    """Implementation of Algorithm 2.1 of R&W. X is a D x n matrix of
    observed driver variables, and y is the corresponding n-element column
    vector of observed dependent variables. x_star are the test inputs.

    Returns (means, variances, log marginal likelihood).
    """
    K = cov_mtx_calculator(X, X)
    L = cholesky(K + noise_var * identity(K.shape[0]), lower=True)
    # a = (K + noise I)^-1 y via two triangular solves (cheaper and more
    # stable than forming inv(L) explicitly as before).
    a = np.linalg.solve(L.T, np.linalg.solve(L, y))
    def single_input_regression(x_in):
        # Column-vector view of the test input; reshape() does not mutate
        # the caller's array (the old in-place .shape assignment did).
        x_in = x_in.reshape(x_in.shape[0], 1)
        k_in = cov_mtx_calculator(X, x_in)
        mean = dot(k_in.T, a)
        v = np.linalg.solve(L, k_in)
        var = cov_mtx_calculator(x_in, x_in) - dot(v.T, v)
        return mean[0], var[0]
    mean_var = [single_input_regression(x_star[:, i])
                for i in range(x_star.shape[1])]
    mean, var = zip(*mean_var)
    n = X.shape[1]
    # Bug fix (R&W Alg. 2.1 / Eq. 2.30): the log-determinant term is
    # sum(log(diag(L))); the original summed diag(L) without the log.
    log_marg_lik = -0.5 * dot(y.T, a) - np.log(L.diagonal()).sum() \
                   - n/2. * log(2*pi)
    return np.array(mean), np.array(var), log_marg_lik
if __name__ == '__main__':
    # Demo script: reproduces several figures from Chapter 2 of R&W in a
    # 2 x 3 grid of subplots.
    next_axes = axes_maker(2, 3)
    # # Testing the multivariate sampler.
    # cov = np.array([[1, .9], [.9, 1]])
    # mean = np.array([0, 0])
    # samples = [multivariate_sample(mean, cov) for i in range(10000)]
    # x, y = zip(*samples)
    # axes = next_axes()
    # axes.plot(x, y, '.')
    # axes.set_title("Multivariate sample,\ncov = [%2.1f %2.1f; %2.1f %2.1f]." % \
    #                tuple(np.array(cov).flatten()))
    # Plot the covariance matrix for the above observations and a subset of the
    # predictions
    x = np.arange(-5, 5, 0.25)
    x.shape = (1, x.shape[0])
    cov = make_cov_array(x, x, squared_exp_cov)
    i = np.arange(cov.shape[0])
    j = np.arange(cov.shape[1])
    i, j = np.meshgrid(i, j)
    axes = next_axes(projection="3d")
    surf = axes.plot_surface(i, j, cov, rstride=1, cstride=1,
                             linewidth=1, antialiased=True)
    axes.set_title("Covariance matrix for unconditioned GP")
    # Unconditioned samples from GP with SE covariance function
    axes = next_axes()
    for i in range(4):
        x, y = unconditioned_sample()
        print "y.shape:", y.shape
        axes.plot(x.T, y)
    axes.set_xlim([-5, 5])
    axes.set_title("Samples from unconditioned GP,\nref Fig. 2.2 (a) of R&W")
    # Adding observations, sample from the posterior
    observations = np.array(((-4, -2), (-3, 0), (-1, 1), (0, 2), (1, -1)))
    X = observations[:,0]
    X.shape = (1, X.shape[0])
    y = observations[:,1]
    y.shape = (y.shape[0], 1)
    axes = next_axes()
    axes.plot(X.T, y, '+', ms=10)
    for i in range(4):
        x_sampled, y_sampled, cov = conditioned_sample(X, y)
        print "y_sampled.shape:", y_sampled.shape
        axes.plot(x_sampled.T, y_sampled)
    axes.set_xlim([-5, 5])
    axes.set_title("Samples from conditioned GP,\nref Fig. 2.2 (b) of R&W")
    # Plot the covariance matrix for the above observations and a subset of the
    # predictions
    axes = next_axes(projection="3d")
    step=0.25
    cov = conditioned_sample(X, y, x_star=np.arange(-5, 5, step))[2]
    i = np.arange(cov.shape[0])
    j = np.arange(cov.shape[1])
    i, j = np.meshgrid(i, j)
    surf = axes.plot_surface(i, j, cov, rstride=1, cstride=1,
                             linewidth=1, antialiased=True)
    axes.set_title("Covariance matrix for conditioned GP\n (showing regressions " \
                   "with step length %f)" % step)
    # GP mean prediction on the observations
    axes = next_axes()
    axes.plot(X.T, y, '+', ms=10)
    x_star = np.arange(-5, 5, 0.1)
    x_star.shape = (1, x_star.shape[0])
    noise_var=0.01
    mean, var, log_lik = gaussian_process_mean_pred(X, y, noise_var=noise_var,
                                                    x_star=x_star)
    axes.plot(x_star.T, mean)
    # Dashed lines show the predictive mean +/- variance band.
    axes.plot(x_star.T, mean + var, '--')
    axes.plot(x_star.T, mean - var, '--')
    axes.set_xlim([-5, 5])
    axes.set_title("GP mean prediction with noise %f +/- variance" % noise_var)
    # "Predicting" a periodic signal with a non-periodic covariance function
    X = np.linspace(-25, 25, 50 * 4)
    X.shape = (1, X.shape[0])
    y = np.sin(X.T)
    x_star=np.arange(0, 40, 0.4)
    axes = next_axes()
    axes.plot(X.T, y, '+')
    for i in range(3):
        # Regression only on the last part of the observations due to numerical
        # instability of Cholesky decomposition.
        x_s, y_s, cov = conditioned_sample(X, y, x_star, noise_var=0.1)
        axes.plot(x_s.T, y_s)
    axes.set_xlim([-25, 40])
    axes.set_title("\"Predicting\" a sine using SE covariance func")
    plt.show()
|
992,691 | 551a974e4f676b539c1c435a0b352d0b70c3bb97 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-05 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the boolean `is_camping_friendly` flag (default False) to the
    Beach model."""
    dependencies = [
        ('beaches', '0003_auto_20180305_0050'),
    ]
    operations = [
        migrations.AddField(
            model_name='beach',
            name='is_camping_friendly',
            field=models.BooleanField(default=False),
        ),
    ]
992,692 | 8347763d97e724af4a7bd5f007c7a9f2a920a77a | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 02:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters People.sex and People.title to integer choice fields with
    Chinese verbose names/labels (stored as unicode escapes)."""
    dependencies = [
        ('hpscil', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='people',
            name='sex',
            field=models.IntegerField(choices=[(0, '\u7537'), (1, '\u5973')], default=0, verbose_name='\u6027\u522b'),
        ),
        migrations.AlterField(
            model_name='people',
            name='title',
            field=models.IntegerField(choices=[(0, '\u8bb2\u5e08'), (1, '\u526f\u6559\u6388'), (2, '\u6559\u6388'), (3, '\u9662\u58eb')], default=0, verbose_name='\u804c\u79f0'),
        ),
    ]
|
992,693 | 114bb5380d86a64975194b63228510ad60af73b9 | # -*- coding:utf-8 -*-
'''
Test module for renju.
@auther: Arata Kokubun
@date: 2018/1/3
'''
# Imports
import unittest as ut
from unittest.mock import MagicMock
from parameterized import parameterized
from gym_renju.envs.core.domain.player import PlayerColor
from gym_renju.envs.renju import RenjuBoard, RenjuState
from gym_renju.envs.utils.generator import BoardStateGenerator as bsg
class RenjuBoardTest(ut.TestCase):
    """Tests for RenjuBoard."""
    def test_act(self):
        """act() should return a board with a WHITE stone (value 2) at the
        action index and updated move count / last action."""
        board_size = 15
        action = 1
        before_board = RenjuBoard(board_size)
        actual_board = before_board.act(action, PlayerColor.WHITE)
        # Expected state: empty board with the white stone placed.
        expected_board_state = bsg.generate_empty(board_size)
        expected_board_state[action] = 2
        self.assertEqual(15, actual_board.get_board_size())
        self.assertEqual(expected_board_state, actual_board.get_board_state())
        self.assertEqual(1, actual_board.get_move_count())
        self.assertEqual(1, actual_board.get_last_action())
class RenjuStateTest(ut.TestCase):
    """Tests for RenjuState."""
    def test_act(self):
        """act() should delegate to the board's act() with the current player
        and flip the player for the next turn."""
        before_board = RenjuBoard(15)
        expected_board = RenjuBoard(9)
        # Stub out the board's act() to return a sentinel board.
        before_board.act = MagicMock()
        before_board.act.return_value = expected_board
        before_state = RenjuState(before_board, None, PlayerColor.BLACK)
        actual_state = before_state.act(19)
        self.assertEqual(expected_board, actual_state.get_board())
        self.assertEqual(PlayerColor.BLACK, actual_state.get_latest_player())
        self.assertEqual(PlayerColor.WHITE, actual_state.get_next_player())
        before_board.act.assert_called_with(19, PlayerColor.BLACK)
|
992,694 | d349fa8eb2fcf6faafbd462d21e1bf946e7004d2 | import mock
import libvirt
import difflib
import unittest
from see.context.resources import lxc
def compare(text1, text2):
    """Utility function for comparing texts; returns a newline-prefixed
    ndiff of the two, suitable for assertion failure messages."""
    lines1 = text1.splitlines(True)
    lines2 = text2.splitlines(True)
    pieces = ['']
    pieces.extend(difflib.ndiff(lines1, lines2))
    return '\n'.join(pieces)
class DomainXMLTest(unittest.TestCase):
    """Tests for lxc.domain_xml: XML generation for LXC libvirt domains."""
    def test_domain_xml(self):
        """XML with no network and no filesystem."""
        config = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices /></domain>"""
        results = lxc.domain_xml('foo', config, [])
        self.assertEqual(results, expected, compare(results, expected))
    def test_domain_xml_filesystem(self):
        """XML with filesystem."""
        config = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="foo" /><target dir="bar" /></filesystem></devices></domain>"""
        results = lxc.domain_xml('foo', config, [('foo', 'bar')])
        self.assertEqual(results, expected, compare(results, expected))
    def test_domain_xml_modifies(self):
        """Fields are modified if existing."""
        config = """<domain><name>bar</name><uuid>bar</uuid></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="foo" /><target dir="bar" /></filesystem></devices></domain>"""
        results = lxc.domain_xml('foo', config, [('foo', 'bar')])
        self.assertEqual(results, expected, compare(results, expected))
    def test_domain_xml_network(self):
        """XML with network fields are modified if existing."""
        config = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="foo" /><target dir="bar" /></filesystem><interface type="network">""" +\
                   """<source network="foo" /></interface></devices></domain>"""
        results = lxc.domain_xml('foo', config, [('foo', 'bar')], network_name='foo')
        self.assertEqual(results, expected, compare(results, expected))
    def test_domain_xml_network_modifies(self):
        """XML with network."""
        config = """<domain><devices><interface type="network">""" +\
                 """<source network="bar"/></interface></devices></domain>"""
        expected = """<domain><devices><interface type="network"><source network="foo" /></interface>""" +\
                   """<filesystem type="mount"><source dir="foo" /><target dir="bar" /></filesystem>""" +\
                   """</devices><name>foo</name><uuid>foo</uuid></domain>"""
        results = lxc.domain_xml('foo', config, [('foo', 'bar')], network_name='foo')
        self.assertEqual(results, expected, compare(results, expected))
class DomainCreateTest(unittest.TestCase):
    """Tests for lxc.domain_create: the hypervisor's defineXML() must receive
    the expected domain XML. The configuration file read is mocked out."""
    def test_create(self):
        """Create with no network and no filesystem."""
        xml = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices /></domain>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        with mock.patch('see.context.resources.lxc.open', mock.mock_open(read_data=xml), create=True):
            lxc.domain_create(hypervisor, 'foo', {'configuration': '/foo'})
        results = hypervisor.defineXML.call_args_list[0][0][0]
        self.assertEqual(results, expected, compare(results, expected))
    def test_create_filesystem(self):
        """Create with single filesystem."""
        xml = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="/bar/foo" /><target dir="/baz" /></filesystem></devices></domain>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        with mock.patch('see.context.resources.lxc.open', mock.mock_open(read_data=xml), create=True):
            with mock.patch('see.context.resources.lxc.os.makedirs'):
                lxc.domain_create(hypervisor, 'foo', {'configuration': '/foo', 'filesystem':
                                                      {'source_path': '/bar',
                                                       'target_path': '/baz'}})
        results = hypervisor.defineXML.call_args_list[0][0][0]
        self.assertEqual(results, expected, compare(results, expected))
    def test_create_filesystems(self):
        """Create with multiple filesystem."""
        xml = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="/bar/foo" /><target dir="/baz" /></filesystem><filesystem type="mount">""" +\
                   """<source dir="/dead/foo" /><target dir="/beef" /></filesystem></devices></domain>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        with mock.patch('see.context.resources.lxc.open', mock.mock_open(read_data=xml), create=True):
            with mock.patch('see.context.resources.lxc.os.makedirs'):
                lxc.domain_create(hypervisor, 'foo', {'configuration': '/foo', 'filesystem':
                                                      [{'source_path': '/bar',
                                                        'target_path': '/baz'},
                                                       {'source_path': '/dead',
                                                        'target_path': '/beef'}]})
        results = hypervisor.defineXML.call_args_list[0][0][0]
        self.assertEqual(results, expected, compare(results, expected))
    def test_create_network(self):
        """Create with network."""
        xml = """<domain></domain>"""
        expected = """<domain><name>foo</name><uuid>foo</uuid><devices><filesystem type="mount">""" +\
                   """<source dir="/bar/foo" /><target dir="/baz" /></filesystem><interface type="network">""" +\
                   """<source network="foo" /></interface></devices></domain>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        with mock.patch('see.context.resources.lxc.open', mock.mock_open(read_data=xml), create=True):
            with mock.patch('see.context.resources.lxc.os.makedirs'):
                lxc.domain_create(hypervisor, 'foo', {'configuration': '/foo', 'filesystem':
                                                      {'source_path': '/bar',
                                                       'target_path': '/baz'}}, network_name='foo')
        results = hypervisor.defineXML.call_args_list[0][0][0]
        self.assertEqual(results, expected, compare(results, expected))
class DomainDelete(unittest.TestCase):
    """Tests for lxc.domain_delete(): destroy active domains, undefine them,
    tolerate libvirt errors, and remove the shared filesystem folder."""

    def test_delete_destroy(self):
        """Domain is destroyed if active."""
        domain = mock.Mock()
        logger = mock.Mock()
        domain.isActive.return_value = True
        lxc.domain_delete(domain, logger, None)
        self.assertTrue(domain.destroy.called)

    def test_delete_destroy_error(self):
        """Domain destroy raises error."""
        domain = mock.Mock()
        logger = mock.Mock()
        domain.isActive.return_value = True
        # destroy() failing must not prevent the undefine step.
        domain.destroy.side_effect = libvirt.libvirtError("BOOM")
        lxc.domain_delete(domain, logger, None)
        self.assertTrue(domain.undefine.called)

    def test_delete_undefine(self):
        """Domain is undefined."""
        domain = mock.Mock()
        logger = mock.Mock()
        domain.isActive.return_value = False
        lxc.domain_delete(domain, logger, None)
        self.assertTrue(domain.undefine.called)

    @mock.patch('see.context.resources.lxc.os.path.exists')
    def test_delete_undefine_error(self, os_mock):
        """Domain undefine raises error."""
        domain = mock.Mock()
        logger = mock.Mock()
        domain.isActive.return_value = False
        # undefine() failing must not prevent the filesystem cleanup check.
        domain.undefine.side_effect = libvirt.libvirtError("BOOM")
        lxc.domain_delete(domain, logger, '/foo/bar/baz')
        self.assertTrue(os_mock.called)

    @mock.patch('see.context.resources.lxc.shutil.rmtree')
    @mock.patch('see.context.resources.lxc.os.path.exists')
    def test_delete_filesystem(self, os_mock, rm_mock):
        """Domain is undefined."""
        domain = mock.Mock()
        logger = mock.Mock()
        domain.isActive.return_value = False
        # Existing filesystem folder is removed via shutil.rmtree.
        os_mock.return_value = True
        lxc.domain_delete(domain, logger, 'foo/bar/baz')
        rm_mock.assert_called_with('foo/bar/baz')
class ResourcesTest(unittest.TestCase):
    """Tests for lxc.LXCResources: hypervisor connection, optional network
    provisioning on initialization, and release of resources on cleanup()."""

    @mock.patch('see.context.resources.lxc.libvirt')
    @mock.patch('see.context.resources.lxc.domain_create')
    def test_initialize_default(self, create_mock, libvirt_mock):
        """Resources initializer with no extra value."""
        resources = lxc.LXCResources('foo', {'domain': 'bar'})
        # Default hypervisor URI is the local LXC driver.
        libvirt_mock.open.assert_called_with('lxc:///')
        create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)

    @mock.patch('see.context.resources.lxc.libvirt')
    @mock.patch('see.context.resources.lxc.domain_create')
    def test_initialize_hypervisor(self, create_mock, libvirt_mock):
        """Resources initializer with hypervisor."""
        resources = lxc.LXCResources('foo', {'domain': 'bar', 'hypervisor': 'baz'})
        # Explicit 'hypervisor' configuration overrides the default URI.
        libvirt_mock.open.assert_called_with('baz')
        create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)

    @mock.patch('see.context.resources.lxc.libvirt')
    @mock.patch('see.context.resources.lxc.domain_create')
    @mock.patch('see.context.resources.network.create')
    def test_initialize_network(self, network_mock, create_mock, libvirt_mock):
        """Resources initializer with network."""
        network = mock.Mock()
        network.name.return_value = 'baz'
        network_mock.return_value = network
        resources = lxc.LXCResources('foo', {'domain': 'bar', 'network': 'baz'})
        network_mock.assert_called_with(resources.hypervisor, 'foo', 'baz')
        # The created network's name is forwarded to domain_create.
        create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name='baz')

    @mock.patch('see.context.resources.lxc.libvirt')
    @mock.patch('see.context.resources.lxc.domain_create')
    @mock.patch('see.context.resources.network.delete')
    @mock.patch('see.context.resources.lxc.domain_delete')
    def test_cleanup(self, delete_mock, network_delete_mock, create_mock, libvirt_mock):
        """Resources are released on cleanup."""
        resources = lxc.LXCResources('foo', {'domain': 'bar'})
        resources._domain = mock.Mock()
        resources._network = mock.Mock()
        resources._hypervisor = mock.Mock()
        resources.cleanup()
        # No filesystem configured: domain_delete gets None for the folder path.
        delete_mock.assert_called_with(resources.domain, mock.ANY, None)
        network_delete_mock.assert_called_with(resources.network)
        self.assertTrue(resources._hypervisor.close.called)

    @mock.patch('see.context.resources.lxc.libvirt')
    @mock.patch('see.context.resources.lxc.domain_create')
    @mock.patch('see.context.resources.network.delete')
    @mock.patch('see.context.resources.lxc.domain_delete')
    def test_cleanup_filesystem(self, delete_mock, network_delete_mock, create_mock, libvirt_mock):
        """Shared folder is cleaned up."""
        resources = lxc.LXCResources('foo', {'domain': 'bar', 'filesystem':
                                             {'source_path': '/bar',
                                              'target_path': '/baz'}})
        resources._domain = mock.Mock()
        resources._network = mock.Mock()
        resources._hypervisor = mock.Mock()
        resources.cleanup()
        # Shared folder path is <source_path>/<identifier>.
        delete_mock.assert_called_with(resources.domain, mock.ANY, '/bar/foo')
|
992,695 | 11ff604767060ce3d99bdc80c6f6bfc44f3c25a9 | import pandas as pd
def clean_data():
    """Aggregate hourly API temperature readings into per-day summaries.

    Reads 'future_temperature_from_api.csv' and condenses each row into a
    night average (columns 1-8) and a day average (columns 9+), then reads
    'past_temperature_from_api.csv' and reduces each row to its high/low.
    Both summaries are written to *_cleaned.csv files.

    Readings are strings whose last character is a unit symbol (stripped
    before conversion); the literal string 'None' marks a missing reading.

    :return: tuple (future_dates, past_dates) — the first-column values of
             each processed row, in file order.
    """
    # --- future temperatures: hourly readings -> day/night averages ---
    future_df = pd.read_csv('future_temperature_from_api.csv')
    day_temperature_list = []
    night_temperature_list = []
    future_dates = []
    for row in future_df.values:
        day_sum = night_sum = 0
        day_count = night_count = 0
        for col in range(1, len(row)):
            if row[col] != 'None':
                value = float(row[col][:-1])  # drop trailing unit character
                if col < 9:
                    night_sum += value
                    night_count += 1
                else:
                    day_sum += value
                    day_count += 1
        day_temperature_list.append(round(day_sum / day_count, 2))
        night_temperature_list.append(round(night_sum / night_count, 2))
        future_dates.append(row[0])
    pd.DataFrame(
        {'Day Temperature': day_temperature_list, 'Night Temperature': night_temperature_list},
        index=future_dates,
    ).to_csv("future_temperature_from_api_cleaned.csv", index=True, sep=',')

    # --- past temperatures: hourly readings -> daily high/low ---
    past_df = pd.read_csv('past_temperature_from_api.csv')
    high_temperature_list = []
    low_temperature_list = []
    past_dates_format = []
    for row in past_df.values:
        readings = [float(row[col][:-1]) for col in range(1, len(row)) if row[col] != 'None']
        high_temperature_list.append(max(readings))
        low_temperature_list.append(min(readings))
        past_dates_format.append(row[0])
    pd.DataFrame(
        {'Past High Temperature': high_temperature_list, 'Past Low Temperature': low_temperature_list},
        index=past_dates_format,
    ).to_csv("past_temperature_from_api_cleaned.csv", index=True, sep=',')
    return future_dates, past_dates_format
|
992,696 | a8e97d7cf95b17b77f8a91ca9a009f924a20a45e | from django.shortcuts import render
from django.views.generic import ListView
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticated
from .models import LectureHistory
from .serializers import LectureHistoryChangeSerializer, LectureHistorySerachSerializer
from student.mixin import DefaultMixin, LoginRequiredMixin
from lecture.models import Subject
from university.models import CompletionDivision
# Create your views here.
'''
UI CLASS
'''
class LectureHistoryLV(DefaultMixin, LoginRequiredMixin, ListView):
    """List view rendering the logged-in user's lecture history page."""
    model = LectureHistory
    template_name = 'lecture_history.html'
    # Marker consumed by DefaultMixin to highlight the active navigation entry.
    active = 'lectureHistoryActive'

    def get_queryset(self):
        # Only the authenticated user's own history records.
        return LectureHistory.objects.filter(user_id=self.request.user.id)

    # Load subject data for the page's selection widgets.
    def get_context_data(self, *, object_list=None, **kwargs):
        context = super().get_context_data(**kwargs)
        context['subjects'] = Subject.get_university_subject_list(self.request.user.id)
        # Completion divisions are scoped to the student's university
        # ('student' is expected in context — presumably set by DefaultMixin; verify).
        context['divisions'] = CompletionDivision.objects.filter(university=context['student'].university)
        return context
'''
API ViewSet
'''
class LectureHistoryListViewset(ModelViewSet):
    """REST endpoint for a user's lecture history.

    Listing uses the search serializer; every mutating action
    (create/update/delete) uses the change serializer.
    """
    queryset = LectureHistory.objects.all()
    serializer_class = LectureHistorySerachSerializer
    permission_classes = (IsAuthenticated, )

    def get_serializer_class(self):
        # Read-only listing gets the search serializer; anything else mutates.
        if self.action == 'list':
            return LectureHistorySerachSerializer
        return LectureHistoryChangeSerializer

    def get_queryset(self):
        # Restrict results to the authenticated user's own history.
        return LectureHistory.get_user_history(self.request.user.id)

    def perform_create(self, serializer):
        # Stamp new records with the requesting user.
        serializer.save(user=self.request.user)
|
992,697 | 78691aece7b8492c4bde013011b27a3d90cb9ddb | #!/usr/bin/python3.7
# 1. Самые активные участники. Таблица из 2 столбцов: login автора, количество его
# коммитов. Таблица отсортирована по количеству коммитов по убыванию. Не
# более 30 строк. Анализ производится на заданном периоде времени и заданной
# ветке.
import requests
import ApiBase
# Build the "list commits" request for the configured repository, period and branch.
url = f"https://api.github.com/repos/{ApiBase.userName}/{ApiBase.repoName}/" +\
      f"commits?since={ApiBase.dateStart}&until={ApiBase.dateStop}&sha={ApiBase.branchName}"

payload = {}
headers = {
    'Accept': 'application/vnd.github.v3+json',
    'Authorization': f'token {ApiBase.token} '
}

response = requests.request("GET", url, headers=headers, data=payload)

# Each commit item carries an "author" JSON object with a "login" field.
# Count commits per login, then print logins sorted by commit count, descending.
resultDict = {}
for item in response.json():
    author = item.get("author")
    if not author:
        # Commits not linked to a GitHub account have "author": null — skip them.
        continue
    commitAuthorName = author["login"]
    resultDict[commitAuthorName] = resultDict.get(commitAuthorName, 0) + 1

# Fix: sort by commit count (the previous code sorted by login name), and cap
# the table at 30 rows as the task statement requires.
for entry in sorted(resultDict.items(), key=lambda kv: kv[1], reverse=True)[:30]:
    print(entry)
##########################################################################################
#### response sample for debug - one commit item
##########################################################################################
# Fix: the sample was a bare dict expression — built at import time and discarded.
# Bind it to a name so it is inert reference data instead of dead executable code.
RESPONSE_SAMPLE_COMMIT = {
    'sha': '6a3d7bf24713d08a2380bb3570ac38a678a7ce4f',
    'node_id': 'MDY6Q29tbWl0MjY0MjU5ODc4OjZhM2Q3YmYyNDcxM2QwOGEyMzgwYmIzNTcwYWMzOGE2NzhhN2NlNGY=',
    'commit': {'author': {'name': 'break1-Home', 'email': 'break1@yandex.ru', 'date': '2020-05-17T02:06:36Z'},
               'committer': {'name': 'break1-Home', 'email': 'break1@yandex.ru', 'date': '2020-05-17T02:06:36Z'},
               'message': 'test 1 start work', 'tree': {'sha': '46476a8800fdeeb18015447f23b34ea7b4bb8c0f',
                                                        'url': 'https://api.github.com/repos/break11/playrix_test/git/trees/46476a8800fdeeb18015447f23b34ea7b4bb8c0f'
                                                        },
               'url': 'https://api.github.com/repos/break11/playrix_test/git/commits/6a3d7bf24713d08a2380bb3570ac38a678a7ce4f',
               'comment_count': 0,
               'verification': {'verified': False, 'reason': 'unsigned', 'signature': None, 'payload': None}
               },
    'url': 'https://api.github.com/repos/break11/playrix_test/commits/6a3d7bf24713d08a2380bb3570ac38a678a7ce4f',
    'html_url': 'https://github.com/break11/playrix_test/commit/6a3d7bf24713d08a2380bb3570ac38a678a7ce4f',
    'comments_url': 'https://api.github.com/repos/break11/playrix_test/commits/6a3d7bf24713d08a2380bb3570ac38a678a7ce4f/comments',
    'author': {'login': 'break11',
               'id': 32346580,
               'node_id': 'MDQ6VXNlcjMyMzQ2NTgw',
               'avatar_url': 'https://avatars3.githubusercontent.com/u/32346580?v=4',
               'gravatar_id': '',
               'url': 'https://api.github.com/users/break11',
               'html_url': 'https://github.com/break11',
               'followers_url': 'https://api.github.com/users/break11/followers',
               'following_url': 'https://api.github.com/users/break11/following{/other_user}',
               'gists_url': 'https://api.github.com/users/break11/gists{/gist_id}',
               'starred_url': 'https://api.github.com/users/break11/starred{/owner}{/repo}',
               'subscriptions_url': 'https://api.github.com/users/break11/subscriptions',
               'organizations_url': 'https://api.github.com/users/break11/orgs',
               'repos_url': 'https://api.github.com/users/break11/repos',
               'events_url': 'https://api.github.com/users/break11/events{/privacy}',
               'received_events_url': 'https://api.github.com/users/break11/received_events',
               'type': 'User',
               'site_admin': False},
    'committer': {'login': 'break11',
                  'id': 32346580,
                  'node_id': 'MDQ6VXNlcjMyMzQ2NTgw',
                  'avatar_url': 'https://avatars3.githubusercontent.com/u/32346580?v=4',
                  'gravatar_id': '',
                  'url': 'https://api.github.com/users/break11',
                  'html_url': 'https://github.com/break11',
                  'followers_url': 'https://api.github.com/users/break11/followers',
                  'following_url': 'https://api.github.com/users/break11/following{/other_user}',
                  'gists_url': 'https://api.github.com/users/break11/gists{/gist_id}',
                  'starred_url': 'https://api.github.com/users/break11/starred{/owner}{/repo}',
                  'subscriptions_url': 'https://api.github.com/users/break11/subscriptions',
                  'organizations_url': 'https://api.github.com/users/break11/orgs',
                  'repos_url': 'https://api.github.com/users/break11/repos',
                  'events_url': 'https://api.github.com/users/break11/events{/privacy}',
                  'received_events_url': 'https://api.github.com/users/break11/received_events',
                  'type': 'User',
                  'site_admin': False
                  },
    'parents': [{'sha': '0992c5d5edc4a9c0e8c2702dd9def4dbd85f76cd',
                 'url': 'https://api.github.com/repos/break11/playrix_test/commits/0992c5d5edc4a9c0e8c2702dd9def4dbd85f76cd',
                 'html_url': 'https://github.com/break11/playrix_test/commit/0992c5d5edc4a9c0e8c2702dd9def4dbd85f76cd'
                 }]
}
|
992,698 | f8ad2bc76e3cb5f49c85586cbca9b5ee3a96e8b9 | #!/usr/bin/env python
#encoding: utf-8
from PyQt4.QtGui import QWidget,QTableWidget,QTableWidgetItem,QApplication,QTableWidgetSelectionRange,QAbstractItemView
from PyQt4.QtCore import Qt, QString,QStringList
from PyQt4.QtTest import QTest
import unittest
import sys
sys.path.append("..")
import main
import random
from view.MainWindow import MainWindow
from view.Utils import initParent
class mytest(unittest.TestCase):
    """GUI tests for IMAC microcode parsing: each test writes generated
    instruction strings into the microcode table, triggers dataParser(), and
    asserts the cell keeps the default background (parsed OK) and that the
    resulting microcode maps to the expected MCC lookup value."""

    def setUp(self):
        """Build a fresh MainWindow and expose its microcode table widget;
        condlist holds the condition suffixes shared by all tests."""
        self.main = MainWindow()
        self.inittablewidget = self.main.microcodeTableWidget
        global condlist
        condlist = ["","@(c)","@(!c)"]

    def tearDown(self):
        # Drop references so the Qt widgets can be released between tests.
        self.main = None
        self.inittablewidget = None

    def testImac_0(self):
        # mr += tM*tN(pre) -> biuX : result routed to a BIU; expects MCC "9-10".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["l","u,l","l,cr","l,b","l,h"]
        biulist = ["biu0","biu1","biu2"]
        for pre in prelist:
            for biu in biulist:
                for m in xrange(0,4):
                    for n in xrange(0,4):
                        for cond in condlist:
                            text = "mr+=t%s*t%s(%s)->%s%s"%(m,n,pre,biu,cond)
                            #print text
                            selranges = QTableWidgetSelectionRange(row, column, row, column)
                            self.inittablewidget.setRangeSelected(selranges, True)
                            self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                            #print self.inittablewidget.item(row,column).text()
                            self.inittablewidget.dataParser(row, column)
                            self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                            self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"9-10")

    def testImac_1(self):
        # mr += tM*tN(pre) -> m[T] : result routed to memory; expects MCC "9-10".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["l","u,l","l,cr","l,b","l,h"]
        #biulist = ["biu0","biu1","biu2"]
        for pre in prelist:
            for m in xrange(0,4):
                for n in xrange(0,4):
                    for t in xrange(0,127):
                        for cond in condlist:
                            text = "mr+=t%s*t%s(%s)->m[%s]%s"%(m,n,pre,t,cond)
                            #print text
                            selranges = QTableWidgetSelectionRange(row, column, row, column)
                            self.inittablewidget.setRangeSelected(selranges, True)
                            self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                            self.inittablewidget.dataParser(row, column)
                            self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                            self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"9-10")

    def testImac_2(self):
        # mr += tM*tN(pre) -> macc.tT : result routed to ialu/imac/falu; expects MCC "8-9".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["l","u,l","l,cr","l,b","l,h"]
        macclist = ["ialu","imac","falu"]
        for pre in prelist:
            for macc in macclist:
                for m in xrange(0,4):
                    for n in xrange(0,4):
                        for t in xrange(0,4):
                            for cond in condlist:
                                text = "mr += t%s*t%s(%s)->%s.t%s%s"%(m,n,pre,macc,t,cond)
                                #print text
                                selranges = QTableWidgetSelectionRange(row, column, row, column)
                                self.inittablewidget.setRangeSelected(selranges, True)
                                self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                                #print self.inittablewidget.item(row,column).text()
                                self.inittablewidget.dataParser(row, column)
                                self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                                self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"8-9")

    def testImac_3(self):
        # mr += tM*tN(pre) -> shu.tT : result routed to a SHU; expects MCC "9-10".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["l","u,l","l,cr","l,b","l,h"]
        shulist = ["shu0","shu1"]
        for pre in prelist:
            for shu in shulist:
                for m in xrange(0,4):
                    for n in xrange(0,4):
                        for t in xrange(0,4):
                            for cond in condlist:
                                text = "mr += t%s*t%s(%s)->%s.t%s%s"%(m,n,pre,shu,t,cond)
                                #print text
                                selranges = QTableWidgetSelectionRange(row, column, row, column)
                                self.inittablewidget.setRangeSelected(selranges, True)
                                self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                                #print self.inittablewidget.item(row,column).text()
                                self.inittablewidget.dataParser(row, column)
                                self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                                self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"9-10")

    def testImac_4(self):
        # mr += tM(pre) -> biuX : accumulate-only form routed to a BIU; expects MCC "5".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["","(u)","(cr)","(b)","(h)","(u,cr)","(u,b)","(u,h)","(cr,b)","(cr,h)"]
        biulist = ["biu0","biu1","biu2"]
        for pre in prelist:
            for biu in biulist:
                for m in xrange(0,4):
                    for cond in condlist:
                        text = "mr += t%s%s->%s%s"%(m,pre,biu,cond)
                        #print text
                        selranges = QTableWidgetSelectionRange(row, column, row, column)
                        self.inittablewidget.setRangeSelected(selranges, True)
                        self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                        #print self.inittablewidget.item(row,column).text()
                        self.inittablewidget.dataParser(row, column)
                        self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                        self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"5")

    def testImac_5(self):
        # mr += tM(pre) -> m[T] : accumulate-only form routed to memory; expects MCC "5".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["","(u)","(cr)","(b)","(h)","(u,cr)","(u,b)","(u,h)","(cr,b)","(cr,h)"]
        #biulist = ["biu0","biu1","biu2"]
        for pre in prelist:
            for m in xrange(0,4):
                for t in xrange(0,127):
                    for cond in condlist:
                        text = "mr += t%s%s->m[%s]%s"%(m,pre,t,cond)
                        #print text
                        selranges = QTableWidgetSelectionRange(row, column, row, column)
                        self.inittablewidget.setRangeSelected(selranges, True)
                        self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                        self.inittablewidget.dataParser(row, column)
                        self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                        self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"5")

    def testImac_6(self):
        # mr += tM(pre) -> macc.tT : accumulate-only form routed to ialu/imac/falu; expects MCC "4".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["","(u)","(cr)","(b)","(h)","(u,cr)","(u,b)","(u,h)","(cr,b)","(cr,h)"]
        macclist = ["ialu","imac","falu"]
        for pre in prelist:
            for macc in macclist:
                for m in xrange(0,4):
                    for t in xrange(0,4):
                        for cond in condlist:
                            text = "mr += t%s%s->%s.t%s%s"%(m,pre,macc,t,cond)
                            #print text
                            selranges = QTableWidgetSelectionRange(row, column, row, column)
                            self.inittablewidget.setRangeSelected(selranges, True)
                            self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                            #print self.inittablewidget.item(row,column).text()
                            self.inittablewidget.dataParser(row, column)
                            self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                            self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"4")

    def testImac_7(self):
        # mr += tM(pre) -> shu.tT : accumulate-only form routed to a SHU; expects MCC "5".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["","(u)","(cr)","(b)","(h)","(u,cr)","(u,b)","(u,h)","(cr,b)","(cr,h)"]
        shulist = ["shu0","shu1"]
        for pre in prelist:
            for shu in shulist:
                for m in xrange(0,4):
                    for t in xrange(0,4):
                        for cond in condlist:
                            text = "mr += t%s%s->%s.t%s%s"%(m,pre,shu,t,cond)
                            #print text
                            selranges = QTableWidgetSelectionRange(row, column, row, column)
                            self.inittablewidget.setRangeSelected(selranges, True)
                            self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                            #print self.inittablewidget.item(row,column).text()
                            self.inittablewidget.dataParser(row, column)
                            self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                            self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"5")

    def testImac_8(self):
        # mr += tM(pre) with no destination: accumulate in place; expects MCC "1".
        self.main.newFile()
        row = random.randint(0,1999)
        column = random.randint(0,19)
        prelist = ["","(u)","(cr)","(b)","(h)","(u,cr)","(u,b)","(u,h)","(cr,b)","(cr,h)"]
        for pre in prelist:
            for m in xrange(0,4):
                for cond in condlist:
                    text = "mr += t%s%s%s"%(m,pre,cond)
                    #print text
                    selranges = QTableWidgetSelectionRange(row, column, row, column)
                    self.inittablewidget.setRangeSelected(selranges, True)
                    self.inittablewidget.setItem(row, column,QTableWidgetItem(text))
                    #print self.inittablewidget.item(row,column).text()
                    self.inittablewidget.dataParser(row, column)
                    self.assertEqual(self.inittablewidget.item(row, column).background(),self.inittablewidget.defaultBackgroundColor)
                    self.assertEqual(self.inittablewidget.database.searchMcc(self.inittablewidget.mmpulite.result),"1")
# Build and run the suite directly so the file can be executed as a script.
suite = unittest.TestLoader().loadTestsFromTestCase(mytest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
992,699 | 59a3423cf4f0314258910eb508e29cef97f37c4b | import threading
from project.ctrl.Controller import Controller
from project.model.exception.problemException import ProblemException
from project.model.problem.EvolutionaryProblem import EvolutionaryProblem
from project.model.problem.Problem import Problem
from project.model.state.State import State
class ProblemController(Controller):
    """Drives meta-heuristic searches over an EvolutionaryProblem.

    Each search method (evolutionary, hillClimbing, pso, aco) is meant to run
    on its own worker thread and loops until either a solution with validity 0
    is found or the thread's ``continue_run`` attribute is set to False.
    Progress snapshots (solution, generation number, validity history) are
    published under ``self.lock`` so another thread can read them safely.
    """

    def __init__(self, problem: Problem):
        self.__problem = problem
        # Guards the snapshot fields below, which are read from other threads.
        self.lock = threading.Lock()
        self.solution = State()          # best solution seen so far (or status string)
        self.generationNumber = -1       # -1 means no algorithm has run yet
        self.attemptValidity = -1        # validity of the current best (0 == solved)
        self.validities = []             # validity per generation, for plotting/inspection

    def setProblem(self, problem: Problem):
        """Replace the problem instance the next search will operate on."""
        self.__problem = problem

    def getProblem(self):
        """Return the current problem instance."""
        return self.__problem

    def evolutionary(self):
        """Run a generational evolutionary search until solved or stopped.

        :raises ProblemException: if the problem is not an EvolutionaryProblem.
        """
        self.__saveSolution("No algorithm is running", -1, -1)
        if not isinstance(self.__problem, EvolutionaryProblem):
            raise ProblemException("Cannot perform Evolutionary Algorithm on non Evolutionary Problem")
        number = 0
        self.validities = []
        thread = threading.current_thread()
        # Loop while the owning thread's continue_run attribute is not set to False.
        while getattr(thread, "continue_run", True):
            number += 1
            self.__problem.nextGeneration()
            current = self.__problem.getBest()
            validity = self.__problem.validity(current)
            self.__saveSolution(current, number, validity)
            if validity == 0:
                return

    def hillClimbing(self):
        """Run a hill-climbing search from a random starting permutation set.

        :raises ProblemException: if the problem is not an EvolutionaryProblem.
        """
        self.__saveSolution("No algorithm is running", -1, -1)
        if not isinstance(self.__problem, EvolutionaryProblem):
            # Fixed message: the check requires an EvolutionaryProblem.
            raise ProblemException("Cannot perform Hill Climbing Algorithm on non Evolutionary Problem")
        current = self.__problem.getRandomPermutationSet()
        number = 0
        self.validities = []
        thread = threading.current_thread()
        # Loop while the owning thread's continue_run attribute is not set to False.
        while getattr(thread, "continue_run", True):
            number += 1
            self.__problem.setNeighborhood(current=current)
            current = self.__problem.getBest()
            validity = self.__problem.validity(current)
            self.__saveSolution(current, number, validity)
            if validity == 0:
                return

    def pso(self):
        """Run Particle Swarm Optimisation until solved or stopped.

        :raises ProblemException: if the problem is not an EvolutionaryProblem.
        """
        # Fixed typo in the status message ("algoritm" -> "algorithm").
        self.__saveSolution("No algorithm is running", -1, -1)
        if not isinstance(self.__problem, EvolutionaryProblem):
            raise ProblemException("Cannot perform Particle Swarm Optimisation Algorithm on non Evolutionary Problem")
        self.__problem.makeParticles()
        number = 0
        self.validities = []
        thread = threading.current_thread()
        # Loop while the owning thread's continue_run attribute is not set to False.
        while getattr(thread, "continue_run", True):
            number += 1
            self.__problem.psoNextStep()
            current = self.__problem.getBestParticle().getPersonalBest()
            validity = self.__problem.validity(current)
            self.__saveSolution(current, number, validity)
            if validity == 0:
                return

    def aco(self):
        """Run Ant Colony Optimisation until solved or stopped.

        :raises ProblemException: if the problem is not an EvolutionaryProblem.
        """
        self.__saveSolution("No algorithm is running", -1, -1)
        if not isinstance(self.__problem, EvolutionaryProblem):
            # Fixed typos: "Anc" -> "Ant", and the check requires an EvolutionaryProblem.
            raise ProblemException("Cannot perform Ant Colony Optimisation Algorithm on non Evolutionary Problem")
        # self.__problem.initializeNullGeneration()
        # or without initializeNullGeneration to avoid same solution everywhere
        number = 0
        self.validities = []
        thread = threading.current_thread()
        pheromoneMatrix = self.__problem.getPheromoneSolution()
        # Loop while the owning thread's continue_run attribute is not set to False.
        while getattr(thread, "continue_run", True):
            number += 1
            self.__problem.acoNextStep(pheromoneMatrix)
            self.__problem.updatePheromone(pheromoneMatrix)
            current = self.__problem.getBest()
            validity = self.__problem.validity(current)
            self.__saveSolution(current, number, validity)
            if validity == 0:
                return

    def __saveSolution(self, solution, generation, validity):
        """Atomically publish the latest search snapshot.

        :param solution: best candidate so far (or a status string).
        :param generation: 1-based generation counter, -1 when idle.
        :param validity: constraint-violation score; 0 means solved.
        :return: True, always (kept for call-site compatibility).
        """
        with self.lock:
            self.solution = solution
            self.generationNumber = generation
            self.attemptValidity = validity
            self.validities.append(validity)
            return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.